author		Javier Martin <javier.martin@vista-silicon.com>	2012-03-22 09:54:01 -0400
committer	Vinod Koul <vinod.koul@linux.intel.com>	2012-03-26 02:01:29 -0400
commit		6bd081277ea03e2b165fc68534b61bc64db93990 (patch)
tree		288ba74063dcc002a347cefcfb65e70bc5b63d22 /drivers
parent		beeaa103eecc7a132682c40867f0ef70655383a5 (diff)
dmaengine: imx-dma: merge old dma-v1.c with imx-dma.c
It is mainly a simple merge, changing the prefix of some functions to fit the imx-dma namings. As there are no users of the old dma-v1.c API, we can safely remove this file.

Signed-off-by: Javier Martin <javier.martin@vista-silicon.com>
Acked-by: Sascha Hauer <s.hauer@pengutronix.de>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
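The merge turns the exported dma-v1 entry points into static helpers with imx-dma naming, as the hunks below show: imx_dma_config_channel() becomes imxdma_config_channel_hw(), imx_dma_setup_single() becomes imxdma_setup_single_hw(), imx_dma_setup_sg() becomes imxdma_setup_sg_hw(), and imx_dma_enable()/imx_dma_disable() become imxdma_enable_hw()/imxdma_disable_hw(). With imx_dma_request_by_prio() gone as well, any new user would obtain a channel through the generic dmaengine allocator; a minimal hypothetical client sketch (the function name and the absent filter are illustrative, not part of this patch):

/* Hypothetical dmaengine client: channel allocation after dma-v1 removal. */
#include <linux/dmaengine.h>

static struct dma_chan *example_request_chan(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* a platform-specific filter fn and argument could replace the NULLs */
	return dma_request_channel(mask, NULL, NULL);
}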
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/dma/Kconfig	|   1 -
-rw-r--r--	drivers/dma/imx-dma.c	| 604 +++++++++-
2 files changed, 552 insertions(+), 53 deletions(-)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 65c61dba66d..b4a53a0eeaf 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -230,7 +230,6 @@ config IMX_SDMA
 
 config IMX_DMA
 	tristate "i.MX DMA support"
-	depends on IMX_HAVE_DMA_V1
 	select DMA_ENGINE
 	help
 	  Support the i.MX DMA engine. This engine is integrated into
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 304839a99ae..fbb1aaad612 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -14,7 +14,6 @@
  * http://www.opensource.org/licenses/gpl-license.html
  * http://www.gnu.org/copyleft/gpl.html
  */
-
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/types.h>
@@ -25,15 +24,89 @@
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
 #include <linux/platform_device.h>
+#include <linux/clk.h>
 #include <linux/dmaengine.h>
 #include <linux/module.h>
 
 #include <asm/irq.h>
-#include <mach/dma-v1.h>
+#include <mach/dma.h>
 #include <mach/hardware.h>
 
 #include "dmaengine.h"
 #define IMXDMA_MAX_CHAN_DESCRIPTORS	16
+#define IMX_DMA_CHANNELS	16
+
+#define DMA_MODE_READ		0
+#define DMA_MODE_WRITE		1
+#define DMA_MODE_MASK		1
+
+#define IMX_DMA_LENGTH_LOOP	((unsigned int)-1)
+#define IMX_DMA_MEMSIZE_32	(0 << 4)
+#define IMX_DMA_MEMSIZE_8	(1 << 4)
+#define IMX_DMA_MEMSIZE_16	(2 << 4)
+#define IMX_DMA_TYPE_LINEAR	(0 << 10)
+#define IMX_DMA_TYPE_2D		(1 << 10)
+#define IMX_DMA_TYPE_FIFO	(2 << 10)
+
+#define IMX_DMA_ERR_BURST	(1 << 0)
+#define IMX_DMA_ERR_REQUEST	(1 << 1)
+#define IMX_DMA_ERR_TRANSFER	(1 << 2)
+#define IMX_DMA_ERR_BUFFER	(1 << 3)
+#define IMX_DMA_ERR_TIMEOUT	(1 << 4)
+
+#define DMA_DCR		0x00	/* Control Register */
+#define DMA_DISR	0x04	/* Interrupt status Register */
+#define DMA_DIMR	0x08	/* Interrupt mask Register */
+#define DMA_DBTOSR	0x0c	/* Burst timeout status Register */
+#define DMA_DRTOSR	0x10	/* Request timeout Register */
+#define DMA_DSESR	0x14	/* Transfer Error Status Register */
+#define DMA_DBOSR	0x18	/* Buffer overflow status Register */
+#define DMA_DBTOCR	0x1c	/* Burst timeout control Register */
+#define DMA_WSRA	0x40	/* W-Size Register A */
+#define DMA_XSRA	0x44	/* X-Size Register A */
+#define DMA_YSRA	0x48	/* Y-Size Register A */
+#define DMA_WSRB	0x4c	/* W-Size Register B */
+#define DMA_XSRB	0x50	/* X-Size Register B */
+#define DMA_YSRB	0x54	/* Y-Size Register B */
+#define DMA_SAR(x)	(0x80 + ((x) << 6))	/* Source Address Registers */
+#define DMA_DAR(x)	(0x84 + ((x) << 6))	/* Destination Address Registers */
+#define DMA_CNTR(x)	(0x88 + ((x) << 6))	/* Count Registers */
+#define DMA_CCR(x)	(0x8c + ((x) << 6))	/* Control Registers */
+#define DMA_RSSR(x)	(0x90 + ((x) << 6))	/* Request source select Registers */
+#define DMA_BLR(x)	(0x94 + ((x) << 6))	/* Burst length Registers */
+#define DMA_RTOR(x)	(0x98 + ((x) << 6))	/* Request timeout Registers */
+#define DMA_BUCR(x)	(0x98 + ((x) << 6))	/* Bus Utilization Registers */
+#define DMA_CCNR(x)	(0x9C + ((x) << 6))	/* Channel counter Registers */
+
+#define DCR_DRST	(1<<1)
+#define DCR_DEN		(1<<0)
+#define DBTOCR_EN	(1<<15)
+#define DBTOCR_CNT(x)	((x) & 0x7fff)
+#define CNTR_CNT(x)	((x) & 0xffffff)
+#define CCR_ACRPT	(1<<14)
+#define CCR_DMOD_LINEAR	(0x0 << 12)
+#define CCR_DMOD_2D	(0x1 << 12)
+#define CCR_DMOD_FIFO	(0x2 << 12)
+#define CCR_DMOD_EOBFIFO	(0x3 << 12)
+#define CCR_SMOD_LINEAR	(0x0 << 10)
+#define CCR_SMOD_2D	(0x1 << 10)
+#define CCR_SMOD_FIFO	(0x2 << 10)
+#define CCR_SMOD_EOBFIFO	(0x3 << 10)
+#define CCR_MDIR_DEC	(1<<9)
+#define CCR_MSEL_B	(1<<8)
+#define CCR_DSIZ_32	(0x0 << 6)
+#define CCR_DSIZ_8	(0x1 << 6)
+#define CCR_DSIZ_16	(0x2 << 6)
+#define CCR_SSIZ_32	(0x0 << 4)
+#define CCR_SSIZ_8	(0x1 << 4)
+#define CCR_SSIZ_16	(0x2 << 4)
+#define CCR_REN		(1<<3)
+#define CCR_RPT		(1<<2)
+#define CCR_FRC		(1<<1)
+#define CCR_CEN		(1<<0)
+#define RTOR_EN		(1<<15)
+#define RTOR_CLK	(1<<14)
+#define RTOR_PSC	(1<<13)
 
 enum imxdma_prep_type {
 	IMXDMA_DESC_MEMCPY,
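A note on the register map added above: each of the 16 channels gets a 64-byte window starting at 0x80, which is what the (x) << 6 in the DMA_*(x) macros encodes; as defined, DMA_RTOR(x) and DMA_BUCR(x) even share offset 0x98. A standalone sketch of the arithmetic (macros redeclared so it compiles on its own):

/* Channel register layout implied by the DMA_*(x) macros above:
 * a 0x40-byte window per channel, starting at offset 0x80. */
#define DMA_SAR(x)  (0x80 + ((x) << 6))
#define DMA_CCR(x)  (0x8c + ((x) << 6))

_Static_assert(DMA_SAR(1) - DMA_SAR(0) == 0x40, "64-byte channel stride");
_Static_assert(DMA_CCR(15) == 0x8c + 15 * 0x40, "last of 16 channel windows");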
@@ -42,6 +115,39 @@ enum imxdma_prep_type {
 	IMXDMA_DESC_CYCLIC,
 };
 
+/*
+ * struct imxdma_channel_internal - i.MX specific DMA extension
+ * @name: name specified by DMA client
+ * @irq_handler: client callback for end of transfer
+ * @err_handler: client callback for error condition
+ * @data: client's context data for callbacks
+ * @dma_mode: direction of the transfer %DMA_MODE_READ or %DMA_MODE_WRITE
+ * @sg: pointer to the actual read/written chunk for scatter-gather emulation
+ * @resbytes: total residual number of bytes to transfer
+ *            (it can be lower or same as sum of SG mapped chunk sizes)
+ * @sgcount: number of chunks to be read/written
+ *
+ * Structure is used for IMX DMA processing. It would probably be good
+ * to use @struct dma_struct in the future for external interfacing and use
+ * @struct imxdma_channel_internal only as extension to it.
+ */
+
+struct imxdma_channel_internal {
+	void *data;
+	unsigned int dma_mode;
+	struct scatterlist *sg;
+	unsigned int resbytes;
+
+	int in_use;
+
+	u32 ccr_from_device;
+	u32 ccr_to_device;
+
+	struct timer_list watchdog;
+
+	int hw_chaining;
+};
+
 struct imxdma_desc {
 	struct list_head node;
 	struct dma_async_tx_descriptor desc;
@@ -64,9 +170,9 @@ struct imxdma_desc {
 };
 
 struct imxdma_channel {
+	struct imxdma_channel_internal internal;
 	struct imxdma_engine *imxdma;
 	unsigned int channel;
-	unsigned int imxdma_channel;
 
 	struct tasklet_struct dma_tasklet;
 	struct list_head ld_free;
@@ -84,13 +190,11 @@ struct imxdma_channel {
 	struct scatterlist *sg_list;
 };
 
-#define MAX_DMA_CHANNELS 8
-
 struct imxdma_engine {
 	struct device			*dev;
 	struct device_dma_parameters	dma_parms;
 	struct dma_device		dma_device;
-	struct imxdma_channel		channel[MAX_DMA_CHANNELS];
+	struct imxdma_channel		channel[IMX_DMA_CHANNELS];
 };
 
 static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
@@ -111,28 +215,381 @@ static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
 	return false;
 }
 
-static void imxdma_irq_handler(int channel, void *data)
+/* TODO: put this inside any struct */
+static void __iomem *imx_dmav1_baseaddr;
+static struct clk *dma_clk;
+
+static void imx_dmav1_writel(unsigned val, unsigned offset)
+{
+	__raw_writel(val, imx_dmav1_baseaddr + offset);
+}
+
+static unsigned imx_dmav1_readl(unsigned offset)
 {
-	struct imxdma_channel *imxdmac = data;
+	return __raw_readl(imx_dmav1_baseaddr + offset);
+}
 
-	tasklet_schedule(&imxdmac->dma_tasklet);
+static int imxdma_hw_chain(struct imxdma_channel_internal *imxdma)
+{
+	if (cpu_is_mx27())
+		return imxdma->hw_chaining;
+	else
+		return 0;
+}
+
+/*
+ * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
+ */
+static inline int imxdma_sg_next(struct imxdma_channel *imxdmac, struct scatterlist *sg)
+{
+	struct imxdma_channel_internal *imxdma = &imxdmac->internal;
+	unsigned long now;
+
+	now = min(imxdma->resbytes, sg->length);
+	if (imxdma->resbytes != IMX_DMA_LENGTH_LOOP)
+		imxdma->resbytes -= now;
+
+	if ((imxdma->dma_mode & DMA_MODE_MASK) == DMA_MODE_READ)
+		imx_dmav1_writel(sg->dma_address, DMA_DAR(imxdmac->channel));
+	else
+		imx_dmav1_writel(sg->dma_address, DMA_SAR(imxdmac->channel));
+
+	imx_dmav1_writel(now, DMA_CNTR(imxdmac->channel));
+
+	pr_debug("imxdma%d: next sg chunk dst 0x%08x, src 0x%08x, "
+		"size 0x%08x\n", imxdmac->channel,
+		 imx_dmav1_readl(DMA_DAR(imxdmac->channel)),
+		 imx_dmav1_readl(DMA_SAR(imxdmac->channel)),
+		 imx_dmav1_readl(DMA_CNTR(imxdmac->channel)));
+
+	return now;
+}
+
+static int
+imxdma_setup_single_hw(struct imxdma_channel *imxdmac, dma_addr_t dma_address,
+		       unsigned int dma_length, unsigned int dev_addr,
+		       unsigned int dmamode)
+{
+	int channel = imxdmac->channel;
+
+	imxdmac->internal.sg = NULL;
+	imxdmac->internal.dma_mode = dmamode;
+
+	if (!dma_address) {
+		printk(KERN_ERR "imxdma%d: imx_dma_setup_single null address\n",
+		       channel);
+		return -EINVAL;
+	}
+
+	if (!dma_length) {
+		printk(KERN_ERR "imxdma%d: imx_dma_setup_single zero length\n",
+		       channel);
+		return -EINVAL;
+	}
+
+	if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) {
+		pr_debug("imxdma%d: %s dma_address=0x%08x dma_length=%d "
+			"dev_addr=0x%08x for read\n",
+			channel, __func__, (unsigned int)dma_address,
+			dma_length, dev_addr);
+
+		imx_dmav1_writel(dev_addr, DMA_SAR(channel));
+		imx_dmav1_writel(dma_address, DMA_DAR(channel));
+		imx_dmav1_writel(imxdmac->internal.ccr_from_device, DMA_CCR(channel));
+	} else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) {
+		pr_debug("imxdma%d: %s dma_address=0x%08x dma_length=%d "
+			"dev_addr=0x%08x for write\n",
+			channel, __func__, (unsigned int)dma_address,
+			dma_length, dev_addr);
+
+		imx_dmav1_writel(dma_address, DMA_SAR(channel));
+		imx_dmav1_writel(dev_addr, DMA_DAR(channel));
+		imx_dmav1_writel(imxdmac->internal.ccr_to_device,
+				DMA_CCR(channel));
+	} else {
+		printk(KERN_ERR "imxdma%d: imx_dma_setup_single bad dmamode\n",
+		       channel);
+		return -EINVAL;
+	}
+
+	imx_dmav1_writel(dma_length, DMA_CNTR(channel));
+
+	return 0;
+}
+
+static void imxdma_enable_hw(struct imxdma_channel *imxdmac)
+{
+	int channel = imxdmac->channel;
+	unsigned long flags;
+
+	pr_debug("imxdma%d: imx_dma_enable\n", channel);
+
+	if (imxdmac->internal.in_use)
+		return;
+
+	local_irq_save(flags);
+
+	imx_dmav1_writel(1 << channel, DMA_DISR);
+	imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) & ~(1 << channel), DMA_DIMR);
+	imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) | CCR_CEN |
+		CCR_ACRPT, DMA_CCR(channel));
+
+	if ((cpu_is_mx21() || cpu_is_mx27()) &&
+			imxdmac->internal.sg && imxdma_hw_chain(&imxdmac->internal)) {
+		imxdmac->internal.sg = sg_next(imxdmac->internal.sg);
+		if (imxdmac->internal.sg) {
+			u32 tmp;
+			imxdma_sg_next(imxdmac, imxdmac->internal.sg);
+			tmp = imx_dmav1_readl(DMA_CCR(channel));
+			imx_dmav1_writel(tmp | CCR_RPT | CCR_ACRPT,
+				DMA_CCR(channel));
+		}
+	}
+	imxdmac->internal.in_use = 1;
+
+	local_irq_restore(flags);
+}
+
+static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
+{
+	int channel = imxdmac->channel;
+	unsigned long flags;
+
+	pr_debug("imxdma%d: imx_dma_disable\n", channel);
+
+	if (imxdma_hw_chain(&imxdmac->internal))
+		del_timer(&imxdmac->internal.watchdog);
+
+	local_irq_save(flags);
+	imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) | (1 << channel), DMA_DIMR);
+	imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) & ~CCR_CEN,
+			DMA_CCR(channel));
+	imx_dmav1_writel(1 << channel, DMA_DISR);
+	imxdmac->internal.in_use = 0;
+	local_irq_restore(flags);
+}
+
+static int
+imxdma_config_channel_hw(struct imxdma_channel *imxdmac, unsigned int config_port,
+			 unsigned int config_mem, unsigned int dmareq, int hw_chaining)
+{
+	int channel = imxdmac->channel;
+	u32 dreq = 0;
+
+	imxdmac->internal.hw_chaining = 0;
+
+	if (hw_chaining) {
+		imxdmac->internal.hw_chaining = 1;
+		if (!imxdma_hw_chain(&imxdmac->internal))
+			return -EINVAL;
+	}
+
+	if (dmareq)
+		dreq = CCR_REN;
+
+	imxdmac->internal.ccr_from_device = config_port | (config_mem << 2) | dreq;
+	imxdmac->internal.ccr_to_device = config_mem | (config_port << 2) | dreq;
+
+	imx_dmav1_writel(dmareq, DMA_RSSR(channel));
+
+	return 0;
+}
+
+static int
+imxdma_setup_sg_hw(struct imxdma_channel *imxdmac,
+		   struct scatterlist *sg, unsigned int sgcount,
+		   unsigned int dma_length, unsigned int dev_addr,
+		   unsigned int dmamode)
+{
+	int channel = imxdmac->channel;
+
+	if (imxdmac->internal.in_use)
+		return -EBUSY;
+
+	imxdmac->internal.sg = sg;
+	imxdmac->internal.dma_mode = dmamode;
+	imxdmac->internal.resbytes = dma_length;
+
+	if (!sg || !sgcount) {
+		printk(KERN_ERR "imxdma%d: imx_dma_setup_sg empty sg list\n",
+		       channel);
+		return -EINVAL;
+	}
+
+	if (!sg->length) {
+		printk(KERN_ERR "imxdma%d: imx_dma_setup_sg zero length\n",
+		       channel);
+		return -EINVAL;
+	}
+
+	if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) {
+		pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d "
+			"dev_addr=0x%08x for read\n",
+			channel, __func__, sg, sgcount, dma_length, dev_addr);
+
+		imx_dmav1_writel(dev_addr, DMA_SAR(channel));
+		imx_dmav1_writel(imxdmac->internal.ccr_from_device, DMA_CCR(channel));
+	} else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) {
+		pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d "
+			"dev_addr=0x%08x for write\n",
+			channel, __func__, sg, sgcount, dma_length, dev_addr);
+
+		imx_dmav1_writel(dev_addr, DMA_DAR(channel));
+		imx_dmav1_writel(imxdmac->internal.ccr_to_device, DMA_CCR(channel));
+	} else {
+		printk(KERN_ERR "imxdma%d: imx_dma_setup_sg bad dmamode\n",
+		       channel);
+		return -EINVAL;
+	}
+
+	imxdma_sg_next(imxdmac, sg);
+
+	return 0;
 }
 
-static void imxdma_err_handler(int channel, void *data, int error)
+static void imxdma_watchdog(unsigned long data)
 {
-	struct imxdma_channel *imxdmac = data;
+	struct imxdma_channel *imxdmac = (struct imxdma_channel *)data;
+	int channel = imxdmac->channel;
 
+	imx_dmav1_writel(0, DMA_CCR(channel));
+	imxdmac->internal.in_use = 0;
+	imxdmac->internal.sg = NULL;
+
+	/* Tasklet watchdog error handler */
 	tasklet_schedule(&imxdmac->dma_tasklet);
+	pr_debug("imxdma%d: watchdog timeout!\n", imxdmac->channel);
+}
+
+static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
+{
+	struct imxdma_engine *imxdma = dev_id;
+	struct imxdma_channel_internal *internal;
+	unsigned int err_mask;
+	int i, disr;
+	int errcode;
+
+	disr = imx_dmav1_readl(DMA_DISR);
+
+	err_mask = imx_dmav1_readl(DMA_DBTOSR) |
+		   imx_dmav1_readl(DMA_DRTOSR) |
+		   imx_dmav1_readl(DMA_DSESR) |
+		   imx_dmav1_readl(DMA_DBOSR);
+
+	if (!err_mask)
+		return IRQ_HANDLED;
+
+	imx_dmav1_writel(disr & err_mask, DMA_DISR);
+
+	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
+		if (!(err_mask & (1 << i)))
+			continue;
+		internal = &imxdma->channel[i].internal;
+		errcode = 0;
+
+		if (imx_dmav1_readl(DMA_DBTOSR) & (1 << i)) {
+			imx_dmav1_writel(1 << i, DMA_DBTOSR);
+			errcode |= IMX_DMA_ERR_BURST;
+		}
+		if (imx_dmav1_readl(DMA_DRTOSR) & (1 << i)) {
+			imx_dmav1_writel(1 << i, DMA_DRTOSR);
+			errcode |= IMX_DMA_ERR_REQUEST;
+		}
+		if (imx_dmav1_readl(DMA_DSESR) & (1 << i)) {
+			imx_dmav1_writel(1 << i, DMA_DSESR);
+			errcode |= IMX_DMA_ERR_TRANSFER;
+		}
+		if (imx_dmav1_readl(DMA_DBOSR) & (1 << i)) {
+			imx_dmav1_writel(1 << i, DMA_DBOSR);
+			errcode |= IMX_DMA_ERR_BUFFER;
+		}
+		/* Tasklet error handler */
+		tasklet_schedule(&imxdma->channel[i].dma_tasklet);
+
+		printk(KERN_WARNING
+		       "DMA timeout on channel %d -%s%s%s%s\n", i,
+		       errcode & IMX_DMA_ERR_BURST ?    " burst" : "",
+		       errcode & IMX_DMA_ERR_REQUEST ?  " request" : "",
+		       errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
+		       errcode & IMX_DMA_ERR_BUFFER ?   " buffer" : "");
+	}
+	return IRQ_HANDLED;
 }
 
-static void imxdma_progression(int channel, void *data,
-		struct scatterlist *sg)
+static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
 {
-	struct imxdma_channel *imxdmac = data;
+	struct imxdma_channel_internal *imxdma = &imxdmac->internal;
+	int chno = imxdmac->channel;
+
+	if (imxdma->sg) {
+		u32 tmp;
+		imxdma->sg = sg_next(imxdma->sg);
+
+		if (imxdma->sg) {
+			imxdma_sg_next(imxdmac, imxdma->sg);
+
+			tmp = imx_dmav1_readl(DMA_CCR(chno));
+
+			if (imxdma_hw_chain(imxdma)) {
+				/* FIXME: The timeout should probably be
+				 * configurable
+				 */
+				mod_timer(&imxdma->watchdog,
+					jiffies + msecs_to_jiffies(500));
+
+				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
+				imx_dmav1_writel(tmp, DMA_CCR(chno));
+			} else {
+				imx_dmav1_writel(tmp & ~CCR_CEN, DMA_CCR(chno));
+				tmp |= CCR_CEN;
+			}
+
+			imx_dmav1_writel(tmp, DMA_CCR(chno));
+
+			if (imxdma_chan_is_doing_cyclic(imxdmac))
+				/* Tasklet progression */
+				tasklet_schedule(&imxdmac->dma_tasklet);
 
+			return;
+		}
+
+		if (imxdma_hw_chain(imxdma)) {
+			del_timer(&imxdma->watchdog);
+			return;
+		}
+	}
+
+	imx_dmav1_writel(0, DMA_CCR(chno));
+	imxdma->in_use = 0;
+	/* Tasklet irq */
 	tasklet_schedule(&imxdmac->dma_tasklet);
 }
 
+static irqreturn_t dma_irq_handler(int irq, void *dev_id)
+{
+	struct imxdma_engine *imxdma = dev_id;
+	struct imxdma_channel_internal *internal;
+	int i, disr;
+
+	if (cpu_is_mx21() || cpu_is_mx27())
+		imxdma_err_handler(irq, dev_id);
+
+	disr = imx_dmav1_readl(DMA_DISR);
+
+	pr_debug("imxdma: dma_irq_handler called, disr=0x%08x\n",
+		     disr);
+
+	imx_dmav1_writel(disr, DMA_DISR);
+	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
+		if (disr & (1 << i)) {
+			internal = &imxdma->channel[i].internal;
+			dma_irq_handle_channel(&imxdma->channel[i]);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
 static int imxdma_xfer_desc(struct imxdma_desc *d)
 {
 	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
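One non-obvious detail in imxdma_config_channel_hw() above: it derives both cached CCR values from the same pair of configuration words. In the IMX_DMA_* encoding the memory-size field sits at bits 4-5 and the transfer-type field at bits 10-11, exactly two bits below the destination-side CCR fields (DSIZ at bits 6-7, DMOD at bits 12-13), so shifting a configuration left by two converts a source-side layout into the destination-side one. A compile-time sketch of that shift, reusing only #defines from this patch (redeclared so the check stands alone):

/* (IMX_DMA_MEMSIZE_x | IMX_DMA_TYPE_x) << 2 lands on the CCR_DSIZ_x and
 * CCR_DMOD_x fields; unshifted it already sits in the CCR_SSIZ_x and
 * CCR_SMOD_x position. */
#define IMX_DMA_MEMSIZE_16  (2 << 4)
#define IMX_DMA_TYPE_FIFO   (2 << 10)
#define CCR_DSIZ_16         (0x2 << 6)
#define CCR_DMOD_FIFO       (0x2 << 12)
#define CCR_SSIZ_16         (0x2 << 4)
#define CCR_SMOD_FIFO       (0x2 << 10)

#define CFG (IMX_DMA_MEMSIZE_16 | IMX_DMA_TYPE_FIFO)

_Static_assert((CFG << 2) == (CCR_DSIZ_16 | CCR_DMOD_FIFO), "destination side");
_Static_assert(CFG == (CCR_SSIZ_16 | CCR_SMOD_FIFO), "source side");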
@@ -141,31 +598,24 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
 	/* Configure and enable */
 	switch (d->type) {
 	case IMXDMA_DESC_MEMCPY:
-		ret = imx_dma_config_channel(imxdmac->imxdma_channel,
+		ret = imxdma_config_channel_hw(imxdmac,
 			d->config_port, d->config_mem, 0, 0);
 		if (ret < 0)
 			return ret;
-		ret = imx_dma_setup_single(imxdmac->imxdma_channel, d->src,
+		ret = imxdma_setup_single_hw(imxdmac, d->src,
 			d->len, d->dest, d->dmamode);
 		if (ret < 0)
 			return ret;
 		break;
+
+	/* Cyclic transfer is the same as slave_sg with special sg configuration. */
 	case IMXDMA_DESC_CYCLIC:
-		ret = imx_dma_setup_progression_handler(imxdmac->imxdma_channel,
-						imxdma_progression);
-		if (ret < 0)
-			return ret;
-		/*
-		 * We fall through here since cyclic transfer is the same as
-		 * slave_sg adding a progression handler and a specific sg
-		 * configuration which is done in 'imxdma_prep_dma_cyclic'.
-		 */
 	case IMXDMA_DESC_SLAVE_SG:
 		if (d->dmamode == DMA_MODE_READ)
-			ret = imx_dma_setup_sg(imxdmac->imxdma_channel, d->sg,
+			ret = imxdma_setup_sg_hw(imxdmac, d->sg,
 				d->sgcount, d->len, d->src, d->dmamode);
 		else
-			ret = imx_dma_setup_sg(imxdmac->imxdma_channel, d->sg,
+			ret = imxdma_setup_sg_hw(imxdmac, d->sg,
 				d->sgcount, d->len, d->dest, d->dmamode);
 		if (ret < 0)
 			return ret;
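Worth spelling out for the cyclic case above: imxdma_setup_sg_hw() stores dma_length in resbytes, and imxdma_sg_next() clamps every chunk with now = min(resbytes, sg->length), skipping the decrement when resbytes equals IMX_DMA_LENGTH_LOOP, i.e. (unsigned int)-1. That sentinel is what lets a cyclic transfer loop over its sg ring indefinitely. A toy model of just that bookkeeping (stub types, not kernel code):

#include <stdio.h>

#define IMX_DMA_LENGTH_LOOP ((unsigned int)-1)

struct stub_sg { unsigned int length; };	/* stand-in for scatterlist */

/* mirrors the resbytes logic at the top of imxdma_sg_next() */
static unsigned int next_chunk(unsigned int *resbytes, const struct stub_sg *sg)
{
	unsigned int now = *resbytes < sg->length ? *resbytes : sg->length;

	if (*resbytes != IMX_DMA_LENGTH_LOOP)
		*resbytes -= now;
	return now;
}

int main(void)
{
	struct stub_sg chunk = { .length = 4096 };
	unsigned int res = IMX_DMA_LENGTH_LOOP;

	/* cyclic: each call programs a full chunk and res never shrinks */
	printf("%u, still looping: %d\n",
	       next_chunk(&res, &chunk), res == IMX_DMA_LENGTH_LOOP);
	return 0;
}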
@@ -173,7 +623,7 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
 	default:
 		return -EINVAL;
 	}
-	imx_dma_enable(imxdmac->imxdma_channel);
+	imxdma_enable_hw(imxdmac);
 	return 0;
 }
 
@@ -225,7 +675,7 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 
 	switch (cmd) {
 	case DMA_TERMINATE_ALL:
-		imx_dma_disable(imxdmac->imxdma_channel);
+		imxdma_disable_hw(imxdmac);
 
 		spin_lock_irqsave(&imxdmac->lock, flags);
 		list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
@@ -255,16 +705,16 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		mode = IMX_DMA_MEMSIZE_32;
 		break;
 	}
-	ret = imx_dma_config_channel(imxdmac->imxdma_channel,
+	ret = imxdma_config_channel_hw(imxdmac,
 		mode | IMX_DMA_TYPE_FIFO,
 		IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
 		imxdmac->dma_request, 1);
 
 	if (ret)
 		return ret;
-
-	imx_dma_config_burstlen(imxdmac->imxdma_channel,
-			imxdmac->watermark_level * imxdmac->word_size);
+	/* Set burst length */
+	imx_dmav1_writel(imxdmac->watermark_level * imxdmac->word_size,
+			 DMA_BLR(imxdmac->channel));
 
 	return 0;
 	default:
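For context, watermark_level and word_size are presumably populated from the generic slave configuration in the DMA_SLAVE_CONFIG branch (not shown in this hunk); the driver multiplies them to get the burst length in bytes and writes it straight to DMA_BLR. On the client side that corresponds to dmaengine_slave_config(); a hedged sketch with placeholder values (the FIFO address, width, and burst are illustrative, not from this patch):

#include <linux/dmaengine.h>

static int example_cfg(struct dma_chan *chan, dma_addr_t fifo)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,	/* -> word_size */
		.src_maxburst	= 8,			/* -> watermark_level */
	};

	return dmaengine_slave_config(chan, &cfg);
}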
@@ -333,7 +783,7 @@ static void imxdma_free_chan_resources(struct dma_chan *chan)
 
 	spin_lock_irqsave(&imxdmac->lock, flags);
 
-	imx_dma_disable(imxdmac->imxdma_channel);
+	imxdma_disable_hw(imxdmac);
 	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
 	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
 
@@ -520,10 +970,51 @@ static void imxdma_issue_pending(struct dma_chan *chan)
 }
 
 static int __init imxdma_probe(struct platform_device *pdev)
 {
 	struct imxdma_engine *imxdma;
 	int ret, i;
 
+	if (cpu_is_mx1())
+		imx_dmav1_baseaddr = MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR);
+	else if (cpu_is_mx21())
+		imx_dmav1_baseaddr = MX21_IO_ADDRESS(MX21_DMA_BASE_ADDR);
+	else if (cpu_is_mx27())
+		imx_dmav1_baseaddr = MX27_IO_ADDRESS(MX27_DMA_BASE_ADDR);
+	else
+		return 0;
+
+	dma_clk = clk_get(NULL, "dma");
+	if (IS_ERR(dma_clk))
+		return PTR_ERR(dma_clk);
+	clk_enable(dma_clk);
+
+	/* reset DMA module */
+	imx_dmav1_writel(DCR_DRST, DMA_DCR);
+
+	if (cpu_is_mx1()) {
+		ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", imxdma);
+		if (ret) {
+			pr_crit("Can't register IRQ for DMA\n");
+			return ret;
+		}
+
+		ret = request_irq(MX1_DMA_ERR, imxdma_err_handler, 0, "DMA", imxdma);
+		if (ret) {
+			pr_crit("Can't register ERRIRQ for DMA\n");
+			free_irq(MX1_DMA_INT, NULL);
+			return ret;
+		}
+	}
+
+	/* enable DMA module */
+	imx_dmav1_writel(DCR_DEN, DMA_DCR);
+
+	/* clear all interrupts */
+	imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);
+
+	/* disable interrupts */
+	imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);
+
 	imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL);
 	if (!imxdma)
 		return -ENOMEM;
@@ -535,19 +1026,22 @@ static int __init imxdma_probe(struct platform_device *pdev)
 	dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);
 
 	/* Initialize channel parameters */
-	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
+	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
 		struct imxdma_channel *imxdmac = &imxdma->channel[i];
-
-		imxdmac->imxdma_channel = imx_dma_request_by_prio("dmaengine",
-				DMA_PRIO_MEDIUM);
-		if ((int)imxdmac->channel < 0) {
-			ret = -ENODEV;
-			goto err_init;
+		memset(&imxdmac->internal, 0, sizeof(imxdmac->internal));
+		if (cpu_is_mx21() || cpu_is_mx27()) {
+			ret = request_irq(MX2x_INT_DMACH0 + i,
+					  dma_irq_handler, 0, "DMA", imxdma);
+			if (ret) {
+				pr_crit("Can't register IRQ %d for DMA channel %d\n",
+					MX2x_INT_DMACH0 + i, i);
+				goto err_init;
+			}
+			init_timer(&imxdmac->internal.watchdog);
+			imxdmac->internal.watchdog.function = &imxdma_watchdog;
+			imxdmac->internal.watchdog.data = (unsigned long)imxdmac;
 		}
 
-		imx_dma_setup_handlers(imxdmac->imxdma_channel,
-			imxdma_irq_handler, imxdma_err_handler, imxdmac);
-
 		imxdmac->imxdma = imxdma;
 		spin_lock_init(&imxdmac->lock);
 
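The per-channel watchdog above uses the timer API of this kernel era: init_timer() plus hand-assigned function/data pointers, re-armed with mod_timer() while hardware chaining is in flight and cancelled with del_timer(). A self-contained sketch of the same pattern (names are illustrative, not from the patch):

#include <linux/timer.h>
#include <linux/jiffies.h>

struct example_chan {
	struct timer_list watchdog;
};

static void example_watchdog(unsigned long data)
{
	struct example_chan *c = (struct example_chan *)data;

	/* channel recovery would go here */
	(void)c;
}

static void example_arm(struct example_chan *c)
{
	init_timer(&c->watchdog);
	c->watchdog.function = &example_watchdog;
	c->watchdog.data = (unsigned long)c;

	/* fires in 500 ms unless mod_timer() is called again or
	 * del_timer() cancels it */
	mod_timer(&c->watchdog, jiffies + msecs_to_jiffies(500));
}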
@@ -593,9 +1087,13 @@ static int __init imxdma_probe(struct platform_device *pdev)
 	return 0;
 
 err_init:
-	while (--i >= 0) {
-		struct imxdma_channel *imxdmac = &imxdma->channel[i];
-		imx_dma_free(imxdmac->imxdma_channel);
+
+	if (cpu_is_mx21() || cpu_is_mx27()) {
+		while (--i >= 0)
+			free_irq(MX2x_INT_DMACH0 + i, NULL);
+	} else if (cpu_is_mx1()) {
+		free_irq(MX1_DMA_INT, NULL);
+		free_irq(MX1_DMA_ERR, NULL);
 	}
 
 	kfree(imxdma);
@@ -609,10 +1107,12 @@ static int __exit imxdma_remove(struct platform_device *pdev)
 
 	dma_async_device_unregister(&imxdma->dma_device);
 
-	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
-		struct imxdma_channel *imxdmac = &imxdma->channel[i];
-
-		imx_dma_free(imxdmac->imxdma_channel);
+	if (cpu_is_mx21() || cpu_is_mx27()) {
+		for (i = 0; i < IMX_DMA_CHANNELS; i++)
+			free_irq(MX2x_INT_DMACH0 + i, NULL);
+	} else if (cpu_is_mx1()) {
+		free_irq(MX1_DMA_INT, NULL);
+		free_irq(MX1_DMA_ERR, NULL);
 	}
 
 	kfree(imxdma);