author     Glenn Elliott <gelliott@cs.unc.edu>    2012-03-04 19:47:13 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>    2012-03-04 19:47:13 -0500
commit     c71c03bda1e86c9d5198c5d83f712e695c4f2a1e
tree       ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /arch/arm/plat-omap/dma.c
parent     ea53c912f8a86a8567697115b6a0d8152beee5c8
parent     6a00f206debf8a5c8899055726ad127dbeeed098
Merge branch 'mpi-master' into wip-k-fmlp

Conflicts:
	litmus/sched_cedf.c
Diffstat (limited to 'arch/arm/plat-omap/dma.c')
-rw-r--r--  arch/arm/plat-omap/dma.c  716
1 file changed, 321 insertions(+), 395 deletions(-)
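The core of this diff converts the DMA library into a platform driver: the file-local dma_read()/dma_write() macros and OMAP1 helpers such as clear_lch_regs() are replaced by hooks reached through the new platform-data pointer p. The sketch below shows the shape of those hooks as inferred only from the call sites in the diff (p->dma_read(CCR, lch), p->dma_write(l, CCR, lch), p->clear_dma(lch), p->show_dma_caps(), p->errata, p->dma_attr); the field order, the plain int register argument, and the exact signatures are assumptions, not copied from the real header.

/*
 * Minimal sketch of the platform data consumed by omap_system_dma_probe().
 * Names come from the diff; types/signatures are inferred, not authoritative.
 */
#include <linux/types.h>

struct omap_dma_dev_attr;                      /* per-SoC channel data; becomes 'd' in probe() */

struct omap_system_dma_plat_info {
	struct omap_dma_dev_attr *dma_attr;    /* lch_count, chan array, dev_caps */
	u32 errata;                            /* flags tested via IS_DMA_ERRATA() */
	void (*show_dma_caps)(void);
	void (*clear_lch_regs)(int lch);       /* OMAP1 only; may be NULL */
	void (*clear_dma)(int lch);
	void (*dma_write)(u32 val, int reg, int lch);
	u32 (*dma_read)(int reg, int lch);
};

With this indirection the mach-specific code registers an "omap_dma_system" platform device carrying these hooks, which is why omap_dma_base, the ioremap() call, and the per-register dma_read/dma_write macros all disappear from this file in the hunks below.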
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index ec7eddf9e525..c22217c2ee5f 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -15,6 +15,10 @@
15 * 15 *
16 * Support functions for the OMAP internal DMA channels. 16 * Support functions for the OMAP internal DMA channels.
17 * 17 *
18 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
19 * Converted DMA library into DMA platform driver.
20 * - G, Manjunath Kondaiah <manjugk@ti.com>
21 *
18 * This program is free software; you can redistribute it and/or modify 22 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License version 2 as 23 * it under the terms of the GNU General Public License version 2 as
20 * published by the Free Software Foundation. 24 * published by the Free Software Foundation.
@@ -30,6 +34,7 @@
30#include <linux/irq.h> 34#include <linux/irq.h>
31#include <linux/io.h> 35#include <linux/io.h>
32#include <linux/slab.h> 36#include <linux/slab.h>
37#include <linux/delay.h>
33 38
34#include <asm/system.h> 39#include <asm/system.h>
35#include <mach/hardware.h> 40#include <mach/hardware.h>
@@ -48,11 +53,15 @@ enum { DMA_CHAIN_STARTED, DMA_CHAIN_NOTSTARTED };
48#endif 53#endif
49 54
50#define OMAP_DMA_ACTIVE 0x01 55#define OMAP_DMA_ACTIVE 0x01
51#define OMAP2_DMA_CSR_CLEAR_MASK 0xffe 56#define OMAP2_DMA_CSR_CLEAR_MASK 0xffffffff
52 57
53#define OMAP_FUNC_MUX_ARM_BASE (0xfffe1000 + 0xec) 58#define OMAP_FUNC_MUX_ARM_BASE (0xfffe1000 + 0xec)
54 59
60static struct omap_system_dma_plat_info *p;
61static struct omap_dma_dev_attr *d;
62
55static int enable_1510_mode; 63static int enable_1510_mode;
64static u32 errata;
56 65
57static struct omap_dma_global_context_registers { 66static struct omap_dma_global_context_registers {
58 u32 dma_irqenable_l0; 67 u32 dma_irqenable_l0;
@@ -60,27 +69,6 @@ static struct omap_dma_global_context_registers {
60 u32 dma_gcr; 69 u32 dma_gcr;
61} omap_dma_global_context; 70} omap_dma_global_context;
62 71
63struct omap_dma_lch {
64 int next_lch;
65 int dev_id;
66 u16 saved_csr;
67 u16 enabled_irqs;
68 const char *dev_name;
69 void (*callback)(int lch, u16 ch_status, void *data);
70 void *data;
71
72#ifndef CONFIG_ARCH_OMAP1
73 /* required for Dynamic chaining */
74 int prev_linked_ch;
75 int next_linked_ch;
76 int state;
77 int chain_id;
78
79 int status;
80#endif
81 long flags;
82};
83
84struct dma_link_info { 72struct dma_link_info {
85 int *linked_dmach_q; 73 int *linked_dmach_q;
86 int no_of_lchs_linked; 74 int no_of_lchs_linked;
@@ -136,15 +124,6 @@ static int omap_dma_reserve_channels;
136 124
137static spinlock_t dma_chan_lock; 125static spinlock_t dma_chan_lock;
138static struct omap_dma_lch *dma_chan; 126static struct omap_dma_lch *dma_chan;
139static void __iomem *omap_dma_base;
140
141static const u8 omap1_dma_irq[OMAP1_LOGICAL_DMA_CH_COUNT] = {
142 INT_DMA_CH0_6, INT_DMA_CH1_7, INT_DMA_CH2_8, INT_DMA_CH3,
143 INT_DMA_CH4, INT_DMA_CH5, INT_1610_DMA_CH6, INT_1610_DMA_CH7,
144 INT_1610_DMA_CH8, INT_1610_DMA_CH9, INT_1610_DMA_CH10,
145 INT_1610_DMA_CH11, INT_1610_DMA_CH12, INT_1610_DMA_CH13,
146 INT_1610_DMA_CH14, INT_1610_DMA_CH15, INT_DMA_LCD
147};
148 127
149static inline void disable_lnk(int lch); 128static inline void disable_lnk(int lch);
150static void omap_disable_channel_irq(int lch); 129static void omap_disable_channel_irq(int lch);
@@ -153,27 +132,9 @@ static inline void omap_enable_channel_irq(int lch);
153#define REVISIT_24XX() printk(KERN_ERR "FIXME: no %s on 24xx\n", \ 132#define REVISIT_24XX() printk(KERN_ERR "FIXME: no %s on 24xx\n", \
154 __func__); 133 __func__);
155 134
156#define dma_read(reg) \
157({ \
158 u32 __val; \
159 if (cpu_class_is_omap1()) \
160 __val = __raw_readw(omap_dma_base + OMAP1_DMA_##reg); \
161 else \
162 __val = __raw_readl(omap_dma_base + OMAP_DMA4_##reg); \
163 __val; \
164})
165
166#define dma_write(val, reg) \
167({ \
168 if (cpu_class_is_omap1()) \
169 __raw_writew((u16)(val), omap_dma_base + OMAP1_DMA_##reg); \
170 else \
171 __raw_writel((val), omap_dma_base + OMAP_DMA4_##reg); \
172})
173
174#ifdef CONFIG_ARCH_OMAP15XX 135#ifdef CONFIG_ARCH_OMAP15XX
175/* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */ 136/* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */
176int omap_dma_in_1510_mode(void) 137static int omap_dma_in_1510_mode(void)
177{ 138{
178 return enable_1510_mode; 139 return enable_1510_mode;
179} 140}
@@ -205,16 +166,6 @@ static inline void set_gdma_dev(int req, int dev)
205#define set_gdma_dev(req, dev) do {} while (0) 166#define set_gdma_dev(req, dev) do {} while (0)
206#endif 167#endif
207 168
208/* Omap1 only */
209static void clear_lch_regs(int lch)
210{
211 int i;
212 void __iomem *lch_base = omap_dma_base + OMAP1_DMA_CH_BASE(lch);
213
214 for (i = 0; i < 0x2c; i += 2)
215 __raw_writew(0, lch_base + i);
216}
217
218void omap_set_dma_priority(int lch, int dst_port, int priority) 169void omap_set_dma_priority(int lch, int dst_port, int priority)
219{ 170{
220 unsigned long reg; 171 unsigned long reg;
@@ -247,12 +198,12 @@ void omap_set_dma_priority(int lch, int dst_port, int priority)
247 if (cpu_class_is_omap2()) { 198 if (cpu_class_is_omap2()) {
248 u32 ccr; 199 u32 ccr;
249 200
250 ccr = dma_read(CCR(lch)); 201 ccr = p->dma_read(CCR, lch);
251 if (priority) 202 if (priority)
252 ccr |= (1 << 6); 203 ccr |= (1 << 6);
253 else 204 else
254 ccr &= ~(1 << 6); 205 ccr &= ~(1 << 6);
255 dma_write(ccr, CCR(lch)); 206 p->dma_write(ccr, CCR, lch);
256 } 207 }
257} 208}
258EXPORT_SYMBOL(omap_set_dma_priority); 209EXPORT_SYMBOL(omap_set_dma_priority);
@@ -263,31 +214,31 @@ void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
263{ 214{
264 u32 l; 215 u32 l;
265 216
266 l = dma_read(CSDP(lch)); 217 l = p->dma_read(CSDP, lch);
267 l &= ~0x03; 218 l &= ~0x03;
268 l |= data_type; 219 l |= data_type;
269 dma_write(l, CSDP(lch)); 220 p->dma_write(l, CSDP, lch);
270 221
271 if (cpu_class_is_omap1()) { 222 if (cpu_class_is_omap1()) {
272 u16 ccr; 223 u16 ccr;
273 224
274 ccr = dma_read(CCR(lch)); 225 ccr = p->dma_read(CCR, lch);
275 ccr &= ~(1 << 5); 226 ccr &= ~(1 << 5);
276 if (sync_mode == OMAP_DMA_SYNC_FRAME) 227 if (sync_mode == OMAP_DMA_SYNC_FRAME)
277 ccr |= 1 << 5; 228 ccr |= 1 << 5;
278 dma_write(ccr, CCR(lch)); 229 p->dma_write(ccr, CCR, lch);
279 230
280 ccr = dma_read(CCR2(lch)); 231 ccr = p->dma_read(CCR2, lch);
281 ccr &= ~(1 << 2); 232 ccr &= ~(1 << 2);
282 if (sync_mode == OMAP_DMA_SYNC_BLOCK) 233 if (sync_mode == OMAP_DMA_SYNC_BLOCK)
283 ccr |= 1 << 2; 234 ccr |= 1 << 2;
284 dma_write(ccr, CCR2(lch)); 235 p->dma_write(ccr, CCR2, lch);
285 } 236 }
286 237
287 if (cpu_class_is_omap2() && dma_trigger) { 238 if (cpu_class_is_omap2() && dma_trigger) {
288 u32 val; 239 u32 val;
289 240
290 val = dma_read(CCR(lch)); 241 val = p->dma_read(CCR, lch);
291 242
292 /* DMA_SYNCHRO_CONTROL_UPPER depends on the channel number */ 243 /* DMA_SYNCHRO_CONTROL_UPPER depends on the channel number */
293 val &= ~((1 << 23) | (3 << 19) | 0x1f); 244 val &= ~((1 << 23) | (3 << 19) | 0x1f);
@@ -312,11 +263,11 @@ void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
312 } else { 263 } else {
313 val &= ~(1 << 24); /* dest synch */ 264 val &= ~(1 << 24); /* dest synch */
314 } 265 }
315 dma_write(val, CCR(lch)); 266 p->dma_write(val, CCR, lch);
316 } 267 }
317 268
318 dma_write(elem_count, CEN(lch)); 269 p->dma_write(elem_count, CEN, lch);
319 dma_write(frame_count, CFN(lch)); 270 p->dma_write(frame_count, CFN, lch);
320} 271}
321EXPORT_SYMBOL(omap_set_dma_transfer_params); 272EXPORT_SYMBOL(omap_set_dma_transfer_params);
322 273
@@ -327,7 +278,7 @@ void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color)
327 if (cpu_class_is_omap1()) { 278 if (cpu_class_is_omap1()) {
328 u16 w; 279 u16 w;
329 280
330 w = dma_read(CCR2(lch)); 281 w = p->dma_read(CCR2, lch);
331 w &= ~0x03; 282 w &= ~0x03;
332 283
333 switch (mode) { 284 switch (mode) {
@@ -342,23 +293,22 @@ void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color)
342 default: 293 default:
343 BUG(); 294 BUG();
344 } 295 }
345 dma_write(w, CCR2(lch)); 296 p->dma_write(w, CCR2, lch);
346 297
347 w = dma_read(LCH_CTRL(lch)); 298 w = p->dma_read(LCH_CTRL, lch);
348 w &= ~0x0f; 299 w &= ~0x0f;
349 /* Default is channel type 2D */ 300 /* Default is channel type 2D */
350 if (mode) { 301 if (mode) {
351 dma_write((u16)color, COLOR_L(lch)); 302 p->dma_write(color, COLOR, lch);
352 dma_write((u16)(color >> 16), COLOR_U(lch));
353 w |= 1; /* Channel type G */ 303 w |= 1; /* Channel type G */
354 } 304 }
355 dma_write(w, LCH_CTRL(lch)); 305 p->dma_write(w, LCH_CTRL, lch);
356 } 306 }
357 307
358 if (cpu_class_is_omap2()) { 308 if (cpu_class_is_omap2()) {
359 u32 val; 309 u32 val;
360 310
361 val = dma_read(CCR(lch)); 311 val = p->dma_read(CCR, lch);
362 val &= ~((1 << 17) | (1 << 16)); 312 val &= ~((1 << 17) | (1 << 16));
363 313
364 switch (mode) { 314 switch (mode) {
@@ -373,10 +323,10 @@ void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color)
373 default: 323 default:
374 BUG(); 324 BUG();
375 } 325 }
376 dma_write(val, CCR(lch)); 326 p->dma_write(val, CCR, lch);
377 327
378 color &= 0xffffff; 328 color &= 0xffffff;
379 dma_write(color, COLOR(lch)); 329 p->dma_write(color, COLOR, lch);
380 } 330 }
381} 331}
382EXPORT_SYMBOL(omap_set_dma_color_mode); 332EXPORT_SYMBOL(omap_set_dma_color_mode);
@@ -386,10 +336,10 @@ void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode)
386 if (cpu_class_is_omap2()) { 336 if (cpu_class_is_omap2()) {
387 u32 csdp; 337 u32 csdp;
388 338
389 csdp = dma_read(CSDP(lch)); 339 csdp = p->dma_read(CSDP, lch);
390 csdp &= ~(0x3 << 16); 340 csdp &= ~(0x3 << 16);
391 csdp |= (mode << 16); 341 csdp |= (mode << 16);
392 dma_write(csdp, CSDP(lch)); 342 p->dma_write(csdp, CSDP, lch);
393 } 343 }
394} 344}
395EXPORT_SYMBOL(omap_set_dma_write_mode); 345EXPORT_SYMBOL(omap_set_dma_write_mode);
@@ -399,10 +349,10 @@ void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode)
399 if (cpu_class_is_omap1() && !cpu_is_omap15xx()) { 349 if (cpu_class_is_omap1() && !cpu_is_omap15xx()) {
400 u32 l; 350 u32 l;
401 351
402 l = dma_read(LCH_CTRL(lch)); 352 l = p->dma_read(LCH_CTRL, lch);
403 l &= ~0x7; 353 l &= ~0x7;
404 l |= mode; 354 l |= mode;
405 dma_write(l, LCH_CTRL(lch)); 355 p->dma_write(l, LCH_CTRL, lch);
406 } 356 }
407} 357}
408EXPORT_SYMBOL(omap_set_dma_channel_mode); 358EXPORT_SYMBOL(omap_set_dma_channel_mode);
@@ -417,27 +367,21 @@ void omap_set_dma_src_params(int lch, int src_port, int src_amode,
417 if (cpu_class_is_omap1()) { 367 if (cpu_class_is_omap1()) {
418 u16 w; 368 u16 w;
419 369
420 w = dma_read(CSDP(lch)); 370 w = p->dma_read(CSDP, lch);
421 w &= ~(0x1f << 2); 371 w &= ~(0x1f << 2);
422 w |= src_port << 2; 372 w |= src_port << 2;
423 dma_write(w, CSDP(lch)); 373 p->dma_write(w, CSDP, lch);
424 } 374 }
425 375
426 l = dma_read(CCR(lch)); 376 l = p->dma_read(CCR, lch);
427 l &= ~(0x03 << 12); 377 l &= ~(0x03 << 12);
428 l |= src_amode << 12; 378 l |= src_amode << 12;
429 dma_write(l, CCR(lch)); 379 p->dma_write(l, CCR, lch);
430 380
431 if (cpu_class_is_omap1()) { 381 p->dma_write(src_start, CSSA, lch);
432 dma_write(src_start >> 16, CSSA_U(lch));
433 dma_write((u16)src_start, CSSA_L(lch));
434 }
435 382
436 if (cpu_class_is_omap2()) 383 p->dma_write(src_ei, CSEI, lch);
437 dma_write(src_start, CSSA(lch)); 384 p->dma_write(src_fi, CSFI, lch);
438
439 dma_write(src_ei, CSEI(lch));
440 dma_write(src_fi, CSFI(lch));
441} 385}
442EXPORT_SYMBOL(omap_set_dma_src_params); 386EXPORT_SYMBOL(omap_set_dma_src_params);
443 387
@@ -465,8 +409,8 @@ void omap_set_dma_src_index(int lch, int eidx, int fidx)
465 if (cpu_class_is_omap2()) 409 if (cpu_class_is_omap2())
466 return; 410 return;
467 411
468 dma_write(eidx, CSEI(lch)); 412 p->dma_write(eidx, CSEI, lch);
469 dma_write(fidx, CSFI(lch)); 413 p->dma_write(fidx, CSFI, lch);
470} 414}
471EXPORT_SYMBOL(omap_set_dma_src_index); 415EXPORT_SYMBOL(omap_set_dma_src_index);
472 416
@@ -474,11 +418,11 @@ void omap_set_dma_src_data_pack(int lch, int enable)
474{ 418{
475 u32 l; 419 u32 l;
476 420
477 l = dma_read(CSDP(lch)); 421 l = p->dma_read(CSDP, lch);
478 l &= ~(1 << 6); 422 l &= ~(1 << 6);
479 if (enable) 423 if (enable)
480 l |= (1 << 6); 424 l |= (1 << 6);
481 dma_write(l, CSDP(lch)); 425 p->dma_write(l, CSDP, lch);
482} 426}
483EXPORT_SYMBOL(omap_set_dma_src_data_pack); 427EXPORT_SYMBOL(omap_set_dma_src_data_pack);
484 428
@@ -487,7 +431,7 @@ void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
487 unsigned int burst = 0; 431 unsigned int burst = 0;
488 u32 l; 432 u32 l;
489 433
490 l = dma_read(CSDP(lch)); 434 l = p->dma_read(CSDP, lch);
491 l &= ~(0x03 << 7); 435 l &= ~(0x03 << 7);
492 436
493 switch (burst_mode) { 437 switch (burst_mode) {
@@ -523,7 +467,7 @@ void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
523 } 467 }
524 468
525 l |= (burst << 7); 469 l |= (burst << 7);
526 dma_write(l, CSDP(lch)); 470 p->dma_write(l, CSDP, lch);
527} 471}
528EXPORT_SYMBOL(omap_set_dma_src_burst_mode); 472EXPORT_SYMBOL(omap_set_dma_src_burst_mode);
529 473
@@ -535,27 +479,21 @@ void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
535 u32 l; 479 u32 l;
536 480
537 if (cpu_class_is_omap1()) { 481 if (cpu_class_is_omap1()) {
538 l = dma_read(CSDP(lch)); 482 l = p->dma_read(CSDP, lch);
539 l &= ~(0x1f << 9); 483 l &= ~(0x1f << 9);
540 l |= dest_port << 9; 484 l |= dest_port << 9;
541 dma_write(l, CSDP(lch)); 485 p->dma_write(l, CSDP, lch);
542 } 486 }
543 487
544 l = dma_read(CCR(lch)); 488 l = p->dma_read(CCR, lch);
545 l &= ~(0x03 << 14); 489 l &= ~(0x03 << 14);
546 l |= dest_amode << 14; 490 l |= dest_amode << 14;
547 dma_write(l, CCR(lch)); 491 p->dma_write(l, CCR, lch);
548 492
549 if (cpu_class_is_omap1()) { 493 p->dma_write(dest_start, CDSA, lch);
550 dma_write(dest_start >> 16, CDSA_U(lch));
551 dma_write(dest_start, CDSA_L(lch));
552 }
553
554 if (cpu_class_is_omap2())
555 dma_write(dest_start, CDSA(lch));
556 494
557 dma_write(dst_ei, CDEI(lch)); 495 p->dma_write(dst_ei, CDEI, lch);
558 dma_write(dst_fi, CDFI(lch)); 496 p->dma_write(dst_fi, CDFI, lch);
559} 497}
560EXPORT_SYMBOL(omap_set_dma_dest_params); 498EXPORT_SYMBOL(omap_set_dma_dest_params);
561 499
@@ -564,8 +502,8 @@ void omap_set_dma_dest_index(int lch, int eidx, int fidx)
564 if (cpu_class_is_omap2()) 502 if (cpu_class_is_omap2())
565 return; 503 return;
566 504
567 dma_write(eidx, CDEI(lch)); 505 p->dma_write(eidx, CDEI, lch);
568 dma_write(fidx, CDFI(lch)); 506 p->dma_write(fidx, CDFI, lch);
569} 507}
570EXPORT_SYMBOL(omap_set_dma_dest_index); 508EXPORT_SYMBOL(omap_set_dma_dest_index);
571 509
@@ -573,11 +511,11 @@ void omap_set_dma_dest_data_pack(int lch, int enable)
573{ 511{
574 u32 l; 512 u32 l;
575 513
576 l = dma_read(CSDP(lch)); 514 l = p->dma_read(CSDP, lch);
577 l &= ~(1 << 13); 515 l &= ~(1 << 13);
578 if (enable) 516 if (enable)
579 l |= 1 << 13; 517 l |= 1 << 13;
580 dma_write(l, CSDP(lch)); 518 p->dma_write(l, CSDP, lch);
581} 519}
582EXPORT_SYMBOL(omap_set_dma_dest_data_pack); 520EXPORT_SYMBOL(omap_set_dma_dest_data_pack);
583 521
@@ -586,7 +524,7 @@ void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
586 unsigned int burst = 0; 524 unsigned int burst = 0;
587 u32 l; 525 u32 l;
588 526
589 l = dma_read(CSDP(lch)); 527 l = p->dma_read(CSDP, lch);
590 l &= ~(0x03 << 14); 528 l &= ~(0x03 << 14);
591 529
592 switch (burst_mode) { 530 switch (burst_mode) {
@@ -619,7 +557,7 @@ void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
619 return; 557 return;
620 } 558 }
621 l |= (burst << 14); 559 l |= (burst << 14);
622 dma_write(l, CSDP(lch)); 560 p->dma_write(l, CSDP, lch);
623} 561}
624EXPORT_SYMBOL(omap_set_dma_dest_burst_mode); 562EXPORT_SYMBOL(omap_set_dma_dest_burst_mode);
625 563
@@ -629,18 +567,18 @@ static inline void omap_enable_channel_irq(int lch)
629 567
630 /* Clear CSR */ 568 /* Clear CSR */
631 if (cpu_class_is_omap1()) 569 if (cpu_class_is_omap1())
632 status = dma_read(CSR(lch)); 570 status = p->dma_read(CSR, lch);
633 else if (cpu_class_is_omap2()) 571 else if (cpu_class_is_omap2())
634 dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(lch)); 572 p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);
635 573
636 /* Enable some nice interrupts. */ 574 /* Enable some nice interrupts. */
637 dma_write(dma_chan[lch].enabled_irqs, CICR(lch)); 575 p->dma_write(dma_chan[lch].enabled_irqs, CICR, lch);
638} 576}
639 577
640static void omap_disable_channel_irq(int lch) 578static void omap_disable_channel_irq(int lch)
641{ 579{
642 if (cpu_class_is_omap2()) 580 if (cpu_class_is_omap2())
643 dma_write(0, CICR(lch)); 581 p->dma_write(0, CICR, lch);
644} 582}
645 583
646void omap_enable_dma_irq(int lch, u16 bits) 584void omap_enable_dma_irq(int lch, u16 bits)
@@ -659,7 +597,7 @@ static inline void enable_lnk(int lch)
659{ 597{
660 u32 l; 598 u32 l;
661 599
662 l = dma_read(CLNK_CTRL(lch)); 600 l = p->dma_read(CLNK_CTRL, lch);
663 601
664 if (cpu_class_is_omap1()) 602 if (cpu_class_is_omap1())
665 l &= ~(1 << 14); 603 l &= ~(1 << 14);
@@ -674,18 +612,18 @@ static inline void enable_lnk(int lch)
674 l = dma_chan[lch].next_linked_ch | (1 << 15); 612 l = dma_chan[lch].next_linked_ch | (1 << 15);
675#endif 613#endif
676 614
677 dma_write(l, CLNK_CTRL(lch)); 615 p->dma_write(l, CLNK_CTRL, lch);
678} 616}
679 617
680static inline void disable_lnk(int lch) 618static inline void disable_lnk(int lch)
681{ 619{
682 u32 l; 620 u32 l;
683 621
684 l = dma_read(CLNK_CTRL(lch)); 622 l = p->dma_read(CLNK_CTRL, lch);
685 623
686 /* Disable interrupts */ 624 /* Disable interrupts */
687 if (cpu_class_is_omap1()) { 625 if (cpu_class_is_omap1()) {
688 dma_write(0, CICR(lch)); 626 p->dma_write(0, CICR, lch);
689 /* Set the STOP_LNK bit */ 627 /* Set the STOP_LNK bit */
690 l |= 1 << 14; 628 l |= 1 << 14;
691 } 629 }
@@ -696,7 +634,7 @@ static inline void disable_lnk(int lch)
696 l &= ~(1 << 15); 634 l &= ~(1 << 15);
697 } 635 }
698 636
699 dma_write(l, CLNK_CTRL(lch)); 637 p->dma_write(l, CLNK_CTRL, lch);
700 dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE; 638 dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
701} 639}
702 640
@@ -709,9 +647,9 @@ static inline void omap2_enable_irq_lch(int lch)
709 return; 647 return;
710 648
711 spin_lock_irqsave(&dma_chan_lock, flags); 649 spin_lock_irqsave(&dma_chan_lock, flags);
712 val = dma_read(IRQENABLE_L0); 650 val = p->dma_read(IRQENABLE_L0, lch);
713 val |= 1 << lch; 651 val |= 1 << lch;
714 dma_write(val, IRQENABLE_L0); 652 p->dma_write(val, IRQENABLE_L0, lch);
715 spin_unlock_irqrestore(&dma_chan_lock, flags); 653 spin_unlock_irqrestore(&dma_chan_lock, flags);
716} 654}
717 655
@@ -724,9 +662,9 @@ static inline void omap2_disable_irq_lch(int lch)
724 return; 662 return;
725 663
726 spin_lock_irqsave(&dma_chan_lock, flags); 664 spin_lock_irqsave(&dma_chan_lock, flags);
727 val = dma_read(IRQENABLE_L0); 665 val = p->dma_read(IRQENABLE_L0, lch);
728 val &= ~(1 << lch); 666 val &= ~(1 << lch);
729 dma_write(val, IRQENABLE_L0); 667 p->dma_write(val, IRQENABLE_L0, lch);
730 spin_unlock_irqrestore(&dma_chan_lock, flags); 668 spin_unlock_irqrestore(&dma_chan_lock, flags);
731} 669}
732 670
@@ -753,8 +691,8 @@ int omap_request_dma(int dev_id, const char *dev_name,
753 chan = dma_chan + free_ch; 691 chan = dma_chan + free_ch;
754 chan->dev_id = dev_id; 692 chan->dev_id = dev_id;
755 693
756 if (cpu_class_is_omap1()) 694 if (p->clear_lch_regs)
757 clear_lch_regs(free_ch); 695 p->clear_lch_regs(free_ch);
758 696
759 if (cpu_class_is_omap2()) 697 if (cpu_class_is_omap2())
760 omap_clear_dma(free_ch); 698 omap_clear_dma(free_ch);
@@ -791,17 +729,17 @@ int omap_request_dma(int dev_id, const char *dev_name,
791 * Disable the 1510 compatibility mode and set the sync device 729 * Disable the 1510 compatibility mode and set the sync device
792 * id. 730 * id.
793 */ 731 */
794 dma_write(dev_id | (1 << 10), CCR(free_ch)); 732 p->dma_write(dev_id | (1 << 10), CCR, free_ch);
795 } else if (cpu_is_omap7xx() || cpu_is_omap15xx()) { 733 } else if (cpu_is_omap7xx() || cpu_is_omap15xx()) {
796 dma_write(dev_id, CCR(free_ch)); 734 p->dma_write(dev_id, CCR, free_ch);
797 } 735 }
798 736
799 if (cpu_class_is_omap2()) { 737 if (cpu_class_is_omap2()) {
800 omap2_enable_irq_lch(free_ch); 738 omap2_enable_irq_lch(free_ch);
801 omap_enable_channel_irq(free_ch); 739 omap_enable_channel_irq(free_ch);
802 /* Clear the CSR register and IRQ status register */ 740 /* Clear the CSR register and IRQ status register */
803 dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(free_ch)); 741 p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, free_ch);
804 dma_write(1 << free_ch, IRQSTATUS_L0); 742 p->dma_write(1 << free_ch, IRQSTATUS_L0, 0);
805 } 743 }
806 744
807 *dma_ch_out = free_ch; 745 *dma_ch_out = free_ch;
@@ -822,23 +760,23 @@ void omap_free_dma(int lch)
822 760
823 if (cpu_class_is_omap1()) { 761 if (cpu_class_is_omap1()) {
824 /* Disable all DMA interrupts for the channel. */ 762 /* Disable all DMA interrupts for the channel. */
825 dma_write(0, CICR(lch)); 763 p->dma_write(0, CICR, lch);
826 /* Make sure the DMA transfer is stopped. */ 764 /* Make sure the DMA transfer is stopped. */
827 dma_write(0, CCR(lch)); 765 p->dma_write(0, CCR, lch);
828 } 766 }
829 767
830 if (cpu_class_is_omap2()) { 768 if (cpu_class_is_omap2()) {
831 omap2_disable_irq_lch(lch); 769 omap2_disable_irq_lch(lch);
832 770
833 /* Clear the CSR register and IRQ status register */ 771 /* Clear the CSR register and IRQ status register */
834 dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(lch)); 772 p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);
835 dma_write(1 << lch, IRQSTATUS_L0); 773 p->dma_write(1 << lch, IRQSTATUS_L0, lch);
836 774
837 /* Disable all DMA interrupts for the channel. */ 775 /* Disable all DMA interrupts for the channel. */
838 dma_write(0, CICR(lch)); 776 p->dma_write(0, CICR, lch);
839 777
840 /* Make sure the DMA transfer is stopped. */ 778 /* Make sure the DMA transfer is stopped. */
841 dma_write(0, CCR(lch)); 779 p->dma_write(0, CCR, lch);
842 omap_clear_dma(lch); 780 omap_clear_dma(lch);
843 } 781 }
844 782
@@ -879,7 +817,7 @@ omap_dma_set_global_params(int arb_rate, int max_fifo_depth, int tparams)
879 reg |= (0x3 & tparams) << 12; 817 reg |= (0x3 & tparams) << 12;
880 reg |= (arb_rate & 0xff) << 16; 818 reg |= (arb_rate & 0xff) << 16;
881 819
882 dma_write(reg, GCR); 820 p->dma_write(reg, GCR, 0);
883} 821}
884EXPORT_SYMBOL(omap_dma_set_global_params); 822EXPORT_SYMBOL(omap_dma_set_global_params);
885 823
@@ -902,14 +840,14 @@ omap_dma_set_prio_lch(int lch, unsigned char read_prio,
902 printk(KERN_ERR "Invalid channel id\n"); 840 printk(KERN_ERR "Invalid channel id\n");
903 return -EINVAL; 841 return -EINVAL;
904 } 842 }
905 l = dma_read(CCR(lch)); 843 l = p->dma_read(CCR, lch);
906 l &= ~((1 << 6) | (1 << 26)); 844 l &= ~((1 << 6) | (1 << 26));
907 if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx()) 845 if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx())
908 l |= ((read_prio & 0x1) << 6) | ((write_prio & 0x1) << 26); 846 l |= ((read_prio & 0x1) << 6) | ((write_prio & 0x1) << 26);
909 else 847 else
910 l |= ((read_prio & 0x1) << 6); 848 l |= ((read_prio & 0x1) << 6);
911 849
912 dma_write(l, CCR(lch)); 850 p->dma_write(l, CCR, lch);
913 851
914 return 0; 852 return 0;
915} 853}
@@ -924,25 +862,7 @@ void omap_clear_dma(int lch)
924 unsigned long flags; 862 unsigned long flags;
925 863
926 local_irq_save(flags); 864 local_irq_save(flags);
927 865 p->clear_dma(lch);
928 if (cpu_class_is_omap1()) {
929 u32 l;
930
931 l = dma_read(CCR(lch));
932 l &= ~OMAP_DMA_CCR_EN;
933 dma_write(l, CCR(lch));
934
935 /* Clear pending interrupts */
936 l = dma_read(CSR(lch));
937 }
938
939 if (cpu_class_is_omap2()) {
940 int i;
941 void __iomem *lch_base = omap_dma_base + OMAP_DMA4_CH_BASE(lch);
942 for (i = 0; i < 0x44; i += 4)
943 __raw_writel(0, lch_base + i);
944 }
945
946 local_irq_restore(flags); 866 local_irq_restore(flags);
947} 867}
948EXPORT_SYMBOL(omap_clear_dma); 868EXPORT_SYMBOL(omap_clear_dma);
@@ -956,13 +876,13 @@ void omap_start_dma(int lch)
956 * before starting dma transfer. 876 * before starting dma transfer.
957 */ 877 */
958 if (cpu_is_omap15xx()) 878 if (cpu_is_omap15xx())
959 dma_write(0, CPC(lch)); 879 p->dma_write(0, CPC, lch);
960 else 880 else
961 dma_write(0, CDAC(lch)); 881 p->dma_write(0, CDAC, lch);
962 882
963 if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) { 883 if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
964 int next_lch, cur_lch; 884 int next_lch, cur_lch;
965 char dma_chan_link_map[OMAP_DMA4_LOGICAL_DMA_CH_COUNT]; 885 char dma_chan_link_map[dma_lch_count];
966 886
967 dma_chan_link_map[lch] = 1; 887 dma_chan_link_map[lch] = 1;
968 /* Set the link register of the first channel */ 888 /* Set the link register of the first channel */
@@ -984,26 +904,18 @@ void omap_start_dma(int lch)
984 904
985 cur_lch = next_lch; 905 cur_lch = next_lch;
986 } while (next_lch != -1); 906 } while (next_lch != -1);
987 } else if (cpu_is_omap242x() || 907 } else if (IS_DMA_ERRATA(DMA_ERRATA_PARALLEL_CHANNELS))
988 (cpu_is_omap243x() && omap_type() <= OMAP2430_REV_ES1_0)) { 908 p->dma_write(lch, CLNK_CTRL, lch);
989
990 /* Errata: Need to write lch even if not using chaining */
991 dma_write(lch, CLNK_CTRL(lch));
992 }
993 909
994 omap_enable_channel_irq(lch); 910 omap_enable_channel_irq(lch);
995 911
996 l = dma_read(CCR(lch)); 912 l = p->dma_read(CCR, lch);
997
998 /*
999 * Errata: On ES2.0 BUFFERING disable must be set.
1000 * This will always fail on ES1.0
1001 */
1002 if (cpu_is_omap24xx())
1003 l |= OMAP_DMA_CCR_EN;
1004 913
914 if (IS_DMA_ERRATA(DMA_ERRATA_IFRAME_BUFFERING))
915 l |= OMAP_DMA_CCR_BUFFERING_DISABLE;
1005 l |= OMAP_DMA_CCR_EN; 916 l |= OMAP_DMA_CCR_EN;
1006 dma_write(l, CCR(lch)); 917
918 p->dma_write(l, CCR, lch);
1007 919
1008 dma_chan[lch].flags |= OMAP_DMA_ACTIVE; 920 dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
1009} 921}
@@ -1015,15 +927,46 @@ void omap_stop_dma(int lch)
1015 927
1016 /* Disable all interrupts on the channel */ 928 /* Disable all interrupts on the channel */
1017 if (cpu_class_is_omap1()) 929 if (cpu_class_is_omap1())
1018 dma_write(0, CICR(lch)); 930 p->dma_write(0, CICR, lch);
1019 931
1020 l = dma_read(CCR(lch)); 932 l = p->dma_read(CCR, lch);
1021 l &= ~OMAP_DMA_CCR_EN; 933 if (IS_DMA_ERRATA(DMA_ERRATA_i541) &&
1022 dma_write(l, CCR(lch)); 934 (l & OMAP_DMA_CCR_SEL_SRC_DST_SYNC)) {
935 int i = 0;
936 u32 sys_cf;
937
938 /* Configure No-Standby */
939 l = p->dma_read(OCP_SYSCONFIG, lch);
940 sys_cf = l;
941 l &= ~DMA_SYSCONFIG_MIDLEMODE_MASK;
942 l |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
943 p->dma_write(l , OCP_SYSCONFIG, 0);
944
945 l = p->dma_read(CCR, lch);
946 l &= ~OMAP_DMA_CCR_EN;
947 p->dma_write(l, CCR, lch);
948
949 /* Wait for sDMA FIFO drain */
950 l = p->dma_read(CCR, lch);
951 while (i < 100 && (l & (OMAP_DMA_CCR_RD_ACTIVE |
952 OMAP_DMA_CCR_WR_ACTIVE))) {
953 udelay(5);
954 i++;
955 l = p->dma_read(CCR, lch);
956 }
957 if (i >= 100)
958 printk(KERN_ERR "DMA drain did not complete on "
959 "lch %d\n", lch);
960 /* Restore OCP_SYSCONFIG */
961 p->dma_write(sys_cf, OCP_SYSCONFIG, lch);
962 } else {
963 l &= ~OMAP_DMA_CCR_EN;
964 p->dma_write(l, CCR, lch);
965 }
1023 966
1024 if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) { 967 if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
1025 int next_lch, cur_lch = lch; 968 int next_lch, cur_lch = lch;
1026 char dma_chan_link_map[OMAP_DMA4_LOGICAL_DMA_CH_COUNT]; 969 char dma_chan_link_map[dma_lch_count];
1027 970
1028 memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map)); 971 memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
1029 do { 972 do {
@@ -1076,7 +1019,7 @@ EXPORT_SYMBOL(omap_set_dma_callback);
1076 * If the channel is running the caller must disable interrupts prior calling 1019 * If the channel is running the caller must disable interrupts prior calling
1077 * this function and process the returned value before re-enabling interrupt to 1020 * this function and process the returned value before re-enabling interrupt to
1078 * prevent races with the interrupt handler. Note that in continuous mode there 1021 * prevent races with the interrupt handler. Note that in continuous mode there
1079 * is a chance for CSSA_L register overflow inbetween the two reads resulting 1022 * is a chance for CSSA_L register overflow between the two reads resulting
1080 * in incorrect return value. 1023 * in incorrect return value.
1081 */ 1024 */
1082dma_addr_t omap_get_dma_src_pos(int lch) 1025dma_addr_t omap_get_dma_src_pos(int lch)
@@ -1084,19 +1027,15 @@ dma_addr_t omap_get_dma_src_pos(int lch)
1084 dma_addr_t offset = 0; 1027 dma_addr_t offset = 0;
1085 1028
1086 if (cpu_is_omap15xx()) 1029 if (cpu_is_omap15xx())
1087 offset = dma_read(CPC(lch)); 1030 offset = p->dma_read(CPC, lch);
1088 else 1031 else
1089 offset = dma_read(CSAC(lch)); 1032 offset = p->dma_read(CSAC, lch);
1090 1033
1091 /* 1034 if (IS_DMA_ERRATA(DMA_ERRATA_3_3) && offset == 0)
1092 * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is 1035 offset = p->dma_read(CSAC, lch);
1093 * read before the DMA controller finished disabling the channel.
1094 */
1095 if (!cpu_is_omap15xx() && offset == 0)
1096 offset = dma_read(CSAC(lch));
1097 1036
1098 if (cpu_class_is_omap1()) 1037 if (cpu_class_is_omap1())
1099 offset |= (dma_read(CSSA_U(lch)) << 16); 1038 offset |= (p->dma_read(CSSA, lch) & 0xFFFF0000);
1100 1039
1101 return offset; 1040 return offset;
1102} 1041}
@@ -1107,7 +1046,7 @@ EXPORT_SYMBOL(omap_get_dma_src_pos);
1107 * If the channel is running the caller must disable interrupts prior calling 1046 * If the channel is running the caller must disable interrupts prior calling
1108 * this function and process the returned value before re-enabling interrupt to 1047 * this function and process the returned value before re-enabling interrupt to
1109 * prevent races with the interrupt handler. Note that in continuous mode there 1048 * prevent races with the interrupt handler. Note that in continuous mode there
1110 * is a chance for CDSA_L register overflow inbetween the two reads resulting 1049 * is a chance for CDSA_L register overflow between the two reads resulting
1111 * in incorrect return value. 1050 * in incorrect return value.
1112 */ 1051 */
1113dma_addr_t omap_get_dma_dst_pos(int lch) 1052dma_addr_t omap_get_dma_dst_pos(int lch)
@@ -1115,19 +1054,19 @@ dma_addr_t omap_get_dma_dst_pos(int lch)
1115 dma_addr_t offset = 0; 1054 dma_addr_t offset = 0;
1116 1055
1117 if (cpu_is_omap15xx()) 1056 if (cpu_is_omap15xx())
1118 offset = dma_read(CPC(lch)); 1057 offset = p->dma_read(CPC, lch);
1119 else 1058 else
1120 offset = dma_read(CDAC(lch)); 1059 offset = p->dma_read(CDAC, lch);
1121 1060
1122 /* 1061 /*
1123 * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is 1062 * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
1124 * read before the DMA controller finished disabling the channel. 1063 * read before the DMA controller finished disabling the channel.
1125 */ 1064 */
1126 if (!cpu_is_omap15xx() && offset == 0) 1065 if (!cpu_is_omap15xx() && offset == 0)
1127 offset = dma_read(CDAC(lch)); 1066 offset = p->dma_read(CDAC, lch);
1128 1067
1129 if (cpu_class_is_omap1()) 1068 if (cpu_class_is_omap1())
1130 offset |= (dma_read(CDSA_U(lch)) << 16); 1069 offset |= (p->dma_read(CDSA, lch) & 0xFFFF0000);
1131 1070
1132 return offset; 1071 return offset;
1133} 1072}
@@ -1135,7 +1074,7 @@ EXPORT_SYMBOL(omap_get_dma_dst_pos);
1135 1074
1136int omap_get_dma_active_status(int lch) 1075int omap_get_dma_active_status(int lch)
1137{ 1076{
1138 return (dma_read(CCR(lch)) & OMAP_DMA_CCR_EN) != 0; 1077 return (p->dma_read(CCR, lch) & OMAP_DMA_CCR_EN) != 0;
1139} 1078}
1140EXPORT_SYMBOL(omap_get_dma_active_status); 1079EXPORT_SYMBOL(omap_get_dma_active_status);
1141 1080
@@ -1148,7 +1087,7 @@ int omap_dma_running(void)
1148 return 1; 1087 return 1;
1149 1088
1150 for (lch = 0; lch < dma_chan_count; lch++) 1089 for (lch = 0; lch < dma_chan_count; lch++)
1151 if (dma_read(CCR(lch)) & OMAP_DMA_CCR_EN) 1090 if (p->dma_read(CCR, lch) & OMAP_DMA_CCR_EN)
1152 return 1; 1091 return 1;
1153 1092
1154 return 0; 1093 return 0;
@@ -1163,8 +1102,8 @@ void omap_dma_link_lch(int lch_head, int lch_queue)
1163{ 1102{
1164 if (omap_dma_in_1510_mode()) { 1103 if (omap_dma_in_1510_mode()) {
1165 if (lch_head == lch_queue) { 1104 if (lch_head == lch_queue) {
1166 dma_write(dma_read(CCR(lch_head)) | (3 << 8), 1105 p->dma_write(p->dma_read(CCR, lch_head) | (3 << 8),
1167 CCR(lch_head)); 1106 CCR, lch_head);
1168 return; 1107 return;
1169 } 1108 }
1170 printk(KERN_ERR "DMA linking is not supported in 1510 mode\n"); 1109 printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
@@ -1190,8 +1129,8 @@ void omap_dma_unlink_lch(int lch_head, int lch_queue)
1190{ 1129{
1191 if (omap_dma_in_1510_mode()) { 1130 if (omap_dma_in_1510_mode()) {
1192 if (lch_head == lch_queue) { 1131 if (lch_head == lch_queue) {
1193 dma_write(dma_read(CCR(lch_head)) & ~(3 << 8), 1132 p->dma_write(p->dma_read(CCR, lch_head) & ~(3 << 8),
1194 CCR(lch_head)); 1133 CCR, lch_head);
1195 return; 1134 return;
1196 } 1135 }
1197 printk(KERN_ERR "DMA linking is not supported in 1510 mode\n"); 1136 printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
@@ -1217,8 +1156,6 @@ void omap_dma_unlink_lch(int lch_head, int lch_queue)
1217} 1156}
1218EXPORT_SYMBOL(omap_dma_unlink_lch); 1157EXPORT_SYMBOL(omap_dma_unlink_lch);
1219 1158
1220/*----------------------------------------------------------------------------*/
1221
1222#ifndef CONFIG_ARCH_OMAP1 1159#ifndef CONFIG_ARCH_OMAP1
1223/* Create chain of DMA channesls */ 1160/* Create chain of DMA channesls */
1224static void create_dma_lch_chain(int lch_head, int lch_queue) 1161static void create_dma_lch_chain(int lch_head, int lch_queue)
@@ -1243,15 +1180,15 @@ static void create_dma_lch_chain(int lch_head, int lch_queue)
1243 lch_queue; 1180 lch_queue;
1244 } 1181 }
1245 1182
1246 l = dma_read(CLNK_CTRL(lch_head)); 1183 l = p->dma_read(CLNK_CTRL, lch_head);
1247 l &= ~(0x1f); 1184 l &= ~(0x1f);
1248 l |= lch_queue; 1185 l |= lch_queue;
1249 dma_write(l, CLNK_CTRL(lch_head)); 1186 p->dma_write(l, CLNK_CTRL, lch_head);
1250 1187
1251 l = dma_read(CLNK_CTRL(lch_queue)); 1188 l = p->dma_read(CLNK_CTRL, lch_queue);
1252 l &= ~(0x1f); 1189 l &= ~(0x1f);
1253 l |= (dma_chan[lch_queue].next_linked_ch); 1190 l |= (dma_chan[lch_queue].next_linked_ch);
1254 dma_write(l, CLNK_CTRL(lch_queue)); 1191 p->dma_write(l, CLNK_CTRL, lch_queue);
1255} 1192}
1256 1193
1257/** 1194/**
@@ -1527,13 +1464,13 @@ int omap_dma_chain_a_transfer(int chain_id, int src_start, int dest_start,
1527 1464
1528 /* Set the params to the free channel */ 1465 /* Set the params to the free channel */
1529 if (src_start != 0) 1466 if (src_start != 0)
1530 dma_write(src_start, CSSA(lch)); 1467 p->dma_write(src_start, CSSA, lch);
1531 if (dest_start != 0) 1468 if (dest_start != 0)
1532 dma_write(dest_start, CDSA(lch)); 1469 p->dma_write(dest_start, CDSA, lch);
1533 1470
1534 /* Write the buffer size */ 1471 /* Write the buffer size */
1535 dma_write(elem_count, CEN(lch)); 1472 p->dma_write(elem_count, CEN, lch);
1536 dma_write(frame_count, CFN(lch)); 1473 p->dma_write(frame_count, CFN, lch);
1537 1474
1538 /* 1475 /*
1539 * If the chain is dynamically linked, 1476 * If the chain is dynamically linked,
@@ -1566,8 +1503,8 @@ int omap_dma_chain_a_transfer(int chain_id, int src_start, int dest_start,
1566 enable_lnk(dma_chan[lch].prev_linked_ch); 1503 enable_lnk(dma_chan[lch].prev_linked_ch);
1567 dma_chan[lch].state = DMA_CH_QUEUED; 1504 dma_chan[lch].state = DMA_CH_QUEUED;
1568 start_dma = 0; 1505 start_dma = 0;
1569 if (0 == ((1 << 7) & dma_read( 1506 if (0 == ((1 << 7) & p->dma_read(
1570 CCR(dma_chan[lch].prev_linked_ch)))) { 1507 CCR, dma_chan[lch].prev_linked_ch))) {
1571 disable_lnk(dma_chan[lch]. 1508 disable_lnk(dma_chan[lch].
1572 prev_linked_ch); 1509 prev_linked_ch);
1573 pr_debug("\n prev ch is stopped\n"); 1510 pr_debug("\n prev ch is stopped\n");
@@ -1583,7 +1520,7 @@ int omap_dma_chain_a_transfer(int chain_id, int src_start, int dest_start,
1583 } 1520 }
1584 omap_enable_channel_irq(lch); 1521 omap_enable_channel_irq(lch);
1585 1522
1586 l = dma_read(CCR(lch)); 1523 l = p->dma_read(CCR, lch);
1587 1524
1588 if ((0 == (l & (1 << 24)))) 1525 if ((0 == (l & (1 << 24))))
1589 l &= ~(1 << 25); 1526 l &= ~(1 << 25);
@@ -1594,12 +1531,12 @@ int omap_dma_chain_a_transfer(int chain_id, int src_start, int dest_start,
1594 l |= (1 << 7); 1531 l |= (1 << 7);
1595 dma_chan[lch].state = DMA_CH_STARTED; 1532 dma_chan[lch].state = DMA_CH_STARTED;
1596 pr_debug("starting %d\n", lch); 1533 pr_debug("starting %d\n", lch);
1597 dma_write(l, CCR(lch)); 1534 p->dma_write(l, CCR, lch);
1598 } else 1535 } else
1599 start_dma = 0; 1536 start_dma = 0;
1600 } else { 1537 } else {
1601 if (0 == (l & (1 << 7))) 1538 if (0 == (l & (1 << 7)))
1602 dma_write(l, CCR(lch)); 1539 p->dma_write(l, CCR, lch);
1603 } 1540 }
1604 dma_chan[lch].flags |= OMAP_DMA_ACTIVE; 1541 dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
1605 } 1542 }
@@ -1644,7 +1581,7 @@ int omap_start_dma_chain_transfers(int chain_id)
1644 omap_enable_channel_irq(channels[0]); 1581 omap_enable_channel_irq(channels[0]);
1645 } 1582 }
1646 1583
1647 l = dma_read(CCR(channels[0])); 1584 l = p->dma_read(CCR, channels[0]);
1648 l |= (1 << 7); 1585 l |= (1 << 7);
1649 dma_linked_lch[chain_id].chain_state = DMA_CHAIN_STARTED; 1586 dma_linked_lch[chain_id].chain_state = DMA_CHAIN_STARTED;
1650 dma_chan[channels[0]].state = DMA_CH_STARTED; 1587 dma_chan[channels[0]].state = DMA_CH_STARTED;
@@ -1653,7 +1590,7 @@ int omap_start_dma_chain_transfers(int chain_id)
1653 l &= ~(1 << 25); 1590 l &= ~(1 << 25);
1654 else 1591 else
1655 l |= (1 << 25); 1592 l |= (1 << 25);
1656 dma_write(l, CCR(channels[0])); 1593 p->dma_write(l, CCR, channels[0]);
1657 1594
1658 dma_chan[channels[0]].flags |= OMAP_DMA_ACTIVE; 1595 dma_chan[channels[0]].flags |= OMAP_DMA_ACTIVE;
1659 1596
@@ -1673,7 +1610,7 @@ int omap_stop_dma_chain_transfers(int chain_id)
1673{ 1610{
1674 int *channels; 1611 int *channels;
1675 u32 l, i; 1612 u32 l, i;
1676 u32 sys_cf; 1613 u32 sys_cf = 0;
1677 1614
1678 /* Check for input params */ 1615 /* Check for input params */
1679 if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) { 1616 if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
@@ -1688,22 +1625,20 @@ int omap_stop_dma_chain_transfers(int chain_id)
1688 } 1625 }
1689 channels = dma_linked_lch[chain_id].linked_dmach_q; 1626 channels = dma_linked_lch[chain_id].linked_dmach_q;
1690 1627
1691 /* 1628 if (IS_DMA_ERRATA(DMA_ERRATA_i88)) {
1692 * DMA Errata: 1629 sys_cf = p->dma_read(OCP_SYSCONFIG, 0);
1693 * Special programming model needed to disable DMA before end of block 1630 l = sys_cf;
1694 */ 1631 /* Middle mode reg set no Standby */
1695 sys_cf = dma_read(OCP_SYSCONFIG); 1632 l &= ~((1 << 12)|(1 << 13));
1696 l = sys_cf; 1633 p->dma_write(l, OCP_SYSCONFIG, 0);
1697 /* Middle mode reg set no Standby */ 1634 }
1698 l &= ~((1 << 12)|(1 << 13));
1699 dma_write(l, OCP_SYSCONFIG);
1700 1635
1701 for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) { 1636 for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1702 1637
1703 /* Stop the Channel transmission */ 1638 /* Stop the Channel transmission */
1704 l = dma_read(CCR(channels[i])); 1639 l = p->dma_read(CCR, channels[i]);
1705 l &= ~(1 << 7); 1640 l &= ~(1 << 7);
1706 dma_write(l, CCR(channels[i])); 1641 p->dma_write(l, CCR, channels[i]);
1707 1642
1708 /* Disable the link in all the channels */ 1643 /* Disable the link in all the channels */
1709 disable_lnk(channels[i]); 1644 disable_lnk(channels[i]);
@@ -1715,8 +1650,8 @@ int omap_stop_dma_chain_transfers(int chain_id)
1715 /* Reset the Queue pointers */ 1650 /* Reset the Queue pointers */
1716 OMAP_DMA_CHAIN_QINIT(chain_id); 1651 OMAP_DMA_CHAIN_QINIT(chain_id);
1717 1652
1718 /* Errata - put in the old value */ 1653 if (IS_DMA_ERRATA(DMA_ERRATA_i88))
1719 dma_write(sys_cf, OCP_SYSCONFIG); 1654 p->dma_write(sys_cf, OCP_SYSCONFIG, 0);
1720 1655
1721 return 0; 1656 return 0;
1722} 1657}
@@ -1758,8 +1693,8 @@ int omap_get_dma_chain_index(int chain_id, int *ei, int *fi)
1758 /* Get the current channel */ 1693 /* Get the current channel */
1759 lch = channels[dma_linked_lch[chain_id].q_head]; 1694 lch = channels[dma_linked_lch[chain_id].q_head];
1760 1695
1761 *ei = dma_read(CCEN(lch)); 1696 *ei = p->dma_read(CCEN, lch);
1762 *fi = dma_read(CCFN(lch)); 1697 *fi = p->dma_read(CCFN, lch);
1763 1698
1764 return 0; 1699 return 0;
1765} 1700}
@@ -1796,7 +1731,7 @@ int omap_get_dma_chain_dst_pos(int chain_id)
1796 /* Get the current channel */ 1731 /* Get the current channel */
1797 lch = channels[dma_linked_lch[chain_id].q_head]; 1732 lch = channels[dma_linked_lch[chain_id].q_head];
1798 1733
1799 return dma_read(CDAC(lch)); 1734 return p->dma_read(CDAC, lch);
1800} 1735}
1801EXPORT_SYMBOL(omap_get_dma_chain_dst_pos); 1736EXPORT_SYMBOL(omap_get_dma_chain_dst_pos);
1802 1737
@@ -1830,7 +1765,7 @@ int omap_get_dma_chain_src_pos(int chain_id)
1830 /* Get the current channel */ 1765 /* Get the current channel */
1831 lch = channels[dma_linked_lch[chain_id].q_head]; 1766 lch = channels[dma_linked_lch[chain_id].q_head];
1832 1767
1833 return dma_read(CSAC(lch)); 1768 return p->dma_read(CSAC, lch);
1834} 1769}
1835EXPORT_SYMBOL(omap_get_dma_chain_src_pos); 1770EXPORT_SYMBOL(omap_get_dma_chain_src_pos);
1836#endif /* ifndef CONFIG_ARCH_OMAP1 */ 1771#endif /* ifndef CONFIG_ARCH_OMAP1 */
@@ -1847,7 +1782,7 @@ static int omap1_dma_handle_ch(int ch)
1847 csr = dma_chan[ch].saved_csr; 1782 csr = dma_chan[ch].saved_csr;
1848 dma_chan[ch].saved_csr = 0; 1783 dma_chan[ch].saved_csr = 0;
1849 } else 1784 } else
1850 csr = dma_read(CSR(ch)); 1785 csr = p->dma_read(CSR, ch);
1851 if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) { 1786 if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) {
1852 dma_chan[ch + 6].saved_csr = csr >> 7; 1787 dma_chan[ch + 6].saved_csr = csr >> 7;
1853 csr &= 0x7f; 1788 csr &= 0x7f;
@@ -1900,13 +1835,13 @@ static irqreturn_t omap1_dma_irq_handler(int irq, void *dev_id)
1900 1835
1901static int omap2_dma_handle_ch(int ch) 1836static int omap2_dma_handle_ch(int ch)
1902{ 1837{
1903 u32 status = dma_read(CSR(ch)); 1838 u32 status = p->dma_read(CSR, ch);
1904 1839
1905 if (!status) { 1840 if (!status) {
1906 if (printk_ratelimit()) 1841 if (printk_ratelimit())
1907 printk(KERN_WARNING "Spurious DMA IRQ for lch %d\n", 1842 printk(KERN_WARNING "Spurious DMA IRQ for lch %d\n",
1908 ch); 1843 ch);
1909 dma_write(1 << ch, IRQSTATUS_L0); 1844 p->dma_write(1 << ch, IRQSTATUS_L0, ch);
1910 return 0; 1845 return 0;
1911 } 1846 }
1912 if (unlikely(dma_chan[ch].dev_id == -1)) { 1847 if (unlikely(dma_chan[ch].dev_id == -1)) {
@@ -1922,17 +1857,12 @@ static int omap2_dma_handle_ch(int ch)
1922 if (unlikely(status & OMAP2_DMA_TRANS_ERR_IRQ)) { 1857 if (unlikely(status & OMAP2_DMA_TRANS_ERR_IRQ)) {
1923 printk(KERN_INFO "DMA transaction error with device %d\n", 1858 printk(KERN_INFO "DMA transaction error with device %d\n",
1924 dma_chan[ch].dev_id); 1859 dma_chan[ch].dev_id);
1925 if (cpu_class_is_omap2()) { 1860 if (IS_DMA_ERRATA(DMA_ERRATA_i378)) {
1926 /*
1927 * Errata: sDMA Channel is not disabled
1928 * after a transaction error. So we explicitely
1929 * disable the channel
1930 */
1931 u32 ccr; 1861 u32 ccr;
1932 1862
1933 ccr = dma_read(CCR(ch)); 1863 ccr = p->dma_read(CCR, ch);
1934 ccr &= ~OMAP_DMA_CCR_EN; 1864 ccr &= ~OMAP_DMA_CCR_EN;
1935 dma_write(ccr, CCR(ch)); 1865 p->dma_write(ccr, CCR, ch);
1936 dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE; 1866 dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
1937 } 1867 }
1938 } 1868 }
@@ -1943,14 +1873,16 @@ static int omap2_dma_handle_ch(int ch)
1943 printk(KERN_INFO "DMA misaligned error with device %d\n", 1873 printk(KERN_INFO "DMA misaligned error with device %d\n",
1944 dma_chan[ch].dev_id); 1874 dma_chan[ch].dev_id);
1945 1875
1946 dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(ch)); 1876 p->dma_write(status, CSR, ch);
1947 dma_write(1 << ch, IRQSTATUS_L0); 1877 p->dma_write(1 << ch, IRQSTATUS_L0, ch);
1878 /* read back the register to flush the write */
1879 p->dma_read(IRQSTATUS_L0, ch);
1948 1880
1949 /* If the ch is not chained then chain_id will be -1 */ 1881 /* If the ch is not chained then chain_id will be -1 */
1950 if (dma_chan[ch].chain_id != -1) { 1882 if (dma_chan[ch].chain_id != -1) {
1951 int chain_id = dma_chan[ch].chain_id; 1883 int chain_id = dma_chan[ch].chain_id;
1952 dma_chan[ch].state = DMA_CH_NOTSTARTED; 1884 dma_chan[ch].state = DMA_CH_NOTSTARTED;
1953 if (dma_read(CLNK_CTRL(ch)) & (1 << 15)) 1885 if (p->dma_read(CLNK_CTRL, ch) & (1 << 15))
1954 dma_chan[dma_chan[ch].next_linked_ch].state = 1886 dma_chan[dma_chan[ch].next_linked_ch].state =
1955 DMA_CH_STARTED; 1887 DMA_CH_STARTED;
1956 if (dma_linked_lch[chain_id].chain_mode == 1888 if (dma_linked_lch[chain_id].chain_mode ==
@@ -1960,11 +1892,10 @@ static int omap2_dma_handle_ch(int ch)
1960 if (!OMAP_DMA_CHAIN_QEMPTY(chain_id)) 1892 if (!OMAP_DMA_CHAIN_QEMPTY(chain_id))
1961 OMAP_DMA_CHAIN_INCQHEAD(chain_id); 1893 OMAP_DMA_CHAIN_INCQHEAD(chain_id);
1962 1894
1963 status = dma_read(CSR(ch)); 1895 status = p->dma_read(CSR, ch);
1896 p->dma_write(status, CSR, ch);
1964 } 1897 }
1965 1898
1966 dma_write(status, CSR(ch));
1967
1968 if (likely(dma_chan[ch].callback != NULL)) 1899 if (likely(dma_chan[ch].callback != NULL))
1969 dma_chan[ch].callback(ch, status, dma_chan[ch].data); 1900 dma_chan[ch].callback(ch, status, dma_chan[ch].data);
1970 1901
@@ -1977,13 +1908,13 @@ static irqreturn_t omap2_dma_irq_handler(int irq, void *dev_id)
1977 u32 val, enable_reg; 1908 u32 val, enable_reg;
1978 int i; 1909 int i;
1979 1910
1980 val = dma_read(IRQSTATUS_L0); 1911 val = p->dma_read(IRQSTATUS_L0, 0);
1981 if (val == 0) { 1912 if (val == 0) {
1982 if (printk_ratelimit()) 1913 if (printk_ratelimit())
1983 printk(KERN_WARNING "Spurious DMA IRQ\n"); 1914 printk(KERN_WARNING "Spurious DMA IRQ\n");
1984 return IRQ_HANDLED; 1915 return IRQ_HANDLED;
1985 } 1916 }
1986 enable_reg = dma_read(IRQENABLE_L0); 1917 enable_reg = p->dma_read(IRQENABLE_L0, 0);
1987 val &= enable_reg; /* Dispatch only relevant interrupts */ 1918 val &= enable_reg; /* Dispatch only relevant interrupts */
1988 for (i = 0; i < dma_lch_count && val != 0; i++) { 1919 for (i = 0; i < dma_lch_count && val != 0; i++) {
1989 if (val & 1) 1920 if (val & 1)
@@ -2009,119 +1940,66 @@ static struct irqaction omap24xx_dma_irq;
2009void omap_dma_global_context_save(void) 1940void omap_dma_global_context_save(void)
2010{ 1941{
2011 omap_dma_global_context.dma_irqenable_l0 = 1942 omap_dma_global_context.dma_irqenable_l0 =
2012 dma_read(IRQENABLE_L0); 1943 p->dma_read(IRQENABLE_L0, 0);
2013 omap_dma_global_context.dma_ocp_sysconfig = 1944 omap_dma_global_context.dma_ocp_sysconfig =
2014 dma_read(OCP_SYSCONFIG); 1945 p->dma_read(OCP_SYSCONFIG, 0);
2015 omap_dma_global_context.dma_gcr = dma_read(GCR); 1946 omap_dma_global_context.dma_gcr = p->dma_read(GCR, 0);
2016} 1947}
2017 1948
2018void omap_dma_global_context_restore(void) 1949void omap_dma_global_context_restore(void)
2019{ 1950{
2020 int ch; 1951 int ch;
2021 1952
2022 dma_write(omap_dma_global_context.dma_gcr, GCR); 1953 p->dma_write(omap_dma_global_context.dma_gcr, GCR, 0);
2023 dma_write(omap_dma_global_context.dma_ocp_sysconfig, 1954 p->dma_write(omap_dma_global_context.dma_ocp_sysconfig,
2024 OCP_SYSCONFIG); 1955 OCP_SYSCONFIG, 0);
2025 dma_write(omap_dma_global_context.dma_irqenable_l0, 1956 p->dma_write(omap_dma_global_context.dma_irqenable_l0,
2026 IRQENABLE_L0); 1957 IRQENABLE_L0, 0);
2027 1958
2028 /* 1959 if (IS_DMA_ERRATA(DMA_ROMCODE_BUG))
2029 * A bug in ROM code leaves IRQ status for channels 0 and 1 uncleared 1960 p->dma_write(0x3 , IRQSTATUS_L0, 0);
2030 * after secure sram context save and restore. Hence we need to
2031 * manually clear those IRQs to avoid spurious interrupts. This
2032 * affects only secure devices.
2033 */
2034 if (cpu_is_omap34xx() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
2035 dma_write(0x3 , IRQSTATUS_L0);
2036 1961
2037 for (ch = 0; ch < dma_chan_count; ch++) 1962 for (ch = 0; ch < dma_chan_count; ch++)
2038 if (dma_chan[ch].dev_id != -1) 1963 if (dma_chan[ch].dev_id != -1)
2039 omap_clear_dma(ch); 1964 omap_clear_dma(ch);
2040} 1965}
2041 1966
2042/*----------------------------------------------------------------------------*/ 1967static int __devinit omap_system_dma_probe(struct platform_device *pdev)
2043
2044static int __init omap_init_dma(void)
2045{ 1968{
2046 unsigned long base; 1969 int ch, ret = 0;
2047 int ch, r; 1970 int dma_irq;
2048 1971 char irq_name[4];
2049 if (cpu_class_is_omap1()) { 1972 int irq_rel;
2050 base = OMAP1_DMA_BASE; 1973
2051 dma_lch_count = OMAP1_LOGICAL_DMA_CH_COUNT; 1974 p = pdev->dev.platform_data;
2052 } else if (cpu_is_omap24xx()) { 1975 if (!p) {
2053 base = OMAP24XX_DMA4_BASE; 1976 dev_err(&pdev->dev, "%s: System DMA initialized without"
2054 dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT; 1977 "platform data\n", __func__);
2055 } else if (cpu_is_omap34xx()) { 1978 return -EINVAL;
2056 base = OMAP34XX_DMA4_BASE;
2057 dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
2058 } else if (cpu_is_omap44xx()) {
2059 base = OMAP44XX_DMA4_BASE;
2060 dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
2061 } else {
2062 pr_err("DMA init failed for unsupported omap\n");
2063 return -ENODEV;
2064 } 1979 }
2065 1980
2066 omap_dma_base = ioremap(base, SZ_4K); 1981 d = p->dma_attr;
2067 BUG_ON(!omap_dma_base); 1982 errata = p->errata;
2068 1983
2069 if (cpu_class_is_omap2() && omap_dma_reserve_channels 1984 if ((d->dev_caps & RESERVE_CHANNEL) && omap_dma_reserve_channels
2070 && (omap_dma_reserve_channels <= dma_lch_count)) 1985 && (omap_dma_reserve_channels <= dma_lch_count))
2071 dma_lch_count = omap_dma_reserve_channels; 1986 d->lch_count = omap_dma_reserve_channels;
2072 1987
2073 dma_chan = kzalloc(sizeof(struct omap_dma_lch) * dma_lch_count, 1988 dma_lch_count = d->lch_count;
2074 GFP_KERNEL); 1989 dma_chan_count = dma_lch_count;
2075 if (!dma_chan) { 1990 dma_chan = d->chan;
2076 r = -ENOMEM; 1991 enable_1510_mode = d->dev_caps & ENABLE_1510_MODE;
2077 goto out_unmap;
2078 }
2079 1992
2080 if (cpu_class_is_omap2()) { 1993 if (cpu_class_is_omap2()) {
2081 dma_linked_lch = kzalloc(sizeof(struct dma_link_info) * 1994 dma_linked_lch = kzalloc(sizeof(struct dma_link_info) *
2082 dma_lch_count, GFP_KERNEL); 1995 dma_lch_count, GFP_KERNEL);
2083 if (!dma_linked_lch) { 1996 if (!dma_linked_lch) {
2084 r = -ENOMEM; 1997 ret = -ENOMEM;
2085 goto out_free; 1998 goto exit_dma_lch_fail;
2086 } 1999 }
2087 } 2000 }
2088 2001
2089 if (cpu_is_omap15xx()) {
2090 printk(KERN_INFO "DMA support for OMAP15xx initialized\n");
2091 dma_chan_count = 9;
2092 enable_1510_mode = 1;
2093 } else if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
2094 printk(KERN_INFO "OMAP DMA hardware version %d\n",
2095 dma_read(HW_ID));
2096 printk(KERN_INFO "DMA capabilities: %08x:%08x:%04x:%04x:%04x\n",
2097 (dma_read(CAPS_0_U) << 16) |
2098 dma_read(CAPS_0_L),
2099 (dma_read(CAPS_1_U) << 16) |
2100 dma_read(CAPS_1_L),
2101 dma_read(CAPS_2), dma_read(CAPS_3),
2102 dma_read(CAPS_4));
2103 if (!enable_1510_mode) {
2104 u16 w;
2105
2106 /* Disable OMAP 3.0/3.1 compatibility mode. */
2107 w = dma_read(GSCR);
2108 w |= 1 << 3;
2109 dma_write(w, GSCR);
2110 dma_chan_count = 16;
2111 } else
2112 dma_chan_count = 9;
2113 } else if (cpu_class_is_omap2()) {
2114 u8 revision = dma_read(REVISION) & 0xff;
2115 printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n",
2116 revision >> 4, revision & 0xf);
2117 dma_chan_count = dma_lch_count;
2118 } else {
2119 dma_chan_count = 0;
2120 return 0;
2121 }
2122
2123 spin_lock_init(&dma_chan_lock); 2002 spin_lock_init(&dma_chan_lock);
2124
2125 for (ch = 0; ch < dma_chan_count; ch++) { 2003 for (ch = 0; ch < dma_chan_count; ch++) {
2126 omap_clear_dma(ch); 2004 omap_clear_dma(ch);
2127 if (cpu_class_is_omap2()) 2005 if (cpu_class_is_omap2())
@@ -2138,20 +2016,23 @@ static int __init omap_init_dma(void)
2138 * request_irq() doesn't like dev_id (ie. ch) being 2016 * request_irq() doesn't like dev_id (ie. ch) being
2139 * zero, so we have to kludge around this. 2017 * zero, so we have to kludge around this.
2140 */ 2018 */
2141 r = request_irq(omap1_dma_irq[ch], 2019 sprintf(&irq_name[0], "%d", ch);
2020 dma_irq = platform_get_irq_byname(pdev, irq_name);
2021
2022 if (dma_irq < 0) {
2023 ret = dma_irq;
2024 goto exit_dma_irq_fail;
2025 }
2026
2027 /* INT_DMA_LCD is handled in lcd_dma.c */
2028 if (dma_irq == INT_DMA_LCD)
2029 continue;
2030
2031 ret = request_irq(dma_irq,
2142 omap1_dma_irq_handler, 0, "DMA", 2032 omap1_dma_irq_handler, 0, "DMA",
2143 (void *) (ch + 1)); 2033 (void *) (ch + 1));
2144 if (r != 0) { 2034 if (ret != 0)
2145 int i; 2035 goto exit_dma_irq_fail;
2146
2147 printk(KERN_ERR "unable to request IRQ %d "
2148 "for DMA (error %d)\n",
2149 omap1_dma_irq[ch], r);
2150 for (i = 0; i < ch; i++)
2151 free_irq(omap1_dma_irq[i],
2152 (void *) (i + 1));
2153 goto out_free;
2154 }
2155 } 2036 }
2156 } 2037 }
2157 2038
@@ -2160,46 +2041,91 @@ static int __init omap_init_dma(void)
2160 DMA_DEFAULT_FIFO_DEPTH, 0); 2041 DMA_DEFAULT_FIFO_DEPTH, 0);
2161 2042
2162 if (cpu_class_is_omap2()) { 2043 if (cpu_class_is_omap2()) {
2163 int irq; 2044 strcpy(irq_name, "0");
2164 if (cpu_is_omap44xx()) 2045 dma_irq = platform_get_irq_byname(pdev, irq_name);
2165 irq = OMAP44XX_IRQ_SDMA_0; 2046 if (dma_irq < 0) {
2166 else 2047 dev_err(&pdev->dev, "failed: request IRQ %d", dma_irq);
2167 irq = INT_24XX_SDMA_IRQ0; 2048 goto exit_dma_lch_fail;
2168 setup_irq(irq, &omap24xx_dma_irq); 2049 }
2169 } 2050 ret = setup_irq(dma_irq, &omap24xx_dma_irq);
2170 2051 if (ret) {
2171 if (cpu_is_omap34xx() || cpu_is_omap44xx()) { 2052 dev_err(&pdev->dev, "set_up failed for IRQ %d"
2172 /* Enable smartidle idlemodes and autoidle */ 2053 "for DMA (error %d)\n", dma_irq, ret);
2173 u32 v = dma_read(OCP_SYSCONFIG); 2054 goto exit_dma_lch_fail;
2174 v &= ~(DMA_SYSCONFIG_MIDLEMODE_MASK |
2175 DMA_SYSCONFIG_SIDLEMODE_MASK |
2176 DMA_SYSCONFIG_AUTOIDLE);
2177 v |= (DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_SMARTIDLE) |
2178 DMA_SYSCONFIG_SIDLEMODE(DMA_IDLEMODE_SMARTIDLE) |
2179 DMA_SYSCONFIG_AUTOIDLE);
2180 dma_write(v , OCP_SYSCONFIG);
2181 /* reserve dma channels 0 and 1 in high security devices */
2182 if (cpu_is_omap34xx() &&
2183 (omap_type() != OMAP2_DEVICE_TYPE_GP)) {
2184 printk(KERN_INFO "Reserving DMA channels 0 and 1 for "
2185 "HS ROM code\n");
2186 dma_chan[0].dev_id = 0;
2187 dma_chan[1].dev_id = 1;
2188 } 2055 }
2189 } 2056 }
2190 2057
2058 /* reserve dma channels 0 and 1 in high security devices */
2059 if (cpu_is_omap34xx() &&
2060 (omap_type() != OMAP2_DEVICE_TYPE_GP)) {
2061 printk(KERN_INFO "Reserving DMA channels 0 and 1 for "
2062 "HS ROM code\n");
2063 dma_chan[0].dev_id = 0;
2064 dma_chan[1].dev_id = 1;
2065 }
2066 p->show_dma_caps();
2191 return 0; 2067 return 0;
2192 2068
2193out_free: 2069exit_dma_irq_fail:
2070 dev_err(&pdev->dev, "unable to request IRQ %d"
2071 "for DMA (error %d)\n", dma_irq, ret);
2072 for (irq_rel = 0; irq_rel < ch; irq_rel++) {
2073 dma_irq = platform_get_irq(pdev, irq_rel);
2074 free_irq(dma_irq, (void *)(irq_rel + 1));
2075 }
2076
2077exit_dma_lch_fail:
2078 kfree(p);
2079 kfree(d);
2194 kfree(dma_chan); 2080 kfree(dma_chan);
2081 return ret;
2082}
2195 2083
2196out_unmap: 2084static int __devexit omap_system_dma_remove(struct platform_device *pdev)
2197 iounmap(omap_dma_base); 2085{
2086 int dma_irq;
2198 2087
2199 return r; 2088 if (cpu_class_is_omap2()) {
2089 char irq_name[4];
2090 strcpy(irq_name, "0");
2091 dma_irq = platform_get_irq_byname(pdev, irq_name);
2092 remove_irq(dma_irq, &omap24xx_dma_irq);
2093 } else {
2094 int irq_rel = 0;
2095 for ( ; irq_rel < dma_chan_count; irq_rel++) {
2096 dma_irq = platform_get_irq(pdev, irq_rel);
2097 free_irq(dma_irq, (void *)(irq_rel + 1));
2098 }
2099 }
2100 kfree(p);
2101 kfree(d);
2102 kfree(dma_chan);
2103 return 0;
2104}
2105
2106static struct platform_driver omap_system_dma_driver = {
2107 .probe = omap_system_dma_probe,
2108 .remove = omap_system_dma_remove,
2109 .driver = {
2110 .name = "omap_dma_system"
2111 },
2112};
2113
2114static int __init omap_system_dma_init(void)
2115{
2116 return platform_driver_register(&omap_system_dma_driver);
2117}
2118arch_initcall(omap_system_dma_init);
2119
2120static void __exit omap_system_dma_exit(void)
2121{
2122 platform_driver_unregister(&omap_system_dma_driver);
2200} 2123}
2201 2124
2202arch_initcall(omap_init_dma); 2125MODULE_DESCRIPTION("OMAP SYSTEM DMA DRIVER");
2126MODULE_LICENSE("GPL");
2127MODULE_ALIAS("platform:" DRIVER_NAME);
2128MODULE_AUTHOR("Texas Instruments Inc");
2203 2129
2204/* 2130/*
2205 * Reserve the omap SDMA channels using cmdline bootarg 2131 * Reserve the omap SDMA channels using cmdline bootarg