Diffstat (limited to 'arch/arm/plat-omap/dma.c')
-rw-r--r--  arch/arm/plat-omap/dma.c  695
1 file changed, 291 insertions(+), 404 deletions(-)
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index 2c2826571d45..c4b2b478b1a5 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -15,6 +15,10 @@
15 * 15 *
16 * Support functions for the OMAP internal DMA channels. 16 * Support functions for the OMAP internal DMA channels.
17 * 17 *
18 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
19 * Converted DMA library into DMA platform driver.
20 * - G, Manjunath Kondaiah <manjugk@ti.com>
21 *
18 * This program is free software; you can redistribute it and/or modify 22 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License version 2 as 23 * it under the terms of the GNU General Public License version 2 as
20 * published by the Free Software Foundation. 24 * published by the Free Software Foundation.
@@ -53,7 +57,11 @@ enum { DMA_CHAIN_STARTED, DMA_CHAIN_NOTSTARTED };
53 57
54#define OMAP_FUNC_MUX_ARM_BASE (0xfffe1000 + 0xec) 58#define OMAP_FUNC_MUX_ARM_BASE (0xfffe1000 + 0xec)
55 59
60static struct omap_system_dma_plat_info *p;
61static struct omap_dma_dev_attr *d;
62
56static int enable_1510_mode; 63static int enable_1510_mode;
64static u32 errata;
57 65
58static struct omap_dma_global_context_registers { 66static struct omap_dma_global_context_registers {
59 u32 dma_irqenable_l0; 67 u32 dma_irqenable_l0;
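
The two new statics introduced above carry the whole point of this conversion: "p" holds the SoC-specific register accessors and channel-clearing hooks handed in as platform data, and "d" holds the per-device channel attributes. A rough sketch of both structures, inferred only from how they are dereferenced later in this diff (the authoritative definitions belong to the plat/dma.h changes elsewhere in this series, so field order and types here are assumptions):

        struct omap_dma_dev_attr {
                u32 dev_caps;                      /* e.g. ENABLE_1510_MODE, RESERVE_CHANNEL */
                u16 lch_count;                     /* number of logical channels */
                struct omap_dma_lch *chan;         /* per-channel state array */
        };

        struct omap_system_dma_plat_info {
                struct omap_dma_dev_attr *dma_attr;
                u32 errata;                                   /* DMA_ERRATA_* bitmask */
                void (*show_dma_caps)(void);
                void (*clear_lch_regs)(int lch);              /* OMAP1 only */
                void (*clear_dma)(int lch);
                void (*dma_write)(u32 val, int reg, int lch);
                u32 (*dma_read)(int reg, int lch);
        };
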
@@ -61,27 +69,6 @@ static struct omap_dma_global_context_registers {
61 u32 dma_gcr; 69 u32 dma_gcr;
62} omap_dma_global_context; 70} omap_dma_global_context;
63 71
64struct omap_dma_lch {
65 int next_lch;
66 int dev_id;
67 u16 saved_csr;
68 u16 enabled_irqs;
69 const char *dev_name;
70 void (*callback)(int lch, u16 ch_status, void *data);
71 void *data;
72
73#ifndef CONFIG_ARCH_OMAP1
74 /* required for Dynamic chaining */
75 int prev_linked_ch;
76 int next_linked_ch;
77 int state;
78 int chain_id;
79
80 int status;
81#endif
82 long flags;
83};
84
85struct dma_link_info { 72struct dma_link_info {
86 int *linked_dmach_q; 73 int *linked_dmach_q;
87 int no_of_lchs_linked; 74 int no_of_lchs_linked;
@@ -137,15 +124,6 @@ static int omap_dma_reserve_channels;
137 124
138static spinlock_t dma_chan_lock; 125static spinlock_t dma_chan_lock;
139static struct omap_dma_lch *dma_chan; 126static struct omap_dma_lch *dma_chan;
140static void __iomem *omap_dma_base;
141
142static const u8 omap1_dma_irq[OMAP1_LOGICAL_DMA_CH_COUNT] = {
143 INT_DMA_CH0_6, INT_DMA_CH1_7, INT_DMA_CH2_8, INT_DMA_CH3,
144 INT_DMA_CH4, INT_DMA_CH5, INT_1610_DMA_CH6, INT_1610_DMA_CH7,
145 INT_1610_DMA_CH8, INT_1610_DMA_CH9, INT_1610_DMA_CH10,
146 INT_1610_DMA_CH11, INT_1610_DMA_CH12, INT_1610_DMA_CH13,
147 INT_1610_DMA_CH14, INT_1610_DMA_CH15, INT_DMA_LCD
148};
149 127
150static inline void disable_lnk(int lch); 128static inline void disable_lnk(int lch);
151static void omap_disable_channel_irq(int lch); 129static void omap_disable_channel_irq(int lch);
@@ -154,24 +132,6 @@ static inline void omap_enable_channel_irq(int lch);
154#define REVISIT_24XX() printk(KERN_ERR "FIXME: no %s on 24xx\n", \ 132#define REVISIT_24XX() printk(KERN_ERR "FIXME: no %s on 24xx\n", \
155 __func__); 133 __func__);
156 134
157#define dma_read(reg) \
158({ \
159 u32 __val; \
160 if (cpu_class_is_omap1()) \
161 __val = __raw_readw(omap_dma_base + OMAP1_DMA_##reg); \
162 else \
163 __val = __raw_readl(omap_dma_base + OMAP_DMA4_##reg); \
164 __val; \
165})
166
167#define dma_write(val, reg) \
168({ \
169 if (cpu_class_is_omap1()) \
170 __raw_writew((u16)(val), omap_dma_base + OMAP1_DMA_##reg); \
171 else \
172 __raw_writel((val), omap_dma_base + OMAP_DMA4_##reg); \
173})
174
175#ifdef CONFIG_ARCH_OMAP15XX 135#ifdef CONFIG_ARCH_OMAP15XX
176/* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */ 136/* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */
177int omap_dma_in_1510_mode(void) 137int omap_dma_in_1510_mode(void)
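
With the dma_read()/dma_write() macros deleted above, every register access now goes through p->dma_read()/p->dma_write(), so the cpu_class_is_omap1() decision is made once at init time instead of on every access. A minimal sketch of what an SoC-side pair could look like; dma_base, reg_offset[] and lch_stride are stand-ins for whatever the mach-omap1/mach-omap2 code added elsewhere in this series actually defines:

        #include <linux/io.h>

        static void __iomem *dma_base;          /* ioremap()ed by the SoC setup code */
        static const u16 reg_offset[64];        /* placeholder: register id -> byte offset */
        static unsigned int lch_stride = 0x60;  /* placeholder: spacing of channel register banks */

        /* Per-channel registers; global ones (GCR, IRQENABLE_L0, ...) are read with lch = 0 */
        static u32 soc_dma_read(int reg, int lch)
        {
                return __raw_readl(dma_base + reg_offset[reg] + lch_stride * lch);
        }

        static void soc_dma_write(u32 val, int reg, int lch)
        {
                __raw_writel(val, dma_base + reg_offset[reg] + lch_stride * lch);
        }
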
@@ -206,16 +166,6 @@ static inline void set_gdma_dev(int req, int dev)
206#define set_gdma_dev(req, dev) do {} while (0) 166#define set_gdma_dev(req, dev) do {} while (0)
207#endif 167#endif
208 168
209/* Omap1 only */
210static void clear_lch_regs(int lch)
211{
212 int i;
213 void __iomem *lch_base = omap_dma_base + OMAP1_DMA_CH_BASE(lch);
214
215 for (i = 0; i < 0x2c; i += 2)
216 __raw_writew(0, lch_base + i);
217}
218
219void omap_set_dma_priority(int lch, int dst_port, int priority) 169void omap_set_dma_priority(int lch, int dst_port, int priority)
220{ 170{
221 unsigned long reg; 171 unsigned long reg;
@@ -248,12 +198,12 @@ void omap_set_dma_priority(int lch, int dst_port, int priority)
248 if (cpu_class_is_omap2()) { 198 if (cpu_class_is_omap2()) {
249 u32 ccr; 199 u32 ccr;
250 200
251 ccr = dma_read(CCR(lch)); 201 ccr = p->dma_read(CCR, lch);
252 if (priority) 202 if (priority)
253 ccr |= (1 << 6); 203 ccr |= (1 << 6);
254 else 204 else
255 ccr &= ~(1 << 6); 205 ccr &= ~(1 << 6);
256 dma_write(ccr, CCR(lch)); 206 p->dma_write(ccr, CCR, lch);
257 } 207 }
258} 208}
259EXPORT_SYMBOL(omap_set_dma_priority); 209EXPORT_SYMBOL(omap_set_dma_priority);
@@ -264,31 +214,31 @@ void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
264{ 214{
265 u32 l; 215 u32 l;
266 216
267 l = dma_read(CSDP(lch)); 217 l = p->dma_read(CSDP, lch);
268 l &= ~0x03; 218 l &= ~0x03;
269 l |= data_type; 219 l |= data_type;
270 dma_write(l, CSDP(lch)); 220 p->dma_write(l, CSDP, lch);
271 221
272 if (cpu_class_is_omap1()) { 222 if (cpu_class_is_omap1()) {
273 u16 ccr; 223 u16 ccr;
274 224
275 ccr = dma_read(CCR(lch)); 225 ccr = p->dma_read(CCR, lch);
276 ccr &= ~(1 << 5); 226 ccr &= ~(1 << 5);
277 if (sync_mode == OMAP_DMA_SYNC_FRAME) 227 if (sync_mode == OMAP_DMA_SYNC_FRAME)
278 ccr |= 1 << 5; 228 ccr |= 1 << 5;
279 dma_write(ccr, CCR(lch)); 229 p->dma_write(ccr, CCR, lch);
280 230
281 ccr = dma_read(CCR2(lch)); 231 ccr = p->dma_read(CCR2, lch);
282 ccr &= ~(1 << 2); 232 ccr &= ~(1 << 2);
283 if (sync_mode == OMAP_DMA_SYNC_BLOCK) 233 if (sync_mode == OMAP_DMA_SYNC_BLOCK)
284 ccr |= 1 << 2; 234 ccr |= 1 << 2;
285 dma_write(ccr, CCR2(lch)); 235 p->dma_write(ccr, CCR2, lch);
286 } 236 }
287 237
288 if (cpu_class_is_omap2() && dma_trigger) { 238 if (cpu_class_is_omap2() && dma_trigger) {
289 u32 val; 239 u32 val;
290 240
291 val = dma_read(CCR(lch)); 241 val = p->dma_read(CCR, lch);
292 242
293 /* DMA_SYNCHRO_CONTROL_UPPER depends on the channel number */ 243 /* DMA_SYNCHRO_CONTROL_UPPER depends on the channel number */
294 val &= ~((1 << 23) | (3 << 19) | 0x1f); 244 val &= ~((1 << 23) | (3 << 19) | 0x1f);
@@ -313,11 +263,11 @@ void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
313 } else { 263 } else {
314 val &= ~(1 << 24); /* dest synch */ 264 val &= ~(1 << 24); /* dest synch */
315 } 265 }
316 dma_write(val, CCR(lch)); 266 p->dma_write(val, CCR, lch);
317 } 267 }
318 268
319 dma_write(elem_count, CEN(lch)); 269 p->dma_write(elem_count, CEN, lch);
320 dma_write(frame_count, CFN(lch)); 270 p->dma_write(frame_count, CFN, lch);
321} 271}
322EXPORT_SYMBOL(omap_set_dma_transfer_params); 272EXPORT_SYMBOL(omap_set_dma_transfer_params);
323 273
@@ -328,7 +278,7 @@ void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color)
328 if (cpu_class_is_omap1()) { 278 if (cpu_class_is_omap1()) {
329 u16 w; 279 u16 w;
330 280
331 w = dma_read(CCR2(lch)); 281 w = p->dma_read(CCR2, lch);
332 w &= ~0x03; 282 w &= ~0x03;
333 283
334 switch (mode) { 284 switch (mode) {
@@ -343,23 +293,22 @@ void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color)
343 default: 293 default:
344 BUG(); 294 BUG();
345 } 295 }
346 dma_write(w, CCR2(lch)); 296 p->dma_write(w, CCR2, lch);
347 297
348 w = dma_read(LCH_CTRL(lch)); 298 w = p->dma_read(LCH_CTRL, lch);
349 w &= ~0x0f; 299 w &= ~0x0f;
350 /* Default is channel type 2D */ 300 /* Default is channel type 2D */
351 if (mode) { 301 if (mode) {
352 dma_write((u16)color, COLOR_L(lch)); 302 p->dma_write(color, COLOR, lch);
353 dma_write((u16)(color >> 16), COLOR_U(lch));
354 w |= 1; /* Channel type G */ 303 w |= 1; /* Channel type G */
355 } 304 }
356 dma_write(w, LCH_CTRL(lch)); 305 p->dma_write(w, LCH_CTRL, lch);
357 } 306 }
358 307
359 if (cpu_class_is_omap2()) { 308 if (cpu_class_is_omap2()) {
360 u32 val; 309 u32 val;
361 310
362 val = dma_read(CCR(lch)); 311 val = p->dma_read(CCR, lch);
363 val &= ~((1 << 17) | (1 << 16)); 312 val &= ~((1 << 17) | (1 << 16));
364 313
365 switch (mode) { 314 switch (mode) {
@@ -374,10 +323,10 @@ void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color)
374 default: 323 default:
375 BUG(); 324 BUG();
376 } 325 }
377 dma_write(val, CCR(lch)); 326 p->dma_write(val, CCR, lch);
378 327
379 color &= 0xffffff; 328 color &= 0xffffff;
380 dma_write(color, COLOR(lch)); 329 p->dma_write(color, COLOR, lch);
381 } 330 }
382} 331}
383EXPORT_SYMBOL(omap_set_dma_color_mode); 332EXPORT_SYMBOL(omap_set_dma_color_mode);
@@ -387,10 +336,10 @@ void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode)
387 if (cpu_class_is_omap2()) { 336 if (cpu_class_is_omap2()) {
388 u32 csdp; 337 u32 csdp;
389 338
390 csdp = dma_read(CSDP(lch)); 339 csdp = p->dma_read(CSDP, lch);
391 csdp &= ~(0x3 << 16); 340 csdp &= ~(0x3 << 16);
392 csdp |= (mode << 16); 341 csdp |= (mode << 16);
393 dma_write(csdp, CSDP(lch)); 342 p->dma_write(csdp, CSDP, lch);
394 } 343 }
395} 344}
396EXPORT_SYMBOL(omap_set_dma_write_mode); 345EXPORT_SYMBOL(omap_set_dma_write_mode);
@@ -400,10 +349,10 @@ void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode)
400 if (cpu_class_is_omap1() && !cpu_is_omap15xx()) { 349 if (cpu_class_is_omap1() && !cpu_is_omap15xx()) {
401 u32 l; 350 u32 l;
402 351
403 l = dma_read(LCH_CTRL(lch)); 352 l = p->dma_read(LCH_CTRL, lch);
404 l &= ~0x7; 353 l &= ~0x7;
405 l |= mode; 354 l |= mode;
406 dma_write(l, LCH_CTRL(lch)); 355 p->dma_write(l, LCH_CTRL, lch);
407 } 356 }
408} 357}
409EXPORT_SYMBOL(omap_set_dma_channel_mode); 358EXPORT_SYMBOL(omap_set_dma_channel_mode);
@@ -418,27 +367,21 @@ void omap_set_dma_src_params(int lch, int src_port, int src_amode,
418 if (cpu_class_is_omap1()) { 367 if (cpu_class_is_omap1()) {
419 u16 w; 368 u16 w;
420 369
421 w = dma_read(CSDP(lch)); 370 w = p->dma_read(CSDP, lch);
422 w &= ~(0x1f << 2); 371 w &= ~(0x1f << 2);
423 w |= src_port << 2; 372 w |= src_port << 2;
424 dma_write(w, CSDP(lch)); 373 p->dma_write(w, CSDP, lch);
425 } 374 }
426 375
427 l = dma_read(CCR(lch)); 376 l = p->dma_read(CCR, lch);
428 l &= ~(0x03 << 12); 377 l &= ~(0x03 << 12);
429 l |= src_amode << 12; 378 l |= src_amode << 12;
430 dma_write(l, CCR(lch)); 379 p->dma_write(l, CCR, lch);
431
432 if (cpu_class_is_omap1()) {
433 dma_write(src_start >> 16, CSSA_U(lch));
434 dma_write((u16)src_start, CSSA_L(lch));
435 }
436 380
437 if (cpu_class_is_omap2()) 381 p->dma_write(src_start, CSSA, lch);
438 dma_write(src_start, CSSA(lch));
439 382
440 dma_write(src_ei, CSEI(lch)); 383 p->dma_write(src_ei, CSEI, lch);
441 dma_write(src_fi, CSFI(lch)); 384 p->dma_write(src_fi, CSFI, lch);
442} 385}
443EXPORT_SYMBOL(omap_set_dma_src_params); 386EXPORT_SYMBOL(omap_set_dma_src_params);
444 387
@@ -466,8 +409,8 @@ void omap_set_dma_src_index(int lch, int eidx, int fidx)
466 if (cpu_class_is_omap2()) 409 if (cpu_class_is_omap2())
467 return; 410 return;
468 411
469 dma_write(eidx, CSEI(lch)); 412 p->dma_write(eidx, CSEI, lch);
470 dma_write(fidx, CSFI(lch)); 413 p->dma_write(fidx, CSFI, lch);
471} 414}
472EXPORT_SYMBOL(omap_set_dma_src_index); 415EXPORT_SYMBOL(omap_set_dma_src_index);
473 416
@@ -475,11 +418,11 @@ void omap_set_dma_src_data_pack(int lch, int enable)
475{ 418{
476 u32 l; 419 u32 l;
477 420
478 l = dma_read(CSDP(lch)); 421 l = p->dma_read(CSDP, lch);
479 l &= ~(1 << 6); 422 l &= ~(1 << 6);
480 if (enable) 423 if (enable)
481 l |= (1 << 6); 424 l |= (1 << 6);
482 dma_write(l, CSDP(lch)); 425 p->dma_write(l, CSDP, lch);
483} 426}
484EXPORT_SYMBOL(omap_set_dma_src_data_pack); 427EXPORT_SYMBOL(omap_set_dma_src_data_pack);
485 428
@@ -488,7 +431,7 @@ void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
488 unsigned int burst = 0; 431 unsigned int burst = 0;
489 u32 l; 432 u32 l;
490 433
491 l = dma_read(CSDP(lch)); 434 l = p->dma_read(CSDP, lch);
492 l &= ~(0x03 << 7); 435 l &= ~(0x03 << 7);
493 436
494 switch (burst_mode) { 437 switch (burst_mode) {
@@ -524,7 +467,7 @@ void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
524 } 467 }
525 468
526 l |= (burst << 7); 469 l |= (burst << 7);
527 dma_write(l, CSDP(lch)); 470 p->dma_write(l, CSDP, lch);
528} 471}
529EXPORT_SYMBOL(omap_set_dma_src_burst_mode); 472EXPORT_SYMBOL(omap_set_dma_src_burst_mode);
530 473
@@ -536,27 +479,21 @@ void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
536 u32 l; 479 u32 l;
537 480
538 if (cpu_class_is_omap1()) { 481 if (cpu_class_is_omap1()) {
539 l = dma_read(CSDP(lch)); 482 l = p->dma_read(CSDP, lch);
540 l &= ~(0x1f << 9); 483 l &= ~(0x1f << 9);
541 l |= dest_port << 9; 484 l |= dest_port << 9;
542 dma_write(l, CSDP(lch)); 485 p->dma_write(l, CSDP, lch);
543 } 486 }
544 487
545 l = dma_read(CCR(lch)); 488 l = p->dma_read(CCR, lch);
546 l &= ~(0x03 << 14); 489 l &= ~(0x03 << 14);
547 l |= dest_amode << 14; 490 l |= dest_amode << 14;
548 dma_write(l, CCR(lch)); 491 p->dma_write(l, CCR, lch);
549
550 if (cpu_class_is_omap1()) {
551 dma_write(dest_start >> 16, CDSA_U(lch));
552 dma_write(dest_start, CDSA_L(lch));
553 }
554 492
555 if (cpu_class_is_omap2()) 493 p->dma_write(dest_start, CDSA, lch);
556 dma_write(dest_start, CDSA(lch));
557 494
558 dma_write(dst_ei, CDEI(lch)); 495 p->dma_write(dst_ei, CDEI, lch);
559 dma_write(dst_fi, CDFI(lch)); 496 p->dma_write(dst_fi, CDFI, lch);
560} 497}
561EXPORT_SYMBOL(omap_set_dma_dest_params); 498EXPORT_SYMBOL(omap_set_dma_dest_params);
562 499
@@ -565,8 +502,8 @@ void omap_set_dma_dest_index(int lch, int eidx, int fidx)
565 if (cpu_class_is_omap2()) 502 if (cpu_class_is_omap2())
566 return; 503 return;
567 504
568 dma_write(eidx, CDEI(lch)); 505 p->dma_write(eidx, CDEI, lch);
569 dma_write(fidx, CDFI(lch)); 506 p->dma_write(fidx, CDFI, lch);
570} 507}
571EXPORT_SYMBOL(omap_set_dma_dest_index); 508EXPORT_SYMBOL(omap_set_dma_dest_index);
572 509
@@ -574,11 +511,11 @@ void omap_set_dma_dest_data_pack(int lch, int enable)
574{ 511{
575 u32 l; 512 u32 l;
576 513
577 l = dma_read(CSDP(lch)); 514 l = p->dma_read(CSDP, lch);
578 l &= ~(1 << 13); 515 l &= ~(1 << 13);
579 if (enable) 516 if (enable)
580 l |= 1 << 13; 517 l |= 1 << 13;
581 dma_write(l, CSDP(lch)); 518 p->dma_write(l, CSDP, lch);
582} 519}
583EXPORT_SYMBOL(omap_set_dma_dest_data_pack); 520EXPORT_SYMBOL(omap_set_dma_dest_data_pack);
584 521
@@ -587,7 +524,7 @@ void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
587 unsigned int burst = 0; 524 unsigned int burst = 0;
588 u32 l; 525 u32 l;
589 526
590 l = dma_read(CSDP(lch)); 527 l = p->dma_read(CSDP, lch);
591 l &= ~(0x03 << 14); 528 l &= ~(0x03 << 14);
592 529
593 switch (burst_mode) { 530 switch (burst_mode) {
@@ -620,7 +557,7 @@ void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
620 return; 557 return;
621 } 558 }
622 l |= (burst << 14); 559 l |= (burst << 14);
623 dma_write(l, CSDP(lch)); 560 p->dma_write(l, CSDP, lch);
624} 561}
625EXPORT_SYMBOL(omap_set_dma_dest_burst_mode); 562EXPORT_SYMBOL(omap_set_dma_dest_burst_mode);
626 563
@@ -630,18 +567,18 @@ static inline void omap_enable_channel_irq(int lch)
630 567
631 /* Clear CSR */ 568 /* Clear CSR */
632 if (cpu_class_is_omap1()) 569 if (cpu_class_is_omap1())
633 status = dma_read(CSR(lch)); 570 status = p->dma_read(CSR, lch);
634 else if (cpu_class_is_omap2()) 571 else if (cpu_class_is_omap2())
635 dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(lch)); 572 p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);
636 573
637 /* Enable some nice interrupts. */ 574 /* Enable some nice interrupts. */
638 dma_write(dma_chan[lch].enabled_irqs, CICR(lch)); 575 p->dma_write(dma_chan[lch].enabled_irqs, CICR, lch);
639} 576}
640 577
641static void omap_disable_channel_irq(int lch) 578static void omap_disable_channel_irq(int lch)
642{ 579{
643 if (cpu_class_is_omap2()) 580 if (cpu_class_is_omap2())
644 dma_write(0, CICR(lch)); 581 p->dma_write(0, CICR, lch);
645} 582}
646 583
647void omap_enable_dma_irq(int lch, u16 bits) 584void omap_enable_dma_irq(int lch, u16 bits)
@@ -660,7 +597,7 @@ static inline void enable_lnk(int lch)
660{ 597{
661 u32 l; 598 u32 l;
662 599
663 l = dma_read(CLNK_CTRL(lch)); 600 l = p->dma_read(CLNK_CTRL, lch);
664 601
665 if (cpu_class_is_omap1()) 602 if (cpu_class_is_omap1())
666 l &= ~(1 << 14); 603 l &= ~(1 << 14);
@@ -675,18 +612,18 @@ static inline void enable_lnk(int lch)
675 l = dma_chan[lch].next_linked_ch | (1 << 15); 612 l = dma_chan[lch].next_linked_ch | (1 << 15);
676#endif 613#endif
677 614
678 dma_write(l, CLNK_CTRL(lch)); 615 p->dma_write(l, CLNK_CTRL, lch);
679} 616}
680 617
681static inline void disable_lnk(int lch) 618static inline void disable_lnk(int lch)
682{ 619{
683 u32 l; 620 u32 l;
684 621
685 l = dma_read(CLNK_CTRL(lch)); 622 l = p->dma_read(CLNK_CTRL, lch);
686 623
687 /* Disable interrupts */ 624 /* Disable interrupts */
688 if (cpu_class_is_omap1()) { 625 if (cpu_class_is_omap1()) {
689 dma_write(0, CICR(lch)); 626 p->dma_write(0, CICR, lch);
690 /* Set the STOP_LNK bit */ 627 /* Set the STOP_LNK bit */
691 l |= 1 << 14; 628 l |= 1 << 14;
692 } 629 }
@@ -697,7 +634,7 @@ static inline void disable_lnk(int lch)
697 l &= ~(1 << 15); 634 l &= ~(1 << 15);
698 } 635 }
699 636
700 dma_write(l, CLNK_CTRL(lch)); 637 p->dma_write(l, CLNK_CTRL, lch);
701 dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE; 638 dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
702} 639}
703 640
@@ -710,9 +647,9 @@ static inline void omap2_enable_irq_lch(int lch)
710 return; 647 return;
711 648
712 spin_lock_irqsave(&dma_chan_lock, flags); 649 spin_lock_irqsave(&dma_chan_lock, flags);
713 val = dma_read(IRQENABLE_L0); 650 val = p->dma_read(IRQENABLE_L0, lch);
714 val |= 1 << lch; 651 val |= 1 << lch;
715 dma_write(val, IRQENABLE_L0); 652 p->dma_write(val, IRQENABLE_L0, lch);
716 spin_unlock_irqrestore(&dma_chan_lock, flags); 653 spin_unlock_irqrestore(&dma_chan_lock, flags);
717} 654}
718 655
@@ -725,9 +662,9 @@ static inline void omap2_disable_irq_lch(int lch)
725 return; 662 return;
726 663
727 spin_lock_irqsave(&dma_chan_lock, flags); 664 spin_lock_irqsave(&dma_chan_lock, flags);
728 val = dma_read(IRQENABLE_L0); 665 val = p->dma_read(IRQENABLE_L0, lch);
729 val &= ~(1 << lch); 666 val &= ~(1 << lch);
730 dma_write(val, IRQENABLE_L0); 667 p->dma_write(val, IRQENABLE_L0, lch);
731 spin_unlock_irqrestore(&dma_chan_lock, flags); 668 spin_unlock_irqrestore(&dma_chan_lock, flags);
732} 669}
733 670
@@ -754,8 +691,8 @@ int omap_request_dma(int dev_id, const char *dev_name,
754 chan = dma_chan + free_ch; 691 chan = dma_chan + free_ch;
755 chan->dev_id = dev_id; 692 chan->dev_id = dev_id;
756 693
757 if (cpu_class_is_omap1()) 694 if (p->clear_lch_regs)
758 clear_lch_regs(free_ch); 695 p->clear_lch_regs(free_ch);
759 696
760 if (cpu_class_is_omap2()) 697 if (cpu_class_is_omap2())
761 omap_clear_dma(free_ch); 698 omap_clear_dma(free_ch);
@@ -792,17 +729,17 @@ int omap_request_dma(int dev_id, const char *dev_name,
792 * Disable the 1510 compatibility mode and set the sync device 729 * Disable the 1510 compatibility mode and set the sync device
793 * id. 730 * id.
794 */ 731 */
795 dma_write(dev_id | (1 << 10), CCR(free_ch)); 732 p->dma_write(dev_id | (1 << 10), CCR, free_ch);
796 } else if (cpu_is_omap7xx() || cpu_is_omap15xx()) { 733 } else if (cpu_is_omap7xx() || cpu_is_omap15xx()) {
797 dma_write(dev_id, CCR(free_ch)); 734 p->dma_write(dev_id, CCR, free_ch);
798 } 735 }
799 736
800 if (cpu_class_is_omap2()) { 737 if (cpu_class_is_omap2()) {
801 omap2_enable_irq_lch(free_ch); 738 omap2_enable_irq_lch(free_ch);
802 omap_enable_channel_irq(free_ch); 739 omap_enable_channel_irq(free_ch);
803 /* Clear the CSR register and IRQ status register */ 740 /* Clear the CSR register and IRQ status register */
804 dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(free_ch)); 741 p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, free_ch);
805 dma_write(1 << free_ch, IRQSTATUS_L0); 742 p->dma_write(1 << free_ch, IRQSTATUS_L0, 0);
806 } 743 }
807 744
808 *dma_ch_out = free_ch; 745 *dma_ch_out = free_ch;
@@ -823,23 +760,23 @@ void omap_free_dma(int lch)
823 760
824 if (cpu_class_is_omap1()) { 761 if (cpu_class_is_omap1()) {
825 /* Disable all DMA interrupts for the channel. */ 762 /* Disable all DMA interrupts for the channel. */
826 dma_write(0, CICR(lch)); 763 p->dma_write(0, CICR, lch);
827 /* Make sure the DMA transfer is stopped. */ 764 /* Make sure the DMA transfer is stopped. */
828 dma_write(0, CCR(lch)); 765 p->dma_write(0, CCR, lch);
829 } 766 }
830 767
831 if (cpu_class_is_omap2()) { 768 if (cpu_class_is_omap2()) {
832 omap2_disable_irq_lch(lch); 769 omap2_disable_irq_lch(lch);
833 770
834 /* Clear the CSR register and IRQ status register */ 771 /* Clear the CSR register and IRQ status register */
835 dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(lch)); 772 p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);
836 dma_write(1 << lch, IRQSTATUS_L0); 773 p->dma_write(1 << lch, IRQSTATUS_L0, lch);
837 774
838 /* Disable all DMA interrupts for the channel. */ 775 /* Disable all DMA interrupts for the channel. */
839 dma_write(0, CICR(lch)); 776 p->dma_write(0, CICR, lch);
840 777
841 /* Make sure the DMA transfer is stopped. */ 778 /* Make sure the DMA transfer is stopped. */
842 dma_write(0, CCR(lch)); 779 p->dma_write(0, CCR, lch);
843 omap_clear_dma(lch); 780 omap_clear_dma(lch);
844 } 781 }
845 782
@@ -880,7 +817,7 @@ omap_dma_set_global_params(int arb_rate, int max_fifo_depth, int tparams)
880 reg |= (0x3 & tparams) << 12; 817 reg |= (0x3 & tparams) << 12;
881 reg |= (arb_rate & 0xff) << 16; 818 reg |= (arb_rate & 0xff) << 16;
882 819
883 dma_write(reg, GCR); 820 p->dma_write(reg, GCR, 0);
884} 821}
885EXPORT_SYMBOL(omap_dma_set_global_params); 822EXPORT_SYMBOL(omap_dma_set_global_params);
886 823
@@ -903,14 +840,14 @@ omap_dma_set_prio_lch(int lch, unsigned char read_prio,
903 printk(KERN_ERR "Invalid channel id\n"); 840 printk(KERN_ERR "Invalid channel id\n");
904 return -EINVAL; 841 return -EINVAL;
905 } 842 }
906 l = dma_read(CCR(lch)); 843 l = p->dma_read(CCR, lch);
907 l &= ~((1 << 6) | (1 << 26)); 844 l &= ~((1 << 6) | (1 << 26));
908 if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx()) 845 if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx())
909 l |= ((read_prio & 0x1) << 6) | ((write_prio & 0x1) << 26); 846 l |= ((read_prio & 0x1) << 6) | ((write_prio & 0x1) << 26);
910 else 847 else
911 l |= ((read_prio & 0x1) << 6); 848 l |= ((read_prio & 0x1) << 6);
912 849
913 dma_write(l, CCR(lch)); 850 p->dma_write(l, CCR, lch);
914 851
915 return 0; 852 return 0;
916} 853}
@@ -925,25 +862,7 @@ void omap_clear_dma(int lch)
925 unsigned long flags; 862 unsigned long flags;
926 863
927 local_irq_save(flags); 864 local_irq_save(flags);
928 865 p->clear_dma(lch);
929 if (cpu_class_is_omap1()) {
930 u32 l;
931
932 l = dma_read(CCR(lch));
933 l &= ~OMAP_DMA_CCR_EN;
934 dma_write(l, CCR(lch));
935
936 /* Clear pending interrupts */
937 l = dma_read(CSR(lch));
938 }
939
940 if (cpu_class_is_omap2()) {
941 int i;
942 void __iomem *lch_base = omap_dma_base + OMAP_DMA4_CH_BASE(lch);
943 for (i = 0; i < 0x44; i += 4)
944 __raw_writel(0, lch_base + i);
945 }
946
947 local_irq_restore(flags); 866 local_irq_restore(flags);
948} 867}
949EXPORT_SYMBOL(omap_clear_dma); 868EXPORT_SYMBOL(omap_clear_dma);
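
omap_clear_dma() is reduced to calling the clear_dma hook; the OMAP1 and OMAP2 branches removed above are expected to reappear, essentially unchanged, behind p->clear_dma in the per-SoC dma code. The OMAP2 flavour would look roughly like the loop deleted here (dma_base and OMAP_DMA4_CH_BASE() assumed to move along with it):

        static void omap2_clear_dma(int lch)
        {
                int i;
                void __iomem *lch_base = dma_base + OMAP_DMA4_CH_BASE(lch);

                /* zero all per-channel registers, 0x44 bytes of them */
                for (i = 0; i < 0x44; i += 4)
                        __raw_writel(0, lch_base + i);
        }
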
@@ -957,13 +876,13 @@ void omap_start_dma(int lch)
957 * before starting dma transfer. 876 * before starting dma transfer.
958 */ 877 */
959 if (cpu_is_omap15xx()) 878 if (cpu_is_omap15xx())
960 dma_write(0, CPC(lch)); 879 p->dma_write(0, CPC, lch);
961 else 880 else
962 dma_write(0, CDAC(lch)); 881 p->dma_write(0, CDAC, lch);
963 882
964 if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) { 883 if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
965 int next_lch, cur_lch; 884 int next_lch, cur_lch;
966 char dma_chan_link_map[OMAP_DMA4_LOGICAL_DMA_CH_COUNT]; 885 char dma_chan_link_map[dma_lch_count];
967 886
968 dma_chan_link_map[lch] = 1; 887 dma_chan_link_map[lch] = 1;
969 /* Set the link register of the first channel */ 888 /* Set the link register of the first channel */
@@ -985,32 +904,18 @@ void omap_start_dma(int lch)
985 904
986 cur_lch = next_lch; 905 cur_lch = next_lch;
987 } while (next_lch != -1); 906 } while (next_lch != -1);
988 } else if (cpu_is_omap242x() || 907 } else if (IS_DMA_ERRATA(DMA_ERRATA_PARALLEL_CHANNELS))
989 (cpu_is_omap243x() && omap_type() <= OMAP2430_REV_ES1_0)) { 908 p->dma_write(lch, CLNK_CTRL, lch);
990
991 /* Errata: Need to write lch even if not using chaining */
992 dma_write(lch, CLNK_CTRL(lch));
993 }
994 909
995 omap_enable_channel_irq(lch); 910 omap_enable_channel_irq(lch);
996 911
997 l = dma_read(CCR(lch)); 912 l = p->dma_read(CCR, lch);
998
999 /*
1000 * Errata: Inter Frame DMA buffering issue (All OMAP2420 and
1001 * OMAP2430ES1.0): DMA will wrongly buffer elements if packing and
1002 * bursting is enabled. This might result in data gets stalled in
1003 * FIFO at the end of the block.
1004 * Workaround: DMA channels must have BUFFERING_DISABLED bit set to
1005 * guarantee no data will stay in the DMA FIFO in case inter frame
1006 * buffering occurs.
1007 */
1008 if (cpu_is_omap2420() ||
1009 (cpu_is_omap2430() && (omap_type() == OMAP2430_REV_ES1_0)))
1010 l |= OMAP_DMA_CCR_BUFFERING_DISABLE;
1011 913
914 if (IS_DMA_ERRATA(DMA_ERRATA_IFRAME_BUFFERING))
915 l |= OMAP_DMA_CCR_BUFFERING_DISABLE;
1012 l |= OMAP_DMA_CCR_EN; 916 l |= OMAP_DMA_CCR_EN;
1013 dma_write(l, CCR(lch)); 917
918 p->dma_write(l, CCR, lch);
1014 919
1015 dma_chan[lch].flags |= OMAP_DMA_ACTIVE; 920 dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
1016} 921}
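
The cpu_is_omap*()/omap_type() checks that used to guard each workaround become single-bit tests against the "errata" word filled in from platform data. The header added by this series presumably defines something along these lines; the bit positions are placeholders, but the descriptions match the workarounds visible in this diff:

        #define DMA_ERRATA_IFRAME_BUFFERING     BIT(0)  /* 2420/2430 ES1.0: disable inter-frame buffering */
        #define DMA_ERRATA_PARALLEL_CHANNELS    BIT(1)  /* write lch to CLNK_CTRL even without chaining */
        #define DMA_ERRATA_i378                 BIT(2)  /* channel not disabled after transaction error */
        #define DMA_ERRATA_i541                 BIT(3)  /* sDMA FIFO draining does not finish */
        #define DMA_ERRATA_i88                  BIT(4)  /* special programming model to disable DMA */
        #define DMA_ERRATA_3_3                  BIT(5)  /* CSAC/CDAC may read back 0 while disabling */
        #define DMA_ROMCODE_BUG                 BIT(6)  /* HS ROM code leaves IRQSTATUS_L0 bits 0/1 set */

        #define IS_DMA_ERRATA(id)               (errata & (id))

The SoC layer then sets the relevant bits once, for example DMA_ERRATA_IFRAME_BUFFERING on OMAP2420 and OMAP2430 ES1.0, instead of the driver re-deriving the condition at every call site.
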
@@ -1022,46 +927,46 @@ void omap_stop_dma(int lch)
1022 927
1023 /* Disable all interrupts on the channel */ 928 /* Disable all interrupts on the channel */
1024 if (cpu_class_is_omap1()) 929 if (cpu_class_is_omap1())
1025 dma_write(0, CICR(lch)); 930 p->dma_write(0, CICR, lch);
1026 931
1027 l = dma_read(CCR(lch)); 932 l = p->dma_read(CCR, lch);
1028 /* OMAP3 Errata i541: sDMA FIFO draining does not finish */ 933 if (IS_DMA_ERRATA(DMA_ERRATA_i541) &&
1029 if (cpu_is_omap34xx() && (l & OMAP_DMA_CCR_SEL_SRC_DST_SYNC)) { 934 (l & OMAP_DMA_CCR_SEL_SRC_DST_SYNC)) {
1030 int i = 0; 935 int i = 0;
1031 u32 sys_cf; 936 u32 sys_cf;
1032 937
1033 /* Configure No-Standby */ 938 /* Configure No-Standby */
1034 l = dma_read(OCP_SYSCONFIG); 939 l = p->dma_read(OCP_SYSCONFIG, lch);
1035 sys_cf = l; 940 sys_cf = l;
1036 l &= ~DMA_SYSCONFIG_MIDLEMODE_MASK; 941 l &= ~DMA_SYSCONFIG_MIDLEMODE_MASK;
1037 l |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE); 942 l |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
1038 dma_write(l , OCP_SYSCONFIG); 943 p->dma_write(l , OCP_SYSCONFIG, 0);
1039 944
1040 l = dma_read(CCR(lch)); 945 l = p->dma_read(CCR, lch);
1041 l &= ~OMAP_DMA_CCR_EN; 946 l &= ~OMAP_DMA_CCR_EN;
1042 dma_write(l, CCR(lch)); 947 p->dma_write(l, CCR, lch);
1043 948
1044 /* Wait for sDMA FIFO drain */ 949 /* Wait for sDMA FIFO drain */
1045 l = dma_read(CCR(lch)); 950 l = p->dma_read(CCR, lch);
1046 while (i < 100 && (l & (OMAP_DMA_CCR_RD_ACTIVE | 951 while (i < 100 && (l & (OMAP_DMA_CCR_RD_ACTIVE |
1047 OMAP_DMA_CCR_WR_ACTIVE))) { 952 OMAP_DMA_CCR_WR_ACTIVE))) {
1048 udelay(5); 953 udelay(5);
1049 i++; 954 i++;
1050 l = dma_read(CCR(lch)); 955 l = p->dma_read(CCR, lch);
1051 } 956 }
1052 if (i >= 100) 957 if (i >= 100)
1053 printk(KERN_ERR "DMA drain did not complete on " 958 printk(KERN_ERR "DMA drain did not complete on "
1054 "lch %d\n", lch); 959 "lch %d\n", lch);
1055 /* Restore OCP_SYSCONFIG */ 960 /* Restore OCP_SYSCONFIG */
1056 dma_write(sys_cf, OCP_SYSCONFIG); 961 p->dma_write(sys_cf, OCP_SYSCONFIG, lch);
1057 } else { 962 } else {
1058 l &= ~OMAP_DMA_CCR_EN; 963 l &= ~OMAP_DMA_CCR_EN;
1059 dma_write(l, CCR(lch)); 964 p->dma_write(l, CCR, lch);
1060 } 965 }
1061 966
1062 if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) { 967 if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
1063 int next_lch, cur_lch = lch; 968 int next_lch, cur_lch = lch;
1064 char dma_chan_link_map[OMAP_DMA4_LOGICAL_DMA_CH_COUNT]; 969 char dma_chan_link_map[dma_lch_count];
1065 970
1066 memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map)); 971 memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
1067 do { 972 do {
@@ -1122,19 +1027,15 @@ dma_addr_t omap_get_dma_src_pos(int lch)
1122 dma_addr_t offset = 0; 1027 dma_addr_t offset = 0;
1123 1028
1124 if (cpu_is_omap15xx()) 1029 if (cpu_is_omap15xx())
1125 offset = dma_read(CPC(lch)); 1030 offset = p->dma_read(CPC, lch);
1126 else 1031 else
1127 offset = dma_read(CSAC(lch)); 1032 offset = p->dma_read(CSAC, lch);
1128 1033
1129 /* 1034 if (IS_DMA_ERRATA(DMA_ERRATA_3_3) && offset == 0)
1130 * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is 1035 offset = p->dma_read(CSAC, lch);
1131 * read before the DMA controller finished disabling the channel.
1132 */
1133 if (!cpu_is_omap15xx() && offset == 0)
1134 offset = dma_read(CSAC(lch));
1135 1036
1136 if (cpu_class_is_omap1()) 1037 if (cpu_class_is_omap1())
1137 offset |= (dma_read(CSSA_U(lch)) << 16); 1038 offset |= (p->dma_read(CSSA, lch) & 0xFFFF0000);
1138 1039
1139 return offset; 1040 return offset;
1140} 1041}
@@ -1153,19 +1054,19 @@ dma_addr_t omap_get_dma_dst_pos(int lch)
1153 dma_addr_t offset = 0; 1054 dma_addr_t offset = 0;
1154 1055
1155 if (cpu_is_omap15xx()) 1056 if (cpu_is_omap15xx())
1156 offset = dma_read(CPC(lch)); 1057 offset = p->dma_read(CPC, lch);
1157 else 1058 else
1158 offset = dma_read(CDAC(lch)); 1059 offset = p->dma_read(CDAC, lch);
1159 1060
1160 /* 1061 /*
1161 * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is 1062 * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
1162 * read before the DMA controller finished disabling the channel. 1063 * read before the DMA controller finished disabling the channel.
1163 */ 1064 */
1164 if (!cpu_is_omap15xx() && offset == 0) 1065 if (!cpu_is_omap15xx() && offset == 0)
1165 offset = dma_read(CDAC(lch)); 1066 offset = p->dma_read(CDAC, lch);
1166 1067
1167 if (cpu_class_is_omap1()) 1068 if (cpu_class_is_omap1())
1168 offset |= (dma_read(CDSA_U(lch)) << 16); 1069 offset |= (p->dma_read(CDSA, lch) & 0xFFFF0000);
1169 1070
1170 return offset; 1071 return offset;
1171} 1072}
@@ -1173,7 +1074,7 @@ EXPORT_SYMBOL(omap_get_dma_dst_pos);
1173 1074
1174int omap_get_dma_active_status(int lch) 1075int omap_get_dma_active_status(int lch)
1175{ 1076{
1176 return (dma_read(CCR(lch)) & OMAP_DMA_CCR_EN) != 0; 1077 return (p->dma_read(CCR, lch) & OMAP_DMA_CCR_EN) != 0;
1177} 1078}
1178EXPORT_SYMBOL(omap_get_dma_active_status); 1079EXPORT_SYMBOL(omap_get_dma_active_status);
1179 1080
@@ -1186,7 +1087,7 @@ int omap_dma_running(void)
1186 return 1; 1087 return 1;
1187 1088
1188 for (lch = 0; lch < dma_chan_count; lch++) 1089 for (lch = 0; lch < dma_chan_count; lch++)
1189 if (dma_read(CCR(lch)) & OMAP_DMA_CCR_EN) 1090 if (p->dma_read(CCR, lch) & OMAP_DMA_CCR_EN)
1190 return 1; 1091 return 1;
1191 1092
1192 return 0; 1093 return 0;
@@ -1201,8 +1102,8 @@ void omap_dma_link_lch(int lch_head, int lch_queue)
1201{ 1102{
1202 if (omap_dma_in_1510_mode()) { 1103 if (omap_dma_in_1510_mode()) {
1203 if (lch_head == lch_queue) { 1104 if (lch_head == lch_queue) {
1204 dma_write(dma_read(CCR(lch_head)) | (3 << 8), 1105 p->dma_write(p->dma_read(CCR, lch_head) | (3 << 8),
1205 CCR(lch_head)); 1106 CCR, lch_head);
1206 return; 1107 return;
1207 } 1108 }
1208 printk(KERN_ERR "DMA linking is not supported in 1510 mode\n"); 1109 printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
@@ -1228,8 +1129,8 @@ void omap_dma_unlink_lch(int lch_head, int lch_queue)
1228{ 1129{
1229 if (omap_dma_in_1510_mode()) { 1130 if (omap_dma_in_1510_mode()) {
1230 if (lch_head == lch_queue) { 1131 if (lch_head == lch_queue) {
1231 dma_write(dma_read(CCR(lch_head)) & ~(3 << 8), 1132 p->dma_write(p->dma_read(CCR, lch_head) & ~(3 << 8),
1232 CCR(lch_head)); 1133 CCR, lch_head);
1233 return; 1134 return;
1234 } 1135 }
1235 printk(KERN_ERR "DMA linking is not supported in 1510 mode\n"); 1136 printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
@@ -1255,8 +1156,6 @@ void omap_dma_unlink_lch(int lch_head, int lch_queue)
1255} 1156}
1256EXPORT_SYMBOL(omap_dma_unlink_lch); 1157EXPORT_SYMBOL(omap_dma_unlink_lch);
1257 1158
1258/*----------------------------------------------------------------------------*/
1259
1260#ifndef CONFIG_ARCH_OMAP1 1159#ifndef CONFIG_ARCH_OMAP1
1261/* Create chain of DMA channesls */ 1160/* Create chain of DMA channesls */
1262static void create_dma_lch_chain(int lch_head, int lch_queue) 1161static void create_dma_lch_chain(int lch_head, int lch_queue)
@@ -1281,15 +1180,15 @@ static void create_dma_lch_chain(int lch_head, int lch_queue)
1281 lch_queue; 1180 lch_queue;
1282 } 1181 }
1283 1182
1284 l = dma_read(CLNK_CTRL(lch_head)); 1183 l = p->dma_read(CLNK_CTRL, lch_head);
1285 l &= ~(0x1f); 1184 l &= ~(0x1f);
1286 l |= lch_queue; 1185 l |= lch_queue;
1287 dma_write(l, CLNK_CTRL(lch_head)); 1186 p->dma_write(l, CLNK_CTRL, lch_head);
1288 1187
1289 l = dma_read(CLNK_CTRL(lch_queue)); 1188 l = p->dma_read(CLNK_CTRL, lch_queue);
1290 l &= ~(0x1f); 1189 l &= ~(0x1f);
1291 l |= (dma_chan[lch_queue].next_linked_ch); 1190 l |= (dma_chan[lch_queue].next_linked_ch);
1292 dma_write(l, CLNK_CTRL(lch_queue)); 1191 p->dma_write(l, CLNK_CTRL, lch_queue);
1293} 1192}
1294 1193
1295/** 1194/**
@@ -1565,13 +1464,13 @@ int omap_dma_chain_a_transfer(int chain_id, int src_start, int dest_start,
1565 1464
1566 /* Set the params to the free channel */ 1465 /* Set the params to the free channel */
1567 if (src_start != 0) 1466 if (src_start != 0)
1568 dma_write(src_start, CSSA(lch)); 1467 p->dma_write(src_start, CSSA, lch);
1569 if (dest_start != 0) 1468 if (dest_start != 0)
1570 dma_write(dest_start, CDSA(lch)); 1469 p->dma_write(dest_start, CDSA, lch);
1571 1470
1572 /* Write the buffer size */ 1471 /* Write the buffer size */
1573 dma_write(elem_count, CEN(lch)); 1472 p->dma_write(elem_count, CEN, lch);
1574 dma_write(frame_count, CFN(lch)); 1473 p->dma_write(frame_count, CFN, lch);
1575 1474
1576 /* 1475 /*
1577 * If the chain is dynamically linked, 1476 * If the chain is dynamically linked,
@@ -1604,8 +1503,8 @@ int omap_dma_chain_a_transfer(int chain_id, int src_start, int dest_start,
1604 enable_lnk(dma_chan[lch].prev_linked_ch); 1503 enable_lnk(dma_chan[lch].prev_linked_ch);
1605 dma_chan[lch].state = DMA_CH_QUEUED; 1504 dma_chan[lch].state = DMA_CH_QUEUED;
1606 start_dma = 0; 1505 start_dma = 0;
1607 if (0 == ((1 << 7) & dma_read( 1506 if (0 == ((1 << 7) & p->dma_read(
1608 CCR(dma_chan[lch].prev_linked_ch)))) { 1507 CCR, dma_chan[lch].prev_linked_ch))) {
1609 disable_lnk(dma_chan[lch]. 1508 disable_lnk(dma_chan[lch].
1610 prev_linked_ch); 1509 prev_linked_ch);
1611 pr_debug("\n prev ch is stopped\n"); 1510 pr_debug("\n prev ch is stopped\n");
@@ -1621,7 +1520,7 @@ int omap_dma_chain_a_transfer(int chain_id, int src_start, int dest_start,
1621 } 1520 }
1622 omap_enable_channel_irq(lch); 1521 omap_enable_channel_irq(lch);
1623 1522
1624 l = dma_read(CCR(lch)); 1523 l = p->dma_read(CCR, lch);
1625 1524
1626 if ((0 == (l & (1 << 24)))) 1525 if ((0 == (l & (1 << 24))))
1627 l &= ~(1 << 25); 1526 l &= ~(1 << 25);
@@ -1632,12 +1531,12 @@ int omap_dma_chain_a_transfer(int chain_id, int src_start, int dest_start,
1632 l |= (1 << 7); 1531 l |= (1 << 7);
1633 dma_chan[lch].state = DMA_CH_STARTED; 1532 dma_chan[lch].state = DMA_CH_STARTED;
1634 pr_debug("starting %d\n", lch); 1533 pr_debug("starting %d\n", lch);
1635 dma_write(l, CCR(lch)); 1534 p->dma_write(l, CCR, lch);
1636 } else 1535 } else
1637 start_dma = 0; 1536 start_dma = 0;
1638 } else { 1537 } else {
1639 if (0 == (l & (1 << 7))) 1538 if (0 == (l & (1 << 7)))
1640 dma_write(l, CCR(lch)); 1539 p->dma_write(l, CCR, lch);
1641 } 1540 }
1642 dma_chan[lch].flags |= OMAP_DMA_ACTIVE; 1541 dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
1643 } 1542 }
@@ -1682,7 +1581,7 @@ int omap_start_dma_chain_transfers(int chain_id)
1682 omap_enable_channel_irq(channels[0]); 1581 omap_enable_channel_irq(channels[0]);
1683 } 1582 }
1684 1583
1685 l = dma_read(CCR(channels[0])); 1584 l = p->dma_read(CCR, channels[0]);
1686 l |= (1 << 7); 1585 l |= (1 << 7);
1687 dma_linked_lch[chain_id].chain_state = DMA_CHAIN_STARTED; 1586 dma_linked_lch[chain_id].chain_state = DMA_CHAIN_STARTED;
1688 dma_chan[channels[0]].state = DMA_CH_STARTED; 1587 dma_chan[channels[0]].state = DMA_CH_STARTED;
@@ -1691,7 +1590,7 @@ int omap_start_dma_chain_transfers(int chain_id)
1691 l &= ~(1 << 25); 1590 l &= ~(1 << 25);
1692 else 1591 else
1693 l |= (1 << 25); 1592 l |= (1 << 25);
1694 dma_write(l, CCR(channels[0])); 1593 p->dma_write(l, CCR, channels[0]);
1695 1594
1696 dma_chan[channels[0]].flags |= OMAP_DMA_ACTIVE; 1595 dma_chan[channels[0]].flags |= OMAP_DMA_ACTIVE;
1697 1596
@@ -1711,7 +1610,7 @@ int omap_stop_dma_chain_transfers(int chain_id)
1711{ 1610{
1712 int *channels; 1611 int *channels;
1713 u32 l, i; 1612 u32 l, i;
1714 u32 sys_cf; 1613 u32 sys_cf = 0;
1715 1614
1716 /* Check for input params */ 1615 /* Check for input params */
1717 if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) { 1616 if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
@@ -1726,22 +1625,20 @@ int omap_stop_dma_chain_transfers(int chain_id)
1726 } 1625 }
1727 channels = dma_linked_lch[chain_id].linked_dmach_q; 1626 channels = dma_linked_lch[chain_id].linked_dmach_q;
1728 1627
1729 /* 1628 if (IS_DMA_ERRATA(DMA_ERRATA_i88)) {
1730 * DMA Errata: 1629 sys_cf = p->dma_read(OCP_SYSCONFIG, 0);
1731 * Special programming model needed to disable DMA before end of block 1630 l = sys_cf;
1732 */ 1631 /* Middle mode reg set no Standby */
1733 sys_cf = dma_read(OCP_SYSCONFIG); 1632 l &= ~((1 << 12)|(1 << 13));
1734 l = sys_cf; 1633 p->dma_write(l, OCP_SYSCONFIG, 0);
1735 /* Middle mode reg set no Standby */ 1634 }
1736 l &= ~((1 << 12)|(1 << 13));
1737 dma_write(l, OCP_SYSCONFIG);
1738 1635
1739 for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) { 1636 for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1740 1637
1741 /* Stop the Channel transmission */ 1638 /* Stop the Channel transmission */
1742 l = dma_read(CCR(channels[i])); 1639 l = p->dma_read(CCR, channels[i]);
1743 l &= ~(1 << 7); 1640 l &= ~(1 << 7);
1744 dma_write(l, CCR(channels[i])); 1641 p->dma_write(l, CCR, channels[i]);
1745 1642
1746 /* Disable the link in all the channels */ 1643 /* Disable the link in all the channels */
1747 disable_lnk(channels[i]); 1644 disable_lnk(channels[i]);
@@ -1753,8 +1650,8 @@ int omap_stop_dma_chain_transfers(int chain_id)
1753 /* Reset the Queue pointers */ 1650 /* Reset the Queue pointers */
1754 OMAP_DMA_CHAIN_QINIT(chain_id); 1651 OMAP_DMA_CHAIN_QINIT(chain_id);
1755 1652
1756 /* Errata - put in the old value */ 1653 if (IS_DMA_ERRATA(DMA_ERRATA_i88))
1757 dma_write(sys_cf, OCP_SYSCONFIG); 1654 p->dma_write(sys_cf, OCP_SYSCONFIG, 0);
1758 1655
1759 return 0; 1656 return 0;
1760} 1657}
@@ -1796,8 +1693,8 @@ int omap_get_dma_chain_index(int chain_id, int *ei, int *fi)
1796 /* Get the current channel */ 1693 /* Get the current channel */
1797 lch = channels[dma_linked_lch[chain_id].q_head]; 1694 lch = channels[dma_linked_lch[chain_id].q_head];
1798 1695
1799 *ei = dma_read(CCEN(lch)); 1696 *ei = p->dma_read(CCEN, lch);
1800 *fi = dma_read(CCFN(lch)); 1697 *fi = p->dma_read(CCFN, lch);
1801 1698
1802 return 0; 1699 return 0;
1803} 1700}
@@ -1834,7 +1731,7 @@ int omap_get_dma_chain_dst_pos(int chain_id)
1834 /* Get the current channel */ 1731 /* Get the current channel */
1835 lch = channels[dma_linked_lch[chain_id].q_head]; 1732 lch = channels[dma_linked_lch[chain_id].q_head];
1836 1733
1837 return dma_read(CDAC(lch)); 1734 return p->dma_read(CDAC, lch);
1838} 1735}
1839EXPORT_SYMBOL(omap_get_dma_chain_dst_pos); 1736EXPORT_SYMBOL(omap_get_dma_chain_dst_pos);
1840 1737
@@ -1868,7 +1765,7 @@ int omap_get_dma_chain_src_pos(int chain_id)
1868 /* Get the current channel */ 1765 /* Get the current channel */
1869 lch = channels[dma_linked_lch[chain_id].q_head]; 1766 lch = channels[dma_linked_lch[chain_id].q_head];
1870 1767
1871 return dma_read(CSAC(lch)); 1768 return p->dma_read(CSAC, lch);
1872} 1769}
1873EXPORT_SYMBOL(omap_get_dma_chain_src_pos); 1770EXPORT_SYMBOL(omap_get_dma_chain_src_pos);
1874#endif /* ifndef CONFIG_ARCH_OMAP1 */ 1771#endif /* ifndef CONFIG_ARCH_OMAP1 */
@@ -1885,7 +1782,7 @@ static int omap1_dma_handle_ch(int ch)
1885 csr = dma_chan[ch].saved_csr; 1782 csr = dma_chan[ch].saved_csr;
1886 dma_chan[ch].saved_csr = 0; 1783 dma_chan[ch].saved_csr = 0;
1887 } else 1784 } else
1888 csr = dma_read(CSR(ch)); 1785 csr = p->dma_read(CSR, ch);
1889 if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) { 1786 if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) {
1890 dma_chan[ch + 6].saved_csr = csr >> 7; 1787 dma_chan[ch + 6].saved_csr = csr >> 7;
1891 csr &= 0x7f; 1788 csr &= 0x7f;
@@ -1938,13 +1835,13 @@ static irqreturn_t omap1_dma_irq_handler(int irq, void *dev_id)
1938 1835
1939static int omap2_dma_handle_ch(int ch) 1836static int omap2_dma_handle_ch(int ch)
1940{ 1837{
1941 u32 status = dma_read(CSR(ch)); 1838 u32 status = p->dma_read(CSR, ch);
1942 1839
1943 if (!status) { 1840 if (!status) {
1944 if (printk_ratelimit()) 1841 if (printk_ratelimit())
1945 printk(KERN_WARNING "Spurious DMA IRQ for lch %d\n", 1842 printk(KERN_WARNING "Spurious DMA IRQ for lch %d\n",
1946 ch); 1843 ch);
1947 dma_write(1 << ch, IRQSTATUS_L0); 1844 p->dma_write(1 << ch, IRQSTATUS_L0, ch);
1948 return 0; 1845 return 0;
1949 } 1846 }
1950 if (unlikely(dma_chan[ch].dev_id == -1)) { 1847 if (unlikely(dma_chan[ch].dev_id == -1)) {
@@ -1960,17 +1857,12 @@ static int omap2_dma_handle_ch(int ch)
1960 if (unlikely(status & OMAP2_DMA_TRANS_ERR_IRQ)) { 1857 if (unlikely(status & OMAP2_DMA_TRANS_ERR_IRQ)) {
1961 printk(KERN_INFO "DMA transaction error with device %d\n", 1858 printk(KERN_INFO "DMA transaction error with device %d\n",
1962 dma_chan[ch].dev_id); 1859 dma_chan[ch].dev_id);
1963 if (cpu_class_is_omap2()) { 1860 if (IS_DMA_ERRATA(DMA_ERRATA_i378)) {
1964 /*
1965 * Errata: sDMA Channel is not disabled
1966 * after a transaction error. So we explicitely
1967 * disable the channel
1968 */
1969 u32 ccr; 1861 u32 ccr;
1970 1862
1971 ccr = dma_read(CCR(ch)); 1863 ccr = p->dma_read(CCR, ch);
1972 ccr &= ~OMAP_DMA_CCR_EN; 1864 ccr &= ~OMAP_DMA_CCR_EN;
1973 dma_write(ccr, CCR(ch)); 1865 p->dma_write(ccr, CCR, ch);
1974 dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE; 1866 dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
1975 } 1867 }
1976 } 1868 }
@@ -1981,16 +1873,16 @@ static int omap2_dma_handle_ch(int ch)
1981 printk(KERN_INFO "DMA misaligned error with device %d\n", 1873 printk(KERN_INFO "DMA misaligned error with device %d\n",
1982 dma_chan[ch].dev_id); 1874 dma_chan[ch].dev_id);
1983 1875
1984 dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(ch)); 1876 p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, ch);
1985 dma_write(1 << ch, IRQSTATUS_L0); 1877 p->dma_write(1 << ch, IRQSTATUS_L0, ch);
1986 /* read back the register to flush the write */ 1878 /* read back the register to flush the write */
1987 dma_read(IRQSTATUS_L0); 1879 p->dma_read(IRQSTATUS_L0, ch);
1988 1880
1989 /* If the ch is not chained then chain_id will be -1 */ 1881 /* If the ch is not chained then chain_id will be -1 */
1990 if (dma_chan[ch].chain_id != -1) { 1882 if (dma_chan[ch].chain_id != -1) {
1991 int chain_id = dma_chan[ch].chain_id; 1883 int chain_id = dma_chan[ch].chain_id;
1992 dma_chan[ch].state = DMA_CH_NOTSTARTED; 1884 dma_chan[ch].state = DMA_CH_NOTSTARTED;
1993 if (dma_read(CLNK_CTRL(ch)) & (1 << 15)) 1885 if (p->dma_read(CLNK_CTRL, ch) & (1 << 15))
1994 dma_chan[dma_chan[ch].next_linked_ch].state = 1886 dma_chan[dma_chan[ch].next_linked_ch].state =
1995 DMA_CH_STARTED; 1887 DMA_CH_STARTED;
1996 if (dma_linked_lch[chain_id].chain_mode == 1888 if (dma_linked_lch[chain_id].chain_mode ==
@@ -2000,10 +1892,10 @@ static int omap2_dma_handle_ch(int ch)
2000 if (!OMAP_DMA_CHAIN_QEMPTY(chain_id)) 1892 if (!OMAP_DMA_CHAIN_QEMPTY(chain_id))
2001 OMAP_DMA_CHAIN_INCQHEAD(chain_id); 1893 OMAP_DMA_CHAIN_INCQHEAD(chain_id);
2002 1894
2003 status = dma_read(CSR(ch)); 1895 status = p->dma_read(CSR, ch);
2004 } 1896 }
2005 1897
2006 dma_write(status, CSR(ch)); 1898 p->dma_write(status, CSR, ch);
2007 1899
2008 if (likely(dma_chan[ch].callback != NULL)) 1900 if (likely(dma_chan[ch].callback != NULL))
2009 dma_chan[ch].callback(ch, status, dma_chan[ch].data); 1901 dma_chan[ch].callback(ch, status, dma_chan[ch].data);
@@ -2017,13 +1909,13 @@ static irqreturn_t omap2_dma_irq_handler(int irq, void *dev_id)
2017 u32 val, enable_reg; 1909 u32 val, enable_reg;
2018 int i; 1910 int i;
2019 1911
2020 val = dma_read(IRQSTATUS_L0); 1912 val = p->dma_read(IRQSTATUS_L0, 0);
2021 if (val == 0) { 1913 if (val == 0) {
2022 if (printk_ratelimit()) 1914 if (printk_ratelimit())
2023 printk(KERN_WARNING "Spurious DMA IRQ\n"); 1915 printk(KERN_WARNING "Spurious DMA IRQ\n");
2024 return IRQ_HANDLED; 1916 return IRQ_HANDLED;
2025 } 1917 }
2026 enable_reg = dma_read(IRQENABLE_L0); 1918 enable_reg = p->dma_read(IRQENABLE_L0, 0);
2027 val &= enable_reg; /* Dispatch only relevant interrupts */ 1919 val &= enable_reg; /* Dispatch only relevant interrupts */
2028 for (i = 0; i < dma_lch_count && val != 0; i++) { 1920 for (i = 0; i < dma_lch_count && val != 0; i++) {
2029 if (val & 1) 1921 if (val & 1)
@@ -2049,119 +1941,66 @@ static struct irqaction omap24xx_dma_irq;
2049void omap_dma_global_context_save(void) 1941void omap_dma_global_context_save(void)
2050{ 1942{
2051 omap_dma_global_context.dma_irqenable_l0 = 1943 omap_dma_global_context.dma_irqenable_l0 =
2052 dma_read(IRQENABLE_L0); 1944 p->dma_read(IRQENABLE_L0, 0);
2053 omap_dma_global_context.dma_ocp_sysconfig = 1945 omap_dma_global_context.dma_ocp_sysconfig =
2054 dma_read(OCP_SYSCONFIG); 1946 p->dma_read(OCP_SYSCONFIG, 0);
2055 omap_dma_global_context.dma_gcr = dma_read(GCR); 1947 omap_dma_global_context.dma_gcr = p->dma_read(GCR, 0);
2056} 1948}
2057 1949
2058void omap_dma_global_context_restore(void) 1950void omap_dma_global_context_restore(void)
2059{ 1951{
2060 int ch; 1952 int ch;
2061 1953
2062 dma_write(omap_dma_global_context.dma_gcr, GCR); 1954 p->dma_write(omap_dma_global_context.dma_gcr, GCR, 0);
2063 dma_write(omap_dma_global_context.dma_ocp_sysconfig, 1955 p->dma_write(omap_dma_global_context.dma_ocp_sysconfig,
2064 OCP_SYSCONFIG); 1956 OCP_SYSCONFIG, 0);
2065 dma_write(omap_dma_global_context.dma_irqenable_l0, 1957 p->dma_write(omap_dma_global_context.dma_irqenable_l0,
2066 IRQENABLE_L0); 1958 IRQENABLE_L0, 0);
2067 1959
2068 /* 1960 if (IS_DMA_ERRATA(DMA_ROMCODE_BUG))
2069 * A bug in ROM code leaves IRQ status for channels 0 and 1 uncleared 1961 p->dma_write(0x3 , IRQSTATUS_L0, 0);
2070 * after secure sram context save and restore. Hence we need to
2071 * manually clear those IRQs to avoid spurious interrupts. This
2072 * affects only secure devices.
2073 */
2074 if (cpu_is_omap34xx() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
2075 dma_write(0x3 , IRQSTATUS_L0);
2076 1962
2077 for (ch = 0; ch < dma_chan_count; ch++) 1963 for (ch = 0; ch < dma_chan_count; ch++)
2078 if (dma_chan[ch].dev_id != -1) 1964 if (dma_chan[ch].dev_id != -1)
2079 omap_clear_dma(ch); 1965 omap_clear_dma(ch);
2080} 1966}
2081 1967
2082/*----------------------------------------------------------------------------*/ 1968static int __devinit omap_system_dma_probe(struct platform_device *pdev)
2083
2084static int __init omap_init_dma(void)
2085{ 1969{
2086 unsigned long base; 1970 int ch, ret = 0;
2087 int ch, r; 1971 int dma_irq;
2088 1972 char irq_name[4];
2089 if (cpu_class_is_omap1()) { 1973 int irq_rel;
2090 base = OMAP1_DMA_BASE; 1974
2091 dma_lch_count = OMAP1_LOGICAL_DMA_CH_COUNT; 1975 p = pdev->dev.platform_data;
2092 } else if (cpu_is_omap24xx()) { 1976 if (!p) {
2093 base = OMAP24XX_DMA4_BASE; 1977 dev_err(&pdev->dev, "%s: System DMA initialized without"
2094 dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT; 1978 "platform data\n", __func__);
2095 } else if (cpu_is_omap34xx()) { 1979 return -EINVAL;
2096 base = OMAP34XX_DMA4_BASE;
2097 dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
2098 } else if (cpu_is_omap44xx()) {
2099 base = OMAP44XX_DMA4_BASE;
2100 dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
2101 } else {
2102 pr_err("DMA init failed for unsupported omap\n");
2103 return -ENODEV;
2104 } 1980 }
2105 1981
2106 omap_dma_base = ioremap(base, SZ_4K); 1982 d = p->dma_attr;
2107 BUG_ON(!omap_dma_base); 1983 errata = p->errata;
2108 1984
2109 if (cpu_class_is_omap2() && omap_dma_reserve_channels 1985 if ((d->dev_caps & RESERVE_CHANNEL) && omap_dma_reserve_channels
2110 && (omap_dma_reserve_channels <= dma_lch_count)) 1986 && (omap_dma_reserve_channels <= dma_lch_count))
2111 dma_lch_count = omap_dma_reserve_channels; 1987 d->lch_count = omap_dma_reserve_channels;
2112 1988
2113 dma_chan = kzalloc(sizeof(struct omap_dma_lch) * dma_lch_count, 1989 dma_lch_count = d->lch_count;
2114 GFP_KERNEL); 1990 dma_chan_count = dma_lch_count;
2115 if (!dma_chan) { 1991 dma_chan = d->chan;
2116 r = -ENOMEM; 1992 enable_1510_mode = d->dev_caps & ENABLE_1510_MODE;
2117 goto out_unmap;
2118 }
2119 1993
2120 if (cpu_class_is_omap2()) { 1994 if (cpu_class_is_omap2()) {
2121 dma_linked_lch = kzalloc(sizeof(struct dma_link_info) * 1995 dma_linked_lch = kzalloc(sizeof(struct dma_link_info) *
2122 dma_lch_count, GFP_KERNEL); 1996 dma_lch_count, GFP_KERNEL);
2123 if (!dma_linked_lch) { 1997 if (!dma_linked_lch) {
2124 r = -ENOMEM; 1998 ret = -ENOMEM;
2125 goto out_free; 1999 goto exit_dma_lch_fail;
2126 } 2000 }
2127 } 2001 }
2128 2002
2129 if (cpu_is_omap15xx()) {
2130 printk(KERN_INFO "DMA support for OMAP15xx initialized\n");
2131 dma_chan_count = 9;
2132 enable_1510_mode = 1;
2133 } else if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
2134 printk(KERN_INFO "OMAP DMA hardware version %d\n",
2135 dma_read(HW_ID));
2136 printk(KERN_INFO "DMA capabilities: %08x:%08x:%04x:%04x:%04x\n",
2137 (dma_read(CAPS_0_U) << 16) |
2138 dma_read(CAPS_0_L),
2139 (dma_read(CAPS_1_U) << 16) |
2140 dma_read(CAPS_1_L),
2141 dma_read(CAPS_2), dma_read(CAPS_3),
2142 dma_read(CAPS_4));
2143 if (!enable_1510_mode) {
2144 u16 w;
2145
2146 /* Disable OMAP 3.0/3.1 compatibility mode. */
2147 w = dma_read(GSCR);
2148 w |= 1 << 3;
2149 dma_write(w, GSCR);
2150 dma_chan_count = 16;
2151 } else
2152 dma_chan_count = 9;
2153 } else if (cpu_class_is_omap2()) {
2154 u8 revision = dma_read(REVISION) & 0xff;
2155 printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n",
2156 revision >> 4, revision & 0xf);
2157 dma_chan_count = dma_lch_count;
2158 } else {
2159 dma_chan_count = 0;
2160 return 0;
2161 }
2162
2163 spin_lock_init(&dma_chan_lock); 2003 spin_lock_init(&dma_chan_lock);
2164
2165 for (ch = 0; ch < dma_chan_count; ch++) { 2004 for (ch = 0; ch < dma_chan_count; ch++) {
2166 omap_clear_dma(ch); 2005 omap_clear_dma(ch);
2167 if (cpu_class_is_omap2()) 2006 if (cpu_class_is_omap2())
@@ -2178,20 +2017,23 @@ static int __init omap_init_dma(void)
2178 * request_irq() doesn't like dev_id (ie. ch) being 2017 * request_irq() doesn't like dev_id (ie. ch) being
2179 * zero, so we have to kludge around this. 2018 * zero, so we have to kludge around this.
2180 */ 2019 */
2181 r = request_irq(omap1_dma_irq[ch], 2020 sprintf(&irq_name[0], "%d", ch);
2021 dma_irq = platform_get_irq_byname(pdev, irq_name);
2022
2023 if (dma_irq < 0) {
2024 ret = dma_irq;
2025 goto exit_dma_irq_fail;
2026 }
2027
2028 /* INT_DMA_LCD is handled in lcd_dma.c */
2029 if (dma_irq == INT_DMA_LCD)
2030 continue;
2031
2032 ret = request_irq(dma_irq,
2182 omap1_dma_irq_handler, 0, "DMA", 2033 omap1_dma_irq_handler, 0, "DMA",
2183 (void *) (ch + 1)); 2034 (void *) (ch + 1));
2184 if (r != 0) { 2035 if (ret != 0)
2185 int i; 2036 goto exit_dma_irq_fail;
2186
2187 printk(KERN_ERR "unable to request IRQ %d "
2188 "for DMA (error %d)\n",
2189 omap1_dma_irq[ch], r);
2190 for (i = 0; i < ch; i++)
2191 free_irq(omap1_dma_irq[i],
2192 (void *) (i + 1));
2193 goto out_free;
2194 }
2195 } 2037 }
2196 } 2038 }
2197 2039
@@ -2200,46 +2042,91 @@ static int __init omap_init_dma(void)
2200 DMA_DEFAULT_FIFO_DEPTH, 0); 2042 DMA_DEFAULT_FIFO_DEPTH, 0);
2201 2043
2202 if (cpu_class_is_omap2()) { 2044 if (cpu_class_is_omap2()) {
2203 int irq; 2045 strcpy(irq_name, "0");
2204 if (cpu_is_omap44xx()) 2046 dma_irq = platform_get_irq_byname(pdev, irq_name);
2205 irq = OMAP44XX_IRQ_SDMA_0; 2047 if (dma_irq < 0) {
2206 else 2048 dev_err(&pdev->dev, "failed: request IRQ %d", dma_irq);
2207 irq = INT_24XX_SDMA_IRQ0; 2049 goto exit_dma_lch_fail;
2208 setup_irq(irq, &omap24xx_dma_irq); 2050 }
2209 } 2051 ret = setup_irq(dma_irq, &omap24xx_dma_irq);
2210 2052 if (ret) {
2211 if (cpu_is_omap34xx() || cpu_is_omap44xx()) { 2053 dev_err(&pdev->dev, "set_up failed for IRQ %d"
2212 /* Enable smartidle idlemodes and autoidle */ 2054 "for DMA (error %d)\n", dma_irq, ret);
2213 u32 v = dma_read(OCP_SYSCONFIG); 2055 goto exit_dma_lch_fail;
2214 v &= ~(DMA_SYSCONFIG_MIDLEMODE_MASK |
2215 DMA_SYSCONFIG_SIDLEMODE_MASK |
2216 DMA_SYSCONFIG_AUTOIDLE);
2217 v |= (DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_SMARTIDLE) |
2218 DMA_SYSCONFIG_SIDLEMODE(DMA_IDLEMODE_SMARTIDLE) |
2219 DMA_SYSCONFIG_AUTOIDLE);
2220 dma_write(v , OCP_SYSCONFIG);
2221 /* reserve dma channels 0 and 1 in high security devices */
2222 if (cpu_is_omap34xx() &&
2223 (omap_type() != OMAP2_DEVICE_TYPE_GP)) {
2224 printk(KERN_INFO "Reserving DMA channels 0 and 1 for "
2225 "HS ROM code\n");
2226 dma_chan[0].dev_id = 0;
2227 dma_chan[1].dev_id = 1;
2228 } 2056 }
2229 } 2057 }
2230 2058
2059 /* reserve dma channels 0 and 1 in high security devices */
2060 if (cpu_is_omap34xx() &&
2061 (omap_type() != OMAP2_DEVICE_TYPE_GP)) {
2062 printk(KERN_INFO "Reserving DMA channels 0 and 1 for "
2063 "HS ROM code\n");
2064 dma_chan[0].dev_id = 0;
2065 dma_chan[1].dev_id = 1;
2066 }
2067 p->show_dma_caps();
2231 return 0; 2068 return 0;
2232 2069
2233out_free: 2070exit_dma_irq_fail:
2071 dev_err(&pdev->dev, "unable to request IRQ %d"
2072 "for DMA (error %d)\n", dma_irq, ret);
2073 for (irq_rel = 0; irq_rel < ch; irq_rel++) {
2074 dma_irq = platform_get_irq(pdev, irq_rel);
2075 free_irq(dma_irq, (void *)(irq_rel + 1));
2076 }
2077
2078exit_dma_lch_fail:
2079 kfree(p);
2080 kfree(d);
2234 kfree(dma_chan); 2081 kfree(dma_chan);
2082 return ret;
2083}
2235 2084
2236out_unmap: 2085static int __devexit omap_system_dma_remove(struct platform_device *pdev)
2237 iounmap(omap_dma_base); 2086{
2087 int dma_irq;
2238 2088
2239 return r; 2089 if (cpu_class_is_omap2()) {
2090 char irq_name[4];
2091 strcpy(irq_name, "0");
2092 dma_irq = platform_get_irq_byname(pdev, irq_name);
2093 remove_irq(dma_irq, &omap24xx_dma_irq);
2094 } else {
2095 int irq_rel = 0;
2096 for ( ; irq_rel < dma_chan_count; irq_rel++) {
2097 dma_irq = platform_get_irq(pdev, irq_rel);
2098 free_irq(dma_irq, (void *)(irq_rel + 1));
2099 }
2100 }
2101 kfree(p);
2102 kfree(d);
2103 kfree(dma_chan);
2104 return 0;
2105}
2106
2107static struct platform_driver omap_system_dma_driver = {
2108 .probe = omap_system_dma_probe,
2109 .remove = omap_system_dma_remove,
2110 .driver = {
2111 .name = "omap_dma_system"
2112 },
2113};
2114
2115static int __init omap_system_dma_init(void)
2116{
2117 return platform_driver_register(&omap_system_dma_driver);
2118}
2119arch_initcall(omap_system_dma_init);
2120
2121static void __exit omap_system_dma_exit(void)
2122{
2123 platform_driver_unregister(&omap_system_dma_driver);
2240} 2124}
2241 2125
2242arch_initcall(omap_init_dma); 2126MODULE_DESCRIPTION("OMAP SYSTEM DMA DRIVER");
2127MODULE_LICENSE("GPL");
2128MODULE_ALIAS("platform:" DRIVER_NAME);
2129MODULE_AUTHOR("Texas Instruments Inc");
2243 2130
2244/* 2131/*
2245 * Reserve the omap SDMA channels using cmdline bootarg 2132 * Reserve the omap SDMA channels using cmdline bootarg