path: root/arch/arm/plat-omap/dma.c
Diffstat (limited to 'arch/arm/plat-omap/dma.c')
-rw-r--r--  arch/arm/plat-omap/dma.c | 820
1 file changed, 270 insertions(+), 550 deletions(-)
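
The heart of this change is that dma.c no longer touches the sDMA registers directly: the register maps, the dma_read()/dma_write() accessors and the per-SoC setup move out to the mach-omap1/mach-omap2 layers, which hand them back in as platform data. The sketch below shows the shape of that interface as it can be reconstructed from the call sites in this diff (p->dma_read, p->dma_write, p->clear_lch_regs, p->clear_dma, p->show_dma_caps, p->dma_attr, p->errata); the real definition lives in <plat/dma.h>, so the field order, any members not used here, and the helper at the end are assumptions for illustration only.

	struct omap_dma_dev_attr;		/* per-SoC channel data, defined elsewhere */

	/* Simplified sketch; see <plat/dma.h> for the authoritative definition. */
	struct omap_system_dma_plat_info {
		struct omap_dma_dev_attr *dma_attr;
		u32 errata;
		void (*show_dma_caps)(void);
		void (*clear_lch_regs)(int lch);	/* OMAP1 only, may be NULL */
		void (*clear_dma)(int lch);
		void (*dma_write)(u32 val, int reg, int lch);
		u32 (*dma_read)(int reg, int lch);
	};

	/* dma.c then reaches the channel registers only through these hooks, e.g.: */
	static void omap_dma_ccr_enable(int lch)	/* hypothetical helper, not part of the patch */
	{
		u32 l = p->dma_read(CCR, lch);
		l |= OMAP_DMA_CCR_EN;
		p->dma_write(l, CCR, lch);
	}

The mach code registers an "omap_dma_system" platform device carrying this structure, and omap_system_dma_probe() at the end of this diff picks it up from pdev->dev.platform_data.
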
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index 6f51bf37ec02..c4b2b478b1a5 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -15,6 +15,10 @@
15 * 15
16 * Support functions for the OMAP internal DMA channels. 16
17 * 17
18 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
19 * Converted DMA library into DMA platform driver.
20 * - G, Manjunath Kondaiah <manjugk@ti.com>
21 *
18 * This program is free software; you can redistribute it and/or modify 22
19 * it under the terms of the GNU General Public License version 2 as 23
20 * published by the Free Software Foundation. 24
@@ -40,96 +44,6 @@
40 44
41#undef DEBUG 45
42 46
43static u16 reg_map_omap1[] = {
44 [GCR] = 0x400,
45 [GSCR] = 0x404,
46 [GRST1] = 0x408,
47 [HW_ID] = 0x442,
48 [PCH2_ID] = 0x444,
49 [PCH0_ID] = 0x446,
50 [PCH1_ID] = 0x448,
51 [PCHG_ID] = 0x44a,
52 [PCHD_ID] = 0x44c,
53 [CAPS_0] = 0x44e,
54 [CAPS_1] = 0x452,
55 [CAPS_2] = 0x456,
56 [CAPS_3] = 0x458,
57 [CAPS_4] = 0x45a,
58 [PCH2_SR] = 0x460,
59 [PCH0_SR] = 0x480,
60 [PCH1_SR] = 0x482,
61 [PCHD_SR] = 0x4c0,
62
63 /* Common Registers */
64 [CSDP] = 0x00,
65 [CCR] = 0x02,
66 [CICR] = 0x04,
67 [CSR] = 0x06,
68 [CEN] = 0x10,
69 [CFN] = 0x12,
70 [CSFI] = 0x14,
71 [CSEI] = 0x16,
72 [CPC] = 0x18, /* 15xx only */
73 [CSAC] = 0x18,
74 [CDAC] = 0x1a,
75 [CDEI] = 0x1c,
76 [CDFI] = 0x1e,
77 [CLNK_CTRL] = 0x28,
78
79 /* Channel specific register offsets */
80 [CSSA] = 0x08,
81 [CDSA] = 0x0c,
82 [COLOR] = 0x20,
83 [CCR2] = 0x24,
84 [LCH_CTRL] = 0x2a,
85};
86
87static u16 reg_map_omap2[] = {
88 [REVISION] = 0x00,
89 [GCR] = 0x78,
90 [IRQSTATUS_L0] = 0x08,
91 [IRQSTATUS_L1] = 0x0c,
92 [IRQSTATUS_L2] = 0x10,
93 [IRQSTATUS_L3] = 0x14,
94 [IRQENABLE_L0] = 0x18,
95 [IRQENABLE_L1] = 0x1c,
96 [IRQENABLE_L2] = 0x20,
97 [IRQENABLE_L3] = 0x24,
98 [SYSSTATUS] = 0x28,
99 [OCP_SYSCONFIG] = 0x2c,
100 [CAPS_0] = 0x64,
101 [CAPS_2] = 0x6c,
102 [CAPS_3] = 0x70,
103 [CAPS_4] = 0x74,
104
105 /* Common register offsets */
106 [CCR] = 0x80,
107 [CLNK_CTRL] = 0x84,
108 [CICR] = 0x88,
109 [CSR] = 0x8c,
110 [CSDP] = 0x90,
111 [CEN] = 0x94,
112 [CFN] = 0x98,
113 [CSEI] = 0xa4,
114 [CSFI] = 0xa8,
115 [CDEI] = 0xac,
116 [CDFI] = 0xb0,
117 [CSAC] = 0xb4,
118 [CDAC] = 0xb8,
119
120 /* Channel specific register offsets */
121 [CSSA] = 0x9c,
122 [CDSA] = 0xa0,
123 [CCEN] = 0xbc,
124 [CCFN] = 0xc0,
125 [COLOR] = 0xc4,
126
127 /* OMAP4 specific registers */
128 [CDP] = 0xd0,
129 [CNDP] = 0xd4,
130 [CCDN] = 0xd8,
131};
132
133#ifndef CONFIG_ARCH_OMAP1 47
134enum { DMA_CH_ALLOC_DONE, DMA_CH_PARAMS_SET_DONE, DMA_CH_STARTED, 48
135 DMA_CH_QUEUED, DMA_CH_NOTSTARTED, DMA_CH_PAUSED, DMA_CH_LINK_ENABLED 49
@@ -143,6 +57,9 @@ enum { DMA_CHAIN_STARTED, DMA_CHAIN_NOTSTARTED };
143 57
144#define OMAP_FUNC_MUX_ARM_BASE (0xfffe1000 + 0xec) 58#define OMAP_FUNC_MUX_ARM_BASE (0xfffe1000 + 0xec)
145 59
60static struct omap_system_dma_plat_info *p;
61static struct omap_dma_dev_attr *d;
62
146static int enable_1510_mode; 63
147static u32 errata; 64
148 65
@@ -152,27 +69,6 @@ static struct omap_dma_global_context_registers {
152 u32 dma_gcr; 69
153} omap_dma_global_context; 70
154 71
155struct omap_dma_lch {
156 int next_lch;
157 int dev_id;
158 u16 saved_csr;
159 u16 enabled_irqs;
160 const char *dev_name;
161 void (*callback)(int lch, u16 ch_status, void *data);
162 void *data;
163
164#ifndef CONFIG_ARCH_OMAP1
165 /* required for Dynamic chaining */
166 int prev_linked_ch;
167 int next_linked_ch;
168 int state;
169 int chain_id;
170
171 int status;
172#endif
173 long flags;
174};
175
176struct dma_link_info { 72
177 int *linked_dmach_q; 73
178 int no_of_lchs_linked; 74
@@ -228,18 +124,6 @@ static int omap_dma_reserve_channels;
228 124
229static spinlock_t dma_chan_lock; 125
230static struct omap_dma_lch *dma_chan; 126
231static void __iomem *omap_dma_base;
232static u16 *reg_map;
233static u8 dma_stride;
234static enum omap_reg_offsets dma_common_ch_start, dma_common_ch_end;
235
236static const u8 omap1_dma_irq[OMAP1_LOGICAL_DMA_CH_COUNT] = {
237 INT_DMA_CH0_6, INT_DMA_CH1_7, INT_DMA_CH2_8, INT_DMA_CH3,
238 INT_DMA_CH4, INT_DMA_CH5, INT_1610_DMA_CH6, INT_1610_DMA_CH7,
239 INT_1610_DMA_CH8, INT_1610_DMA_CH9, INT_1610_DMA_CH10,
240 INT_1610_DMA_CH11, INT_1610_DMA_CH12, INT_1610_DMA_CH13,
241 INT_1610_DMA_CH14, INT_1610_DMA_CH15, INT_DMA_LCD
242};
243 127
244static inline void disable_lnk(int lch); 128
245static void omap_disable_channel_irq(int lch); 129
@@ -248,52 +132,9 @@ static inline void omap_enable_channel_irq(int lch);
248#define REVISIT_24XX() printk(KERN_ERR "FIXME: no %s on 24xx\n", \ 132
249 __func__); 133
250 134
251static inline void dma_write(u32 val, int reg, int lch)
252{
253 u8 stride;
254 u32 offset;
255
256 stride = (reg >= dma_common_ch_start) ? dma_stride : 0;
257 offset = reg_map[reg] + (stride * lch);
258
259 if (dma_stride == 0x40) {
260 __raw_writew(val, omap_dma_base + offset);
261 if ((reg > CLNK_CTRL && reg < CCEN) ||
262 (reg > PCHD_ID && reg < CAPS_2)) {
263 u32 offset2 = reg_map[reg] + 2 + (stride * lch);
264 __raw_writew(val >> 16, omap_dma_base + offset2);
265 }
266 } else {
267 __raw_writel(val, omap_dma_base + offset);
268 }
269}
270
271static inline u32 dma_read(int reg, int lch)
272{
273 u8 stride;
274 u32 offset, val;
275
276 stride = (reg >= dma_common_ch_start) ? dma_stride : 0;
277 offset = reg_map[reg] + (stride * lch);
278
279 if (dma_stride == 0x40) {
280 val = __raw_readw(omap_dma_base + offset);
281 if ((reg > CLNK_CTRL && reg < CCEN) ||
282 (reg > PCHD_ID && reg < CAPS_2)) {
283 u16 upper;
284 u32 offset2 = reg_map[reg] + 2 + (stride * lch);
285 upper = __raw_readw(omap_dma_base + offset2);
286 val |= (upper << 16);
287 }
288 } else {
289 val = __raw_readl(omap_dma_base + offset);
290 }
291 return val;
292}
293
294#ifdef CONFIG_ARCH_OMAP15XX 135#ifdef CONFIG_ARCH_OMAP15XX
295/* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */ 136/* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */
296static int omap_dma_in_1510_mode(void) 137int omap_dma_in_1510_mode(void)
297{ 138{
298 return enable_1510_mode; 139 return enable_1510_mode;
299} 140}
@@ -325,15 +166,6 @@ static inline void set_gdma_dev(int req, int dev)
325#define set_gdma_dev(req, dev) do {} while (0) 166#define set_gdma_dev(req, dev) do {} while (0)
326#endif 167#endif
327 168
328/* Omap1 only */
329static void clear_lch_regs(int lch)
330{
331 int i = dma_common_ch_start;
332
333 for (; i <= dma_common_ch_end; i += 1)
334 dma_write(0, i, lch);
335}
336
337void omap_set_dma_priority(int lch, int dst_port, int priority) 169void omap_set_dma_priority(int lch, int dst_port, int priority)
338{ 170{
339 unsigned long reg; 171 unsigned long reg;
@@ -366,12 +198,12 @@ void omap_set_dma_priority(int lch, int dst_port, int priority)
366 if (cpu_class_is_omap2()) { 198 if (cpu_class_is_omap2()) {
367 u32 ccr; 199 u32 ccr;
368 200
369 ccr = dma_read(CCR, lch); 201 ccr = p->dma_read(CCR, lch);
370 if (priority) 202 if (priority)
371 ccr |= (1 << 6); 203 ccr |= (1 << 6);
372 else 204 else
373 ccr &= ~(1 << 6); 205 ccr &= ~(1 << 6);
374 dma_write(ccr, CCR, lch); 206 p->dma_write(ccr, CCR, lch);
375 } 207 }
376} 208}
377EXPORT_SYMBOL(omap_set_dma_priority); 209EXPORT_SYMBOL(omap_set_dma_priority);
@@ -382,31 +214,31 @@ void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
382{ 214{
383 u32 l; 215 u32 l;
384 216
385 l = dma_read(CSDP, lch); 217 l = p->dma_read(CSDP, lch);
386 l &= ~0x03; 218 l &= ~0x03;
387 l |= data_type; 219 l |= data_type;
388 dma_write(l, CSDP, lch); 220 p->dma_write(l, CSDP, lch);
389 221
390 if (cpu_class_is_omap1()) { 222 if (cpu_class_is_omap1()) {
391 u16 ccr; 223 u16 ccr;
392 224
393 ccr = dma_read(CCR, lch); 225 ccr = p->dma_read(CCR, lch);
394 ccr &= ~(1 << 5); 226 ccr &= ~(1 << 5);
395 if (sync_mode == OMAP_DMA_SYNC_FRAME) 227 if (sync_mode == OMAP_DMA_SYNC_FRAME)
396 ccr |= 1 << 5; 228 ccr |= 1 << 5;
397 dma_write(ccr, CCR, lch); 229 p->dma_write(ccr, CCR, lch);
398 230
399 ccr = dma_read(CCR2, lch); 231 ccr = p->dma_read(CCR2, lch);
400 ccr &= ~(1 << 2); 232 ccr &= ~(1 << 2);
401 if (sync_mode == OMAP_DMA_SYNC_BLOCK) 233 if (sync_mode == OMAP_DMA_SYNC_BLOCK)
402 ccr |= 1 << 2; 234 ccr |= 1 << 2;
403 dma_write(ccr, CCR2, lch); 235 p->dma_write(ccr, CCR2, lch);
404 } 236 }
405 237
406 if (cpu_class_is_omap2() && dma_trigger) { 238 if (cpu_class_is_omap2() && dma_trigger) {
407 u32 val; 239 u32 val;
408 240
409 val = dma_read(CCR, lch); 241 val = p->dma_read(CCR, lch);
410 242
411 /* DMA_SYNCHRO_CONTROL_UPPER depends on the channel number */ 243 /* DMA_SYNCHRO_CONTROL_UPPER depends on the channel number */
412 val &= ~((1 << 23) | (3 << 19) | 0x1f); 244 val &= ~((1 << 23) | (3 << 19) | 0x1f);
@@ -431,11 +263,11 @@ void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
431 } else { 263 } else {
432 val &= ~(1 << 24); /* dest synch */ 264 val &= ~(1 << 24); /* dest synch */
433 } 265 }
434 dma_write(val, CCR, lch); 266 p->dma_write(val, CCR, lch);
435 } 267 }
436 268
437 dma_write(elem_count, CEN, lch); 269 p->dma_write(elem_count, CEN, lch);
438 dma_write(frame_count, CFN, lch); 270 p->dma_write(frame_count, CFN, lch);
439} 271}
440EXPORT_SYMBOL(omap_set_dma_transfer_params); 272EXPORT_SYMBOL(omap_set_dma_transfer_params);
441 273
@@ -446,7 +278,7 @@ void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color)
446 if (cpu_class_is_omap1()) { 278 if (cpu_class_is_omap1()) {
447 u16 w; 279 u16 w;
448 280
449 w = dma_read(CCR2, lch); 281 w = p->dma_read(CCR2, lch);
450 w &= ~0x03; 282 w &= ~0x03;
451 283
452 switch (mode) { 284 switch (mode) {
@@ -461,22 +293,22 @@ void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color)
461 default: 293 default:
462 BUG(); 294 BUG();
463 } 295 }
464 dma_write(w, CCR2, lch); 296 p->dma_write(w, CCR2, lch);
465 297
466 w = dma_read(LCH_CTRL, lch); 298 w = p->dma_read(LCH_CTRL, lch);
467 w &= ~0x0f; 299 w &= ~0x0f;
468 /* Default is channel type 2D */ 300 /* Default is channel type 2D */
469 if (mode) { 301 if (mode) {
470 dma_write(color, COLOR, lch); 302 p->dma_write(color, COLOR, lch);
471 w |= 1; /* Channel type G */ 303 w |= 1; /* Channel type G */
472 } 304 }
473 dma_write(w, LCH_CTRL, lch); 305 p->dma_write(w, LCH_CTRL, lch);
474 } 306 }
475 307
476 if (cpu_class_is_omap2()) { 308 if (cpu_class_is_omap2()) {
477 u32 val; 309 u32 val;
478 310
479 val = dma_read(CCR, lch); 311 val = p->dma_read(CCR, lch);
480 val &= ~((1 << 17) | (1 << 16)); 312 val &= ~((1 << 17) | (1 << 16));
481 313
482 switch (mode) { 314 switch (mode) {
@@ -491,10 +323,10 @@ void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color)
491 default: 323 default:
492 BUG(); 324 BUG();
493 } 325 }
494 dma_write(val, CCR, lch); 326 p->dma_write(val, CCR, lch);
495 327
496 color &= 0xffffff; 328 color &= 0xffffff;
497 dma_write(color, COLOR, lch); 329 p->dma_write(color, COLOR, lch);
498 } 330 }
499} 331}
500EXPORT_SYMBOL(omap_set_dma_color_mode); 332EXPORT_SYMBOL(omap_set_dma_color_mode);
@@ -504,10 +336,10 @@ void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode)
504 if (cpu_class_is_omap2()) { 336 if (cpu_class_is_omap2()) {
505 u32 csdp; 337 u32 csdp;
506 338
507 csdp = dma_read(CSDP, lch); 339 csdp = p->dma_read(CSDP, lch);
508 csdp &= ~(0x3 << 16); 340 csdp &= ~(0x3 << 16);
509 csdp |= (mode << 16); 341 csdp |= (mode << 16);
510 dma_write(csdp, CSDP, lch); 342 p->dma_write(csdp, CSDP, lch);
511 } 343 }
512} 344}
513EXPORT_SYMBOL(omap_set_dma_write_mode); 345EXPORT_SYMBOL(omap_set_dma_write_mode);
@@ -517,10 +349,10 @@ void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode)
517 if (cpu_class_is_omap1() && !cpu_is_omap15xx()) { 349 if (cpu_class_is_omap1() && !cpu_is_omap15xx()) {
518 u32 l; 350 u32 l;
519 351
520 l = dma_read(LCH_CTRL, lch); 352 l = p->dma_read(LCH_CTRL, lch);
521 l &= ~0x7; 353 l &= ~0x7;
522 l |= mode; 354 l |= mode;
523 dma_write(l, LCH_CTRL, lch); 355 p->dma_write(l, LCH_CTRL, lch);
524 } 356 }
525} 357}
526EXPORT_SYMBOL(omap_set_dma_channel_mode); 358EXPORT_SYMBOL(omap_set_dma_channel_mode);
@@ -535,21 +367,21 @@ void omap_set_dma_src_params(int lch, int src_port, int src_amode,
535 if (cpu_class_is_omap1()) { 367 if (cpu_class_is_omap1()) {
536 u16 w; 368 u16 w;
537 369
538 w = dma_read(CSDP, lch); 370 w = p->dma_read(CSDP, lch);
539 w &= ~(0x1f << 2); 371 w &= ~(0x1f << 2);
540 w |= src_port << 2; 372 w |= src_port << 2;
541 dma_write(w, CSDP, lch); 373 p->dma_write(w, CSDP, lch);
542 } 374 }
543 375
544 l = dma_read(CCR, lch); 376 l = p->dma_read(CCR, lch);
545 l &= ~(0x03 << 12); 377 l &= ~(0x03 << 12);
546 l |= src_amode << 12; 378 l |= src_amode << 12;
547 dma_write(l, CCR, lch); 379 p->dma_write(l, CCR, lch);
548 380
549 dma_write(src_start, CSSA, lch); 381 p->dma_write(src_start, CSSA, lch);
550 382
551 dma_write(src_ei, CSEI, lch); 383 p->dma_write(src_ei, CSEI, lch);
552 dma_write(src_fi, CSFI, lch); 384 p->dma_write(src_fi, CSFI, lch);
553} 385}
554EXPORT_SYMBOL(omap_set_dma_src_params); 386EXPORT_SYMBOL(omap_set_dma_src_params);
555 387
@@ -577,8 +409,8 @@ void omap_set_dma_src_index(int lch, int eidx, int fidx)
577 if (cpu_class_is_omap2()) 409 if (cpu_class_is_omap2())
578 return; 410 return;
579 411
580 dma_write(eidx, CSEI, lch); 412 p->dma_write(eidx, CSEI, lch);
581 dma_write(fidx, CSFI, lch); 413 p->dma_write(fidx, CSFI, lch);
582} 414}
583EXPORT_SYMBOL(omap_set_dma_src_index); 415EXPORT_SYMBOL(omap_set_dma_src_index);
584 416
@@ -586,11 +418,11 @@ void omap_set_dma_src_data_pack(int lch, int enable)
586{ 418{
587 u32 l; 419 u32 l;
588 420
589 l = dma_read(CSDP, lch); 421 l = p->dma_read(CSDP, lch);
590 l &= ~(1 << 6); 422 l &= ~(1 << 6);
591 if (enable) 423 if (enable)
592 l |= (1 << 6); 424 l |= (1 << 6);
593 dma_write(l, CSDP, lch); 425 p->dma_write(l, CSDP, lch);
594} 426}
595EXPORT_SYMBOL(omap_set_dma_src_data_pack); 427EXPORT_SYMBOL(omap_set_dma_src_data_pack);
596 428
@@ -599,7 +431,7 @@ void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
599 unsigned int burst = 0; 431 unsigned int burst = 0;
600 u32 l; 432 u32 l;
601 433
602 l = dma_read(CSDP, lch); 434 l = p->dma_read(CSDP, lch);
603 l &= ~(0x03 << 7); 435 l &= ~(0x03 << 7);
604 436
605 switch (burst_mode) { 437 switch (burst_mode) {
@@ -635,7 +467,7 @@ void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
635 } 467 }
636 468
637 l |= (burst << 7); 469 l |= (burst << 7);
638 dma_write(l, CSDP, lch); 470 p->dma_write(l, CSDP, lch);
639} 471}
640EXPORT_SYMBOL(omap_set_dma_src_burst_mode); 472EXPORT_SYMBOL(omap_set_dma_src_burst_mode);
641 473
@@ -647,21 +479,21 @@ void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
647 u32 l; 479 u32 l;
648 480
649 if (cpu_class_is_omap1()) { 481 if (cpu_class_is_omap1()) {
650 l = dma_read(CSDP, lch); 482 l = p->dma_read(CSDP, lch);
651 l &= ~(0x1f << 9); 483 l &= ~(0x1f << 9);
652 l |= dest_port << 9; 484 l |= dest_port << 9;
653 dma_write(l, CSDP, lch); 485 p->dma_write(l, CSDP, lch);
654 } 486 }
655 487
656 l = dma_read(CCR, lch); 488 l = p->dma_read(CCR, lch);
657 l &= ~(0x03 << 14); 489 l &= ~(0x03 << 14);
658 l |= dest_amode << 14; 490 l |= dest_amode << 14;
659 dma_write(l, CCR, lch); 491 p->dma_write(l, CCR, lch);
660 492
661 dma_write(dest_start, CDSA, lch); 493 p->dma_write(dest_start, CDSA, lch);
662 494
663 dma_write(dst_ei, CDEI, lch); 495 p->dma_write(dst_ei, CDEI, lch);
664 dma_write(dst_fi, CDFI, lch); 496 p->dma_write(dst_fi, CDFI, lch);
665} 497}
666EXPORT_SYMBOL(omap_set_dma_dest_params); 498EXPORT_SYMBOL(omap_set_dma_dest_params);
667 499
@@ -670,8 +502,8 @@ void omap_set_dma_dest_index(int lch, int eidx, int fidx)
670 if (cpu_class_is_omap2()) 502 if (cpu_class_is_omap2())
671 return; 503 return;
672 504
673 dma_write(eidx, CDEI, lch); 505 p->dma_write(eidx, CDEI, lch);
674 dma_write(fidx, CDFI, lch); 506 p->dma_write(fidx, CDFI, lch);
675} 507}
676EXPORT_SYMBOL(omap_set_dma_dest_index); 508EXPORT_SYMBOL(omap_set_dma_dest_index);
677 509
@@ -679,11 +511,11 @@ void omap_set_dma_dest_data_pack(int lch, int enable)
679{ 511{
680 u32 l; 512 u32 l;
681 513
682 l = dma_read(CSDP, lch); 514 l = p->dma_read(CSDP, lch);
683 l &= ~(1 << 13); 515 l &= ~(1 << 13);
684 if (enable) 516 if (enable)
685 l |= 1 << 13; 517 l |= 1 << 13;
686 dma_write(l, CSDP, lch); 518 p->dma_write(l, CSDP, lch);
687} 519}
688EXPORT_SYMBOL(omap_set_dma_dest_data_pack); 520EXPORT_SYMBOL(omap_set_dma_dest_data_pack);
689 521
@@ -692,7 +524,7 @@ void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
692 unsigned int burst = 0; 524 unsigned int burst = 0;
693 u32 l; 525 u32 l;
694 526
695 l = dma_read(CSDP, lch); 527 l = p->dma_read(CSDP, lch);
696 l &= ~(0x03 << 14); 528 l &= ~(0x03 << 14);
697 529
698 switch (burst_mode) { 530 switch (burst_mode) {
@@ -725,7 +557,7 @@ void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
725 return; 557 return;
726 } 558 }
727 l |= (burst << 14); 559 l |= (burst << 14);
728 dma_write(l, CSDP, lch); 560 p->dma_write(l, CSDP, lch);
729} 561}
730EXPORT_SYMBOL(omap_set_dma_dest_burst_mode); 562EXPORT_SYMBOL(omap_set_dma_dest_burst_mode);
731 563
@@ -735,18 +567,18 @@ static inline void omap_enable_channel_irq(int lch)
735 567
736 /* Clear CSR */ 568 /* Clear CSR */
737 if (cpu_class_is_omap1()) 569 if (cpu_class_is_omap1())
738 status = dma_read(CSR, lch); 570 status = p->dma_read(CSR, lch);
739 else if (cpu_class_is_omap2()) 571 else if (cpu_class_is_omap2())
740 dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch); 572 p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);
741 573
742 /* Enable some nice interrupts. */ 574 /* Enable some nice interrupts. */
743 dma_write(dma_chan[lch].enabled_irqs, CICR, lch); 575 p->dma_write(dma_chan[lch].enabled_irqs, CICR, lch);
744} 576}
745 577
746static void omap_disable_channel_irq(int lch) 578static void omap_disable_channel_irq(int lch)
747{ 579{
748 if (cpu_class_is_omap2()) 580 if (cpu_class_is_omap2())
749 dma_write(0, CICR, lch); 581 p->dma_write(0, CICR, lch);
750} 582}
751 583
752void omap_enable_dma_irq(int lch, u16 bits) 584void omap_enable_dma_irq(int lch, u16 bits)
@@ -765,7 +597,7 @@ static inline void enable_lnk(int lch)
765{ 597{
766 u32 l; 598 u32 l;
767 599
768 l = dma_read(CLNK_CTRL, lch); 600 l = p->dma_read(CLNK_CTRL, lch);
769 601
770 if (cpu_class_is_omap1()) 602 if (cpu_class_is_omap1())
771 l &= ~(1 << 14); 603 l &= ~(1 << 14);
@@ -780,18 +612,18 @@ static inline void enable_lnk(int lch)
780 l = dma_chan[lch].next_linked_ch | (1 << 15); 612 l = dma_chan[lch].next_linked_ch | (1 << 15);
781#endif 613#endif
782 614
783 dma_write(l, CLNK_CTRL, lch); 615 p->dma_write(l, CLNK_CTRL, lch);
784} 616}
785 617
786static inline void disable_lnk(int lch) 618static inline void disable_lnk(int lch)
787{ 619{
788 u32 l; 620 u32 l;
789 621
790 l = dma_read(CLNK_CTRL, lch); 622 l = p->dma_read(CLNK_CTRL, lch);
791 623
792 /* Disable interrupts */ 624 /* Disable interrupts */
793 if (cpu_class_is_omap1()) { 625 if (cpu_class_is_omap1()) {
794 dma_write(0, CICR, lch); 626 p->dma_write(0, CICR, lch);
795 /* Set the STOP_LNK bit */ 627 /* Set the STOP_LNK bit */
796 l |= 1 << 14; 628 l |= 1 << 14;
797 } 629 }
@@ -802,7 +634,7 @@ static inline void disable_lnk(int lch)
802 l &= ~(1 << 15); 634 l &= ~(1 << 15);
803 } 635 }
804 636
805 dma_write(l, CLNK_CTRL, lch); 637 p->dma_write(l, CLNK_CTRL, lch);
806 dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE; 638 dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
807} 639}
808 640
@@ -815,9 +647,9 @@ static inline void omap2_enable_irq_lch(int lch)
815 return; 647 return;
816 648
817 spin_lock_irqsave(&dma_chan_lock, flags); 649 spin_lock_irqsave(&dma_chan_lock, flags);
818 val = dma_read(IRQENABLE_L0, lch); 650 val = p->dma_read(IRQENABLE_L0, lch);
819 val |= 1 << lch; 651 val |= 1 << lch;
820 dma_write(val, IRQENABLE_L0, lch); 652 p->dma_write(val, IRQENABLE_L0, lch);
821 spin_unlock_irqrestore(&dma_chan_lock, flags); 653 spin_unlock_irqrestore(&dma_chan_lock, flags);
822} 654}
823 655
@@ -830,9 +662,9 @@ static inline void omap2_disable_irq_lch(int lch)
830 return; 662 return;
831 663
832 spin_lock_irqsave(&dma_chan_lock, flags); 664 spin_lock_irqsave(&dma_chan_lock, flags);
833 val = dma_read(IRQENABLE_L0, lch); 665 val = p->dma_read(IRQENABLE_L0, lch);
834 val &= ~(1 << lch); 666 val &= ~(1 << lch);
835 dma_write(val, IRQENABLE_L0, lch); 667 p->dma_write(val, IRQENABLE_L0, lch);
836 spin_unlock_irqrestore(&dma_chan_lock, flags); 668 spin_unlock_irqrestore(&dma_chan_lock, flags);
837} 669}
838 670
@@ -859,8 +691,8 @@ int omap_request_dma(int dev_id, const char *dev_name,
859 chan = dma_chan + free_ch; 691 chan = dma_chan + free_ch;
860 chan->dev_id = dev_id; 692 chan->dev_id = dev_id;
861 693
862 if (cpu_class_is_omap1()) 694 if (p->clear_lch_regs)
863 clear_lch_regs(free_ch); 695 p->clear_lch_regs(free_ch);
864 696
865 if (cpu_class_is_omap2()) 697 if (cpu_class_is_omap2())
866 omap_clear_dma(free_ch); 698 omap_clear_dma(free_ch);
@@ -897,17 +729,17 @@ int omap_request_dma(int dev_id, const char *dev_name,
897 * Disable the 1510 compatibility mode and set the sync device 729 * Disable the 1510 compatibility mode and set the sync device
898 * id. 730 * id.
899 */ 731 */
900 dma_write(dev_id | (1 << 10), CCR, free_ch); 732 p->dma_write(dev_id | (1 << 10), CCR, free_ch);
901 } else if (cpu_is_omap7xx() || cpu_is_omap15xx()) { 733 } else if (cpu_is_omap7xx() || cpu_is_omap15xx()) {
902 dma_write(dev_id, CCR, free_ch); 734 p->dma_write(dev_id, CCR, free_ch);
903 } 735 }
904 736
905 if (cpu_class_is_omap2()) { 737 if (cpu_class_is_omap2()) {
906 omap2_enable_irq_lch(free_ch); 738 omap2_enable_irq_lch(free_ch);
907 omap_enable_channel_irq(free_ch); 739 omap_enable_channel_irq(free_ch);
908 /* Clear the CSR register and IRQ status register */ 740 /* Clear the CSR register and IRQ status register */
909 dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, free_ch); 741 p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, free_ch);
910 dma_write(1 << free_ch, IRQSTATUS_L0, 0); 742 p->dma_write(1 << free_ch, IRQSTATUS_L0, 0);
911 } 743 }
912 744
913 *dma_ch_out = free_ch; 745 *dma_ch_out = free_ch;
@@ -928,23 +760,23 @@ void omap_free_dma(int lch)
928 760
929 if (cpu_class_is_omap1()) { 761 if (cpu_class_is_omap1()) {
930 /* Disable all DMA interrupts for the channel. */ 762 /* Disable all DMA interrupts for the channel. */
931 dma_write(0, CICR, lch); 763 p->dma_write(0, CICR, lch);
932 /* Make sure the DMA transfer is stopped. */ 764 /* Make sure the DMA transfer is stopped. */
933 dma_write(0, CCR, lch); 765 p->dma_write(0, CCR, lch);
934 } 766 }
935 767
936 if (cpu_class_is_omap2()) { 768 if (cpu_class_is_omap2()) {
937 omap2_disable_irq_lch(lch); 769 omap2_disable_irq_lch(lch);
938 770
939 /* Clear the CSR register and IRQ status register */ 771 /* Clear the CSR register and IRQ status register */
940 dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch); 772 p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);
941 dma_write(1 << lch, IRQSTATUS_L0, lch); 773 p->dma_write(1 << lch, IRQSTATUS_L0, lch);
942 774
943 /* Disable all DMA interrupts for the channel. */ 775 /* Disable all DMA interrupts for the channel. */
944 dma_write(0, CICR, lch); 776 p->dma_write(0, CICR, lch);
945 777
946 /* Make sure the DMA transfer is stopped. */ 778 /* Make sure the DMA transfer is stopped. */
947 dma_write(0, CCR, lch); 779 p->dma_write(0, CCR, lch);
948 omap_clear_dma(lch); 780 omap_clear_dma(lch);
949 } 781 }
950 782
@@ -985,7 +817,7 @@ omap_dma_set_global_params(int arb_rate, int max_fifo_depth, int tparams)
985 reg |= (0x3 & tparams) << 12; 817 reg |= (0x3 & tparams) << 12;
986 reg |= (arb_rate & 0xff) << 16; 818 reg |= (arb_rate & 0xff) << 16;
987 819
988 dma_write(reg, GCR, 0); 820 p->dma_write(reg, GCR, 0);
989} 821}
990EXPORT_SYMBOL(omap_dma_set_global_params); 822EXPORT_SYMBOL(omap_dma_set_global_params);
991 823
@@ -1008,14 +840,14 @@ omap_dma_set_prio_lch(int lch, unsigned char read_prio,
1008 printk(KERN_ERR "Invalid channel id\n"); 840 printk(KERN_ERR "Invalid channel id\n");
1009 return -EINVAL; 841 return -EINVAL;
1010 } 842 }
1011 l = dma_read(CCR, lch); 843 l = p->dma_read(CCR, lch);
1012 l &= ~((1 << 6) | (1 << 26)); 844 l &= ~((1 << 6) | (1 << 26));
1013 if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx()) 845 if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx())
1014 l |= ((read_prio & 0x1) << 6) | ((write_prio & 0x1) << 26); 846 l |= ((read_prio & 0x1) << 6) | ((write_prio & 0x1) << 26);
1015 else 847 else
1016 l |= ((read_prio & 0x1) << 6); 848 l |= ((read_prio & 0x1) << 6);
1017 849
1018 dma_write(l, CCR, lch); 850 p->dma_write(l, CCR, lch);
1019 851
1020 return 0; 852 return 0;
1021} 853}
@@ -1030,24 +862,7 @@ void omap_clear_dma(int lch)
1030 unsigned long flags; 862 unsigned long flags;
1031 863
1032 local_irq_save(flags); 864 local_irq_save(flags);
1033 865 p->clear_dma(lch);
1034 if (cpu_class_is_omap1()) {
1035 u32 l;
1036
1037 l = dma_read(CCR, lch);
1038 l &= ~OMAP_DMA_CCR_EN;
1039 dma_write(l, CCR, lch);
1040
1041 /* Clear pending interrupts */
1042 l = dma_read(CSR, lch);
1043 }
1044
1045 if (cpu_class_is_omap2()) {
1046 int i = dma_common_ch_start;
1047 for (; i <= dma_common_ch_end; i += 1)
1048 dma_write(0, i, lch);
1049 }
1050
1051 local_irq_restore(flags); 866 local_irq_restore(flags);
1052} 867}
1053EXPORT_SYMBOL(omap_clear_dma); 868EXPORT_SYMBOL(omap_clear_dma);
@@ -1061,13 +876,13 @@ void omap_start_dma(int lch)
1061 * before starting dma transfer. 876 * before starting dma transfer.
1062 */ 877 */
1063 if (cpu_is_omap15xx()) 878 if (cpu_is_omap15xx())
1064 dma_write(0, CPC, lch); 879 p->dma_write(0, CPC, lch);
1065 else 880 else
1066 dma_write(0, CDAC, lch); 881 p->dma_write(0, CDAC, lch);
1067 882
1068 if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) { 883 if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
1069 int next_lch, cur_lch; 884 int next_lch, cur_lch;
1070 char dma_chan_link_map[OMAP_DMA4_LOGICAL_DMA_CH_COUNT]; 885 char dma_chan_link_map[dma_lch_count];
1071 886
1072 dma_chan_link_map[lch] = 1; 887 dma_chan_link_map[lch] = 1;
1073 /* Set the link register of the first channel */ 888 /* Set the link register of the first channel */
@@ -1090,17 +905,17 @@ void omap_start_dma(int lch)
1090 cur_lch = next_lch; 905 cur_lch = next_lch;
1091 } while (next_lch != -1); 906 } while (next_lch != -1);
1092 } else if (IS_DMA_ERRATA(DMA_ERRATA_PARALLEL_CHANNELS)) 907 } else if (IS_DMA_ERRATA(DMA_ERRATA_PARALLEL_CHANNELS))
1093 dma_write(lch, CLNK_CTRL, lch); 908 p->dma_write(lch, CLNK_CTRL, lch);
1094 909
1095 omap_enable_channel_irq(lch); 910 omap_enable_channel_irq(lch);
1096 911
1097 l = dma_read(CCR, lch); 912 l = p->dma_read(CCR, lch);
1098 913
1099 if (IS_DMA_ERRATA(DMA_ERRATA_IFRAME_BUFFERING)) 914 if (IS_DMA_ERRATA(DMA_ERRATA_IFRAME_BUFFERING))
1100 l |= OMAP_DMA_CCR_BUFFERING_DISABLE; 915 l |= OMAP_DMA_CCR_BUFFERING_DISABLE;
1101 l |= OMAP_DMA_CCR_EN; 916 l |= OMAP_DMA_CCR_EN;
1102 917
1103 dma_write(l, CCR, lch); 918 p->dma_write(l, CCR, lch);
1104 919
1105 dma_chan[lch].flags |= OMAP_DMA_ACTIVE; 920 dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
1106} 921}
@@ -1112,46 +927,46 @@ void omap_stop_dma(int lch)
1112 927
1113 /* Disable all interrupts on the channel */ 928 /* Disable all interrupts on the channel */
1114 if (cpu_class_is_omap1()) 929 if (cpu_class_is_omap1())
1115 dma_write(0, CICR, lch); 930 p->dma_write(0, CICR, lch);
1116 931
1117 l = dma_read(CCR, lch); 932 l = p->dma_read(CCR, lch);
1118 if (IS_DMA_ERRATA(DMA_ERRATA_i541) && 933 if (IS_DMA_ERRATA(DMA_ERRATA_i541) &&
1119 (l & OMAP_DMA_CCR_SEL_SRC_DST_SYNC)) { 934 (l & OMAP_DMA_CCR_SEL_SRC_DST_SYNC)) {
1120 int i = 0; 935 int i = 0;
1121 u32 sys_cf; 936 u32 sys_cf;
1122 937
1123 /* Configure No-Standby */ 938 /* Configure No-Standby */
1124 l = dma_read(OCP_SYSCONFIG, lch); 939 l = p->dma_read(OCP_SYSCONFIG, lch);
1125 sys_cf = l; 940 sys_cf = l;
1126 l &= ~DMA_SYSCONFIG_MIDLEMODE_MASK; 941 l &= ~DMA_SYSCONFIG_MIDLEMODE_MASK;
1127 l |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE); 942 l |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
1128 dma_write(l , OCP_SYSCONFIG, 0); 943 p->dma_write(l , OCP_SYSCONFIG, 0);
1129 944
1130 l = dma_read(CCR, lch); 945 l = p->dma_read(CCR, lch);
1131 l &= ~OMAP_DMA_CCR_EN; 946 l &= ~OMAP_DMA_CCR_EN;
1132 dma_write(l, CCR, lch); 947 p->dma_write(l, CCR, lch);
1133 948
1134 /* Wait for sDMA FIFO drain */ 949 /* Wait for sDMA FIFO drain */
1135 l = dma_read(CCR, lch); 950 l = p->dma_read(CCR, lch);
1136 while (i < 100 && (l & (OMAP_DMA_CCR_RD_ACTIVE | 951 while (i < 100 && (l & (OMAP_DMA_CCR_RD_ACTIVE |
1137 OMAP_DMA_CCR_WR_ACTIVE))) { 952 OMAP_DMA_CCR_WR_ACTIVE))) {
1138 udelay(5); 953 udelay(5);
1139 i++; 954 i++;
1140 l = dma_read(CCR, lch); 955 l = p->dma_read(CCR, lch);
1141 } 956 }
1142 if (i >= 100) 957 if (i >= 100)
1143 printk(KERN_ERR "DMA drain did not complete on " 958 printk(KERN_ERR "DMA drain did not complete on "
1144 "lch %d\n", lch); 959 "lch %d\n", lch);
1145 /* Restore OCP_SYSCONFIG */ 960 /* Restore OCP_SYSCONFIG */
1146 dma_write(sys_cf, OCP_SYSCONFIG, lch); 961 p->dma_write(sys_cf, OCP_SYSCONFIG, lch);
1147 } else { 962 } else {
1148 l &= ~OMAP_DMA_CCR_EN; 963 l &= ~OMAP_DMA_CCR_EN;
1149 dma_write(l, CCR, lch); 964 p->dma_write(l, CCR, lch);
1150 } 965 }
1151 966
1152 if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) { 967 if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
1153 int next_lch, cur_lch = lch; 968 int next_lch, cur_lch = lch;
1154 char dma_chan_link_map[OMAP_DMA4_LOGICAL_DMA_CH_COUNT]; 969 char dma_chan_link_map[dma_lch_count];
1155 970
1156 memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map)); 971 memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
1157 do { 972 do {
@@ -1212,15 +1027,15 @@ dma_addr_t omap_get_dma_src_pos(int lch)
1212 dma_addr_t offset = 0; 1027 dma_addr_t offset = 0;
1213 1028
1214 if (cpu_is_omap15xx()) 1029 if (cpu_is_omap15xx())
1215 offset = dma_read(CPC, lch); 1030 offset = p->dma_read(CPC, lch);
1216 else 1031 else
1217 offset = dma_read(CSAC, lch); 1032 offset = p->dma_read(CSAC, lch);
1218 1033
1219 if (IS_DMA_ERRATA(DMA_ERRATA_3_3) && offset == 0) 1034 if (IS_DMA_ERRATA(DMA_ERRATA_3_3) && offset == 0)
1220 offset = dma_read(CSAC, lch); 1035 offset = p->dma_read(CSAC, lch);
1221 1036
1222 if (cpu_class_is_omap1()) 1037 if (cpu_class_is_omap1())
1223 offset |= (dma_read(CSSA, lch) & 0xFFFF0000); 1038 offset |= (p->dma_read(CSSA, lch) & 0xFFFF0000);
1224 1039
1225 return offset; 1040 return offset;
1226} 1041}
@@ -1239,19 +1054,19 @@ dma_addr_t omap_get_dma_dst_pos(int lch)
1239 dma_addr_t offset = 0; 1054 dma_addr_t offset = 0;
1240 1055
1241 if (cpu_is_omap15xx()) 1056 if (cpu_is_omap15xx())
1242 offset = dma_read(CPC, lch); 1057 offset = p->dma_read(CPC, lch);
1243 else 1058 else
1244 offset = dma_read(CDAC, lch); 1059 offset = p->dma_read(CDAC, lch);
1245 1060
1246 /* 1061 /*
1247 * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is 1062 * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
1248 * read before the DMA controller finished disabling the channel. 1063 * read before the DMA controller finished disabling the channel.
1249 */ 1064 */
1250 if (!cpu_is_omap15xx() && offset == 0) 1065 if (!cpu_is_omap15xx() && offset == 0)
1251 offset = dma_read(CDAC, lch); 1066 offset = p->dma_read(CDAC, lch);
1252 1067
1253 if (cpu_class_is_omap1()) 1068 if (cpu_class_is_omap1())
1254 offset |= (dma_read(CDSA, lch) & 0xFFFF0000); 1069 offset |= (p->dma_read(CDSA, lch) & 0xFFFF0000);
1255 1070
1256 return offset; 1071 return offset;
1257} 1072}
@@ -1259,7 +1074,7 @@ EXPORT_SYMBOL(omap_get_dma_dst_pos);
1259 1074
1260int omap_get_dma_active_status(int lch) 1075int omap_get_dma_active_status(int lch)
1261{ 1076{
1262 return (dma_read(CCR, lch) & OMAP_DMA_CCR_EN) != 0; 1077 return (p->dma_read(CCR, lch) & OMAP_DMA_CCR_EN) != 0;
1263} 1078}
1264EXPORT_SYMBOL(omap_get_dma_active_status); 1079EXPORT_SYMBOL(omap_get_dma_active_status);
1265 1080
@@ -1272,7 +1087,7 @@ int omap_dma_running(void)
1272 return 1; 1087 return 1;
1273 1088
1274 for (lch = 0; lch < dma_chan_count; lch++) 1089 for (lch = 0; lch < dma_chan_count; lch++)
1275 if (dma_read(CCR, lch) & OMAP_DMA_CCR_EN) 1090 if (p->dma_read(CCR, lch) & OMAP_DMA_CCR_EN)
1276 return 1; 1091 return 1;
1277 1092
1278 return 0; 1093 return 0;
@@ -1287,7 +1102,7 @@ void omap_dma_link_lch(int lch_head, int lch_queue)
1287{ 1102{
1288 if (omap_dma_in_1510_mode()) { 1103 if (omap_dma_in_1510_mode()) {
1289 if (lch_head == lch_queue) { 1104 if (lch_head == lch_queue) {
1290 dma_write(dma_read(CCR, lch_head) | (3 << 8), 1105 p->dma_write(p->dma_read(CCR, lch_head) | (3 << 8),
1291 CCR, lch_head); 1106 CCR, lch_head);
1292 return; 1107 return;
1293 } 1108 }
@@ -1314,7 +1129,7 @@ void omap_dma_unlink_lch(int lch_head, int lch_queue)
1314{ 1129{
1315 if (omap_dma_in_1510_mode()) { 1130 if (omap_dma_in_1510_mode()) {
1316 if (lch_head == lch_queue) { 1131 if (lch_head == lch_queue) {
1317 dma_write(dma_read(CCR, lch_head) & ~(3 << 8), 1132 p->dma_write(p->dma_read(CCR, lch_head) & ~(3 << 8),
1318 CCR, lch_head); 1133 CCR, lch_head);
1319 return; 1134 return;
1320 } 1135 }
@@ -1341,8 +1156,6 @@ void omap_dma_unlink_lch(int lch_head, int lch_queue)
1341} 1156}
1342EXPORT_SYMBOL(omap_dma_unlink_lch); 1157EXPORT_SYMBOL(omap_dma_unlink_lch);
1343 1158
1344/*----------------------------------------------------------------------------*/
1345
1346#ifndef CONFIG_ARCH_OMAP1 1159#ifndef CONFIG_ARCH_OMAP1
1347/* Create chain of DMA channels */ 1160
1348static void create_dma_lch_chain(int lch_head, int lch_queue) 1161static void create_dma_lch_chain(int lch_head, int lch_queue)
@@ -1367,15 +1180,15 @@ static void create_dma_lch_chain(int lch_head, int lch_queue)
1367 lch_queue; 1180 lch_queue;
1368 } 1181 }
1369 1182
1370 l = dma_read(CLNK_CTRL, lch_head); 1183 l = p->dma_read(CLNK_CTRL, lch_head);
1371 l &= ~(0x1f); 1184 l &= ~(0x1f);
1372 l |= lch_queue; 1185 l |= lch_queue;
1373 dma_write(l, CLNK_CTRL, lch_head); 1186 p->dma_write(l, CLNK_CTRL, lch_head);
1374 1187
1375 l = dma_read(CLNK_CTRL, lch_queue); 1188 l = p->dma_read(CLNK_CTRL, lch_queue);
1376 l &= ~(0x1f); 1189 l &= ~(0x1f);
1377 l |= (dma_chan[lch_queue].next_linked_ch); 1190 l |= (dma_chan[lch_queue].next_linked_ch);
1378 dma_write(l, CLNK_CTRL, lch_queue); 1191 p->dma_write(l, CLNK_CTRL, lch_queue);
1379} 1192}
1380 1193
1381/** 1194/**
@@ -1651,13 +1464,13 @@ int omap_dma_chain_a_transfer(int chain_id, int src_start, int dest_start,
1651 1464
1652 /* Set the params to the free channel */ 1465 /* Set the params to the free channel */
1653 if (src_start != 0) 1466 if (src_start != 0)
1654 dma_write(src_start, CSSA, lch); 1467 p->dma_write(src_start, CSSA, lch);
1655 if (dest_start != 0) 1468 if (dest_start != 0)
1656 dma_write(dest_start, CDSA, lch); 1469 p->dma_write(dest_start, CDSA, lch);
1657 1470
1658 /* Write the buffer size */ 1471 /* Write the buffer size */
1659 dma_write(elem_count, CEN, lch); 1472 p->dma_write(elem_count, CEN, lch);
1660 dma_write(frame_count, CFN, lch); 1473 p->dma_write(frame_count, CFN, lch);
1661 1474
1662 /* 1475 /*
1663 * If the chain is dynamically linked, 1476 * If the chain is dynamically linked,
@@ -1690,7 +1503,7 @@ int omap_dma_chain_a_transfer(int chain_id, int src_start, int dest_start,
1690 enable_lnk(dma_chan[lch].prev_linked_ch); 1503 enable_lnk(dma_chan[lch].prev_linked_ch);
1691 dma_chan[lch].state = DMA_CH_QUEUED; 1504 dma_chan[lch].state = DMA_CH_QUEUED;
1692 start_dma = 0; 1505 start_dma = 0;
1693 if (0 == ((1 << 7) & dma_read( 1506 if (0 == ((1 << 7) & p->dma_read(
1694 CCR, dma_chan[lch].prev_linked_ch))) { 1507 CCR, dma_chan[lch].prev_linked_ch))) {
1695 disable_lnk(dma_chan[lch]. 1508 disable_lnk(dma_chan[lch].
1696 prev_linked_ch); 1509 prev_linked_ch);
@@ -1707,7 +1520,7 @@ int omap_dma_chain_a_transfer(int chain_id, int src_start, int dest_start,
1707 } 1520 }
1708 omap_enable_channel_irq(lch); 1521 omap_enable_channel_irq(lch);
1709 1522
1710 l = dma_read(CCR, lch); 1523 l = p->dma_read(CCR, lch);
1711 1524
1712 if ((0 == (l & (1 << 24)))) 1525 if ((0 == (l & (1 << 24))))
1713 l &= ~(1 << 25); 1526 l &= ~(1 << 25);
@@ -1718,12 +1531,12 @@ int omap_dma_chain_a_transfer(int chain_id, int src_start, int dest_start,
1718 l |= (1 << 7); 1531 l |= (1 << 7);
1719 dma_chan[lch].state = DMA_CH_STARTED; 1532 dma_chan[lch].state = DMA_CH_STARTED;
1720 pr_debug("starting %d\n", lch); 1533 pr_debug("starting %d\n", lch);
1721 dma_write(l, CCR, lch); 1534 p->dma_write(l, CCR, lch);
1722 } else 1535 } else
1723 start_dma = 0; 1536 start_dma = 0;
1724 } else { 1537 } else {
1725 if (0 == (l & (1 << 7))) 1538 if (0 == (l & (1 << 7)))
1726 dma_write(l, CCR, lch); 1539 p->dma_write(l, CCR, lch);
1727 } 1540 }
1728 dma_chan[lch].flags |= OMAP_DMA_ACTIVE; 1541 dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
1729 } 1542 }
@@ -1768,7 +1581,7 @@ int omap_start_dma_chain_transfers(int chain_id)
1768 omap_enable_channel_irq(channels[0]); 1581 omap_enable_channel_irq(channels[0]);
1769 } 1582 }
1770 1583
1771 l = dma_read(CCR, channels[0]); 1584 l = p->dma_read(CCR, channels[0]);
1772 l |= (1 << 7); 1585 l |= (1 << 7);
1773 dma_linked_lch[chain_id].chain_state = DMA_CHAIN_STARTED; 1586 dma_linked_lch[chain_id].chain_state = DMA_CHAIN_STARTED;
1774 dma_chan[channels[0]].state = DMA_CH_STARTED; 1587 dma_chan[channels[0]].state = DMA_CH_STARTED;
@@ -1777,7 +1590,7 @@ int omap_start_dma_chain_transfers(int chain_id)
1777 l &= ~(1 << 25); 1590 l &= ~(1 << 25);
1778 else 1591 else
1779 l |= (1 << 25); 1592 l |= (1 << 25);
1780 dma_write(l, CCR, channels[0]); 1593 p->dma_write(l, CCR, channels[0]);
1781 1594
1782 dma_chan[channels[0]].flags |= OMAP_DMA_ACTIVE; 1595 dma_chan[channels[0]].flags |= OMAP_DMA_ACTIVE;
1783 1596
@@ -1813,19 +1626,19 @@ int omap_stop_dma_chain_transfers(int chain_id)
1813 channels = dma_linked_lch[chain_id].linked_dmach_q; 1626 channels = dma_linked_lch[chain_id].linked_dmach_q;
1814 1627
1815 if (IS_DMA_ERRATA(DMA_ERRATA_i88)) { 1628 if (IS_DMA_ERRATA(DMA_ERRATA_i88)) {
1816 sys_cf = dma_read(OCP_SYSCONFIG, 0); 1629 sys_cf = p->dma_read(OCP_SYSCONFIG, 0);
1817 l = sys_cf; 1630 l = sys_cf;
1818 /* Middle mode reg set no Standby */ 1631 /* Middle mode reg set no Standby */
1819 l &= ~((1 << 12)|(1 << 13)); 1632 l &= ~((1 << 12)|(1 << 13));
1820 dma_write(l, OCP_SYSCONFIG, 0); 1633 p->dma_write(l, OCP_SYSCONFIG, 0);
1821 } 1634 }
1822 1635
1823 for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) { 1636 for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1824 1637
1825 /* Stop the Channel transmission */ 1638 /* Stop the Channel transmission */
1826 l = dma_read(CCR, channels[i]); 1639 l = p->dma_read(CCR, channels[i]);
1827 l &= ~(1 << 7); 1640 l &= ~(1 << 7);
1828 dma_write(l, CCR, channels[i]); 1641 p->dma_write(l, CCR, channels[i]);
1829 1642
1830 /* Disable the link in all the channels */ 1643 /* Disable the link in all the channels */
1831 disable_lnk(channels[i]); 1644 disable_lnk(channels[i]);
@@ -1838,7 +1651,7 @@ int omap_stop_dma_chain_transfers(int chain_id)
1838 OMAP_DMA_CHAIN_QINIT(chain_id); 1651 OMAP_DMA_CHAIN_QINIT(chain_id);
1839 1652
1840 if (IS_DMA_ERRATA(DMA_ERRATA_i88)) 1653 if (IS_DMA_ERRATA(DMA_ERRATA_i88))
1841 dma_write(sys_cf, OCP_SYSCONFIG, 0); 1654 p->dma_write(sys_cf, OCP_SYSCONFIG, 0);
1842 1655
1843 return 0; 1656 return 0;
1844} 1657}
@@ -1880,8 +1693,8 @@ int omap_get_dma_chain_index(int chain_id, int *ei, int *fi)
1880 /* Get the current channel */ 1693 /* Get the current channel */
1881 lch = channels[dma_linked_lch[chain_id].q_head]; 1694 lch = channels[dma_linked_lch[chain_id].q_head];
1882 1695
1883 *ei = dma_read(CCEN, lch); 1696 *ei = p->dma_read(CCEN, lch);
1884 *fi = dma_read(CCFN, lch); 1697 *fi = p->dma_read(CCFN, lch);
1885 1698
1886 return 0; 1699 return 0;
1887} 1700}
@@ -1918,7 +1731,7 @@ int omap_get_dma_chain_dst_pos(int chain_id)
1918 /* Get the current channel */ 1731 /* Get the current channel */
1919 lch = channels[dma_linked_lch[chain_id].q_head]; 1732 lch = channels[dma_linked_lch[chain_id].q_head];
1920 1733
1921 return dma_read(CDAC, lch); 1734 return p->dma_read(CDAC, lch);
1922} 1735}
1923EXPORT_SYMBOL(omap_get_dma_chain_dst_pos); 1736EXPORT_SYMBOL(omap_get_dma_chain_dst_pos);
1924 1737
@@ -1952,7 +1765,7 @@ int omap_get_dma_chain_src_pos(int chain_id)
1952 /* Get the current channel */ 1765 /* Get the current channel */
1953 lch = channels[dma_linked_lch[chain_id].q_head]; 1766 lch = channels[dma_linked_lch[chain_id].q_head];
1954 1767
1955 return dma_read(CSAC, lch); 1768 return p->dma_read(CSAC, lch);
1956} 1769}
1957EXPORT_SYMBOL(omap_get_dma_chain_src_pos); 1770EXPORT_SYMBOL(omap_get_dma_chain_src_pos);
1958#endif /* ifndef CONFIG_ARCH_OMAP1 */ 1771#endif /* ifndef CONFIG_ARCH_OMAP1 */
@@ -1969,7 +1782,7 @@ static int omap1_dma_handle_ch(int ch)
1969 csr = dma_chan[ch].saved_csr; 1782 csr = dma_chan[ch].saved_csr;
1970 dma_chan[ch].saved_csr = 0; 1783 dma_chan[ch].saved_csr = 0;
1971 } else 1784 } else
1972 csr = dma_read(CSR, ch); 1785 csr = p->dma_read(CSR, ch);
1973 if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) { 1786 if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) {
1974 dma_chan[ch + 6].saved_csr = csr >> 7; 1787 dma_chan[ch + 6].saved_csr = csr >> 7;
1975 csr &= 0x7f; 1788 csr &= 0x7f;
@@ -2022,13 +1835,13 @@ static irqreturn_t omap1_dma_irq_handler(int irq, void *dev_id)
2022 1835
2023static int omap2_dma_handle_ch(int ch) 1836static int omap2_dma_handle_ch(int ch)
2024{ 1837{
2025 u32 status = dma_read(CSR, ch); 1838 u32 status = p->dma_read(CSR, ch);
2026 1839
2027 if (!status) { 1840 if (!status) {
2028 if (printk_ratelimit()) 1841 if (printk_ratelimit())
2029 printk(KERN_WARNING "Spurious DMA IRQ for lch %d\n", 1842 printk(KERN_WARNING "Spurious DMA IRQ for lch %d\n",
2030 ch); 1843 ch);
2031 dma_write(1 << ch, IRQSTATUS_L0, ch); 1844 p->dma_write(1 << ch, IRQSTATUS_L0, ch);
2032 return 0; 1845 return 0;
2033 } 1846 }
2034 if (unlikely(dma_chan[ch].dev_id == -1)) { 1847 if (unlikely(dma_chan[ch].dev_id == -1)) {
@@ -2047,9 +1860,9 @@ static int omap2_dma_handle_ch(int ch)
2047 if (IS_DMA_ERRATA(DMA_ERRATA_i378)) { 1860 if (IS_DMA_ERRATA(DMA_ERRATA_i378)) {
2048 u32 ccr; 1861 u32 ccr;
2049 1862
2050 ccr = dma_read(CCR, ch); 1863 ccr = p->dma_read(CCR, ch);
2051 ccr &= ~OMAP_DMA_CCR_EN; 1864 ccr &= ~OMAP_DMA_CCR_EN;
2052 dma_write(ccr, CCR, ch); 1865 p->dma_write(ccr, CCR, ch);
2053 dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE; 1866 dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
2054 } 1867 }
2055 } 1868 }
@@ -2060,16 +1873,16 @@ static int omap2_dma_handle_ch(int ch)
2060 printk(KERN_INFO "DMA misaligned error with device %d\n", 1873 printk(KERN_INFO "DMA misaligned error with device %d\n",
2061 dma_chan[ch].dev_id); 1874 dma_chan[ch].dev_id);
2062 1875
2063 dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, ch); 1876 p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, ch);
2064 dma_write(1 << ch, IRQSTATUS_L0, ch); 1877 p->dma_write(1 << ch, IRQSTATUS_L0, ch);
2065 /* read back the register to flush the write */ 1878 /* read back the register to flush the write */
2066 dma_read(IRQSTATUS_L0, ch); 1879 p->dma_read(IRQSTATUS_L0, ch);
2067 1880
2068 /* If the ch is not chained then chain_id will be -1 */ 1881 /* If the ch is not chained then chain_id will be -1 */
2069 if (dma_chan[ch].chain_id != -1) { 1882 if (dma_chan[ch].chain_id != -1) {
2070 int chain_id = dma_chan[ch].chain_id; 1883 int chain_id = dma_chan[ch].chain_id;
2071 dma_chan[ch].state = DMA_CH_NOTSTARTED; 1884 dma_chan[ch].state = DMA_CH_NOTSTARTED;
2072 if (dma_read(CLNK_CTRL, ch) & (1 << 15)) 1885 if (p->dma_read(CLNK_CTRL, ch) & (1 << 15))
2073 dma_chan[dma_chan[ch].next_linked_ch].state = 1886 dma_chan[dma_chan[ch].next_linked_ch].state =
2074 DMA_CH_STARTED; 1887 DMA_CH_STARTED;
2075 if (dma_linked_lch[chain_id].chain_mode == 1888 if (dma_linked_lch[chain_id].chain_mode ==
@@ -2079,10 +1892,10 @@ static int omap2_dma_handle_ch(int ch)
2079 if (!OMAP_DMA_CHAIN_QEMPTY(chain_id)) 1892 if (!OMAP_DMA_CHAIN_QEMPTY(chain_id))
2080 OMAP_DMA_CHAIN_INCQHEAD(chain_id); 1893 OMAP_DMA_CHAIN_INCQHEAD(chain_id);
2081 1894
2082 status = dma_read(CSR, ch); 1895 status = p->dma_read(CSR, ch);
2083 } 1896 }
2084 1897
2085 dma_write(status, CSR, ch); 1898 p->dma_write(status, CSR, ch);
2086 1899
2087 if (likely(dma_chan[ch].callback != NULL)) 1900 if (likely(dma_chan[ch].callback != NULL))
2088 dma_chan[ch].callback(ch, status, dma_chan[ch].data); 1901 dma_chan[ch].callback(ch, status, dma_chan[ch].data);
@@ -2096,13 +1909,13 @@ static irqreturn_t omap2_dma_irq_handler(int irq, void *dev_id)
2096 u32 val, enable_reg; 1909 u32 val, enable_reg;
2097 int i; 1910 int i;
2098 1911
2099 val = dma_read(IRQSTATUS_L0, 0); 1912 val = p->dma_read(IRQSTATUS_L0, 0);
2100 if (val == 0) { 1913 if (val == 0) {
2101 if (printk_ratelimit()) 1914 if (printk_ratelimit())
2102 printk(KERN_WARNING "Spurious DMA IRQ\n"); 1915 printk(KERN_WARNING "Spurious DMA IRQ\n");
2103 return IRQ_HANDLED; 1916 return IRQ_HANDLED;
2104 } 1917 }
2105 enable_reg = dma_read(IRQENABLE_L0, 0); 1918 enable_reg = p->dma_read(IRQENABLE_L0, 0);
2106 val &= enable_reg; /* Dispatch only relevant interrupts */ 1919 val &= enable_reg; /* Dispatch only relevant interrupts */
2107 for (i = 0; i < dma_lch_count && val != 0; i++) { 1920 for (i = 0; i < dma_lch_count && val != 0; i++) {
2108 if (val & 1) 1921 if (val & 1)
@@ -2128,206 +1941,66 @@ static struct irqaction omap24xx_dma_irq;
2128void omap_dma_global_context_save(void) 1941void omap_dma_global_context_save(void)
2129{ 1942{
2130 omap_dma_global_context.dma_irqenable_l0 = 1943 omap_dma_global_context.dma_irqenable_l0 =
2131 dma_read(IRQENABLE_L0, 0); 1944 p->dma_read(IRQENABLE_L0, 0);
2132 omap_dma_global_context.dma_ocp_sysconfig = 1945 omap_dma_global_context.dma_ocp_sysconfig =
2133 dma_read(OCP_SYSCONFIG, 0); 1946 p->dma_read(OCP_SYSCONFIG, 0);
2134 omap_dma_global_context.dma_gcr = dma_read(GCR, 0); 1947 omap_dma_global_context.dma_gcr = p->dma_read(GCR, 0);
2135} 1948}
2136 1949
2137void omap_dma_global_context_restore(void) 1950void omap_dma_global_context_restore(void)
2138{ 1951{
2139 int ch; 1952 int ch;
2140 1953
2141 dma_write(omap_dma_global_context.dma_gcr, GCR, 0); 1954 p->dma_write(omap_dma_global_context.dma_gcr, GCR, 0);
2142 dma_write(omap_dma_global_context.dma_ocp_sysconfig, 1955 p->dma_write(omap_dma_global_context.dma_ocp_sysconfig,
2143 OCP_SYSCONFIG, 0); 1956 OCP_SYSCONFIG, 0);
2144 dma_write(omap_dma_global_context.dma_irqenable_l0, 1957 p->dma_write(omap_dma_global_context.dma_irqenable_l0,
2145 IRQENABLE_L0, 0); 1958 IRQENABLE_L0, 0);
2146 1959
2147 if (IS_DMA_ERRATA(DMA_ROMCODE_BUG)) 1960 if (IS_DMA_ERRATA(DMA_ROMCODE_BUG))
2148 dma_write(0x3 , IRQSTATUS_L0, 0); 1961 p->dma_write(0x3 , IRQSTATUS_L0, 0);
2149 1962
2150 for (ch = 0; ch < dma_chan_count; ch++) 1963 for (ch = 0; ch < dma_chan_count; ch++)
2151 if (dma_chan[ch].dev_id != -1) 1964 if (dma_chan[ch].dev_id != -1)
2152 omap_clear_dma(ch); 1965 omap_clear_dma(ch);
2153} 1966}
2154 1967
2155static void configure_dma_errata(void) 1968static int __devinit omap_system_dma_probe(struct platform_device *pdev)
2156{ 1969{
2157 1970 int ch, ret = 0;
2158 /* 1971 int dma_irq;
2159 * Errata applicable for OMAP2430ES1.0 and all omap2420 1972 char irq_name[4];
2160 * 1973 int irq_rel;
2161 * I. 1974
2162 * Erratum ID: Not Available 1975 p = pdev->dev.platform_data;
2163 * Inter Frame DMA buffering issue DMA will wrongly 1976 if (!p) {
2164 * buffer elements if packing and bursting is enabled. This might 1977 dev_err(&pdev->dev, "%s: System DMA initialized without"
2165 * result in data gets stalled in FIFO at the end of the block. 1978 "platform data\n", __func__);
2166 * Workaround: DMA channels must have BUFFERING_DISABLED bit set to 1979 return -EINVAL;
2167 * guarantee no data will stay in the DMA FIFO in case inter frame
2168 * buffering occurs
2169 *
2170 * II.
2171 * Erratum ID: Not Available
2172 * DMA may hang when several channels are used in parallel
2173 * In the following configuration, DMA channel hanging can occur:
2174 * a. Channel i, hardware synchronized, is enabled
2175 * b. Another channel (Channel x), software synchronized, is enabled.
2176 * c. Channel i is disabled before end of transfer
2177 * d. Channel i is reenabled.
2178 * e. Steps 1 to 4 are repeated a certain number of times.
2179 * f. A third channel (Channel y), software synchronized, is enabled.
2180 * Channel x and Channel y may hang immediately after step 'f'.
2181 * Workaround:
2182 * For any channel used - make sure NextLCH_ID is set to the value j.
2183 */
2184 if (cpu_is_omap2420() || (cpu_is_omap2430() &&
2185 (omap_type() == OMAP2430_REV_ES1_0))) {
2186 SET_DMA_ERRATA(DMA_ERRATA_IFRAME_BUFFERING);
2187 SET_DMA_ERRATA(DMA_ERRATA_PARALLEL_CHANNELS);
2188 }
2189
2190 /*
2191 * Erratum ID: i378: OMAP2plus: sDMA Channel is not disabled
2192 * after a transaction error.
2193 * Workaround: SW should explicitly disable the channel.
2194 */
2195 if (cpu_class_is_omap2())
2196 SET_DMA_ERRATA(DMA_ERRATA_i378);
2197
2198 /*
2199 * Erratum ID: i541: sDMA FIFO draining does not finish
2200 * If sDMA channel is disabled on the fly, sDMA enters standby even
2201 * through FIFO Drain is still in progress
2202 * Workaround: Put sDMA in NoStandby more before a logical channel is
2203 * disabled, then put it back to SmartStandby right after the channel
2204 * finishes FIFO draining.
2205 */
2206 if (cpu_is_omap34xx())
2207 SET_DMA_ERRATA(DMA_ERRATA_i541);
2208
2209 /*
2210 * Erratum ID: i88 : Special programming model needed to disable DMA
2211 * before end of block.
2212 * Workaround: software must ensure that the DMA is configured in No
2213 * Standby mode(DMAx_OCP_SYSCONFIG.MIDLEMODE = "01")
2214 */
2215 if (cpu_is_omap34xx() && (omap_type() == OMAP3430_REV_ES1_0))
2216 SET_DMA_ERRATA(DMA_ERRATA_i88);
2217
2218 /*
2219 * Erratum 3.2/3.3: sometimes 0 is returned if CSAC/CDAC is
2220 * read before the DMA controller finished disabling the channel.
2221 */
2222 if (!cpu_is_omap15xx())
2223 SET_DMA_ERRATA(DMA_ERRATA_3_3);
2224
2225 /*
2226 * Erratum ID: Not Available
2227 * A bug in ROM code leaves IRQ status for channels 0 and 1 uncleared
2228 * after secure sram context save and restore.
2229 * Work around: Hence we need to manually clear those IRQs to avoid
2230 * spurious interrupts. This affects only secure devices.
2231 */
2232 if (cpu_is_omap34xx() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
2233 SET_DMA_ERRATA(DMA_ROMCODE_BUG);
2234}
2235
2236/*----------------------------------------------------------------------------*/
2237
2238static int __init omap_init_dma(void)
2239{
2240 unsigned long base;
2241 int ch, r;
2242
2243 if (cpu_class_is_omap1()) {
2244 base = OMAP1_DMA_BASE;
2245 dma_lch_count = OMAP1_LOGICAL_DMA_CH_COUNT;
2246 } else if (cpu_is_omap24xx()) {
2247 base = OMAP24XX_DMA4_BASE;
2248 dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
2249 } else if (cpu_is_omap34xx()) {
2250 base = OMAP34XX_DMA4_BASE;
2251 dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
2252 } else if (cpu_is_omap44xx()) {
2253 base = OMAP44XX_DMA4_BASE;
2254 dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
2255 } else {
2256 pr_err("DMA init failed for unsupported omap\n");
2257 return -ENODEV;
2258 } 1980 }
2259 1981
2260 omap_dma_base = ioremap(base, SZ_4K); 1982 d = p->dma_attr;
2261 BUG_ON(!omap_dma_base); 1983 errata = p->errata;
2262
2263 if (cpu_class_is_omap1()) {
2264 dma_stride = 0x40;
2265 reg_map = reg_map_omap1;
2266 dma_common_ch_start = CPC;
2267 dma_common_ch_end = COLOR;
2268 } else {
2269 dma_stride = 0x60;
2270 reg_map = reg_map_omap2;
2271 dma_common_ch_start = CSDP;
2272 if (cpu_is_omap3630() || cpu_is_omap4430())
2273 dma_common_ch_end = CCDN;
2274 else
2275 dma_common_ch_end = CCFN;
2276 }
2277 1984
2278 if (cpu_class_is_omap2() && omap_dma_reserve_channels 1985 if ((d->dev_caps & RESERVE_CHANNEL) && omap_dma_reserve_channels
2279 && (omap_dma_reserve_channels <= dma_lch_count)) 1986 && (omap_dma_reserve_channels <= dma_lch_count))
2280 dma_lch_count = omap_dma_reserve_channels; 1987 d->lch_count = omap_dma_reserve_channels;
2281 1988
2282 dma_chan = kzalloc(sizeof(struct omap_dma_lch) * dma_lch_count, 1989 dma_lch_count = d->lch_count;
2283 GFP_KERNEL); 1990 dma_chan_count = dma_lch_count;
2284 if (!dma_chan) { 1991 dma_chan = d->chan;
2285 r = -ENOMEM; 1992 enable_1510_mode = d->dev_caps & ENABLE_1510_MODE;
2286 goto out_unmap;
2287 }
2288 1993
2289 if (cpu_class_is_omap2()) { 1994 if (cpu_class_is_omap2()) {
2290 dma_linked_lch = kzalloc(sizeof(struct dma_link_info) * 1995 dma_linked_lch = kzalloc(sizeof(struct dma_link_info) *
2291 dma_lch_count, GFP_KERNEL); 1996 dma_lch_count, GFP_KERNEL);
2292 if (!dma_linked_lch) { 1997 if (!dma_linked_lch) {
2293 r = -ENOMEM; 1998 ret = -ENOMEM;
2294 goto out_free; 1999 goto exit_dma_lch_fail;
2295 } 2000 }
2296 } 2001 }
2297 2002
2298 if (cpu_is_omap15xx()) {
2299 printk(KERN_INFO "DMA support for OMAP15xx initialized\n");
2300 dma_chan_count = 9;
2301 enable_1510_mode = 1;
2302 } else if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
2303 printk(KERN_INFO "OMAP DMA hardware version %d\n",
2304 dma_read(HW_ID, 0));
2305 printk(KERN_INFO "DMA capabilities: %08x:%08x:%04x:%04x:%04x\n",
2306 dma_read(CAPS_0, 0), dma_read(CAPS_1, 0),
2307 dma_read(CAPS_2, 0), dma_read(CAPS_3, 0),
2308 dma_read(CAPS_4, 0));
2309 if (!enable_1510_mode) {
2310 u16 w;
2311
2312 /* Disable OMAP 3.0/3.1 compatibility mode. */
2313 w = dma_read(GSCR, 0);
2314 w |= 1 << 3;
2315 dma_write(w, GSCR, 0);
2316 dma_chan_count = 16;
2317 } else
2318 dma_chan_count = 9;
2319 } else if (cpu_class_is_omap2()) {
2320 u8 revision = dma_read(REVISION, 0) & 0xff;
2321 printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n",
2322 revision >> 4, revision & 0xf);
2323 dma_chan_count = dma_lch_count;
2324 } else {
2325 dma_chan_count = 0;
2326 return 0;
2327 }
2328
2329 spin_lock_init(&dma_chan_lock); 2003 spin_lock_init(&dma_chan_lock);
2330
2331 for (ch = 0; ch < dma_chan_count; ch++) { 2004 for (ch = 0; ch < dma_chan_count; ch++) {
2332 omap_clear_dma(ch); 2005 omap_clear_dma(ch);
2333 if (cpu_class_is_omap2()) 2006 if (cpu_class_is_omap2())
@@ -2344,20 +2017,23 @@ static int __init omap_init_dma(void)
2344 * request_irq() doesn't like dev_id (ie. ch) being 2017 * request_irq() doesn't like dev_id (ie. ch) being
2345 * zero, so we have to kludge around this. 2018 * zero, so we have to kludge around this.
2346 */ 2019 */
2347 r = request_irq(omap1_dma_irq[ch], 2020 sprintf(&irq_name[0], "%d", ch);
2021 dma_irq = platform_get_irq_byname(pdev, irq_name);
2022
2023 if (dma_irq < 0) {
2024 ret = dma_irq;
2025 goto exit_dma_irq_fail;
2026 }
2027
2028 /* INT_DMA_LCD is handled in lcd_dma.c */
2029 if (dma_irq == INT_DMA_LCD)
2030 continue;
2031
2032 ret = request_irq(dma_irq,
2348 omap1_dma_irq_handler, 0, "DMA", 2033 omap1_dma_irq_handler, 0, "DMA",
2349 (void *) (ch + 1)); 2034 (void *) (ch + 1));
2350 if (r != 0) { 2035 if (ret != 0)
2351 int i; 2036 goto exit_dma_irq_fail;
2352
2353 printk(KERN_ERR "unable to request IRQ %d "
2354 "for DMA (error %d)\n",
2355 omap1_dma_irq[ch], r);
2356 for (i = 0; i < ch; i++)
2357 free_irq(omap1_dma_irq[i],
2358 (void *) (i + 1));
2359 goto out_free;
2360 }
2361 } 2037 }
2362 } 2038 }
2363 2039
@@ -2366,47 +2042,91 @@ static int __init omap_init_dma(void)
2366 DMA_DEFAULT_FIFO_DEPTH, 0); 2042 DMA_DEFAULT_FIFO_DEPTH, 0);
2367 2043
2368 if (cpu_class_is_omap2()) { 2044 if (cpu_class_is_omap2()) {
2369 int irq; 2045 strcpy(irq_name, "0");
2370 if (cpu_is_omap44xx()) 2046 dma_irq = platform_get_irq_byname(pdev, irq_name);
2371 irq = OMAP44XX_IRQ_SDMA_0; 2047 if (dma_irq < 0) {
2372 else 2048 dev_err(&pdev->dev, "failed: request IRQ %d", dma_irq);
2373 irq = INT_24XX_SDMA_IRQ0; 2049 goto exit_dma_lch_fail;
2374 setup_irq(irq, &omap24xx_dma_irq); 2050 }
2375 } 2051 ret = setup_irq(dma_irq, &omap24xx_dma_irq);
2376 2052 if (ret) {
2377 if (cpu_is_omap34xx() || cpu_is_omap44xx()) { 2053 dev_err(&pdev->dev, "set_up failed for IRQ %d"
2378 /* Enable smartidle idlemodes and autoidle */ 2054 "for DMA (error %d)\n", dma_irq, ret);
2379 u32 v = dma_read(OCP_SYSCONFIG, 0); 2055 goto exit_dma_lch_fail;
2380 v &= ~(DMA_SYSCONFIG_MIDLEMODE_MASK |
2381 DMA_SYSCONFIG_SIDLEMODE_MASK |
2382 DMA_SYSCONFIG_AUTOIDLE);
2383 v |= (DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_SMARTIDLE) |
2384 DMA_SYSCONFIG_SIDLEMODE(DMA_IDLEMODE_SMARTIDLE) |
2385 DMA_SYSCONFIG_AUTOIDLE);
2386 dma_write(v , OCP_SYSCONFIG, 0);
2387 /* reserve dma channels 0 and 1 in high security devices */
2388 if (cpu_is_omap34xx() &&
2389 (omap_type() != OMAP2_DEVICE_TYPE_GP)) {
2390 printk(KERN_INFO "Reserving DMA channels 0 and 1 for "
2391 "HS ROM code\n");
2392 dma_chan[0].dev_id = 0;
2393 dma_chan[1].dev_id = 1;
2394 } 2056 }
2395 } 2057 }
2396 configure_dma_errata();
2397 2058
2059 /* reserve dma channels 0 and 1 in high security devices */
2060 if (cpu_is_omap34xx() &&
2061 (omap_type() != OMAP2_DEVICE_TYPE_GP)) {
2062 printk(KERN_INFO "Reserving DMA channels 0 and 1 for "
2063 "HS ROM code\n");
2064 dma_chan[0].dev_id = 0;
2065 dma_chan[1].dev_id = 1;
2066 }
2067 p->show_dma_caps();
2398 return 0; 2068 return 0;
2399 2069
2400out_free: 2070exit_dma_irq_fail:
2071 dev_err(&pdev->dev, "unable to request IRQ %d"
2072 "for DMA (error %d)\n", dma_irq, ret);
2073 for (irq_rel = 0; irq_rel < ch; irq_rel++) {
2074 dma_irq = platform_get_irq(pdev, irq_rel);
2075 free_irq(dma_irq, (void *)(irq_rel + 1));
2076 }
2077
2078exit_dma_lch_fail:
2079 kfree(p);
2080 kfree(d);
2401 kfree(dma_chan); 2081 kfree(dma_chan);
2082 return ret;
2083}
2402 2084
2403out_unmap: 2085static int __devexit omap_system_dma_remove(struct platform_device *pdev)
2404 iounmap(omap_dma_base); 2086{
2087 int dma_irq;
2405 2088
2406 return r; 2089 if (cpu_class_is_omap2()) {
2090 char irq_name[4];
2091 strcpy(irq_name, "0");
2092 dma_irq = platform_get_irq_byname(pdev, irq_name);
2093 remove_irq(dma_irq, &omap24xx_dma_irq);
2094 } else {
2095 int irq_rel = 0;
2096 for ( ; irq_rel < dma_chan_count; irq_rel++) {
2097 dma_irq = platform_get_irq(pdev, irq_rel);
2098 free_irq(dma_irq, (void *)(irq_rel + 1));
2099 }
2100 }
2101 kfree(p);
2102 kfree(d);
2103 kfree(dma_chan);
2104 return 0;
2105}
2106
2107static struct platform_driver omap_system_dma_driver = {
2108 .probe = omap_system_dma_probe,
2109 .remove = omap_system_dma_remove,
2110 .driver = {
2111 .name = "omap_dma_system"
2112 },
2113};
2114
2115static int __init omap_system_dma_init(void)
2116{
2117 return platform_driver_register(&omap_system_dma_driver);
2118}
2119arch_initcall(omap_system_dma_init);
2120
2121static void __exit omap_system_dma_exit(void)
2122{
2123 platform_driver_unregister(&omap_system_dma_driver);
2407} 2124}
2408 2125
2409arch_initcall(omap_init_dma); 2126MODULE_DESCRIPTION("OMAP SYSTEM DMA DRIVER");
2127MODULE_LICENSE("GPL");
2128MODULE_ALIAS("platform:" DRIVER_NAME);
2129MODULE_AUTHOR("Texas Instruments Inc");
2410 2130
2411/* 2131/*
2412 * Reserve the omap SDMA channels using cmdline bootarg 2132 * Reserve the omap SDMA channels using cmdline bootarg