author     G, Manjunath Kondaiah <manjugk@ti.com>    2010-12-20 21:27:19 -0500
committer  Tony Lindgren <tony@atomide.com>          2010-12-20 21:38:31 -0500
commit     f31cc9622d75c1c6f041d786698daa425c0425c2 (patch)
tree       4e52cc4a4d5a6c478823150d263cecbb08271f7b
parent     59de3cf1ce9a961ba9ab657707727db2111e72fa (diff)
OMAP: DMA: Convert DMA library into platform driver
Convert the DMA library into a DMA platform driver and make use of the
platform data provided by the hwmod database for OMAP2+ onwards. For OMAP1
processors, the DMA driver in mach-omap uses resource structures for
getting platform data.

Thanks to Tony Lindgren <tony@atomide.com> for fixing various omap1 issues
and testing the same on the OSK5912 board.

Signed-off-by: G, Manjunath Kondaiah <manjugk@ti.com>
Tested-by: Kevin Hilman <khilman@deeprootsystems.com>
Acked-by: Kevin Hilman <khilman@deeprootsystems.com>
Signed-off-by: Tony Lindgren <tony@atomide.com>
-rw-r--r--  arch/arm/mach-omap1/Makefile           |   2
-rw-r--r--  arch/arm/mach-omap1/dma.c              | 215
-rw-r--r--  arch/arm/mach-omap2/Makefile           |   2
-rw-r--r--  arch/arm/mach-omap2/dma.c              | 223
-rw-r--r--  arch/arm/plat-omap/dma.c               | 820
-rw-r--r--  arch/arm/plat-omap/include/plat/dma.h  |  57
6 files changed, 754 insertions(+), 565 deletions(-)
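
The core of the conversion is a per-SoC set of register accessors and helpers handed
to the common DMA code as platform data. The sketch below is a rough reconstruction of
that structure from the assignments in the hunks that follow (p->dma_attr, p->dma_read,
p->dma_write, and so on); the authoritative declaration is in the
arch/arm/plat-omap/include/plat/dma.h change, whose hunk is not shown on this page.

    #include <linux/types.h>

    struct omap_dma_dev_attr;   /* per-SoC channel count and capabilities */

    /* Reconstructed sketch -- see the plat/dma.h hunk of this series for the real thing. */
    struct omap_system_dma_plat_info {
            struct omap_dma_dev_attr *dma_attr;
            u32 errata;                             /* DMA_ERRATA_* bits */
            void (*disable_irq_lch)(int lch);       /* NULL on OMAP1 */
            void (*show_dma_caps)(void);
            void (*clear_lch_regs)(int lch);        /* NULL on OMAP2+ */
            void (*clear_dma)(int lch);
            void (*dma_write)(u32 val, int reg, int lch);
            u32 (*dma_read)(int reg, int lch);
    };

OMAP1 fills this in from mach-omap1/dma.c and attaches it with platform_device_add_data();
OMAP2+ builds it in mach-omap2/dma.c and passes it through omap_device_build() using hwmod
data, as the diffs below show.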
diff --git a/arch/arm/mach-omap1/Makefile b/arch/arm/mach-omap1/Makefile
index 0b1c07ffa2f1..6ee19504845f 100644
--- a/arch/arm/mach-omap1/Makefile
+++ b/arch/arm/mach-omap1/Makefile
@@ -3,7 +3,7 @@
3# 3#
4 4
5# Common support 5# Common support
6obj-y := io.o id.o sram.o irq.o mux.o flash.o serial.o devices.o 6obj-y := io.o id.o sram.o irq.o mux.o flash.o serial.o devices.o dma.o
7obj-y += clock.o clock_data.o opp_data.o 7obj-y += clock.o clock_data.o opp_data.o
8 8
9obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o 9obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o
diff --git a/arch/arm/mach-omap1/dma.c b/arch/arm/mach-omap1/dma.c
index 120eff707ab2..d8559344c6e2 100644
--- a/arch/arm/mach-omap1/dma.c
+++ b/arch/arm/mach-omap1/dma.c
@@ -30,6 +30,57 @@
30#include <plat/irqs.h> 30#include <plat/irqs.h>
31 31
32#define OMAP1_DMA_BASE (0xfffed800) 32#define OMAP1_DMA_BASE (0xfffed800)
33#define OMAP1_LOGICAL_DMA_CH_COUNT 17
34#define OMAP1_DMA_STRIDE 0x40
35
36static u32 errata;
37static u32 enable_1510_mode;
38static u8 dma_stride;
39static enum omap_reg_offsets dma_common_ch_start, dma_common_ch_end;
40
41static u16 reg_map[] = {
42 [GCR] = 0x400,
43 [GSCR] = 0x404,
44 [GRST1] = 0x408,
45 [HW_ID] = 0x442,
46 [PCH2_ID] = 0x444,
47 [PCH0_ID] = 0x446,
48 [PCH1_ID] = 0x448,
49 [PCHG_ID] = 0x44a,
50 [PCHD_ID] = 0x44c,
51 [CAPS_0] = 0x44e,
52 [CAPS_1] = 0x452,
53 [CAPS_2] = 0x456,
54 [CAPS_3] = 0x458,
55 [CAPS_4] = 0x45a,
56 [PCH2_SR] = 0x460,
57 [PCH0_SR] = 0x480,
58 [PCH1_SR] = 0x482,
59 [PCHD_SR] = 0x4c0,
60
61 /* Common Registers */
62 [CSDP] = 0x00,
63 [CCR] = 0x02,
64 [CICR] = 0x04,
65 [CSR] = 0x06,
66 [CEN] = 0x10,
67 [CFN] = 0x12,
68 [CSFI] = 0x14,
69 [CSEI] = 0x16,
70 [CPC] = 0x18, /* 15xx only */
71 [CSAC] = 0x18,
72 [CDAC] = 0x1a,
73 [CDEI] = 0x1c,
74 [CDFI] = 0x1e,
75 [CLNK_CTRL] = 0x28,
76
77 /* Channel specific register offsets */
78 [CSSA] = 0x08,
79 [CDSA] = 0x0c,
80 [COLOR] = 0x20,
81 [CCR2] = 0x24,
82 [LCH_CTRL] = 0x2a,
83};
33 84
34static struct resource res[] __initdata = { 85static struct resource res[] __initdata = {
35 [0] = { 86 [0] = {
@@ -67,6 +118,7 @@ static struct resource res[] __initdata = {
67 .start = INT_DMA_CH5, 118 .start = INT_DMA_CH5,
68 .flags = IORESOURCE_IRQ, 119 .flags = IORESOURCE_IRQ,
69 }, 120 },
121 /* Handled in lcd_dma.c */
70 [7] = { 122 [7] = {
71 .name = "6", 123 .name = "6",
72 .start = INT_1610_DMA_CH6, 124 .start = INT_1610_DMA_CH6,
@@ -125,9 +177,100 @@ static struct resource res[] __initdata = {
125 }, 177 },
126}; 178};
127 179
180static void __iomem *dma_base;
181static inline void dma_write(u32 val, int reg, int lch)
182{
183 u8 stride;
184 u32 offset;
185
186 stride = (reg >= dma_common_ch_start) ? dma_stride : 0;
187 offset = reg_map[reg] + (stride * lch);
188
189 __raw_writew(val, dma_base + offset);
190 if ((reg > CLNK_CTRL && reg < CCEN) ||
191 (reg > PCHD_ID && reg < CAPS_2)) {
192 u32 offset2 = reg_map[reg] + 2 + (stride * lch);
193 __raw_writew(val >> 16, dma_base + offset2);
194 }
195}
196
197static inline u32 dma_read(int reg, int lch)
198{
199 u8 stride;
200 u32 offset, val;
201
202 stride = (reg >= dma_common_ch_start) ? dma_stride : 0;
203 offset = reg_map[reg] + (stride * lch);
204
205 val = __raw_readw(dma_base + offset);
206 if ((reg > CLNK_CTRL && reg < CCEN) ||
207 (reg > PCHD_ID && reg < CAPS_2)) {
208 u16 upper;
209 u32 offset2 = reg_map[reg] + 2 + (stride * lch);
210 upper = __raw_readw(dma_base + offset2);
211 val |= (upper << 16);
212 }
213 return val;
214}
215
216static void omap1_clear_lch_regs(int lch)
217{
218 int i = dma_common_ch_start;
219
220 for (; i <= dma_common_ch_end; i += 1)
221 dma_write(0, i, lch);
222}
223
224static void omap1_clear_dma(int lch)
225{
226 u32 l;
227
228 l = dma_read(CCR, lch);
229 l &= ~OMAP_DMA_CCR_EN;
230 dma_write(l, CCR, lch);
231
232 /* Clear pending interrupts */
233 l = dma_read(CSR, lch);
234}
235
236static void omap1_show_dma_caps(void)
237{
238 if (enable_1510_mode) {
239 printk(KERN_INFO "DMA support for OMAP15xx initialized\n");
240 } else {
241 u16 w;
242 printk(KERN_INFO "OMAP DMA hardware version %d\n",
243 dma_read(HW_ID, 0));
244 printk(KERN_INFO "DMA capabilities: %08x:%08x:%04x:%04x:%04x\n",
245 dma_read(CAPS_0, 0), dma_read(CAPS_1, 0),
246 dma_read(CAPS_2, 0), dma_read(CAPS_3, 0),
247 dma_read(CAPS_4, 0));
248
249 /* Disable OMAP 3.0/3.1 compatibility mode. */
250 w = dma_read(GSCR, 0);
251 w |= 1 << 3;
252 dma_write(w, GSCR, 0);
253 }
254 return;
255}
256
257static u32 configure_dma_errata(void)
258{
259
260 /*
261 * Erratum 3.2/3.3: sometimes 0 is returned if CSAC/CDAC is
262 * read before the DMA controller finished disabling the channel.
263 */
264 if (!cpu_is_omap15xx())
265 SET_DMA_ERRATA(DMA_ERRATA_3_3);
266
267 return errata;
268}
269
128static int __init omap1_system_dma_init(void) 270static int __init omap1_system_dma_init(void)
129{ 271{
130 struct omap_system_dma_plat_info *p; 272 struct omap_system_dma_plat_info *p;
273 struct omap_dma_dev_attr *d;
131 struct platform_device *pdev; 274 struct platform_device *pdev;
132 int ret; 275 int ret;
133 276
@@ -138,6 +281,12 @@ static int __init omap1_system_dma_init(void)
138 return -ENOMEM; 281 return -ENOMEM;
139 } 282 }
140 283
284 dma_base = ioremap(res[0].start, resource_size(&res[0]));
285 if (!dma_base) {
286 pr_err("%s: Unable to ioremap\n", __func__);
287 return -ENODEV;
288 }
289
141 ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res)); 290 ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
142 if (ret) { 291 if (ret) {
143 dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n", 292 dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n",
@@ -153,22 +302,84 @@ static int __init omap1_system_dma_init(void)
153 goto exit_device_put; 302 goto exit_device_put;
154 } 303 }
155 304
305 d = kzalloc(sizeof(struct omap_dma_dev_attr), GFP_KERNEL);
306 if (!d) {
307 dev_err(&pdev->dev, "%s: Unable to allocate 'd' for %s\n",
308 __func__, pdev->name);
309 ret = -ENOMEM;
310 goto exit_release_p;
311 }
312
313 d->lch_count = OMAP1_LOGICAL_DMA_CH_COUNT;
314
315 /* Valid attributes for omap1 plus processors */
316 if (cpu_is_omap15xx())
317 d->dev_caps = ENABLE_1510_MODE;
318 enable_1510_mode = d->dev_caps & ENABLE_1510_MODE;
319
320 d->dev_caps |= SRC_PORT;
321 d->dev_caps |= DST_PORT;
322 d->dev_caps |= SRC_INDEX;
323 d->dev_caps |= DST_INDEX;
324 d->dev_caps |= IS_BURST_ONLY4;
325 d->dev_caps |= CLEAR_CSR_ON_READ;
326 d->dev_caps |= IS_WORD_16;
327
328
329 d->chan = kzalloc(sizeof(struct omap_dma_lch) *
330 (d->lch_count), GFP_KERNEL);
331 if (!d->chan) {
332 dev_err(&pdev->dev, "%s: Memory allocation failed "
333 "for d->chan!!!\n", __func__);
334 goto exit_release_d;
335 }
336
337 if (cpu_is_omap15xx())
338 d->chan_count = 9;
339 else if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
340 if (!(d->dev_caps & ENABLE_1510_MODE))
341 d->chan_count = 16;
342 else
343 d->chan_count = 9;
344 }
345
346 p->dma_attr = d;
347
348 p->show_dma_caps = omap1_show_dma_caps;
349 p->clear_lch_regs = omap1_clear_lch_regs;
350 p->clear_dma = omap1_clear_dma;
351 p->dma_write = dma_write;
352 p->dma_read = dma_read;
353 p->disable_irq_lch = NULL;
354
355 p->errata = configure_dma_errata();
356
156 ret = platform_device_add_data(pdev, p, sizeof(*p)); 357 ret = platform_device_add_data(pdev, p, sizeof(*p));
157 if (ret) { 358 if (ret) {
158 dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n", 359 dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n",
159 __func__, pdev->name, pdev->id); 360 __func__, pdev->name, pdev->id);
160 goto exit_device_put; 361 goto exit_release_chan;
161 } 362 }
162 363
163 ret = platform_device_add(pdev); 364 ret = platform_device_add(pdev);
164 if (ret) { 365 if (ret) {
165 dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n", 366 dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n",
166 __func__, pdev->name, pdev->id); 367 __func__, pdev->name, pdev->id);
167 goto exit_device_put; 368 goto exit_release_chan;
168 } 369 }
169 370
371 dma_stride = OMAP1_DMA_STRIDE;
372 dma_common_ch_start = CPC;
373 dma_common_ch_end = COLOR;
374
170 return ret; 375 return ret;
171 376
377exit_release_chan:
378 kfree(d->chan);
379exit_release_d:
380 kfree(d);
381exit_release_p:
382 kfree(p);
172exit_device_put: 383exit_device_put:
173 platform_device_put(pdev); 384 platform_device_put(pdev);
174exit_device_del: 385exit_device_del:
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 25bc9453700d..1538e32637b9 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -4,7 +4,7 @@
4 4
5# Common support 5# Common support
6obj-y := id.o io.o control.o mux.o devices.o serial.o gpmc.o timer-gp.o pm.o \ 6obj-y := id.o io.o control.o mux.o devices.o serial.o gpmc.o timer-gp.o pm.o \
7 common.o gpio.o 7 common.o gpio.o dma.o
8 8
9omap-2-3-common = irq.o sdrc.o prm2xxx_3xxx.o 9omap-2-3-common = irq.o sdrc.o prm2xxx_3xxx.o
10hwmod-common = omap_hwmod.o \ 10hwmod-common = omap_hwmod.o \
diff --git a/arch/arm/mach-omap2/dma.c b/arch/arm/mach-omap2/dma.c
index 2130059e98cb..d2f15f5cfd36 100644
--- a/arch/arm/mach-omap2/dma.c
+++ b/arch/arm/mach-omap2/dma.c
@@ -32,6 +32,61 @@
32#include <plat/omap_device.h> 32#include <plat/omap_device.h>
33#include <plat/dma.h> 33#include <plat/dma.h>
34 34
35#define OMAP2_DMA_STRIDE 0x60
36
37static u32 errata;
38static u8 dma_stride;
39
40static struct omap_dma_dev_attr *d;
41
42static enum omap_reg_offsets dma_common_ch_start, dma_common_ch_end;
43
44static u16 reg_map[] = {
45 [REVISION] = 0x00,
46 [GCR] = 0x78,
47 [IRQSTATUS_L0] = 0x08,
48 [IRQSTATUS_L1] = 0x0c,
49 [IRQSTATUS_L2] = 0x10,
50 [IRQSTATUS_L3] = 0x14,
51 [IRQENABLE_L0] = 0x18,
52 [IRQENABLE_L1] = 0x1c,
53 [IRQENABLE_L2] = 0x20,
54 [IRQENABLE_L3] = 0x24,
55 [SYSSTATUS] = 0x28,
56 [OCP_SYSCONFIG] = 0x2c,
57 [CAPS_0] = 0x64,
58 [CAPS_2] = 0x6c,
59 [CAPS_3] = 0x70,
60 [CAPS_4] = 0x74,
61
62 /* Common register offsets */
63 [CCR] = 0x80,
64 [CLNK_CTRL] = 0x84,
65 [CICR] = 0x88,
66 [CSR] = 0x8c,
67 [CSDP] = 0x90,
68 [CEN] = 0x94,
69 [CFN] = 0x98,
70 [CSEI] = 0xa4,
71 [CSFI] = 0xa8,
72 [CDEI] = 0xac,
73 [CDFI] = 0xb0,
74 [CSAC] = 0xb4,
75 [CDAC] = 0xb8,
76
77 /* Channel specific register offsets */
78 [CSSA] = 0x9c,
79 [CDSA] = 0xa0,
80 [CCEN] = 0xbc,
81 [CCFN] = 0xc0,
82 [COLOR] = 0xc4,
83
84 /* OMAP4 specific registers */
85 [CDP] = 0xd0,
86 [CNDP] = 0xd4,
87 [CCDN] = 0xd8,
88};
89
35static struct omap_device_pm_latency omap2_dma_latency[] = { 90static struct omap_device_pm_latency omap2_dma_latency[] = {
36 { 91 {
37 .deactivate_func = omap_device_idle_hwmods, 92 .deactivate_func = omap_device_idle_hwmods,
@@ -40,13 +95,151 @@ static struct omap_device_pm_latency omap2_dma_latency[] = {
40 }, 95 },
41}; 96};
42 97
98static void __iomem *dma_base;
99static inline void dma_write(u32 val, int reg, int lch)
100{
101 u8 stride;
102 u32 offset;
103
104 stride = (reg >= dma_common_ch_start) ? dma_stride : 0;
105 offset = reg_map[reg] + (stride * lch);
106 __raw_writel(val, dma_base + offset);
107}
108
109static inline u32 dma_read(int reg, int lch)
110{
111 u8 stride;
112 u32 offset, val;
113
114 stride = (reg >= dma_common_ch_start) ? dma_stride : 0;
115 offset = reg_map[reg] + (stride * lch);
116 val = __raw_readl(dma_base + offset);
117 return val;
118}
119
120static inline void omap2_disable_irq_lch(int lch)
121{
122 u32 val;
123
124 val = dma_read(IRQENABLE_L0, lch);
125 val &= ~(1 << lch);
126 dma_write(val, IRQENABLE_L0, lch);
127}
128
129static void omap2_clear_dma(int lch)
130{
131 int i = dma_common_ch_start;
132
133 for (; i <= dma_common_ch_end; i += 1)
134 dma_write(0, i, lch);
135}
136
137static void omap2_show_dma_caps(void)
138{
139 u8 revision = dma_read(REVISION, 0) & 0xff;
140 printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n",
141 revision >> 4, revision & 0xf);
142 return;
143}
144
145static u32 configure_dma_errata(void)
146{
147
148 /*
149 * Errata applicable for OMAP2430ES1.0 and all omap2420
150 *
151 * I.
152 * Erratum ID: Not Available
153 * Inter Frame DMA buffering issue: DMA will wrongly
154 * buffer elements if packing and bursting is enabled. This might
155 * result in data getting stalled in the FIFO at the end of the block.
156 * Workaround: DMA channels must have BUFFERING_DISABLED bit set to
157 * guarantee no data will stay in the DMA FIFO in case inter frame
158 * buffering occurs
159 *
160 * II.
161 * Erratum ID: Not Available
162 * DMA may hang when several channels are used in parallel
163 * In the following configuration, DMA channel hanging can occur:
164 * a. Channel i, hardware synchronized, is enabled
165 * b. Another channel (Channel x), software synchronized, is enabled.
166 * c. Channel i is disabled before end of transfer
167 * d. Channel i is reenabled.
168 * e. Steps 1 to 4 are repeated a certain number of times.
169 * f. A third channel (Channel y), software synchronized, is enabled.
170 * Channel x and Channel y may hang immediately after step 'f'.
171 * Workaround:
172 * For any channel used - make sure NextLCH_ID is set to the value j.
173 */
174 if (cpu_is_omap2420() || (cpu_is_omap2430() &&
175 (omap_type() == OMAP2430_REV_ES1_0))) {
176
177 SET_DMA_ERRATA(DMA_ERRATA_IFRAME_BUFFERING);
178 SET_DMA_ERRATA(DMA_ERRATA_PARALLEL_CHANNELS);
179 }
180
181 /*
182 * Erratum ID: i378: OMAP2+: sDMA Channel is not disabled
183 * after a transaction error.
184 * Workaround: SW should explicitly disable the channel.
185 */
186 if (cpu_class_is_omap2())
187 SET_DMA_ERRATA(DMA_ERRATA_i378);
188
189 /*
190 * Erratum ID: i541: sDMA FIFO draining does not finish
191 * If the sDMA channel is disabled on the fly, sDMA enters standby even
192 * though FIFO draining is still in progress.
193 * Workaround: Put sDMA in NoStandby mode before a logical channel is
194 * disabled, then put it back to SmartStandby right after the channel
195 * finishes FIFO draining.
196 */
197 if (cpu_is_omap34xx())
198 SET_DMA_ERRATA(DMA_ERRATA_i541);
199
200 /*
201 * Erratum ID: i88 : Special programming model needed to disable DMA
202 * before end of block.
203 * Workaround: software must ensure that the DMA is configured in No
204 * Standby mode (DMAx_OCP_SYSCONFIG.MIDLEMODE = "01")
205 */
206 if (omap_type() == OMAP3430_REV_ES1_0)
207 SET_DMA_ERRATA(DMA_ERRATA_i88);
208
209 /*
210 * Erratum 3.2/3.3: sometimes 0 is returned if CSAC/CDAC is
211 * read before the DMA controller finished disabling the channel.
212 */
213 SET_DMA_ERRATA(DMA_ERRATA_3_3);
214
215 /*
216 * Erratum ID: Not Available
217 * A bug in ROM code leaves IRQ status for channels 0 and 1 uncleared
218 * after secure sram context save and restore.
219 * Workaround: we need to manually clear those IRQs to avoid
220 * spurious interrupts. This affects only secure devices.
221 */
222 if (cpu_is_omap34xx() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
223 SET_DMA_ERRATA(DMA_ROMCODE_BUG);
224
225 return errata;
226}
227
43/* One time initializations */ 228/* One time initializations */
44static int __init omap2_system_dma_init_dev(struct omap_hwmod *oh, void *unused) 229static int __init omap2_system_dma_init_dev(struct omap_hwmod *oh, void *unused)
45{ 230{
46 struct omap_device *od; 231 struct omap_device *od;
47 struct omap_system_dma_plat_info *p; 232 struct omap_system_dma_plat_info *p;
233 struct resource *mem;
48 char *name = "omap_dma_system"; 234 char *name = "omap_dma_system";
49 235
236 dma_stride = OMAP2_DMA_STRIDE;
237 dma_common_ch_start = CSDP;
238 if (cpu_is_omap3630() || cpu_is_omap4430())
239 dma_common_ch_end = CCDN;
240 else
241 dma_common_ch_end = CCFN;
242
50 p = kzalloc(sizeof(struct omap_system_dma_plat_info), GFP_KERNEL); 243 p = kzalloc(sizeof(struct omap_system_dma_plat_info), GFP_KERNEL);
51 if (!p) { 244 if (!p) {
52 pr_err("%s: Unable to allocate pdata for %s:%s\n", 245 pr_err("%s: Unable to allocate pdata for %s:%s\n",
@@ -54,6 +247,17 @@ static int __init omap2_system_dma_init_dev(struct omap_hwmod *oh, void *unused)
54 return -ENOMEM; 247 return -ENOMEM;
55 } 248 }
56 249
250 p->dma_attr = (struct omap_dma_dev_attr *)oh->dev_attr;
251 p->disable_irq_lch = omap2_disable_irq_lch;
252 p->show_dma_caps = omap2_show_dma_caps;
253 p->clear_dma = omap2_clear_dma;
254 p->dma_write = dma_write;
255 p->dma_read = dma_read;
256
257 p->clear_lch_regs = NULL;
258
259 p->errata = configure_dma_errata();
260
57 od = omap_device_build(name, 0, oh, p, sizeof(*p), 261 od = omap_device_build(name, 0, oh, p, sizeof(*p),
58 omap2_dma_latency, ARRAY_SIZE(omap2_dma_latency), 0); 262 omap2_dma_latency, ARRAY_SIZE(omap2_dma_latency), 0);
59 kfree(p); 263 kfree(p);
@@ -63,6 +267,25 @@ static int __init omap2_system_dma_init_dev(struct omap_hwmod *oh, void *unused)
63 return IS_ERR(od); 267 return IS_ERR(od);
64 } 268 }
65 269
270 mem = platform_get_resource(&od->pdev, IORESOURCE_MEM, 0);
271 if (!mem) {
272 dev_err(&od->pdev.dev, "%s: no mem resource\n", __func__);
273 return -EINVAL;
274 }
275 dma_base = ioremap(mem->start, resource_size(mem));
276 if (!dma_base) {
277 dev_err(&od->pdev.dev, "%s: ioremap fail\n", __func__);
278 return -ENOMEM;
279 }
280
281 d = oh->dev_attr;
282 d->chan = kzalloc(sizeof(struct omap_dma_lch) *
283 (d->lch_count), GFP_KERNEL);
284
285 if (!d->chan) {
286 dev_err(&od->pdev.dev, "%s: kzalloc fail\n", __func__);
287 return -ENOMEM;
288 }
66 return 0; 289 return 0;
67} 290}
68 291
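
With both SoC families now supplying these hooks, the shared code in arch/arm/plat-omap/dma.c
(diffed next) no longer touches the registers or picks a 16- vs 32-bit access width itself;
nearly every hunk below is the same mechanical substitution of dma_read()/dma_write() with
calls through the platform data. A minimal sketch of the pattern, using a hypothetical helper
name purely for illustration:

    /* Hypothetical helper, shown only to illustrate the substitution the
     * hunks below apply throughout the library. */
    static void start_channel(struct omap_system_dma_plat_info *p, int lch)
    {
            u32 l = p->dma_read(CCR, lch);  /* was: dma_read(CCR, lch) */

            l |= OMAP_DMA_CCR_EN;
            p->dma_write(l, CCR, lch);      /* was: dma_write(l, CCR, lch) */
    }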
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index 6f51bf37ec02..c4b2b478b1a5 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -15,6 +15,10 @@
15 * 15 *
16 * Support functions for the OMAP internal DMA channels. 16 * Support functions for the OMAP internal DMA channels.
17 * 17 *
18 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
19 * Converted DMA library into DMA platform driver.
20 * - G, Manjunath Kondaiah <manjugk@ti.com>
21 *
18 * This program is free software; you can redistribute it and/or modify 22 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License version 2 as 23 * it under the terms of the GNU General Public License version 2 as
20 * published by the Free Software Foundation. 24 * published by the Free Software Foundation.
@@ -40,96 +44,6 @@
40 44
41#undef DEBUG 45#undef DEBUG
42 46
43static u16 reg_map_omap1[] = {
44 [GCR] = 0x400,
45 [GSCR] = 0x404,
46 [GRST1] = 0x408,
47 [HW_ID] = 0x442,
48 [PCH2_ID] = 0x444,
49 [PCH0_ID] = 0x446,
50 [PCH1_ID] = 0x448,
51 [PCHG_ID] = 0x44a,
52 [PCHD_ID] = 0x44c,
53 [CAPS_0] = 0x44e,
54 [CAPS_1] = 0x452,
55 [CAPS_2] = 0x456,
56 [CAPS_3] = 0x458,
57 [CAPS_4] = 0x45a,
58 [PCH2_SR] = 0x460,
59 [PCH0_SR] = 0x480,
60 [PCH1_SR] = 0x482,
61 [PCHD_SR] = 0x4c0,
62
63 /* Common Registers */
64 [CSDP] = 0x00,
65 [CCR] = 0x02,
66 [CICR] = 0x04,
67 [CSR] = 0x06,
68 [CEN] = 0x10,
69 [CFN] = 0x12,
70 [CSFI] = 0x14,
71 [CSEI] = 0x16,
72 [CPC] = 0x18, /* 15xx only */
73 [CSAC] = 0x18,
74 [CDAC] = 0x1a,
75 [CDEI] = 0x1c,
76 [CDFI] = 0x1e,
77 [CLNK_CTRL] = 0x28,
78
79 /* Channel specific register offsets */
80 [CSSA] = 0x08,
81 [CDSA] = 0x0c,
82 [COLOR] = 0x20,
83 [CCR2] = 0x24,
84 [LCH_CTRL] = 0x2a,
85};
86
87static u16 reg_map_omap2[] = {
88 [REVISION] = 0x00,
89 [GCR] = 0x78,
90 [IRQSTATUS_L0] = 0x08,
91 [IRQSTATUS_L1] = 0x0c,
92 [IRQSTATUS_L2] = 0x10,
93 [IRQSTATUS_L3] = 0x14,
94 [IRQENABLE_L0] = 0x18,
95 [IRQENABLE_L1] = 0x1c,
96 [IRQENABLE_L2] = 0x20,
97 [IRQENABLE_L3] = 0x24,
98 [SYSSTATUS] = 0x28,
99 [OCP_SYSCONFIG] = 0x2c,
100 [CAPS_0] = 0x64,
101 [CAPS_2] = 0x6c,
102 [CAPS_3] = 0x70,
103 [CAPS_4] = 0x74,
104
105 /* Common register offsets */
106 [CCR] = 0x80,
107 [CLNK_CTRL] = 0x84,
108 [CICR] = 0x88,
109 [CSR] = 0x8c,
110 [CSDP] = 0x90,
111 [CEN] = 0x94,
112 [CFN] = 0x98,
113 [CSEI] = 0xa4,
114 [CSFI] = 0xa8,
115 [CDEI] = 0xac,
116 [CDFI] = 0xb0,
117 [CSAC] = 0xb4,
118 [CDAC] = 0xb8,
119
120 /* Channel specific register offsets */
121 [CSSA] = 0x9c,
122 [CDSA] = 0xa0,
123 [CCEN] = 0xbc,
124 [CCFN] = 0xc0,
125 [COLOR] = 0xc4,
126
127 /* OMAP4 specific registers */
128 [CDP] = 0xd0,
129 [CNDP] = 0xd4,
130 [CCDN] = 0xd8,
131};
132
133#ifndef CONFIG_ARCH_OMAP1 47#ifndef CONFIG_ARCH_OMAP1
134enum { DMA_CH_ALLOC_DONE, DMA_CH_PARAMS_SET_DONE, DMA_CH_STARTED, 48enum { DMA_CH_ALLOC_DONE, DMA_CH_PARAMS_SET_DONE, DMA_CH_STARTED,
135 DMA_CH_QUEUED, DMA_CH_NOTSTARTED, DMA_CH_PAUSED, DMA_CH_LINK_ENABLED 49 DMA_CH_QUEUED, DMA_CH_NOTSTARTED, DMA_CH_PAUSED, DMA_CH_LINK_ENABLED
@@ -143,6 +57,9 @@ enum { DMA_CHAIN_STARTED, DMA_CHAIN_NOTSTARTED };
143 57
144#define OMAP_FUNC_MUX_ARM_BASE (0xfffe1000 + 0xec) 58#define OMAP_FUNC_MUX_ARM_BASE (0xfffe1000 + 0xec)
145 59
60static struct omap_system_dma_plat_info *p;
61static struct omap_dma_dev_attr *d;
62
146static int enable_1510_mode; 63static int enable_1510_mode;
147static u32 errata; 64static u32 errata;
148 65
@@ -152,27 +69,6 @@ static struct omap_dma_global_context_registers {
152 u32 dma_gcr; 69 u32 dma_gcr;
153} omap_dma_global_context; 70} omap_dma_global_context;
154 71
155struct omap_dma_lch {
156 int next_lch;
157 int dev_id;
158 u16 saved_csr;
159 u16 enabled_irqs;
160 const char *dev_name;
161 void (*callback)(int lch, u16 ch_status, void *data);
162 void *data;
163
164#ifndef CONFIG_ARCH_OMAP1
165 /* required for Dynamic chaining */
166 int prev_linked_ch;
167 int next_linked_ch;
168 int state;
169 int chain_id;
170
171 int status;
172#endif
173 long flags;
174};
175
176struct dma_link_info { 72struct dma_link_info {
177 int *linked_dmach_q; 73 int *linked_dmach_q;
178 int no_of_lchs_linked; 74 int no_of_lchs_linked;
@@ -228,18 +124,6 @@ static int omap_dma_reserve_channels;
228 124
229static spinlock_t dma_chan_lock; 125static spinlock_t dma_chan_lock;
230static struct omap_dma_lch *dma_chan; 126static struct omap_dma_lch *dma_chan;
231static void __iomem *omap_dma_base;
232static u16 *reg_map;
233static u8 dma_stride;
234static enum omap_reg_offsets dma_common_ch_start, dma_common_ch_end;
235
236static const u8 omap1_dma_irq[OMAP1_LOGICAL_DMA_CH_COUNT] = {
237 INT_DMA_CH0_6, INT_DMA_CH1_7, INT_DMA_CH2_8, INT_DMA_CH3,
238 INT_DMA_CH4, INT_DMA_CH5, INT_1610_DMA_CH6, INT_1610_DMA_CH7,
239 INT_1610_DMA_CH8, INT_1610_DMA_CH9, INT_1610_DMA_CH10,
240 INT_1610_DMA_CH11, INT_1610_DMA_CH12, INT_1610_DMA_CH13,
241 INT_1610_DMA_CH14, INT_1610_DMA_CH15, INT_DMA_LCD
242};
243 127
244static inline void disable_lnk(int lch); 128static inline void disable_lnk(int lch);
245static void omap_disable_channel_irq(int lch); 129static void omap_disable_channel_irq(int lch);
@@ -248,52 +132,9 @@ static inline void omap_enable_channel_irq(int lch);
248#define REVISIT_24XX() printk(KERN_ERR "FIXME: no %s on 24xx\n", \ 132#define REVISIT_24XX() printk(KERN_ERR "FIXME: no %s on 24xx\n", \
249 __func__); 133 __func__);
250 134
251static inline void dma_write(u32 val, int reg, int lch)
252{
253 u8 stride;
254 u32 offset;
255
256 stride = (reg >= dma_common_ch_start) ? dma_stride : 0;
257 offset = reg_map[reg] + (stride * lch);
258
259 if (dma_stride == 0x40) {
260 __raw_writew(val, omap_dma_base + offset);
261 if ((reg > CLNK_CTRL && reg < CCEN) ||
262 (reg > PCHD_ID && reg < CAPS_2)) {
263 u32 offset2 = reg_map[reg] + 2 + (stride * lch);
264 __raw_writew(val >> 16, omap_dma_base + offset2);
265 }
266 } else {
267 __raw_writel(val, omap_dma_base + offset);
268 }
269}
270
271static inline u32 dma_read(int reg, int lch)
272{
273 u8 stride;
274 u32 offset, val;
275
276 stride = (reg >= dma_common_ch_start) ? dma_stride : 0;
277 offset = reg_map[reg] + (stride * lch);
278
279 if (dma_stride == 0x40) {
280 val = __raw_readw(omap_dma_base + offset);
281 if ((reg > CLNK_CTRL && reg < CCEN) ||
282 (reg > PCHD_ID && reg < CAPS_2)) {
283 u16 upper;
284 u32 offset2 = reg_map[reg] + 2 + (stride * lch);
285 upper = __raw_readw(omap_dma_base + offset2);
286 val |= (upper << 16);
287 }
288 } else {
289 val = __raw_readl(omap_dma_base + offset);
290 }
291 return val;
292}
293
294#ifdef CONFIG_ARCH_OMAP15XX 135#ifdef CONFIG_ARCH_OMAP15XX
295/* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */ 136/* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */
296static int omap_dma_in_1510_mode(void) 137int omap_dma_in_1510_mode(void)
297{ 138{
298 return enable_1510_mode; 139 return enable_1510_mode;
299} 140}
@@ -325,15 +166,6 @@ static inline void set_gdma_dev(int req, int dev)
325#define set_gdma_dev(req, dev) do {} while (0) 166#define set_gdma_dev(req, dev) do {} while (0)
326#endif 167#endif
327 168
328/* Omap1 only */
329static void clear_lch_regs(int lch)
330{
331 int i = dma_common_ch_start;
332
333 for (; i <= dma_common_ch_end; i += 1)
334 dma_write(0, i, lch);
335}
336
337void omap_set_dma_priority(int lch, int dst_port, int priority) 169void omap_set_dma_priority(int lch, int dst_port, int priority)
338{ 170{
339 unsigned long reg; 171 unsigned long reg;
@@ -366,12 +198,12 @@ void omap_set_dma_priority(int lch, int dst_port, int priority)
366 if (cpu_class_is_omap2()) { 198 if (cpu_class_is_omap2()) {
367 u32 ccr; 199 u32 ccr;
368 200
369 ccr = dma_read(CCR, lch); 201 ccr = p->dma_read(CCR, lch);
370 if (priority) 202 if (priority)
371 ccr |= (1 << 6); 203 ccr |= (1 << 6);
372 else 204 else
373 ccr &= ~(1 << 6); 205 ccr &= ~(1 << 6);
374 dma_write(ccr, CCR, lch); 206 p->dma_write(ccr, CCR, lch);
375 } 207 }
376} 208}
377EXPORT_SYMBOL(omap_set_dma_priority); 209EXPORT_SYMBOL(omap_set_dma_priority);
@@ -382,31 +214,31 @@ void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
382{ 214{
383 u32 l; 215 u32 l;
384 216
385 l = dma_read(CSDP, lch); 217 l = p->dma_read(CSDP, lch);
386 l &= ~0x03; 218 l &= ~0x03;
387 l |= data_type; 219 l |= data_type;
388 dma_write(l, CSDP, lch); 220 p->dma_write(l, CSDP, lch);
389 221
390 if (cpu_class_is_omap1()) { 222 if (cpu_class_is_omap1()) {
391 u16 ccr; 223 u16 ccr;
392 224
393 ccr = dma_read(CCR, lch); 225 ccr = p->dma_read(CCR, lch);
394 ccr &= ~(1 << 5); 226 ccr &= ~(1 << 5);
395 if (sync_mode == OMAP_DMA_SYNC_FRAME) 227 if (sync_mode == OMAP_DMA_SYNC_FRAME)
396 ccr |= 1 << 5; 228 ccr |= 1 << 5;
397 dma_write(ccr, CCR, lch); 229 p->dma_write(ccr, CCR, lch);
398 230
399 ccr = dma_read(CCR2, lch); 231 ccr = p->dma_read(CCR2, lch);
400 ccr &= ~(1 << 2); 232 ccr &= ~(1 << 2);
401 if (sync_mode == OMAP_DMA_SYNC_BLOCK) 233 if (sync_mode == OMAP_DMA_SYNC_BLOCK)
402 ccr |= 1 << 2; 234 ccr |= 1 << 2;
403 dma_write(ccr, CCR2, lch); 235 p->dma_write(ccr, CCR2, lch);
404 } 236 }
405 237
406 if (cpu_class_is_omap2() && dma_trigger) { 238 if (cpu_class_is_omap2() && dma_trigger) {
407 u32 val; 239 u32 val;
408 240
409 val = dma_read(CCR, lch); 241 val = p->dma_read(CCR, lch);
410 242
411 /* DMA_SYNCHRO_CONTROL_UPPER depends on the channel number */ 243 /* DMA_SYNCHRO_CONTROL_UPPER depends on the channel number */
412 val &= ~((1 << 23) | (3 << 19) | 0x1f); 244 val &= ~((1 << 23) | (3 << 19) | 0x1f);
@@ -431,11 +263,11 @@ void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
431 } else { 263 } else {
432 val &= ~(1 << 24); /* dest synch */ 264 val &= ~(1 << 24); /* dest synch */
433 } 265 }
434 dma_write(val, CCR, lch); 266 p->dma_write(val, CCR, lch);
435 } 267 }
436 268
437 dma_write(elem_count, CEN, lch); 269 p->dma_write(elem_count, CEN, lch);
438 dma_write(frame_count, CFN, lch); 270 p->dma_write(frame_count, CFN, lch);
439} 271}
440EXPORT_SYMBOL(omap_set_dma_transfer_params); 272EXPORT_SYMBOL(omap_set_dma_transfer_params);
441 273
@@ -446,7 +278,7 @@ void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color)
446 if (cpu_class_is_omap1()) { 278 if (cpu_class_is_omap1()) {
447 u16 w; 279 u16 w;
448 280
449 w = dma_read(CCR2, lch); 281 w = p->dma_read(CCR2, lch);
450 w &= ~0x03; 282 w &= ~0x03;
451 283
452 switch (mode) { 284 switch (mode) {
@@ -461,22 +293,22 @@ void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color)
461 default: 293 default:
462 BUG(); 294 BUG();
463 } 295 }
464 dma_write(w, CCR2, lch); 296 p->dma_write(w, CCR2, lch);
465 297
466 w = dma_read(LCH_CTRL, lch); 298 w = p->dma_read(LCH_CTRL, lch);
467 w &= ~0x0f; 299 w &= ~0x0f;
468 /* Default is channel type 2D */ 300 /* Default is channel type 2D */
469 if (mode) { 301 if (mode) {
470 dma_write(color, COLOR, lch); 302 p->dma_write(color, COLOR, lch);
471 w |= 1; /* Channel type G */ 303 w |= 1; /* Channel type G */
472 } 304 }
473 dma_write(w, LCH_CTRL, lch); 305 p->dma_write(w, LCH_CTRL, lch);
474 } 306 }
475 307
476 if (cpu_class_is_omap2()) { 308 if (cpu_class_is_omap2()) {
477 u32 val; 309 u32 val;
478 310
479 val = dma_read(CCR, lch); 311 val = p->dma_read(CCR, lch);
480 val &= ~((1 << 17) | (1 << 16)); 312 val &= ~((1 << 17) | (1 << 16));
481 313
482 switch (mode) { 314 switch (mode) {
@@ -491,10 +323,10 @@ void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color)
491 default: 323 default:
492 BUG(); 324 BUG();
493 } 325 }
494 dma_write(val, CCR, lch); 326 p->dma_write(val, CCR, lch);
495 327
496 color &= 0xffffff; 328 color &= 0xffffff;
497 dma_write(color, COLOR, lch); 329 p->dma_write(color, COLOR, lch);
498 } 330 }
499} 331}
500EXPORT_SYMBOL(omap_set_dma_color_mode); 332EXPORT_SYMBOL(omap_set_dma_color_mode);
@@ -504,10 +336,10 @@ void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode)
504 if (cpu_class_is_omap2()) { 336 if (cpu_class_is_omap2()) {
505 u32 csdp; 337 u32 csdp;
506 338
507 csdp = dma_read(CSDP, lch); 339 csdp = p->dma_read(CSDP, lch);
508 csdp &= ~(0x3 << 16); 340 csdp &= ~(0x3 << 16);
509 csdp |= (mode << 16); 341 csdp |= (mode << 16);
510 dma_write(csdp, CSDP, lch); 342 p->dma_write(csdp, CSDP, lch);
511 } 343 }
512} 344}
513EXPORT_SYMBOL(omap_set_dma_write_mode); 345EXPORT_SYMBOL(omap_set_dma_write_mode);
@@ -517,10 +349,10 @@ void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode)
517 if (cpu_class_is_omap1() && !cpu_is_omap15xx()) { 349 if (cpu_class_is_omap1() && !cpu_is_omap15xx()) {
518 u32 l; 350 u32 l;
519 351
520 l = dma_read(LCH_CTRL, lch); 352 l = p->dma_read(LCH_CTRL, lch);
521 l &= ~0x7; 353 l &= ~0x7;
522 l |= mode; 354 l |= mode;
523 dma_write(l, LCH_CTRL, lch); 355 p->dma_write(l, LCH_CTRL, lch);
524 } 356 }
525} 357}
526EXPORT_SYMBOL(omap_set_dma_channel_mode); 358EXPORT_SYMBOL(omap_set_dma_channel_mode);
@@ -535,21 +367,21 @@ void omap_set_dma_src_params(int lch, int src_port, int src_amode,
535 if (cpu_class_is_omap1()) { 367 if (cpu_class_is_omap1()) {
536 u16 w; 368 u16 w;
537 369
538 w = dma_read(CSDP, lch); 370 w = p->dma_read(CSDP, lch);
539 w &= ~(0x1f << 2); 371 w &= ~(0x1f << 2);
540 w |= src_port << 2; 372 w |= src_port << 2;
541 dma_write(w, CSDP, lch); 373 p->dma_write(w, CSDP, lch);
542 } 374 }
543 375
544 l = dma_read(CCR, lch); 376 l = p->dma_read(CCR, lch);
545 l &= ~(0x03 << 12); 377 l &= ~(0x03 << 12);
546 l |= src_amode << 12; 378 l |= src_amode << 12;
547 dma_write(l, CCR, lch); 379 p->dma_write(l, CCR, lch);
548 380
549 dma_write(src_start, CSSA, lch); 381 p->dma_write(src_start, CSSA, lch);
550 382
551 dma_write(src_ei, CSEI, lch); 383 p->dma_write(src_ei, CSEI, lch);
552 dma_write(src_fi, CSFI, lch); 384 p->dma_write(src_fi, CSFI, lch);
553} 385}
554EXPORT_SYMBOL(omap_set_dma_src_params); 386EXPORT_SYMBOL(omap_set_dma_src_params);
555 387
@@ -577,8 +409,8 @@ void omap_set_dma_src_index(int lch, int eidx, int fidx)
577 if (cpu_class_is_omap2()) 409 if (cpu_class_is_omap2())
578 return; 410 return;
579 411
580 dma_write(eidx, CSEI, lch); 412 p->dma_write(eidx, CSEI, lch);
581 dma_write(fidx, CSFI, lch); 413 p->dma_write(fidx, CSFI, lch);
582} 414}
583EXPORT_SYMBOL(omap_set_dma_src_index); 415EXPORT_SYMBOL(omap_set_dma_src_index);
584 416
@@ -586,11 +418,11 @@ void omap_set_dma_src_data_pack(int lch, int enable)
586{ 418{
587 u32 l; 419 u32 l;
588 420
589 l = dma_read(CSDP, lch); 421 l = p->dma_read(CSDP, lch);
590 l &= ~(1 << 6); 422 l &= ~(1 << 6);
591 if (enable) 423 if (enable)
592 l |= (1 << 6); 424 l |= (1 << 6);
593 dma_write(l, CSDP, lch); 425 p->dma_write(l, CSDP, lch);
594} 426}
595EXPORT_SYMBOL(omap_set_dma_src_data_pack); 427EXPORT_SYMBOL(omap_set_dma_src_data_pack);
596 428
@@ -599,7 +431,7 @@ void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
599 unsigned int burst = 0; 431 unsigned int burst = 0;
600 u32 l; 432 u32 l;
601 433
602 l = dma_read(CSDP, lch); 434 l = p->dma_read(CSDP, lch);
603 l &= ~(0x03 << 7); 435 l &= ~(0x03 << 7);
604 436
605 switch (burst_mode) { 437 switch (burst_mode) {
@@ -635,7 +467,7 @@ void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
635 } 467 }
636 468
637 l |= (burst << 7); 469 l |= (burst << 7);
638 dma_write(l, CSDP, lch); 470 p->dma_write(l, CSDP, lch);
639} 471}
640EXPORT_SYMBOL(omap_set_dma_src_burst_mode); 472EXPORT_SYMBOL(omap_set_dma_src_burst_mode);
641 473
@@ -647,21 +479,21 @@ void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
647 u32 l; 479 u32 l;
648 480
649 if (cpu_class_is_omap1()) { 481 if (cpu_class_is_omap1()) {
650 l = dma_read(CSDP, lch); 482 l = p->dma_read(CSDP, lch);
651 l &= ~(0x1f << 9); 483 l &= ~(0x1f << 9);
652 l |= dest_port << 9; 484 l |= dest_port << 9;
653 dma_write(l, CSDP, lch); 485 p->dma_write(l, CSDP, lch);
654 } 486 }
655 487
656 l = dma_read(CCR, lch); 488 l = p->dma_read(CCR, lch);
657 l &= ~(0x03 << 14); 489 l &= ~(0x03 << 14);
658 l |= dest_amode << 14; 490 l |= dest_amode << 14;
659 dma_write(l, CCR, lch); 491 p->dma_write(l, CCR, lch);
660 492
661 dma_write(dest_start, CDSA, lch); 493 p->dma_write(dest_start, CDSA, lch);
662 494
663 dma_write(dst_ei, CDEI, lch); 495 p->dma_write(dst_ei, CDEI, lch);
664 dma_write(dst_fi, CDFI, lch); 496 p->dma_write(dst_fi, CDFI, lch);
665} 497}
666EXPORT_SYMBOL(omap_set_dma_dest_params); 498EXPORT_SYMBOL(omap_set_dma_dest_params);
667 499
@@ -670,8 +502,8 @@ void omap_set_dma_dest_index(int lch, int eidx, int fidx)
670 if (cpu_class_is_omap2()) 502 if (cpu_class_is_omap2())
671 return; 503 return;
672 504
673 dma_write(eidx, CDEI, lch); 505 p->dma_write(eidx, CDEI, lch);
674 dma_write(fidx, CDFI, lch); 506 p->dma_write(fidx, CDFI, lch);
675} 507}
676EXPORT_SYMBOL(omap_set_dma_dest_index); 508EXPORT_SYMBOL(omap_set_dma_dest_index);
677 509
@@ -679,11 +511,11 @@ void omap_set_dma_dest_data_pack(int lch, int enable)
679{ 511{
680 u32 l; 512 u32 l;
681 513
682 l = dma_read(CSDP, lch); 514 l = p->dma_read(CSDP, lch);
683 l &= ~(1 << 13); 515 l &= ~(1 << 13);
684 if (enable) 516 if (enable)
685 l |= 1 << 13; 517 l |= 1 << 13;
686 dma_write(l, CSDP, lch); 518 p->dma_write(l, CSDP, lch);
687} 519}
688EXPORT_SYMBOL(omap_set_dma_dest_data_pack); 520EXPORT_SYMBOL(omap_set_dma_dest_data_pack);
689 521
@@ -692,7 +524,7 @@ void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
692 unsigned int burst = 0; 524 unsigned int burst = 0;
693 u32 l; 525 u32 l;
694 526
695 l = dma_read(CSDP, lch); 527 l = p->dma_read(CSDP, lch);
696 l &= ~(0x03 << 14); 528 l &= ~(0x03 << 14);
697 529
698 switch (burst_mode) { 530 switch (burst_mode) {
@@ -725,7 +557,7 @@ void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
725 return; 557 return;
726 } 558 }
727 l |= (burst << 14); 559 l |= (burst << 14);
728 dma_write(l, CSDP, lch); 560 p->dma_write(l, CSDP, lch);
729} 561}
730EXPORT_SYMBOL(omap_set_dma_dest_burst_mode); 562EXPORT_SYMBOL(omap_set_dma_dest_burst_mode);
731 563
@@ -735,18 +567,18 @@ static inline void omap_enable_channel_irq(int lch)
735 567
736 /* Clear CSR */ 568 /* Clear CSR */
737 if (cpu_class_is_omap1()) 569 if (cpu_class_is_omap1())
738 status = dma_read(CSR, lch); 570 status = p->dma_read(CSR, lch);
739 else if (cpu_class_is_omap2()) 571 else if (cpu_class_is_omap2())
740 dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch); 572 p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);
741 573
742 /* Enable some nice interrupts. */ 574 /* Enable some nice interrupts. */
743 dma_write(dma_chan[lch].enabled_irqs, CICR, lch); 575 p->dma_write(dma_chan[lch].enabled_irqs, CICR, lch);
744} 576}
745 577
746static void omap_disable_channel_irq(int lch) 578static void omap_disable_channel_irq(int lch)
747{ 579{
748 if (cpu_class_is_omap2()) 580 if (cpu_class_is_omap2())
749 dma_write(0, CICR, lch); 581 p->dma_write(0, CICR, lch);
750} 582}
751 583
752void omap_enable_dma_irq(int lch, u16 bits) 584void omap_enable_dma_irq(int lch, u16 bits)
@@ -765,7 +597,7 @@ static inline void enable_lnk(int lch)
765{ 597{
766 u32 l; 598 u32 l;
767 599
768 l = dma_read(CLNK_CTRL, lch); 600 l = p->dma_read(CLNK_CTRL, lch);
769 601
770 if (cpu_class_is_omap1()) 602 if (cpu_class_is_omap1())
771 l &= ~(1 << 14); 603 l &= ~(1 << 14);
@@ -780,18 +612,18 @@ static inline void enable_lnk(int lch)
780 l = dma_chan[lch].next_linked_ch | (1 << 15); 612 l = dma_chan[lch].next_linked_ch | (1 << 15);
781#endif 613#endif
782 614
783 dma_write(l, CLNK_CTRL, lch); 615 p->dma_write(l, CLNK_CTRL, lch);
784} 616}
785 617
786static inline void disable_lnk(int lch) 618static inline void disable_lnk(int lch)
787{ 619{
788 u32 l; 620 u32 l;
789 621
790 l = dma_read(CLNK_CTRL, lch); 622 l = p->dma_read(CLNK_CTRL, lch);
791 623
792 /* Disable interrupts */ 624 /* Disable interrupts */
793 if (cpu_class_is_omap1()) { 625 if (cpu_class_is_omap1()) {
794 dma_write(0, CICR, lch); 626 p->dma_write(0, CICR, lch);
795 /* Set the STOP_LNK bit */ 627 /* Set the STOP_LNK bit */
796 l |= 1 << 14; 628 l |= 1 << 14;
797 } 629 }
@@ -802,7 +634,7 @@ static inline void disable_lnk(int lch)
802 l &= ~(1 << 15); 634 l &= ~(1 << 15);
803 } 635 }
804 636
805 dma_write(l, CLNK_CTRL, lch); 637 p->dma_write(l, CLNK_CTRL, lch);
806 dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE; 638 dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
807} 639}
808 640
@@ -815,9 +647,9 @@ static inline void omap2_enable_irq_lch(int lch)
815 return; 647 return;
816 648
817 spin_lock_irqsave(&dma_chan_lock, flags); 649 spin_lock_irqsave(&dma_chan_lock, flags);
818 val = dma_read(IRQENABLE_L0, lch); 650 val = p->dma_read(IRQENABLE_L0, lch);
819 val |= 1 << lch; 651 val |= 1 << lch;
820 dma_write(val, IRQENABLE_L0, lch); 652 p->dma_write(val, IRQENABLE_L0, lch);
821 spin_unlock_irqrestore(&dma_chan_lock, flags); 653 spin_unlock_irqrestore(&dma_chan_lock, flags);
822} 654}
823 655
@@ -830,9 +662,9 @@ static inline void omap2_disable_irq_lch(int lch)
830 return; 662 return;
831 663
832 spin_lock_irqsave(&dma_chan_lock, flags); 664 spin_lock_irqsave(&dma_chan_lock, flags);
833 val = dma_read(IRQENABLE_L0, lch); 665 val = p->dma_read(IRQENABLE_L0, lch);
834 val &= ~(1 << lch); 666 val &= ~(1 << lch);
835 dma_write(val, IRQENABLE_L0, lch); 667 p->dma_write(val, IRQENABLE_L0, lch);
836 spin_unlock_irqrestore(&dma_chan_lock, flags); 668 spin_unlock_irqrestore(&dma_chan_lock, flags);
837} 669}
838 670
@@ -859,8 +691,8 @@ int omap_request_dma(int dev_id, const char *dev_name,
859 chan = dma_chan + free_ch; 691 chan = dma_chan + free_ch;
860 chan->dev_id = dev_id; 692 chan->dev_id = dev_id;
861 693
862 if (cpu_class_is_omap1()) 694 if (p->clear_lch_regs)
863 clear_lch_regs(free_ch); 695 p->clear_lch_regs(free_ch);
864 696
865 if (cpu_class_is_omap2()) 697 if (cpu_class_is_omap2())
866 omap_clear_dma(free_ch); 698 omap_clear_dma(free_ch);
@@ -897,17 +729,17 @@ int omap_request_dma(int dev_id, const char *dev_name,
897 * Disable the 1510 compatibility mode and set the sync device 729 * Disable the 1510 compatibility mode and set the sync device
898 * id. 730 * id.
899 */ 731 */
900 dma_write(dev_id | (1 << 10), CCR, free_ch); 732 p->dma_write(dev_id | (1 << 10), CCR, free_ch);
901 } else if (cpu_is_omap7xx() || cpu_is_omap15xx()) { 733 } else if (cpu_is_omap7xx() || cpu_is_omap15xx()) {
902 dma_write(dev_id, CCR, free_ch); 734 p->dma_write(dev_id, CCR, free_ch);
903 } 735 }
904 736
905 if (cpu_class_is_omap2()) { 737 if (cpu_class_is_omap2()) {
906 omap2_enable_irq_lch(free_ch); 738 omap2_enable_irq_lch(free_ch);
907 omap_enable_channel_irq(free_ch); 739 omap_enable_channel_irq(free_ch);
908 /* Clear the CSR register and IRQ status register */ 740 /* Clear the CSR register and IRQ status register */
909 dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, free_ch); 741 p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, free_ch);
910 dma_write(1 << free_ch, IRQSTATUS_L0, 0); 742 p->dma_write(1 << free_ch, IRQSTATUS_L0, 0);
911 } 743 }
912 744
913 *dma_ch_out = free_ch; 745 *dma_ch_out = free_ch;
@@ -928,23 +760,23 @@ void omap_free_dma(int lch)
928 760
929 if (cpu_class_is_omap1()) { 761 if (cpu_class_is_omap1()) {
930 /* Disable all DMA interrupts for the channel. */ 762 /* Disable all DMA interrupts for the channel. */
931 dma_write(0, CICR, lch); 763 p->dma_write(0, CICR, lch);
932 /* Make sure the DMA transfer is stopped. */ 764 /* Make sure the DMA transfer is stopped. */
933 dma_write(0, CCR, lch); 765 p->dma_write(0, CCR, lch);
934 } 766 }
935 767
936 if (cpu_class_is_omap2()) { 768 if (cpu_class_is_omap2()) {
937 omap2_disable_irq_lch(lch); 769 omap2_disable_irq_lch(lch);
938 770
939 /* Clear the CSR register and IRQ status register */ 771 /* Clear the CSR register and IRQ status register */
940 dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch); 772 p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);
941 dma_write(1 << lch, IRQSTATUS_L0, lch); 773 p->dma_write(1 << lch, IRQSTATUS_L0, lch);
942 774
943 /* Disable all DMA interrupts for the channel. */ 775 /* Disable all DMA interrupts for the channel. */
944 dma_write(0, CICR, lch); 776 p->dma_write(0, CICR, lch);
945 777
946 /* Make sure the DMA transfer is stopped. */ 778 /* Make sure the DMA transfer is stopped. */
947 dma_write(0, CCR, lch); 779 p->dma_write(0, CCR, lch);
948 omap_clear_dma(lch); 780 omap_clear_dma(lch);
949 } 781 }
950 782
@@ -985,7 +817,7 @@ omap_dma_set_global_params(int arb_rate, int max_fifo_depth, int tparams)
985 reg |= (0x3 & tparams) << 12; 817 reg |= (0x3 & tparams) << 12;
986 reg |= (arb_rate & 0xff) << 16; 818 reg |= (arb_rate & 0xff) << 16;
987 819
988 dma_write(reg, GCR, 0); 820 p->dma_write(reg, GCR, 0);
989} 821}
990EXPORT_SYMBOL(omap_dma_set_global_params); 822EXPORT_SYMBOL(omap_dma_set_global_params);
991 823
@@ -1008,14 +840,14 @@ omap_dma_set_prio_lch(int lch, unsigned char read_prio,
1008 printk(KERN_ERR "Invalid channel id\n"); 840 printk(KERN_ERR "Invalid channel id\n");
1009 return -EINVAL; 841 return -EINVAL;
1010 } 842 }
1011 l = dma_read(CCR, lch); 843 l = p->dma_read(CCR, lch);
1012 l &= ~((1 << 6) | (1 << 26)); 844 l &= ~((1 << 6) | (1 << 26));
1013 if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx()) 845 if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx())
1014 l |= ((read_prio & 0x1) << 6) | ((write_prio & 0x1) << 26); 846 l |= ((read_prio & 0x1) << 6) | ((write_prio & 0x1) << 26);
1015 else 847 else
1016 l |= ((read_prio & 0x1) << 6); 848 l |= ((read_prio & 0x1) << 6);
1017 849
1018 dma_write(l, CCR, lch); 850 p->dma_write(l, CCR, lch);
1019 851
1020 return 0; 852 return 0;
1021} 853}
@@ -1030,24 +862,7 @@ void omap_clear_dma(int lch)
1030 unsigned long flags; 862 unsigned long flags;
1031 863
1032 local_irq_save(flags); 864 local_irq_save(flags);
1033 865 p->clear_dma(lch);
1034 if (cpu_class_is_omap1()) {
1035 u32 l;
1036
1037 l = dma_read(CCR, lch);
1038 l &= ~OMAP_DMA_CCR_EN;
1039 dma_write(l, CCR, lch);
1040
1041 /* Clear pending interrupts */
1042 l = dma_read(CSR, lch);
1043 }
1044
1045 if (cpu_class_is_omap2()) {
1046 int i = dma_common_ch_start;
1047 for (; i <= dma_common_ch_end; i += 1)
1048 dma_write(0, i, lch);
1049 }
1050
1051 local_irq_restore(flags); 866 local_irq_restore(flags);
1052} 867}
1053EXPORT_SYMBOL(omap_clear_dma); 868EXPORT_SYMBOL(omap_clear_dma);
@@ -1061,13 +876,13 @@ void omap_start_dma(int lch)
1061 * before starting dma transfer. 876 * before starting dma transfer.
1062 */ 877 */
1063 if (cpu_is_omap15xx()) 878 if (cpu_is_omap15xx())
1064 dma_write(0, CPC, lch); 879 p->dma_write(0, CPC, lch);
1065 else 880 else
1066 dma_write(0, CDAC, lch); 881 p->dma_write(0, CDAC, lch);
1067 882
1068 if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) { 883 if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
1069 int next_lch, cur_lch; 884 int next_lch, cur_lch;
1070 char dma_chan_link_map[OMAP_DMA4_LOGICAL_DMA_CH_COUNT]; 885 char dma_chan_link_map[dma_lch_count];
1071 886
1072 dma_chan_link_map[lch] = 1; 887 dma_chan_link_map[lch] = 1;
1073 /* Set the link register of the first channel */ 888 /* Set the link register of the first channel */
@@ -1090,17 +905,17 @@ void omap_start_dma(int lch)
1090 cur_lch = next_lch; 905 cur_lch = next_lch;
1091 } while (next_lch != -1); 906 } while (next_lch != -1);
1092 } else if (IS_DMA_ERRATA(DMA_ERRATA_PARALLEL_CHANNELS)) 907 } else if (IS_DMA_ERRATA(DMA_ERRATA_PARALLEL_CHANNELS))
1093 dma_write(lch, CLNK_CTRL, lch); 908 p->dma_write(lch, CLNK_CTRL, lch);
1094 909
1095 omap_enable_channel_irq(lch); 910 omap_enable_channel_irq(lch);
1096 911
1097 l = dma_read(CCR, lch); 912 l = p->dma_read(CCR, lch);
1098 913
1099 if (IS_DMA_ERRATA(DMA_ERRATA_IFRAME_BUFFERING)) 914 if (IS_DMA_ERRATA(DMA_ERRATA_IFRAME_BUFFERING))
1100 l |= OMAP_DMA_CCR_BUFFERING_DISABLE; 915 l |= OMAP_DMA_CCR_BUFFERING_DISABLE;
1101 l |= OMAP_DMA_CCR_EN; 916 l |= OMAP_DMA_CCR_EN;
1102 917
1103 dma_write(l, CCR, lch); 918 p->dma_write(l, CCR, lch);
1104 919
1105 dma_chan[lch].flags |= OMAP_DMA_ACTIVE; 920 dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
1106} 921}
@@ -1112,46 +927,46 @@ void omap_stop_dma(int lch)
1112 927
1113 /* Disable all interrupts on the channel */ 928 /* Disable all interrupts on the channel */
1114 if (cpu_class_is_omap1()) 929 if (cpu_class_is_omap1())
1115 dma_write(0, CICR, lch); 930 p->dma_write(0, CICR, lch);
1116 931
1117 l = dma_read(CCR, lch); 932 l = p->dma_read(CCR, lch);
1118 if (IS_DMA_ERRATA(DMA_ERRATA_i541) && 933 if (IS_DMA_ERRATA(DMA_ERRATA_i541) &&
1119 (l & OMAP_DMA_CCR_SEL_SRC_DST_SYNC)) { 934 (l & OMAP_DMA_CCR_SEL_SRC_DST_SYNC)) {
1120 int i = 0; 935 int i = 0;
1121 u32 sys_cf; 936 u32 sys_cf;
1122 937
1123 /* Configure No-Standby */ 938 /* Configure No-Standby */
1124 l = dma_read(OCP_SYSCONFIG, lch); 939 l = p->dma_read(OCP_SYSCONFIG, lch);
1125 sys_cf = l; 940 sys_cf = l;
1126 l &= ~DMA_SYSCONFIG_MIDLEMODE_MASK; 941 l &= ~DMA_SYSCONFIG_MIDLEMODE_MASK;
1127 l |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE); 942 l |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
1128 dma_write(l , OCP_SYSCONFIG, 0); 943 p->dma_write(l , OCP_SYSCONFIG, 0);
1129 944
1130 l = dma_read(CCR, lch); 945 l = p->dma_read(CCR, lch);
1131 l &= ~OMAP_DMA_CCR_EN; 946 l &= ~OMAP_DMA_CCR_EN;
1132 dma_write(l, CCR, lch); 947 p->dma_write(l, CCR, lch);
1133 948
1134 /* Wait for sDMA FIFO drain */ 949 /* Wait for sDMA FIFO drain */
1135 l = dma_read(CCR, lch); 950 l = p->dma_read(CCR, lch);
1136 while (i < 100 && (l & (OMAP_DMA_CCR_RD_ACTIVE | 951 while (i < 100 && (l & (OMAP_DMA_CCR_RD_ACTIVE |
1137 OMAP_DMA_CCR_WR_ACTIVE))) { 952 OMAP_DMA_CCR_WR_ACTIVE))) {
1138 udelay(5); 953 udelay(5);
1139 i++; 954 i++;
1140 l = dma_read(CCR, lch); 955 l = p->dma_read(CCR, lch);
1141 } 956 }
1142 if (i >= 100) 957 if (i >= 100)
1143 printk(KERN_ERR "DMA drain did not complete on " 958 printk(KERN_ERR "DMA drain did not complete on "
1144 "lch %d\n", lch); 959 "lch %d\n", lch);
1145 /* Restore OCP_SYSCONFIG */ 960 /* Restore OCP_SYSCONFIG */
1146 dma_write(sys_cf, OCP_SYSCONFIG, lch); 961 p->dma_write(sys_cf, OCP_SYSCONFIG, lch);
1147 } else { 962 } else {
1148 l &= ~OMAP_DMA_CCR_EN; 963 l &= ~OMAP_DMA_CCR_EN;
1149 dma_write(l, CCR, lch); 964 p->dma_write(l, CCR, lch);
1150 } 965 }
1151 966
1152 if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) { 967 if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
1153 int next_lch, cur_lch = lch; 968 int next_lch, cur_lch = lch;
1154 char dma_chan_link_map[OMAP_DMA4_LOGICAL_DMA_CH_COUNT]; 969 char dma_chan_link_map[dma_lch_count];
1155 970
1156 memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map)); 971 memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
1157 do { 972 do {
@@ -1212,15 +1027,15 @@ dma_addr_t omap_get_dma_src_pos(int lch)
1212 dma_addr_t offset = 0; 1027 dma_addr_t offset = 0;
1213 1028
1214 if (cpu_is_omap15xx()) 1029 if (cpu_is_omap15xx())
1215 offset = dma_read(CPC, lch); 1030 offset = p->dma_read(CPC, lch);
1216 else 1031 else
1217 offset = dma_read(CSAC, lch); 1032 offset = p->dma_read(CSAC, lch);
1218 1033
1219 if (IS_DMA_ERRATA(DMA_ERRATA_3_3) && offset == 0) 1034 if (IS_DMA_ERRATA(DMA_ERRATA_3_3) && offset == 0)
1220 offset = dma_read(CSAC, lch); 1035 offset = p->dma_read(CSAC, lch);
1221 1036
1222 if (cpu_class_is_omap1()) 1037 if (cpu_class_is_omap1())
1223 offset |= (dma_read(CSSA, lch) & 0xFFFF0000); 1038 offset |= (p->dma_read(CSSA, lch) & 0xFFFF0000);
1224 1039
1225 return offset; 1040 return offset;
1226} 1041}
@@ -1239,19 +1054,19 @@ dma_addr_t omap_get_dma_dst_pos(int lch)
1239 dma_addr_t offset = 0; 1054 dma_addr_t offset = 0;
1240 1055
1241 if (cpu_is_omap15xx()) 1056 if (cpu_is_omap15xx())
1242 offset = dma_read(CPC, lch); 1057 offset = p->dma_read(CPC, lch);
1243 else 1058 else
1244 offset = dma_read(CDAC, lch); 1059 offset = p->dma_read(CDAC, lch);
1245 1060
1246 /* 1061 /*
1247 * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is 1062 * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
1248 * read before the DMA controller finished disabling the channel. 1063 * read before the DMA controller finished disabling the channel.
1249 */ 1064 */
1250 if (!cpu_is_omap15xx() && offset == 0) 1065 if (!cpu_is_omap15xx() && offset == 0)
1251 offset = dma_read(CDAC, lch); 1066 offset = p->dma_read(CDAC, lch);
1252 1067
1253 if (cpu_class_is_omap1()) 1068 if (cpu_class_is_omap1())
1254 offset |= (dma_read(CDSA, lch) & 0xFFFF0000); 1069 offset |= (p->dma_read(CDSA, lch) & 0xFFFF0000);
1255 1070
1256 return offset; 1071 return offset;
1257} 1072}
@@ -1259,7 +1074,7 @@ EXPORT_SYMBOL(omap_get_dma_dst_pos);
1259 1074
1260int omap_get_dma_active_status(int lch) 1075int omap_get_dma_active_status(int lch)
1261{ 1076{
1262 return (dma_read(CCR, lch) & OMAP_DMA_CCR_EN) != 0; 1077 return (p->dma_read(CCR, lch) & OMAP_DMA_CCR_EN) != 0;
1263} 1078}
1264EXPORT_SYMBOL(omap_get_dma_active_status); 1079EXPORT_SYMBOL(omap_get_dma_active_status);
1265 1080
@@ -1272,7 +1087,7 @@ int omap_dma_running(void)
1272 return 1; 1087 return 1;
1273 1088
1274 for (lch = 0; lch < dma_chan_count; lch++) 1089 for (lch = 0; lch < dma_chan_count; lch++)
1275 if (dma_read(CCR, lch) & OMAP_DMA_CCR_EN) 1090 if (p->dma_read(CCR, lch) & OMAP_DMA_CCR_EN)
1276 return 1; 1091 return 1;
1277 1092
1278 return 0; 1093 return 0;
@@ -1287,7 +1102,7 @@ void omap_dma_link_lch(int lch_head, int lch_queue)
1287{ 1102{
1288 if (omap_dma_in_1510_mode()) { 1103 if (omap_dma_in_1510_mode()) {
1289 if (lch_head == lch_queue) { 1104 if (lch_head == lch_queue) {
1290 dma_write(dma_read(CCR, lch_head) | (3 << 8), 1105 p->dma_write(p->dma_read(CCR, lch_head) | (3 << 8),
1291 CCR, lch_head); 1106 CCR, lch_head);
1292 return; 1107 return;
1293 } 1108 }
@@ -1314,7 +1129,7 @@ void omap_dma_unlink_lch(int lch_head, int lch_queue)
1314{ 1129{
1315 if (omap_dma_in_1510_mode()) { 1130 if (omap_dma_in_1510_mode()) {
1316 if (lch_head == lch_queue) { 1131 if (lch_head == lch_queue) {
1317 dma_write(dma_read(CCR, lch_head) & ~(3 << 8), 1132 p->dma_write(p->dma_read(CCR, lch_head) & ~(3 << 8),
1318 CCR, lch_head); 1133 CCR, lch_head);
1319 return; 1134 return;
1320 } 1135 }
@@ -1341,8 +1156,6 @@ void omap_dma_unlink_lch(int lch_head, int lch_queue)
1341} 1156}
1342EXPORT_SYMBOL(omap_dma_unlink_lch); 1157EXPORT_SYMBOL(omap_dma_unlink_lch);
1343 1158
1344/*----------------------------------------------------------------------------*/
1345
1346#ifndef CONFIG_ARCH_OMAP1 1159#ifndef CONFIG_ARCH_OMAP1
1347/* Create chain of DMA channesls */ 1160/* Create chain of DMA channesls */
1348static void create_dma_lch_chain(int lch_head, int lch_queue) 1161static void create_dma_lch_chain(int lch_head, int lch_queue)
@@ -1367,15 +1180,15 @@ static void create_dma_lch_chain(int lch_head, int lch_queue)
1367 lch_queue; 1180 lch_queue;
1368 } 1181 }
1369 1182
1370 l = dma_read(CLNK_CTRL, lch_head); 1183 l = p->dma_read(CLNK_CTRL, lch_head);
1371 l &= ~(0x1f); 1184 l &= ~(0x1f);
1372 l |= lch_queue; 1185 l |= lch_queue;
1373 dma_write(l, CLNK_CTRL, lch_head); 1186 p->dma_write(l, CLNK_CTRL, lch_head);
1374 1187
1375 l = dma_read(CLNK_CTRL, lch_queue); 1188 l = p->dma_read(CLNK_CTRL, lch_queue);
1376 l &= ~(0x1f); 1189 l &= ~(0x1f);
1377 l |= (dma_chan[lch_queue].next_linked_ch); 1190 l |= (dma_chan[lch_queue].next_linked_ch);
1378 dma_write(l, CLNK_CTRL, lch_queue); 1191 p->dma_write(l, CLNK_CTRL, lch_queue);
1379} 1192}
1380 1193
1381/** 1194/**
@@ -1651,13 +1464,13 @@ int omap_dma_chain_a_transfer(int chain_id, int src_start, int dest_start,
1651 1464
1652 /* Set the params to the free channel */ 1465 /* Set the params to the free channel */
1653 if (src_start != 0) 1466 if (src_start != 0)
1654 dma_write(src_start, CSSA, lch); 1467 p->dma_write(src_start, CSSA, lch);
1655 if (dest_start != 0) 1468 if (dest_start != 0)
1656 dma_write(dest_start, CDSA, lch); 1469 p->dma_write(dest_start, CDSA, lch);
1657 1470
1658 /* Write the buffer size */ 1471 /* Write the buffer size */
1659 dma_write(elem_count, CEN, lch); 1472 p->dma_write(elem_count, CEN, lch);
1660 dma_write(frame_count, CFN, lch); 1473 p->dma_write(frame_count, CFN, lch);
1661 1474
1662 /* 1475 /*
1663 * If the chain is dynamically linked, 1476 * If the chain is dynamically linked,
@@ -1690,7 +1503,7 @@ int omap_dma_chain_a_transfer(int chain_id, int src_start, int dest_start,
1690 enable_lnk(dma_chan[lch].prev_linked_ch); 1503 enable_lnk(dma_chan[lch].prev_linked_ch);
1691 dma_chan[lch].state = DMA_CH_QUEUED; 1504 dma_chan[lch].state = DMA_CH_QUEUED;
1692 start_dma = 0; 1505 start_dma = 0;
1693 if (0 == ((1 << 7) & dma_read( 1506 if (0 == ((1 << 7) & p->dma_read(
1694 CCR, dma_chan[lch].prev_linked_ch))) { 1507 CCR, dma_chan[lch].prev_linked_ch))) {
1695 disable_lnk(dma_chan[lch]. 1508 disable_lnk(dma_chan[lch].
1696 prev_linked_ch); 1509 prev_linked_ch);
@@ -1707,7 +1520,7 @@ int omap_dma_chain_a_transfer(int chain_id, int src_start, int dest_start,
1707 } 1520 }
1708 omap_enable_channel_irq(lch); 1521 omap_enable_channel_irq(lch);
1709 1522
1710 l = dma_read(CCR, lch); 1523 l = p->dma_read(CCR, lch);
1711 1524
1712 if ((0 == (l & (1 << 24)))) 1525 if ((0 == (l & (1 << 24))))
1713 l &= ~(1 << 25); 1526 l &= ~(1 << 25);
@@ -1718,12 +1531,12 @@ int omap_dma_chain_a_transfer(int chain_id, int src_start, int dest_start,
1718 l |= (1 << 7); 1531 l |= (1 << 7);
1719 dma_chan[lch].state = DMA_CH_STARTED; 1532 dma_chan[lch].state = DMA_CH_STARTED;
1720 pr_debug("starting %d\n", lch); 1533 pr_debug("starting %d\n", lch);
1721 dma_write(l, CCR, lch); 1534 p->dma_write(l, CCR, lch);
1722 } else 1535 } else
1723 start_dma = 0; 1536 start_dma = 0;
1724 } else { 1537 } else {
1725 if (0 == (l & (1 << 7))) 1538 if (0 == (l & (1 << 7)))
1726 dma_write(l, CCR, lch); 1539 p->dma_write(l, CCR, lch);
1727 } 1540 }
1728 dma_chan[lch].flags |= OMAP_DMA_ACTIVE; 1541 dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
1729 } 1542 }
@@ -1768,7 +1581,7 @@ int omap_start_dma_chain_transfers(int chain_id)
1768 omap_enable_channel_irq(channels[0]); 1581 omap_enable_channel_irq(channels[0]);
1769 } 1582 }
1770 1583
1771 l = dma_read(CCR, channels[0]); 1584 l = p->dma_read(CCR, channels[0]);
1772 l |= (1 << 7); 1585 l |= (1 << 7);
1773 dma_linked_lch[chain_id].chain_state = DMA_CHAIN_STARTED; 1586 dma_linked_lch[chain_id].chain_state = DMA_CHAIN_STARTED;
1774 dma_chan[channels[0]].state = DMA_CH_STARTED; 1587 dma_chan[channels[0]].state = DMA_CH_STARTED;
@@ -1777,7 +1590,7 @@ int omap_start_dma_chain_transfers(int chain_id)
1777 l &= ~(1 << 25); 1590 l &= ~(1 << 25);
1778 else 1591 else
1779 l |= (1 << 25); 1592 l |= (1 << 25);
1780 dma_write(l, CCR, channels[0]); 1593 p->dma_write(l, CCR, channels[0]);
1781 1594
1782 dma_chan[channels[0]].flags |= OMAP_DMA_ACTIVE; 1595 dma_chan[channels[0]].flags |= OMAP_DMA_ACTIVE;
1783 1596
@@ -1813,19 +1626,19 @@ int omap_stop_dma_chain_transfers(int chain_id)
1813 channels = dma_linked_lch[chain_id].linked_dmach_q; 1626 channels = dma_linked_lch[chain_id].linked_dmach_q;
1814 1627
1815 if (IS_DMA_ERRATA(DMA_ERRATA_i88)) { 1628 if (IS_DMA_ERRATA(DMA_ERRATA_i88)) {
1816 sys_cf = dma_read(OCP_SYSCONFIG, 0); 1629 sys_cf = p->dma_read(OCP_SYSCONFIG, 0);
1817 l = sys_cf; 1630 l = sys_cf;
1818 /* Middle mode reg set no Standby */ 1631 /* Middle mode reg set no Standby */
1819 l &= ~((1 << 12)|(1 << 13)); 1632 l &= ~((1 << 12)|(1 << 13));
1820 dma_write(l, OCP_SYSCONFIG, 0); 1633 p->dma_write(l, OCP_SYSCONFIG, 0);
1821 } 1634 }
1822 1635
1823 for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) { 1636 for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1824 1637
1825 /* Stop the Channel transmission */ 1638 /* Stop the Channel transmission */
1826 l = dma_read(CCR, channels[i]); 1639 l = p->dma_read(CCR, channels[i]);
1827 l &= ~(1 << 7); 1640 l &= ~(1 << 7);
1828 dma_write(l, CCR, channels[i]); 1641 p->dma_write(l, CCR, channels[i]);
1829 1642
1830 /* Disable the link in all the channels */ 1643 /* Disable the link in all the channels */
1831 disable_lnk(channels[i]); 1644 disable_lnk(channels[i]);
@@ -1838,7 +1651,7 @@ int omap_stop_dma_chain_transfers(int chain_id)
1838 OMAP_DMA_CHAIN_QINIT(chain_id); 1651 OMAP_DMA_CHAIN_QINIT(chain_id);
1839 1652
1840 if (IS_DMA_ERRATA(DMA_ERRATA_i88)) 1653 if (IS_DMA_ERRATA(DMA_ERRATA_i88))
1841 dma_write(sys_cf, OCP_SYSCONFIG, 0); 1654 p->dma_write(sys_cf, OCP_SYSCONFIG, 0);
1842 1655
1843 return 0; 1656 return 0;
1844} 1657}
@@ -1880,8 +1693,8 @@ int omap_get_dma_chain_index(int chain_id, int *ei, int *fi)
1880 /* Get the current channel */ 1693 /* Get the current channel */
1881 lch = channels[dma_linked_lch[chain_id].q_head]; 1694 lch = channels[dma_linked_lch[chain_id].q_head];
1882 1695
1883 *ei = dma_read(CCEN, lch); 1696 *ei = p->dma_read(CCEN, lch);
1884 *fi = dma_read(CCFN, lch); 1697 *fi = p->dma_read(CCFN, lch);
1885 1698
1886 return 0; 1699 return 0;
1887} 1700}
@@ -1918,7 +1731,7 @@ int omap_get_dma_chain_dst_pos(int chain_id)
1918 /* Get the current channel */ 1731 /* Get the current channel */
1919 lch = channels[dma_linked_lch[chain_id].q_head]; 1732 lch = channels[dma_linked_lch[chain_id].q_head];
1920 1733
1921 return dma_read(CDAC, lch); 1734 return p->dma_read(CDAC, lch);
1922} 1735}
1923EXPORT_SYMBOL(omap_get_dma_chain_dst_pos); 1736EXPORT_SYMBOL(omap_get_dma_chain_dst_pos);
1924 1737
@@ -1952,7 +1765,7 @@ int omap_get_dma_chain_src_pos(int chain_id)
1952 /* Get the current channel */ 1765 /* Get the current channel */
1953 lch = channels[dma_linked_lch[chain_id].q_head]; 1766 lch = channels[dma_linked_lch[chain_id].q_head];
1954 1767
1955 return dma_read(CSAC, lch); 1768 return p->dma_read(CSAC, lch);
1956} 1769}
1957EXPORT_SYMBOL(omap_get_dma_chain_src_pos); 1770EXPORT_SYMBOL(omap_get_dma_chain_src_pos);
1958#endif /* ifndef CONFIG_ARCH_OMAP1 */ 1771#endif /* ifndef CONFIG_ARCH_OMAP1 */
@@ -1969,7 +1782,7 @@ static int omap1_dma_handle_ch(int ch)
1969 csr = dma_chan[ch].saved_csr; 1782 csr = dma_chan[ch].saved_csr;
1970 dma_chan[ch].saved_csr = 0; 1783 dma_chan[ch].saved_csr = 0;
1971 } else 1784 } else
1972 csr = dma_read(CSR, ch); 1785 csr = p->dma_read(CSR, ch);
1973 if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) { 1786 if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) {
1974 dma_chan[ch + 6].saved_csr = csr >> 7; 1787 dma_chan[ch + 6].saved_csr = csr >> 7;
1975 csr &= 0x7f; 1788 csr &= 0x7f;
@@ -2022,13 +1835,13 @@ static irqreturn_t omap1_dma_irq_handler(int irq, void *dev_id)
2022 1835
2023static int omap2_dma_handle_ch(int ch) 1836static int omap2_dma_handle_ch(int ch)
2024{ 1837{
2025 u32 status = dma_read(CSR, ch); 1838 u32 status = p->dma_read(CSR, ch);
2026 1839
2027 if (!status) { 1840 if (!status) {
2028 if (printk_ratelimit()) 1841 if (printk_ratelimit())
2029 printk(KERN_WARNING "Spurious DMA IRQ for lch %d\n", 1842 printk(KERN_WARNING "Spurious DMA IRQ for lch %d\n",
2030 ch); 1843 ch);
2031 dma_write(1 << ch, IRQSTATUS_L0, ch); 1844 p->dma_write(1 << ch, IRQSTATUS_L0, ch);
2032 return 0; 1845 return 0;
2033 } 1846 }
2034 if (unlikely(dma_chan[ch].dev_id == -1)) { 1847 if (unlikely(dma_chan[ch].dev_id == -1)) {
@@ -2047,9 +1860,9 @@ static int omap2_dma_handle_ch(int ch)
2047 if (IS_DMA_ERRATA(DMA_ERRATA_i378)) { 1860 if (IS_DMA_ERRATA(DMA_ERRATA_i378)) {
2048 u32 ccr; 1861 u32 ccr;
2049 1862
2050 ccr = dma_read(CCR, ch); 1863 ccr = p->dma_read(CCR, ch);
2051 ccr &= ~OMAP_DMA_CCR_EN; 1864 ccr &= ~OMAP_DMA_CCR_EN;
2052 dma_write(ccr, CCR, ch); 1865 p->dma_write(ccr, CCR, ch);
2053 dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE; 1866 dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
2054 } 1867 }
2055 } 1868 }
@@ -2060,16 +1873,16 @@ static int omap2_dma_handle_ch(int ch)
2060 printk(KERN_INFO "DMA misaligned error with device %d\n", 1873 printk(KERN_INFO "DMA misaligned error with device %d\n",
2061 dma_chan[ch].dev_id); 1874 dma_chan[ch].dev_id);
2062 1875
2063 dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, ch); 1876 p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, ch);
2064 dma_write(1 << ch, IRQSTATUS_L0, ch); 1877 p->dma_write(1 << ch, IRQSTATUS_L0, ch);
2065 /* read back the register to flush the write */ 1878 /* read back the register to flush the write */
2066 dma_read(IRQSTATUS_L0, ch); 1879 p->dma_read(IRQSTATUS_L0, ch);
2067 1880
2068 /* If the ch is not chained then chain_id will be -1 */ 1881 /* If the ch is not chained then chain_id will be -1 */
2069 if (dma_chan[ch].chain_id != -1) { 1882 if (dma_chan[ch].chain_id != -1) {
2070 int chain_id = dma_chan[ch].chain_id; 1883 int chain_id = dma_chan[ch].chain_id;
2071 dma_chan[ch].state = DMA_CH_NOTSTARTED; 1884 dma_chan[ch].state = DMA_CH_NOTSTARTED;
2072 if (dma_read(CLNK_CTRL, ch) & (1 << 15)) 1885 if (p->dma_read(CLNK_CTRL, ch) & (1 << 15))
2073 dma_chan[dma_chan[ch].next_linked_ch].state = 1886 dma_chan[dma_chan[ch].next_linked_ch].state =
2074 DMA_CH_STARTED; 1887 DMA_CH_STARTED;
2075 if (dma_linked_lch[chain_id].chain_mode == 1888 if (dma_linked_lch[chain_id].chain_mode ==
@@ -2079,10 +1892,10 @@ static int omap2_dma_handle_ch(int ch)
2079 if (!OMAP_DMA_CHAIN_QEMPTY(chain_id)) 1892 if (!OMAP_DMA_CHAIN_QEMPTY(chain_id))
2080 OMAP_DMA_CHAIN_INCQHEAD(chain_id); 1893 OMAP_DMA_CHAIN_INCQHEAD(chain_id);
2081 1894
2082 status = dma_read(CSR, ch); 1895 status = p->dma_read(CSR, ch);
2083 } 1896 }
2084 1897
2085 dma_write(status, CSR, ch); 1898 p->dma_write(status, CSR, ch);
2086 1899
2087 if (likely(dma_chan[ch].callback != NULL)) 1900 if (likely(dma_chan[ch].callback != NULL))
2088 dma_chan[ch].callback(ch, status, dma_chan[ch].data); 1901 dma_chan[ch].callback(ch, status, dma_chan[ch].data);
@@ -2096,13 +1909,13 @@ static irqreturn_t omap2_dma_irq_handler(int irq, void *dev_id)
2096 u32 val, enable_reg; 1909 u32 val, enable_reg;
2097 int i; 1910 int i;
2098 1911
2099 val = dma_read(IRQSTATUS_L0, 0); 1912 val = p->dma_read(IRQSTATUS_L0, 0);
2100 if (val == 0) { 1913 if (val == 0) {
2101 if (printk_ratelimit()) 1914 if (printk_ratelimit())
2102 printk(KERN_WARNING "Spurious DMA IRQ\n"); 1915 printk(KERN_WARNING "Spurious DMA IRQ\n");
2103 return IRQ_HANDLED; 1916 return IRQ_HANDLED;
2104 } 1917 }
2105 enable_reg = dma_read(IRQENABLE_L0, 0); 1918 enable_reg = p->dma_read(IRQENABLE_L0, 0);
2106 val &= enable_reg; /* Dispatch only relevant interrupts */ 1919 val &= enable_reg; /* Dispatch only relevant interrupts */
2107 for (i = 0; i < dma_lch_count && val != 0; i++) { 1920 for (i = 0; i < dma_lch_count && val != 0; i++) {
2108 if (val & 1) 1921 if (val & 1)
@@ -2128,206 +1941,66 @@ static struct irqaction omap24xx_dma_irq;
2128void omap_dma_global_context_save(void) 1941void omap_dma_global_context_save(void)
2129{ 1942{
2130 omap_dma_global_context.dma_irqenable_l0 = 1943 omap_dma_global_context.dma_irqenable_l0 =
2131 dma_read(IRQENABLE_L0, 0); 1944 p->dma_read(IRQENABLE_L0, 0);
2132 omap_dma_global_context.dma_ocp_sysconfig = 1945 omap_dma_global_context.dma_ocp_sysconfig =
2133 dma_read(OCP_SYSCONFIG, 0); 1946 p->dma_read(OCP_SYSCONFIG, 0);
2134 omap_dma_global_context.dma_gcr = dma_read(GCR, 0); 1947 omap_dma_global_context.dma_gcr = p->dma_read(GCR, 0);
2135} 1948}
2136 1949
2137void omap_dma_global_context_restore(void) 1950void omap_dma_global_context_restore(void)
2138{ 1951{
2139 int ch; 1952 int ch;
2140 1953
2141 dma_write(omap_dma_global_context.dma_gcr, GCR, 0); 1954 p->dma_write(omap_dma_global_context.dma_gcr, GCR, 0);
2142 dma_write(omap_dma_global_context.dma_ocp_sysconfig, 1955 p->dma_write(omap_dma_global_context.dma_ocp_sysconfig,
2143 OCP_SYSCONFIG, 0); 1956 OCP_SYSCONFIG, 0);
2144 dma_write(omap_dma_global_context.dma_irqenable_l0, 1957 p->dma_write(omap_dma_global_context.dma_irqenable_l0,
2145 IRQENABLE_L0, 0); 1958 IRQENABLE_L0, 0);
2146 1959
2147 if (IS_DMA_ERRATA(DMA_ROMCODE_BUG)) 1960 if (IS_DMA_ERRATA(DMA_ROMCODE_BUG))
2148 dma_write(0x3 , IRQSTATUS_L0, 0); 1961 p->dma_write(0x3 , IRQSTATUS_L0, 0);
2149 1962
2150 for (ch = 0; ch < dma_chan_count; ch++) 1963 for (ch = 0; ch < dma_chan_count; ch++)
2151 if (dma_chan[ch].dev_id != -1) 1964 if (dma_chan[ch].dev_id != -1)
2152 omap_clear_dma(ch); 1965 omap_clear_dma(ch);
2153} 1966}
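The save/restore pair above exists so that power-management code can bracket a transition in which the DMA controller loses its context. A minimal usage sketch; omap3_enter_off() is a hypothetical placeholder for whatever actually triggers the context loss:

/* Sketch only: bracket a context-losing power transition.
 * omap3_enter_off() is not part of this patch.
 */
static void example_off_mode_transition(void)
{
        /* latch IRQENABLE_L0, OCP_SYSCONFIG and GCR before they are lost */
        omap_dma_global_context_save();

        omap3_enter_off();              /* hypothetical: power domain goes off */

        /* re-program the global registers and clear any stale channels */
        omap_dma_global_context_restore();
}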
2154 1967
2155static void configure_dma_errata(void) 1968static int __devinit omap_system_dma_probe(struct platform_device *pdev)
2156{ 1969{
2157 1970 int ch, ret = 0;
2158 /* 1971 int dma_irq;
2159 * Errata applicable for OMAP2430ES1.0 and all omap2420 1972 char irq_name[4];
2160 * 1973 int irq_rel;
2161 * I. 1974
2162 * Erratum ID: Not Available 1975 p = pdev->dev.platform_data;
2163 * Inter Frame DMA buffering issue: DMA will wrongly 1976 if (!p) {
2164 * buffer elements if packing and bursting is enabled. This might 1977 dev_err(&pdev->dev, "%s: System DMA initialized without "
2165 * result in data getting stalled in the FIFO at the end of the block. 1978 "platform data\n", __func__);
2166 * Workaround: DMA channels must have the BUFFERING_DISABLED bit set to 1979 return -EINVAL;
2167 * guarantee no data will stay in the DMA FIFO in case inter frame
2168 * buffering occurs
2169 *
2170 * II.
2171 * Erratum ID: Not Available
2172 * DMA may hang when several channels are used in parallel.
2173 * In the following configuration, DMA channel hanging can occur:
2174 * a. Channel i, hardware synchronized, is enabled
2175 * b. Another channel (Channel x), software synchronized, is enabled.
2176 * c. Channel i is disabled before end of transfer
2177 * d. Channel i is reenabled.
2178 * e. Steps a to d are repeated a certain number of times.
2179 * f. A third channel (Channel y), software synchronized, is enabled.
2180 * Channel x and Channel y may hang immediately after step 'f'.
2181 * Workaround:
2182 * For any channel used - make sure NextLCH_ID is set to the value j.
2183 */
2184 if (cpu_is_omap2420() || (cpu_is_omap2430() &&
2185 (omap_type() == OMAP2430_REV_ES1_0))) {
2186 SET_DMA_ERRATA(DMA_ERRATA_IFRAME_BUFFERING);
2187 SET_DMA_ERRATA(DMA_ERRATA_PARALLEL_CHANNELS);
2188 }
2189
2190 /*
2191 * Erratum ID: i378: OMAP2plus: sDMA Channel is not disabled
2192 * after a transaction error.
2193 * Workaround: SW should explicitly disable the channel.
2194 */
2195 if (cpu_class_is_omap2())
2196 SET_DMA_ERRATA(DMA_ERRATA_i378);
2197
2198 /*
2199 * Erratum ID: i541: sDMA FIFO draining does not finish
2200 * If sDMA channel is disabled on the fly, sDMA enters standby even
2201 * though FIFO drain is still in progress.
2202 * Workaround: Put sDMA in NoStandby mode before a logical channel is
2203 * disabled, then put it back to SmartStandby right after the channel
2204 * finishes FIFO draining.
2205 */
2206 if (cpu_is_omap34xx())
2207 SET_DMA_ERRATA(DMA_ERRATA_i541);
2208
2209 /*
2210 * Erratum ID: i88 : Special programming model needed to disable DMA
2211 * before end of block.
2212 * Workaround: software must ensure that the DMA is configured in No
2213 * Standby mode(DMAx_OCP_SYSCONFIG.MIDLEMODE = "01")
2214 */
2215 if (cpu_is_omap34xx() && (omap_type() == OMAP3430_REV_ES1_0))
2216 SET_DMA_ERRATA(DMA_ERRATA_i88);
2217
2218 /*
2219 * Erratum 3.2/3.3: sometimes 0 is returned if CSAC/CDAC is
2220 * read before the DMA controller finished disabling the channel.
2221 */
2222 if (!cpu_is_omap15xx())
2223 SET_DMA_ERRATA(DMA_ERRATA_3_3);
2224
2225 /*
2226 * Erratum ID: Not Available
2227 * A bug in ROM code leaves IRQ status for channels 0 and 1 uncleared
2228 * after secure sram context save and restore.
2229 * Work around: Hence we need to manually clear those IRQs to avoid
2230 * spurious interrupts. This affects only secure devices.
2231 */
2232 if (cpu_is_omap34xx() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
2233 SET_DMA_ERRATA(DMA_ROMCODE_BUG);
2234}
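With this patch the errata word is no longer computed in the common code: it now reaches the driver through platform data (see errata = p->errata in omap_system_dma_probe()). The IS_DMA_ERRATA()/SET_DMA_ERRATA() tests used throughout reduce to bit operations on that word; a sketch of the shape they take, assuming each DMA_ERRATA_* id is a single BIT() flag:

/* Sketch of the errata bookkeeping, assuming one bit per erratum. */
static u32 errata;                      /* filled from platform data in probe() */

#define SET_DMA_ERRATA(id)      (errata |= (id))
#define IS_DMA_ERRATA(id)       (errata & (id))

/* e.g. the i88 workaround only runs when its flag was set:
 *      if (IS_DMA_ERRATA(DMA_ERRATA_i88))
 *              ...
 */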
2235
2236/*----------------------------------------------------------------------------*/
2237
2238static int __init omap_init_dma(void)
2239{
2240 unsigned long base;
2241 int ch, r;
2242
2243 if (cpu_class_is_omap1()) {
2244 base = OMAP1_DMA_BASE;
2245 dma_lch_count = OMAP1_LOGICAL_DMA_CH_COUNT;
2246 } else if (cpu_is_omap24xx()) {
2247 base = OMAP24XX_DMA4_BASE;
2248 dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
2249 } else if (cpu_is_omap34xx()) {
2250 base = OMAP34XX_DMA4_BASE;
2251 dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
2252 } else if (cpu_is_omap44xx()) {
2253 base = OMAP44XX_DMA4_BASE;
2254 dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
2255 } else {
2256 pr_err("DMA init failed for unsupported omap\n");
2257 return -ENODEV;
2258 } 1980 }
2259 1981
2260 omap_dma_base = ioremap(base, SZ_4K); 1982 d = p->dma_attr;
2261 BUG_ON(!omap_dma_base); 1983 errata = p->errata;
2262
2263 if (cpu_class_is_omap1()) {
2264 dma_stride = 0x40;
2265 reg_map = reg_map_omap1;
2266 dma_common_ch_start = CPC;
2267 dma_common_ch_end = COLOR;
2268 } else {
2269 dma_stride = 0x60;
2270 reg_map = reg_map_omap2;
2271 dma_common_ch_start = CSDP;
2272 if (cpu_is_omap3630() || cpu_is_omap4430())
2273 dma_common_ch_end = CCDN;
2274 else
2275 dma_common_ch_end = CCFN;
2276 }
2277 1984
2278 if (cpu_class_is_omap2() && omap_dma_reserve_channels 1985 if ((d->dev_caps & RESERVE_CHANNEL) && omap_dma_reserve_channels
2279 && (omap_dma_reserve_channels <= dma_lch_count)) 1986 && (omap_dma_reserve_channels <= dma_lch_count))
2280 dma_lch_count = omap_dma_reserve_channels; 1987 d->lch_count = omap_dma_reserve_channels;
2281 1988
2282 dma_chan = kzalloc(sizeof(struct omap_dma_lch) * dma_lch_count, 1989 dma_lch_count = d->lch_count;
2283 GFP_KERNEL); 1990 dma_chan_count = dma_lch_count;
2284 if (!dma_chan) { 1991 dma_chan = d->chan;
2285 r = -ENOMEM; 1992 enable_1510_mode = d->dev_caps & ENABLE_1510_MODE;
2286 goto out_unmap;
2287 }
2288 1993
2289 if (cpu_class_is_omap2()) { 1994 if (cpu_class_is_omap2()) {
2290 dma_linked_lch = kzalloc(sizeof(struct dma_link_info) * 1995 dma_linked_lch = kzalloc(sizeof(struct dma_link_info) *
2291 dma_lch_count, GFP_KERNEL); 1996 dma_lch_count, GFP_KERNEL);
2292 if (!dma_linked_lch) { 1997 if (!dma_linked_lch) {
2293 r = -ENOMEM; 1998 ret = -ENOMEM;
2294 goto out_free; 1999 goto exit_dma_lch_fail;
2295 } 2000 }
2296 } 2001 }
2297 2002
2298 if (cpu_is_omap15xx()) {
2299 printk(KERN_INFO "DMA support for OMAP15xx initialized\n");
2300 dma_chan_count = 9;
2301 enable_1510_mode = 1;
2302 } else if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
2303 printk(KERN_INFO "OMAP DMA hardware version %d\n",
2304 dma_read(HW_ID, 0));
2305 printk(KERN_INFO "DMA capabilities: %08x:%08x:%04x:%04x:%04x\n",
2306 dma_read(CAPS_0, 0), dma_read(CAPS_1, 0),
2307 dma_read(CAPS_2, 0), dma_read(CAPS_3, 0),
2308 dma_read(CAPS_4, 0));
2309 if (!enable_1510_mode) {
2310 u16 w;
2311
2312 /* Disable OMAP 3.0/3.1 compatibility mode. */
2313 w = dma_read(GSCR, 0);
2314 w |= 1 << 3;
2315 dma_write(w, GSCR, 0);
2316 dma_chan_count = 16;
2317 } else
2318 dma_chan_count = 9;
2319 } else if (cpu_class_is_omap2()) {
2320 u8 revision = dma_read(REVISION, 0) & 0xff;
2321 printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n",
2322 revision >> 4, revision & 0xf);
2323 dma_chan_count = dma_lch_count;
2324 } else {
2325 dma_chan_count = 0;
2326 return 0;
2327 }
2328
2329 spin_lock_init(&dma_chan_lock); 2003 spin_lock_init(&dma_chan_lock);
2330
2331 for (ch = 0; ch < dma_chan_count; ch++) { 2004 for (ch = 0; ch < dma_chan_count; ch++) {
2332 omap_clear_dma(ch); 2005 omap_clear_dma(ch);
2333 if (cpu_class_is_omap2()) 2006 if (cpu_class_is_omap2())
@@ -2344,20 +2017,23 @@ static int __init omap_init_dma(void)
2344 * request_irq() doesn't like dev_id (ie. ch) being 2017 * request_irq() doesn't like dev_id (ie. ch) being
2345 * zero, so we have to kludge around this. 2018 * zero, so we have to kludge around this.
2346 */ 2019 */
2347 r = request_irq(omap1_dma_irq[ch], 2020 sprintf(&irq_name[0], "%d", ch);
2021 dma_irq = platform_get_irq_byname(pdev, irq_name);
2022
2023 if (dma_irq < 0) {
2024 ret = dma_irq;
2025 goto exit_dma_irq_fail;
2026 }
2027
2028 /* INT_DMA_LCD is handled in lcd_dma.c */
2029 if (dma_irq == INT_DMA_LCD)
2030 continue;
2031
2032 ret = request_irq(dma_irq,
2348 omap1_dma_irq_handler, 0, "DMA", 2033 omap1_dma_irq_handler, 0, "DMA",
2349 (void *) (ch + 1)); 2034 (void *) (ch + 1));
2350 if (r != 0) { 2035 if (ret != 0)
2351 int i; 2036 goto exit_dma_irq_fail;
2352
2353 printk(KERN_ERR "unable to request IRQ %d "
2354 "for DMA (error %d)\n",
2355 omap1_dma_irq[ch], r);
2356 for (i = 0; i < ch; i++)
2357 free_irq(omap1_dma_irq[i],
2358 (void *) (i + 1));
2359 goto out_free;
2360 }
2361 } 2037 }
2362 } 2038 }
2363 2039
@@ -2366,47 +2042,91 @@ static int __init omap_init_dma(void)
2366 DMA_DEFAULT_FIFO_DEPTH, 0); 2042 DMA_DEFAULT_FIFO_DEPTH, 0);
2367 2043
2368 if (cpu_class_is_omap2()) { 2044 if (cpu_class_is_omap2()) {
2369 int irq; 2045 strcpy(irq_name, "0");
2370 if (cpu_is_omap44xx()) 2046 dma_irq = platform_get_irq_byname(pdev, irq_name);
2371 irq = OMAP44XX_IRQ_SDMA_0; 2047 if (dma_irq < 0) {
2372 else 2048 dev_err(&pdev->dev, "failed: request IRQ %d", dma_irq);
2373 irq = INT_24XX_SDMA_IRQ0; 2049 goto exit_dma_lch_fail;
2374 setup_irq(irq, &omap24xx_dma_irq); 2050 }
2375 } 2051 ret = setup_irq(dma_irq, &omap24xx_dma_irq);
2376 2052 if (ret) {
2377 if (cpu_is_omap34xx() || cpu_is_omap44xx()) { 2053 dev_err(&pdev->dev, "set_up failed for IRQ %d"
2378 /* Enable smartidle idlemodes and autoidle */ 2053 dev_err(&pdev->dev, "setup failed for IRQ %d "
2379 u32 v = dma_read(OCP_SYSCONFIG, 0); 2055 goto exit_dma_lch_fail;
2380 v &= ~(DMA_SYSCONFIG_MIDLEMODE_MASK |
2381 DMA_SYSCONFIG_SIDLEMODE_MASK |
2382 DMA_SYSCONFIG_AUTOIDLE);
2383 v |= (DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_SMARTIDLE) |
2384 DMA_SYSCONFIG_SIDLEMODE(DMA_IDLEMODE_SMARTIDLE) |
2385 DMA_SYSCONFIG_AUTOIDLE);
2386 dma_write(v , OCP_SYSCONFIG, 0);
2387 /* reserve dma channels 0 and 1 in high security devices */
2388 if (cpu_is_omap34xx() &&
2389 (omap_type() != OMAP2_DEVICE_TYPE_GP)) {
2390 printk(KERN_INFO "Reserving DMA channels 0 and 1 for "
2391 "HS ROM code\n");
2392 dma_chan[0].dev_id = 0;
2393 dma_chan[1].dev_id = 1;
2394 } 2056 }
2395 } 2057 }
2396 configure_dma_errata();
2397 2058
2059 /* reserve dma channels 0 and 1 in high security devices */
2060 if (cpu_is_omap34xx() &&
2061 (omap_type() != OMAP2_DEVICE_TYPE_GP)) {
2062 printk(KERN_INFO "Reserving DMA channels 0 and 1 for "
2063 "HS ROM code\n");
2064 dma_chan[0].dev_id = 0;
2065 dma_chan[1].dev_id = 1;
2066 }
2067 p->show_dma_caps();
2398 return 0; 2068 return 0;
2399 2069
2400out_free: 2070exit_dma_irq_fail:
2071 dev_err(&pdev->dev, "unable to request IRQ %d "
2072 "for DMA (error %d)\n", dma_irq, ret);
2073 for (irq_rel = 0; irq_rel < ch; irq_rel++) {
2074 dma_irq = platform_get_irq(pdev, irq_rel);
2075 free_irq(dma_irq, (void *)(irq_rel + 1));
2076 }
2077
2078exit_dma_lch_fail:
2079 kfree(p);
2080 kfree(d);
2401 kfree(dma_chan); 2081 kfree(dma_chan);
2082 return ret;
2083}
2402 2084
2403out_unmap: 2085static int __devexit omap_system_dma_remove(struct platform_device *pdev)
2404 iounmap(omap_dma_base); 2086{
2087 int dma_irq;
2405 2088
2406 return r; 2089 if (cpu_class_is_omap2()) {
2090 char irq_name[4];
2091 strcpy(irq_name, "0");
2092 dma_irq = platform_get_irq_byname(pdev, irq_name);
2093 remove_irq(dma_irq, &omap24xx_dma_irq);
2094 } else {
2095 int irq_rel = 0;
2096 for ( ; irq_rel < dma_chan_count; irq_rel++) {
2097 dma_irq = platform_get_irq(pdev, irq_rel);
2098 free_irq(dma_irq, (void *)(irq_rel + 1));
2099 }
2100 }
2101 kfree(p);
2102 kfree(d);
2103 kfree(dma_chan);
2104 return 0;
2105}
2106
2107static struct platform_driver omap_system_dma_driver = {
2108 .probe = omap_system_dma_probe,
2109 .remove = omap_system_dma_remove,
2110 .driver = {
2111 .name = "omap_dma_system"
2112 },
2113};
2114
2115static int __init omap_system_dma_init(void)
2116{
2117 return platform_driver_register(&omap_system_dma_driver);
2118}
2119arch_initcall(omap_system_dma_init);
2120
2121static void __exit omap_system_dma_exit(void)
2122{
2123 platform_driver_unregister(&omap_system_dma_driver);
2407} 2124}
2408 2125
2409arch_initcall(omap_init_dma); 2126MODULE_DESCRIPTION("OMAP SYSTEM DMA DRIVER");
2127MODULE_LICENSE("GPL");
2128MODULE_ALIAS("platform:" DRIVER_NAME);
2129MODULE_AUTHOR("Texas Instruments Inc");
2410 2130
2411/* 2131/*
2412 * Reserve the omap SDMA channels using cmdline bootarg 2132 * Reserve the omap SDMA channels using cmdline bootarg
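The trailing comment refers to the omap_dma_reserve_channels boot argument, which the probe consults when RESERVE_CHANNEL is set in dev_caps. The parsing itself lies outside the diff context shown here; a sketch of how such an early boot option is typically wired up, assuming omap_dma_reserve_channels is declared as an int in this file and with the option name chosen only for illustration:

/* Sketch only: feed omap_dma_reserve_channels from the kernel command line.
 * The "omap_dma_reserve_ch=" name is illustrative, not taken from this patch.
 */
static int __init omap_dma_cmdline_reserve_ch(char *str)
{
        if (get_option(&str, &omap_dma_reserve_channels) != 1)
                omap_dma_reserve_channels = 0;  /* malformed value: reserve nothing */
        return 1;
}
__setup("omap_dma_reserve_ch=", omap_dma_cmdline_reserve_ch);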
diff --git a/arch/arm/plat-omap/include/plat/dma.h b/arch/arm/plat-omap/include/plat/dma.h
index 4b51d2b93b0e..d1c916fcf770 100644
--- a/arch/arm/plat-omap/include/plat/dma.h
+++ b/arch/arm/plat-omap/include/plat/dma.h
@@ -21,19 +21,15 @@
21#ifndef __ASM_ARCH_DMA_H 21#ifndef __ASM_ARCH_DMA_H
22#define __ASM_ARCH_DMA_H 22#define __ASM_ARCH_DMA_H
23 23
24/* Move omap4 specific defines to dma-44xx.h */ 24#include <linux/platform_device.h>
25#include "dma-44xx.h"
26
27/* Hardware registers for omap1 */
28#define OMAP1_DMA_BASE (0xfffed800)
29 25
30/* Hardware registers for omap2 and omap3 */ 26/*
31#define OMAP24XX_DMA4_BASE (L4_24XX_BASE + 0x56000) 27 * TODO: These dma channel defines should go away once all
32#define OMAP34XX_DMA4_BASE (L4_34XX_BASE + 0x56000) 28 * the omap drivers hwmod adapted.
33#define OMAP44XX_DMA4_BASE (L4_44XX_BASE + 0x56000) 29 */
34 30
35#define OMAP1_LOGICAL_DMA_CH_COUNT 17 31/* Move omap4 specific defines to dma-44xx.h */
36#define OMAP_DMA4_LOGICAL_DMA_CH_COUNT 32 /* REVISIT: Is this 32 + 2? */ 32#include "dma-44xx.h"
37 33
38/* DMA channels for omap1 */ 34/* DMA channels for omap1 */
39#define OMAP_DMA_NO_DEVICE 0 35#define OMAP_DMA_NO_DEVICE 0
@@ -302,6 +298,14 @@
302#define IS_CSSA_32 BIT(0x3) 298#define IS_CSSA_32 BIT(0x3)
303#define IS_CDSA_32 BIT(0x4) 299#define IS_CDSA_32 BIT(0x4)
304#define IS_RW_PRIORITY BIT(0x5) 300#define IS_RW_PRIORITY BIT(0x5)
301#define ENABLE_1510_MODE BIT(0x6)
302#define SRC_PORT BIT(0x7)
303#define DST_PORT BIT(0x8)
304#define SRC_INDEX BIT(0x9)
305#define DST_INDEX BIT(0xA)
306#define IS_BURST_ONLY4 BIT(0xB)
307#define CLEAR_CSR_ON_READ BIT(0xC)
308#define IS_WORD_16 BIT(0xD)
305 309
306enum omap_reg_offsets { 310enum omap_reg_offsets {
307 311
@@ -397,9 +401,40 @@ struct omap_dma_channel_params {
397#endif 401#endif
398}; 402};
399 403
404struct omap_dma_lch {
405 int next_lch;
406 int dev_id;
407 u16 saved_csr;
408 u16 enabled_irqs;
409 const char *dev_name;
410 void (*callback)(int lch, u16 ch_status, void *data);
411 void *data;
412 long flags;
413 /* required for Dynamic chaining */
414 int prev_linked_ch;
415 int next_linked_ch;
416 int state;
417 int chain_id;
418 int status;
419};
420
400struct omap_dma_dev_attr { 421struct omap_dma_dev_attr {
401 u32 dev_caps; 422 u32 dev_caps;
402 u16 lch_count; 423 u16 lch_count;
424 u16 chan_count;
425 struct omap_dma_lch *chan;
426};
427
428/* System DMA platform data structure */
429struct omap_system_dma_plat_info {
430 struct omap_dma_dev_attr *dma_attr;
431 u32 errata;
432 void (*disable_irq_lch)(int lch);
433 void (*show_dma_caps)(void);
434 void (*clear_lch_regs)(int lch);
435 void (*clear_dma)(int lch);
436 void (*dma_write)(u32 val, int reg, int lch);
437 u32 (*dma_read)(int reg, int lch);
403}; 438};
404 439
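omap_system_dma_plat_info is what the new mach-omap1/dma.c and mach-omap2/dma.c files pass as platform_data of the "omap_dma_system" device; the driver's probe() only reaches the controller through the dma_read()/dma_write() hooks. An illustrative sketch of the wiring follows — the example_* names, stub bodies and fixed offsets are placeholders, not the code added by this patch:

/* Placeholder accessors: the real hooks look up a per-SoC register offset
 * table and add lch * stride (0x40 on OMAP1, 0x60 on OMAP2+) for the
 * channel-specific registers.
 */
static void __iomem *dma_base;                  /* ioremap()'d controller base */
static u16 example_reg_offs[128];               /* indexed by enum omap_reg_offsets */

static void example_dma_write(u32 val, int reg, int lch)
{
        u32 offs = example_reg_offs[reg];

        if (reg >= CSDP)                        /* channel registers repeat per lch */
                offs += lch * 0x60;
        __raw_writel(val, dma_base + offs);
}

static u32 example_dma_read(int reg, int lch)
{
        u32 offs = example_reg_offs[reg];

        if (reg >= CSDP)
                offs += lch * 0x60;
        return __raw_readl(dma_base + offs);
}

static void example_show_dma_caps(void) { }
static void example_clear_dma(int lch) { }

static struct omap_dma_lch example_chans[32];   /* e.g. 32 logical channels on OMAP2+ */

static struct omap_dma_dev_attr example_dma_attr = {
        .lch_count      = 32,
        .chan           = example_chans,
};

static struct omap_system_dma_plat_info example_plat_info = {
        .dma_attr       = &example_dma_attr,
        .show_dma_caps  = example_show_dma_caps,
        .clear_dma      = example_clear_dma,
        .dma_write      = example_dma_write,
        .dma_read       = example_dma_read,
};

static struct platform_device example_dma_device = {
        .name   = "omap_dma_system",            /* must match the driver name above */
        .id     = 0,
        .dev    = { .platform_data = &example_plat_info },
};

Keeping the register accessors behind function pointers is what lets a single plat-omap driver serve both the OMAP1 and OMAP2+ register layouts without per-access cpu_is_*() checks.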
405extern void omap_set_dma_priority(int lch, int dst_port, int priority); 440extern void omap_set_dma_priority(int lch, int dst_port, int priority);