Diffstat (limited to 'arch/arm/plat-omap/dma.c')
-rw-r--r-- | arch/arm/plat-omap/dma.c | 910
1 file changed, 591 insertions, 319 deletions
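The hunks below merge OMAP2 (24xx) support into the OMAP1 DMA code and widen the public API along the way: omap_set_dma_transfer_params() gains dma_trigger and src_or_dst_synch arguments, omap_set_dma_src_params() and omap_set_dma_dest_params() gain element/frame index arguments, and a new omap_set_dma_params() helper programs a whole struct omap_dma_channel_params at once. The sketch below is illustrative only and not part of the patch: it shows how a caller might use the updated signatures, assuming the usual OMAP DMA constants (OMAP_DMA_DATA_TYPE_S32, OMAP_DMA_AMODE_POST_INC, OMAP_DMA_SYNC_ELEMENT) from the header of this tree, with placeholder addresses, counts and device id.

#include <linux/types.h>
#include <asm/arch/dma.h>	/* assumed OMAP DMA header in this tree */

/* Illustrative only: one element-synchronized transfer using the
 * extended API added by this patch. All values are placeholders. */
static int example_dma_copy(dma_addr_t src, dma_addr_t dst, int nelem)
{
	int lch, ret;

	ret = omap_request_dma(0 /* any free channel */, "example",
			       NULL /* callback */, NULL /* data */, &lch);
	if (ret)
		return ret;

	/* 32-bit elements, one frame, element sync, no HW trigger (new args) */
	omap_set_dma_transfer_params(lch, OMAP_DMA_DATA_TYPE_S32, nelem, 1,
				     OMAP_DMA_SYNC_ELEMENT,
				     0 /* dma_trigger */,
				     0 /* dest-synchronized */);

	/* src_port/dest_port are only used on OMAP1; ei/fi of 0 = no indexing */
	omap_set_dma_src_params(lch, 0, OMAP_DMA_AMODE_POST_INC, src, 0, 0);
	omap_set_dma_dest_params(lch, 0, OMAP_DMA_AMODE_POST_INC, dst, 0, 0);

	omap_start_dma(lch);
	return lch;
}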
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index da7b65145658..f5cc21ad0956 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -6,6 +6,8 @@
6 | * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com> | 6 | * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com> |
7 | * Graphics DMA and LCD DMA graphics tranformations | 7 | * Graphics DMA and LCD DMA graphics tranformations |
8 | * by Imre Deak <imre.deak@nokia.com> | 8 | * by Imre Deak <imre.deak@nokia.com> |
9 | * OMAP2 support Copyright (C) 2004-2005 Texas Instruments, Inc. | ||
10 | * Merged to support both OMAP1 and OMAP2 by Tony Lindgren <tony@atomide.com> | ||
9 | * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc. | 11 | * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc. |
10 | * | 12 | * |
11 | * Support functions for the OMAP internal DMA channels. | 13 | * Support functions for the OMAP internal DMA channels. |
@@ -31,8 +33,15 @@
31 | 33 | ||
32 | #include <asm/arch/tc.h> | 34 | #include <asm/arch/tc.h> |
33 | 35 | ||
34 | #define OMAP_DMA_ACTIVE 0x01 | 36 | #define DEBUG_PRINTS |
37 | #undef DEBUG_PRINTS | ||
38 | #ifdef DEBUG_PRINTS | ||
39 | #define debug_printk(x) printk x | ||
40 | #else | ||
41 | #define debug_printk(x) | ||
42 | #endif | ||
35 | 43 | ||
44 | #define OMAP_DMA_ACTIVE 0x01 | ||
36 | #define OMAP_DMA_CCR_EN (1 << 7) | 45 | #define OMAP_DMA_CCR_EN (1 << 7) |
37 | 46 | ||
38 | #define OMAP_FUNC_MUX_ARM_BASE (0xfffe1000 + 0xec) | 47 | #define OMAP_FUNC_MUX_ARM_BASE (0xfffe1000 + 0xec) |
@@ -55,7 +64,7 @@ static int dma_chan_count;
55 | static spinlock_t dma_chan_lock; | 64 | static spinlock_t dma_chan_lock; |
56 | static struct omap_dma_lch dma_chan[OMAP_LOGICAL_DMA_CH_COUNT]; | 65 | static struct omap_dma_lch dma_chan[OMAP_LOGICAL_DMA_CH_COUNT]; |
57 | 66 | ||
58 | const static u8 dma_irq[OMAP_LOGICAL_DMA_CH_COUNT] = { | 67 | const static u8 omap1_dma_irq[OMAP_LOGICAL_DMA_CH_COUNT] = { |
59 | INT_DMA_CH0_6, INT_DMA_CH1_7, INT_DMA_CH2_8, INT_DMA_CH3, | 68 | INT_DMA_CH0_6, INT_DMA_CH1_7, INT_DMA_CH2_8, INT_DMA_CH3, |
60 | INT_DMA_CH4, INT_DMA_CH5, INT_1610_DMA_CH6, INT_1610_DMA_CH7, | 69 | INT_DMA_CH4, INT_DMA_CH5, INT_1610_DMA_CH6, INT_1610_DMA_CH7, |
61 | INT_1610_DMA_CH8, INT_1610_DMA_CH9, INT_1610_DMA_CH10, | 70 | INT_1610_DMA_CH8, INT_1610_DMA_CH9, INT_1610_DMA_CH10, |
@@ -63,6 +72,20 @@ const static u8 dma_irq[OMAP_LOGICAL_DMA_CH_COUNT] = {
63 | INT_1610_DMA_CH14, INT_1610_DMA_CH15, INT_DMA_LCD | 72 | INT_1610_DMA_CH14, INT_1610_DMA_CH15, INT_DMA_LCD |
64 | }; | 73 | }; |
65 | 74 | ||
75 | #define REVISIT_24XX() printk(KERN_ERR "FIXME: no %s on 24xx\n", \ | ||
76 | __FUNCTION__); | ||
77 | |||
78 | #ifdef CONFIG_ARCH_OMAP15XX | ||
79 | /* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */ | ||
80 | int omap_dma_in_1510_mode(void) | ||
81 | { | ||
82 | return enable_1510_mode; | ||
83 | } | ||
84 | #else | ||
85 | #define omap_dma_in_1510_mode() 0 | ||
86 | #endif | ||
87 | |||
88 | #ifdef CONFIG_ARCH_OMAP1 | ||
66 | static inline int get_gdma_dev(int req) | 89 | static inline int get_gdma_dev(int req) |
67 | { | 90 | { |
68 | u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4; | 91 | u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4; |
@@ -82,6 +105,9 @@ static inline void set_gdma_dev(int req, int dev)
82 | l |= (dev - 1) << shift; | 105 | l |= (dev - 1) << shift; |
83 | omap_writel(l, reg); | 106 | omap_writel(l, reg); |
84 | } | 107 | } |
108 | #else | ||
109 | #define set_gdma_dev(req, dev) do {} while (0) | ||
110 | #endif | ||
85 | 111 | ||
86 | static void clear_lch_regs(int lch) | 112 | static void clear_lch_regs(int lch) |
87 | { | 113 | { |
@@ -121,38 +147,62 @@ void omap_set_dma_priority(int dst_port, int priority)
121 | } | 147 | } |
122 | 148 | ||
123 | void omap_set_dma_transfer_params(int lch, int data_type, int elem_count, | 149 | void omap_set_dma_transfer_params(int lch, int data_type, int elem_count, |
124 | int frame_count, int sync_mode) | 150 | int frame_count, int sync_mode, |
151 | int dma_trigger, int src_or_dst_synch) | ||
125 | { | 152 | { |
126 | u16 w; | 153 | OMAP_DMA_CSDP_REG(lch) &= ~0x03; |
154 | OMAP_DMA_CSDP_REG(lch) |= data_type; | ||
127 | 155 | ||
128 | w = omap_readw(OMAP_DMA_CSDP(lch)); | 156 | if (cpu_class_is_omap1()) { |
129 | w &= ~0x03; | 157 | OMAP_DMA_CCR_REG(lch) &= ~(1 << 5); |
130 | w |= data_type; | 158 | if (sync_mode == OMAP_DMA_SYNC_FRAME) |
131 | omap_writew(w, OMAP_DMA_CSDP(lch)); | 159 | OMAP_DMA_CCR_REG(lch) |= 1 << 5; |
160 | |||
161 | OMAP1_DMA_CCR2_REG(lch) &= ~(1 << 2); | ||
162 | if (sync_mode == OMAP_DMA_SYNC_BLOCK) | ||
163 | OMAP1_DMA_CCR2_REG(lch) |= 1 << 2; | ||
164 | } | ||
165 | |||
166 | if (cpu_is_omap24xx() && dma_trigger) { | ||
167 | u32 val = OMAP_DMA_CCR_REG(lch); | ||
168 | |||
169 | if (dma_trigger > 63) | ||
170 | val |= 1 << 20; | ||
171 | if (dma_trigger > 31) | ||
172 | val |= 1 << 19; | ||
132 | 173 | ||
133 | w = omap_readw(OMAP_DMA_CCR(lch)); | 174 | val |= (dma_trigger & 0x1f); |
134 | w &= ~(1 << 5); | ||
135 | if (sync_mode == OMAP_DMA_SYNC_FRAME) | ||
136 | w |= 1 << 5; | ||
137 | omap_writew(w, OMAP_DMA_CCR(lch)); | ||
138 | 175 | ||
139 | w = omap_readw(OMAP_DMA_CCR2(lch)); | 176 | if (sync_mode & OMAP_DMA_SYNC_FRAME) |
140 | w &= ~(1 << 2); | 177 | val |= 1 << 5; |
141 | if (sync_mode == OMAP_DMA_SYNC_BLOCK) | ||
142 | w |= 1 << 2; | ||
143 | omap_writew(w, OMAP_DMA_CCR2(lch)); | ||
144 | 178 | ||
145 | omap_writew(elem_count, OMAP_DMA_CEN(lch)); | 179 | if (sync_mode & OMAP_DMA_SYNC_BLOCK) |
146 | omap_writew(frame_count, OMAP_DMA_CFN(lch)); | 180 | val |= 1 << 18; |
147 | 181 | ||
182 | if (src_or_dst_synch) | ||
183 | val |= 1 << 24; /* source synch */ | ||
184 | else | ||
185 | val &= ~(1 << 24); /* dest synch */ | ||
186 | |||
187 | OMAP_DMA_CCR_REG(lch) = val; | ||
188 | } | ||
189 | |||
190 | OMAP_DMA_CEN_REG(lch) = elem_count; | ||
191 | OMAP_DMA_CFN_REG(lch) = frame_count; | ||
148 | } | 192 | } |
193 | |||
149 | void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color) | 194 | void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color) |
150 | { | 195 | { |
151 | u16 w; | 196 | u16 w; |
152 | 197 | ||
153 | BUG_ON(omap_dma_in_1510_mode()); | 198 | BUG_ON(omap_dma_in_1510_mode()); |
154 | 199 | ||
155 | w = omap_readw(OMAP_DMA_CCR2(lch)) & ~0x03; | 200 | if (cpu_is_omap24xx()) { |
201 | REVISIT_24XX(); | ||
202 | return; | ||
203 | } | ||
204 | |||
205 | w = OMAP1_DMA_CCR2_REG(lch) & ~0x03; | ||
156 | switch (mode) { | 206 | switch (mode) { |
157 | case OMAP_DMA_CONSTANT_FILL: | 207 | case OMAP_DMA_CONSTANT_FILL: |
158 | w |= 0x01; | 208 | w |= 0x01; |
@@ -165,63 +215,84 @@ void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color)
165 | default: | 215 | default: |
166 | BUG(); | 216 | BUG(); |
167 | } | 217 | } |
168 | omap_writew(w, OMAP_DMA_CCR2(lch)); | 218 | OMAP1_DMA_CCR2_REG(lch) = w; |
169 | 219 | ||
170 | w = omap_readw(OMAP_DMA_LCH_CTRL(lch)) & ~0x0f; | 220 | w = OMAP1_DMA_LCH_CTRL_REG(lch) & ~0x0f; |
171 | /* Default is channel type 2D */ | 221 | /* Default is channel type 2D */ |
172 | if (mode) { | 222 | if (mode) { |
173 | omap_writew((u16)color, OMAP_DMA_COLOR_L(lch)); | 223 | OMAP1_DMA_COLOR_L_REG(lch) = (u16)color; |
174 | omap_writew((u16)(color >> 16), OMAP_DMA_COLOR_U(lch)); | 224 | OMAP1_DMA_COLOR_U_REG(lch) = (u16)(color >> 16); |
175 | w |= 1; /* Channel type G */ | 225 | w |= 1; /* Channel type G */ |
176 | } | 226 | } |
177 | omap_writew(w, OMAP_DMA_LCH_CTRL(lch)); | 227 | OMAP1_DMA_LCH_CTRL_REG(lch) = w; |
178 | } | 228 | } |
179 | 229 | ||
180 | 230 | /* Note that src_port is only for omap1 */ | |
181 | void omap_set_dma_src_params(int lch, int src_port, int src_amode, | 231 | void omap_set_dma_src_params(int lch, int src_port, int src_amode, |
182 | unsigned long src_start) | 232 | unsigned long src_start, |
233 | int src_ei, int src_fi) | ||
183 | { | 234 | { |
184 | u16 w; | 235 | if (cpu_class_is_omap1()) { |
236 | OMAP_DMA_CSDP_REG(lch) &= ~(0x1f << 2); | ||
237 | OMAP_DMA_CSDP_REG(lch) |= src_port << 2; | ||
238 | } | ||
239 | |||
240 | OMAP_DMA_CCR_REG(lch) &= ~(0x03 << 12); | ||
241 | OMAP_DMA_CCR_REG(lch) |= src_amode << 12; | ||
242 | |||
243 | if (cpu_class_is_omap1()) { | ||
244 | OMAP1_DMA_CSSA_U_REG(lch) = src_start >> 16; | ||
245 | OMAP1_DMA_CSSA_L_REG(lch) = src_start; | ||
246 | } | ||
185 | 247 | ||
186 | w = omap_readw(OMAP_DMA_CSDP(lch)); | 248 | if (cpu_is_omap24xx()) |
187 | w &= ~(0x1f << 2); | 249 | OMAP2_DMA_CSSA_REG(lch) = src_start; |
188 | w |= src_port << 2; | ||
189 | omap_writew(w, OMAP_DMA_CSDP(lch)); | ||
190 | 250 | ||
191 | w = omap_readw(OMAP_DMA_CCR(lch)); | 251 | OMAP_DMA_CSEI_REG(lch) = src_ei; |
192 | w &= ~(0x03 << 12); | 252 | OMAP_DMA_CSFI_REG(lch) = src_fi; |
193 | w |= src_amode << 12; | 253 | } |
194 | omap_writew(w, OMAP_DMA_CCR(lch)); | ||
195 | 254 | ||
196 | omap_writew(src_start >> 16, OMAP_DMA_CSSA_U(lch)); | 255 | void omap_set_dma_params(int lch, struct omap_dma_channel_params * params) |
197 | omap_writew(src_start, OMAP_DMA_CSSA_L(lch)); | 256 | { |
257 | omap_set_dma_transfer_params(lch, params->data_type, | ||
258 | params->elem_count, params->frame_count, | ||
259 | params->sync_mode, params->trigger, | ||
260 | params->src_or_dst_synch); | ||
261 | omap_set_dma_src_params(lch, params->src_port, | ||
262 | params->src_amode, params->src_start, | ||
263 | params->src_ei, params->src_fi); | ||
264 | |||
265 | omap_set_dma_dest_params(lch, params->dst_port, | ||
266 | params->dst_amode, params->dst_start, | ||
267 | params->dst_ei, params->dst_fi); | ||
198 | } | 268 | } |
199 | 269 | ||
200 | void omap_set_dma_src_index(int lch, int eidx, int fidx) | 270 | void omap_set_dma_src_index(int lch, int eidx, int fidx) |
201 | { | 271 | { |
202 | omap_writew(eidx, OMAP_DMA_CSEI(lch)); | 272 | if (cpu_is_omap24xx()) { |
203 | omap_writew(fidx, OMAP_DMA_CSFI(lch)); | 273 | REVISIT_24XX(); |
274 | return; | ||
275 | } | ||
276 | OMAP_DMA_CSEI_REG(lch) = eidx; | ||
277 | OMAP_DMA_CSFI_REG(lch) = fidx; | ||
204 | } | 278 | } |
205 | 279 | ||
206 | void omap_set_dma_src_data_pack(int lch, int enable) | 280 | void omap_set_dma_src_data_pack(int lch, int enable) |
207 | { | 281 | { |
208 | u16 w; | 282 | OMAP_DMA_CSDP_REG(lch) &= ~(1 << 6); |
209 | 283 | if (enable) | |
210 | w = omap_readw(OMAP_DMA_CSDP(lch)) & ~(1 << 6); | 284 | OMAP_DMA_CSDP_REG(lch) |= (1 << 6); |
211 | w |= enable ? (1 << 6) : 0; | ||
212 | omap_writew(w, OMAP_DMA_CSDP(lch)); | ||
213 | } | 285 | } |
214 | 286 | ||
215 | void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode) | 287 | void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode) |
216 | { | 288 | { |
217 | u16 w; | 289 | OMAP_DMA_CSDP_REG(lch) &= ~(0x03 << 7); |
218 | 290 | ||
219 | w = omap_readw(OMAP_DMA_CSDP(lch)) & ~(0x03 << 7); | ||
220 | switch (burst_mode) { | 291 | switch (burst_mode) { |
221 | case OMAP_DMA_DATA_BURST_DIS: | 292 | case OMAP_DMA_DATA_BURST_DIS: |
222 | break; | 293 | break; |
223 | case OMAP_DMA_DATA_BURST_4: | 294 | case OMAP_DMA_DATA_BURST_4: |
224 | w |= (0x01 << 7); | 295 | OMAP_DMA_CSDP_REG(lch) |= (0x02 << 7); |
225 | break; | 296 | break; |
226 | case OMAP_DMA_DATA_BURST_8: | 297 | case OMAP_DMA_DATA_BURST_8: |
227 | /* not supported by current hardware | 298 | /* not supported by current hardware |
@@ -231,110 +302,283 @@ void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
231 | default: | 302 | default: |
232 | BUG(); | 303 | BUG(); |
233 | } | 304 | } |
234 | omap_writew(w, OMAP_DMA_CSDP(lch)); | ||
235 | } | 305 | } |
236 | 306 | ||
307 | /* Note that dest_port is only for OMAP1 */ | ||
237 | void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode, | 308 | void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode, |
238 | unsigned long dest_start) | 309 | unsigned long dest_start, |
310 | int dst_ei, int dst_fi) | ||
239 | { | 311 | { |
240 | u16 w; | 312 | if (cpu_class_is_omap1()) { |
313 | OMAP_DMA_CSDP_REG(lch) &= ~(0x1f << 9); | ||
314 | OMAP_DMA_CSDP_REG(lch) |= dest_port << 9; | ||
315 | } | ||
241 | 316 | ||
242 | w = omap_readw(OMAP_DMA_CSDP(lch)); | 317 | OMAP_DMA_CCR_REG(lch) &= ~(0x03 << 14); |
243 | w &= ~(0x1f << 9); | 318 | OMAP_DMA_CCR_REG(lch) |= dest_amode << 14; |
244 | w |= dest_port << 9; | 319 | |
245 | omap_writew(w, OMAP_DMA_CSDP(lch)); | 320 | if (cpu_class_is_omap1()) { |
321 | OMAP1_DMA_CDSA_U_REG(lch) = dest_start >> 16; | ||
322 | OMAP1_DMA_CDSA_L_REG(lch) = dest_start; | ||
323 | } | ||
246 | 324 | ||
247 | w = omap_readw(OMAP_DMA_CCR(lch)); | 325 | if (cpu_is_omap24xx()) |
248 | w &= ~(0x03 << 14); | 326 | OMAP2_DMA_CDSA_REG(lch) = dest_start; |
249 | w |= dest_amode << 14; | ||
250 | omap_writew(w, OMAP_DMA_CCR(lch)); | ||
251 | 327 | ||
252 | omap_writew(dest_start >> 16, OMAP_DMA_CDSA_U(lch)); | 328 | OMAP_DMA_CDEI_REG(lch) = dst_ei; |
253 | omap_writew(dest_start, OMAP_DMA_CDSA_L(lch)); | 329 | OMAP_DMA_CDFI_REG(lch) = dst_fi; |
254 | } | 330 | } |
255 | 331 | ||
256 | void omap_set_dma_dest_index(int lch, int eidx, int fidx) | 332 | void omap_set_dma_dest_index(int lch, int eidx, int fidx) |
257 | { | 333 | { |
258 | omap_writew(eidx, OMAP_DMA_CDEI(lch)); | 334 | if (cpu_is_omap24xx()) { |
259 | omap_writew(fidx, OMAP_DMA_CDFI(lch)); | 335 | REVISIT_24XX(); |
336 | return; | ||
337 | } | ||
338 | OMAP_DMA_CDEI_REG(lch) = eidx; | ||
339 | OMAP_DMA_CDFI_REG(lch) = fidx; | ||
260 | } | 340 | } |
261 | 341 | ||
262 | void omap_set_dma_dest_data_pack(int lch, int enable) | 342 | void omap_set_dma_dest_data_pack(int lch, int enable) |
263 | { | 343 | { |
264 | u16 w; | 344 | OMAP_DMA_CSDP_REG(lch) &= ~(1 << 13); |
265 | 345 | if (enable) | |
266 | w = omap_readw(OMAP_DMA_CSDP(lch)) & ~(1 << 13); | 346 | OMAP_DMA_CSDP_REG(lch) |= 1 << 13; |
267 | w |= enable ? (1 << 13) : 0; | ||
268 | omap_writew(w, OMAP_DMA_CSDP(lch)); | ||
269 | } | 347 | } |
270 | 348 | ||
271 | void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode) | 349 | void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode) |
272 | { | 350 | { |
273 | u16 w; | 351 | OMAP_DMA_CSDP_REG(lch) &= ~(0x03 << 14); |
274 | 352 | ||
275 | w = omap_readw(OMAP_DMA_CSDP(lch)) & ~(0x03 << 14); | ||
276 | switch (burst_mode) { | 353 | switch (burst_mode) { |
277 | case OMAP_DMA_DATA_BURST_DIS: | 354 | case OMAP_DMA_DATA_BURST_DIS: |
278 | break; | 355 | break; |
279 | case OMAP_DMA_DATA_BURST_4: | 356 | case OMAP_DMA_DATA_BURST_4: |
280 | w |= (0x01 << 14); | 357 | OMAP_DMA_CSDP_REG(lch) |= (0x02 << 14); |
281 | break; | 358 | break; |
282 | case OMAP_DMA_DATA_BURST_8: | 359 | case OMAP_DMA_DATA_BURST_8: |
283 | w |= (0x03 << 14); | 360 | OMAP_DMA_CSDP_REG(lch) |= (0x03 << 14); |
284 | break; | 361 | break; |
285 | default: | 362 | default: |
286 | printk(KERN_ERR "Invalid DMA burst mode\n"); | 363 | printk(KERN_ERR "Invalid DMA burst mode\n"); |
287 | BUG(); | 364 | BUG(); |
288 | return; | 365 | return; |
289 | } | 366 | } |
290 | omap_writew(w, OMAP_DMA_CSDP(lch)); | ||
291 | } | 367 | } |
292 | 368 | ||
293 | static inline void init_intr(int lch) | 369 | static inline void omap_enable_channel_irq(int lch) |
294 | { | 370 | { |
295 | u16 w; | 371 | u32 status; |
296 | 372 | ||
297 | /* Read CSR to make sure it's cleared. */ | 373 | /* Read CSR to make sure it's cleared. */ |
298 | w = omap_readw(OMAP_DMA_CSR(lch)); | 374 | status = OMAP_DMA_CSR_REG(lch); |
375 | |||
299 | /* Enable some nice interrupts. */ | 376 | /* Enable some nice interrupts. */ |
300 | omap_writew(dma_chan[lch].enabled_irqs, OMAP_DMA_CICR(lch)); | 377 | OMAP_DMA_CICR_REG(lch) = dma_chan[lch].enabled_irqs; |
378 | |||
301 | dma_chan[lch].flags |= OMAP_DMA_ACTIVE; | 379 | dma_chan[lch].flags |= OMAP_DMA_ACTIVE; |
302 | } | 380 | } |
303 | 381 | ||
304 | static inline void enable_lnk(int lch) | 382 | static void omap_disable_channel_irq(int lch) |
305 | { | 383 | { |
306 | u16 w; | 384 | if (cpu_is_omap24xx()) |
385 | OMAP_DMA_CICR_REG(lch) = 0; | ||
386 | } | ||
387 | |||
388 | void omap_enable_dma_irq(int lch, u16 bits) | ||
389 | { | ||
390 | dma_chan[lch].enabled_irqs |= bits; | ||
391 | } | ||
307 | 392 | ||
308 | /* Clear the STOP_LNK bits */ | 393 | void omap_disable_dma_irq(int lch, u16 bits) |
309 | w = omap_readw(OMAP_DMA_CLNK_CTRL(lch)); | 394 | { |
310 | w &= ~(1 << 14); | 395 | dma_chan[lch].enabled_irqs &= ~bits; |
311 | omap_writew(w, OMAP_DMA_CLNK_CTRL(lch)); | 396 | } |
397 | |||
398 | static inline void enable_lnk(int lch) | ||
399 | { | ||
400 | if (cpu_class_is_omap1()) | ||
401 | OMAP_DMA_CLNK_CTRL_REG(lch) &= ~(1 << 14); | ||
312 | 402 | ||
313 | /* And set the ENABLE_LNK bits */ | 403 | /* Set the ENABLE_LNK bits */ |
314 | if (dma_chan[lch].next_lch != -1) | 404 | if (dma_chan[lch].next_lch != -1) |
315 | omap_writew(dma_chan[lch].next_lch | (1 << 15), | 405 | OMAP_DMA_CLNK_CTRL_REG(lch) = |
316 | OMAP_DMA_CLNK_CTRL(lch)); | 406 | dma_chan[lch].next_lch | (1 << 15); |
317 | } | 407 | } |
318 | 408 | ||
319 | static inline void disable_lnk(int lch) | 409 | static inline void disable_lnk(int lch) |
320 | { | 410 | { |
321 | u16 w; | ||
322 | |||
323 | /* Disable interrupts */ | 411 | /* Disable interrupts */ |
324 | omap_writew(0, OMAP_DMA_CICR(lch)); | 412 | if (cpu_class_is_omap1()) { |
413 | OMAP_DMA_CICR_REG(lch) = 0; | ||
414 | /* Set the STOP_LNK bit */ | ||
415 | OMAP_DMA_CLNK_CTRL_REG(lch) |= 1 << 14; | ||
416 | } | ||
325 | 417 | ||
326 | /* Set the STOP_LNK bit */ | 418 | if (cpu_is_omap24xx()) { |
327 | w = omap_readw(OMAP_DMA_CLNK_CTRL(lch)); | 419 | omap_disable_channel_irq(lch); |
328 | w |= (1 << 14); | 420 | /* Clear the ENABLE_LNK bit */ |
329 | w = omap_writew(w, OMAP_DMA_CLNK_CTRL(lch)); | 421 | OMAP_DMA_CLNK_CTRL_REG(lch) &= ~(1 << 15); |
422 | } | ||
330 | 423 | ||
331 | dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE; | 424 | dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE; |
332 | } | 425 | } |
333 | 426 | ||
334 | void omap_start_dma(int lch) | 427 | static inline void omap2_enable_irq_lch(int lch) |
335 | { | 428 | { |
336 | u16 w; | 429 | u32 val; |
430 | |||
431 | if (!cpu_is_omap24xx()) | ||
432 | return; | ||
433 | |||
434 | val = omap_readl(OMAP_DMA4_IRQENABLE_L0); | ||
435 | val |= 1 << lch; | ||
436 | omap_writel(val, OMAP_DMA4_IRQENABLE_L0); | ||
437 | } | ||
438 | |||
439 | int omap_request_dma(int dev_id, const char *dev_name, | ||
440 | void (* callback)(int lch, u16 ch_status, void *data), | ||
441 | void *data, int *dma_ch_out) | ||
442 | { | ||
443 | int ch, free_ch = -1; | ||
444 | unsigned long flags; | ||
445 | struct omap_dma_lch *chan; | ||
446 | |||
447 | spin_lock_irqsave(&dma_chan_lock, flags); | ||
448 | for (ch = 0; ch < dma_chan_count; ch++) { | ||
449 | if (free_ch == -1 && dma_chan[ch].dev_id == -1) { | ||
450 | free_ch = ch; | ||
451 | if (dev_id == 0) | ||
452 | break; | ||
453 | } | ||
454 | } | ||
455 | if (free_ch == -1) { | ||
456 | spin_unlock_irqrestore(&dma_chan_lock, flags); | ||
457 | return -EBUSY; | ||
458 | } | ||
459 | chan = dma_chan + free_ch; | ||
460 | chan->dev_id = dev_id; | ||
461 | |||
462 | if (cpu_class_is_omap1()) | ||
463 | clear_lch_regs(free_ch); | ||
337 | 464 | ||
465 | if (cpu_is_omap24xx()) | ||
466 | omap_clear_dma(free_ch); | ||
467 | |||
468 | spin_unlock_irqrestore(&dma_chan_lock, flags); | ||
469 | |||
470 | chan->dev_name = dev_name; | ||
471 | chan->callback = callback; | ||
472 | chan->data = data; | ||
473 | chan->enabled_irqs = OMAP_DMA_TOUT_IRQ | OMAP_DMA_DROP_IRQ | | ||
474 | OMAP_DMA_BLOCK_IRQ; | ||
475 | |||
476 | if (cpu_is_omap24xx()) | ||
477 | chan->enabled_irqs |= OMAP2_DMA_TRANS_ERR_IRQ; | ||
478 | |||
479 | if (cpu_is_omap16xx()) { | ||
480 | /* If the sync device is set, configure it dynamically. */ | ||
481 | if (dev_id != 0) { | ||
482 | set_gdma_dev(free_ch + 1, dev_id); | ||
483 | dev_id = free_ch + 1; | ||
484 | } | ||
485 | /* Disable the 1510 compatibility mode and set the sync device | ||
486 | * id. */ | ||
487 | OMAP_DMA_CCR_REG(free_ch) = dev_id | (1 << 10); | ||
488 | } else if (cpu_is_omap730() || cpu_is_omap15xx()) { | ||
489 | OMAP_DMA_CCR_REG(free_ch) = dev_id; | ||
490 | } | ||
491 | |||
492 | if (cpu_is_omap24xx()) { | ||
493 | omap2_enable_irq_lch(free_ch); | ||
494 | |||
495 | omap_enable_channel_irq(free_ch); | ||
496 | /* Clear the CSR register and IRQ status register */ | ||
497 | OMAP_DMA_CSR_REG(free_ch) = 0x0; | ||
498 | omap_writel(~0x0, OMAP_DMA4_IRQSTATUS_L0); | ||
499 | } | ||
500 | |||
501 | *dma_ch_out = free_ch; | ||
502 | |||
503 | return 0; | ||
504 | } | ||
505 | |||
506 | void omap_free_dma(int lch) | ||
507 | { | ||
508 | unsigned long flags; | ||
509 | |||
510 | spin_lock_irqsave(&dma_chan_lock, flags); | ||
511 | if (dma_chan[lch].dev_id == -1) { | ||
512 | printk("omap_dma: trying to free nonallocated DMA channel %d\n", | ||
513 | lch); | ||
514 | spin_unlock_irqrestore(&dma_chan_lock, flags); | ||
515 | return; | ||
516 | } | ||
517 | dma_chan[lch].dev_id = -1; | ||
518 | dma_chan[lch].next_lch = -1; | ||
519 | dma_chan[lch].callback = NULL; | ||
520 | spin_unlock_irqrestore(&dma_chan_lock, flags); | ||
521 | |||
522 | if (cpu_class_is_omap1()) { | ||
523 | /* Disable all DMA interrupts for the channel. */ | ||
524 | OMAP_DMA_CICR_REG(lch) = 0; | ||
525 | /* Make sure the DMA transfer is stopped. */ | ||
526 | OMAP_DMA_CCR_REG(lch) = 0; | ||
527 | } | ||
528 | |||
529 | if (cpu_is_omap24xx()) { | ||
530 | u32 val; | ||
531 | /* Disable interrupts */ | ||
532 | val = omap_readl(OMAP_DMA4_IRQENABLE_L0); | ||
533 | val &= ~(1 << lch); | ||
534 | omap_writel(val, OMAP_DMA4_IRQENABLE_L0); | ||
535 | |||
536 | /* Clear the CSR register and IRQ status register */ | ||
537 | OMAP_DMA_CSR_REG(lch) = 0x0; | ||
538 | |||
539 | val = omap_readl(OMAP_DMA4_IRQSTATUS_L0); | ||
540 | val |= 1 << lch; | ||
541 | omap_writel(val, OMAP_DMA4_IRQSTATUS_L0); | ||
542 | |||
543 | /* Disable all DMA interrupts for the channel. */ | ||
544 | OMAP_DMA_CICR_REG(lch) = 0; | ||
545 | |||
546 | /* Make sure the DMA transfer is stopped. */ | ||
547 | OMAP_DMA_CCR_REG(lch) = 0; | ||
548 | omap_clear_dma(lch); | ||
549 | } | ||
550 | } | ||
551 | |||
552 | /* | ||
553 | * Clears any DMA state so the DMA engine is ready to restart with new buffers | ||
554 | * through omap_start_dma(). Any buffers in flight are discarded. | ||
555 | */ | ||
556 | void omap_clear_dma(int lch) | ||
557 | { | ||
558 | unsigned long flags; | ||
559 | |||
560 | local_irq_save(flags); | ||
561 | |||
562 | if (cpu_class_is_omap1()) { | ||
563 | int status; | ||
564 | OMAP_DMA_CCR_REG(lch) &= ~OMAP_DMA_CCR_EN; | ||
565 | |||
566 | /* Clear pending interrupts */ | ||
567 | status = OMAP_DMA_CSR_REG(lch); | ||
568 | } | ||
569 | |||
570 | if (cpu_is_omap24xx()) { | ||
571 | int i; | ||
572 | u32 lch_base = OMAP24XX_DMA_BASE + lch * 0x60 + 0x80; | ||
573 | for (i = 0; i < 0x44; i += 4) | ||
574 | omap_writel(0, lch_base + i); | ||
575 | } | ||
576 | |||
577 | local_irq_restore(flags); | ||
578 | } | ||
579 | |||
580 | void omap_start_dma(int lch) | ||
581 | { | ||
338 | if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) { | 582 | if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) { |
339 | int next_lch, cur_lch; | 583 | int next_lch, cur_lch; |
340 | char dma_chan_link_map[OMAP_LOGICAL_DMA_CH_COUNT]; | 584 | char dma_chan_link_map[OMAP_LOGICAL_DMA_CH_COUNT]; |
@@ -348,31 +592,37 @@ void omap_start_dma(int lch)
348 | do { | 592 | do { |
349 | next_lch = dma_chan[cur_lch].next_lch; | 593 | next_lch = dma_chan[cur_lch].next_lch; |
350 | 594 | ||
351 | /* The loop case: we've been here already */ | 595 | /* The loop case: we've been here already */ |
352 | if (dma_chan_link_map[cur_lch]) | 596 | if (dma_chan_link_map[cur_lch]) |
353 | break; | 597 | break; |
354 | /* Mark the current channel */ | 598 | /* Mark the current channel */ |
355 | dma_chan_link_map[cur_lch] = 1; | 599 | dma_chan_link_map[cur_lch] = 1; |
356 | 600 | ||
357 | enable_lnk(cur_lch); | 601 | enable_lnk(cur_lch); |
358 | init_intr(cur_lch); | 602 | omap_enable_channel_irq(cur_lch); |
359 | 603 | ||
360 | cur_lch = next_lch; | 604 | cur_lch = next_lch; |
361 | } while (next_lch != -1); | 605 | } while (next_lch != -1); |
606 | } else if (cpu_is_omap24xx()) { | ||
607 | /* Errata: Need to write lch even if not using chaining */ | ||
608 | OMAP_DMA_CLNK_CTRL_REG(lch) = lch; | ||
362 | } | 609 | } |
363 | 610 | ||
364 | init_intr(lch); | 611 | omap_enable_channel_irq(lch); |
612 | |||
613 | /* Errata: On ES2.0 BUFFERING disable must be set. | ||
614 | * This will always fail on ES1.0 */ | ||
615 | if (cpu_is_omap24xx()) { | ||
616 | OMAP_DMA_CCR_REG(lch) |= OMAP_DMA_CCR_EN; | ||
617 | } | ||
618 | |||
619 | OMAP_DMA_CCR_REG(lch) |= OMAP_DMA_CCR_EN; | ||
365 | 620 | ||
366 | w = omap_readw(OMAP_DMA_CCR(lch)); | ||
367 | w |= OMAP_DMA_CCR_EN; | ||
368 | omap_writew(w, OMAP_DMA_CCR(lch)); | ||
369 | dma_chan[lch].flags |= OMAP_DMA_ACTIVE; | 621 | dma_chan[lch].flags |= OMAP_DMA_ACTIVE; |
370 | } | 622 | } |
371 | 623 | ||
372 | void omap_stop_dma(int lch) | 624 | void omap_stop_dma(int lch) |
373 | { | 625 | { |
374 | u16 w; | ||
375 | |||
376 | if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) { | 626 | if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) { |
377 | int next_lch, cur_lch = lch; | 627 | int next_lch, cur_lch = lch; |
378 | char dma_chan_link_map[OMAP_LOGICAL_DMA_CH_COUNT]; | 628 | char dma_chan_link_map[OMAP_LOGICAL_DMA_CH_COUNT]; |
@@ -393,146 +643,83 @@ void omap_stop_dma(int lch)
393 | 643 | ||
394 | return; | 644 | return; |
395 | } | 645 | } |
646 | |||
396 | /* Disable all interrupts on the channel */ | 647 | /* Disable all interrupts on the channel */ |
397 | omap_writew(0, OMAP_DMA_CICR(lch)); | 648 | if (cpu_class_is_omap1()) |
649 | OMAP_DMA_CICR_REG(lch) = 0; | ||
398 | 650 | ||
399 | w = omap_readw(OMAP_DMA_CCR(lch)); | 651 | OMAP_DMA_CCR_REG(lch) &= ~OMAP_DMA_CCR_EN; |
400 | w &= ~OMAP_DMA_CCR_EN; | ||
401 | omap_writew(w, OMAP_DMA_CCR(lch)); | ||
402 | dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE; | 652 | dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE; |
403 | } | 653 | } |
404 | 654 | ||
405 | void omap_enable_dma_irq(int lch, u16 bits) | 655 | /* |
656 | * Returns current physical source address for the given DMA channel. | ||
657 | * If the channel is running the caller must disable interrupts prior calling | ||
658 | * this function and process the returned value before re-enabling interrupt to | ||
659 | * prevent races with the interrupt handler. Note that in continuous mode there | ||
660 | * is a chance for CSSA_L register overflow inbetween the two reads resulting | ||
661 | * in incorrect return value. | ||
662 | */ | ||
663 | dma_addr_t omap_get_dma_src_pos(int lch) | ||
406 | { | 664 | { |
407 | dma_chan[lch].enabled_irqs |= bits; | 665 | dma_addr_t offset; |
408 | } | ||
409 | 666 | ||
410 | void omap_disable_dma_irq(int lch, u16 bits) | 667 | if (cpu_class_is_omap1()) |
411 | { | 668 | offset = (dma_addr_t) (OMAP1_DMA_CSSA_L_REG(lch) | |
412 | dma_chan[lch].enabled_irqs &= ~bits; | 669 | (OMAP1_DMA_CSSA_U_REG(lch) << 16)); |
413 | } | ||
414 | 670 | ||
415 | static int dma_handle_ch(int ch) | 671 | if (cpu_is_omap24xx()) |
416 | { | 672 | offset = OMAP_DMA_CSAC_REG(lch); |
417 | u16 csr; | ||
418 | 673 | ||
419 | if (enable_1510_mode && ch >= 6) { | 674 | return offset; |
420 | csr = dma_chan[ch].saved_csr; | ||
421 | dma_chan[ch].saved_csr = 0; | ||
422 | } else | ||
423 | csr = omap_readw(OMAP_DMA_CSR(ch)); | ||
424 | if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) { | ||
425 | dma_chan[ch + 6].saved_csr = csr >> 7; | ||
426 | csr &= 0x7f; | ||
427 | } | ||
428 | if ((csr & 0x3f) == 0) | ||
429 | return 0; | ||
430 | if (unlikely(dma_chan[ch].dev_id == -1)) { | ||
431 | printk(KERN_WARNING "Spurious interrupt from DMA channel %d (CSR %04x)\n", | ||
432 | ch, csr); | ||
433 | return 0; | ||
434 | } | ||
435 | if (unlikely(csr & OMAP_DMA_TOUT_IRQ)) | ||
436 | printk(KERN_WARNING "DMA timeout with device %d\n", dma_chan[ch].dev_id); | ||
437 | if (unlikely(csr & OMAP_DMA_DROP_IRQ)) | ||
438 | printk(KERN_WARNING "DMA synchronization event drop occurred with device %d\n", | ||
439 | dma_chan[ch].dev_id); | ||
440 | if (likely(csr & OMAP_DMA_BLOCK_IRQ)) | ||
441 | dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE; | ||
442 | if (likely(dma_chan[ch].callback != NULL)) | ||
443 | dma_chan[ch].callback(ch, csr, dma_chan[ch].data); | ||
444 | return 1; | ||
445 | } | 675 | } |
446 | 676 | ||
447 | static irqreturn_t dma_irq_handler(int irq, void *dev_id, struct pt_regs *regs) | 677 | /* |
678 | * Returns current physical destination address for the given DMA channel. | ||
679 | * If the channel is running the caller must disable interrupts prior calling | ||
680 | * this function and process the returned value before re-enabling interrupt to | ||
681 | * prevent races with the interrupt handler. Note that in continuous mode there | ||
682 | * is a chance for CDSA_L register overflow inbetween the two reads resulting | ||
683 | * in incorrect return value. | ||
684 | */ | ||
685 | dma_addr_t omap_get_dma_dst_pos(int lch) | ||
448 | { | 686 | { |
449 | int ch = ((int) dev_id) - 1; | 687 | dma_addr_t offset; |
450 | int handled = 0; | ||
451 | 688 | ||
452 | for (;;) { | 689 | if (cpu_class_is_omap1()) |
453 | int handled_now = 0; | 690 | offset = (dma_addr_t) (OMAP1_DMA_CDSA_L_REG(lch) | |
691 | (OMAP1_DMA_CDSA_U_REG(lch) << 16)); | ||
454 | 692 | ||
455 | handled_now += dma_handle_ch(ch); | 693 | if (cpu_is_omap24xx()) |
456 | if (enable_1510_mode && dma_chan[ch + 6].saved_csr) | 694 | offset = OMAP2_DMA_CDSA_REG(lch); |
457 | handled_now += dma_handle_ch(ch + 6); | ||
458 | if (!handled_now) | ||
459 | break; | ||
460 | handled += handled_now; | ||
461 | } | ||
462 | 695 | ||
463 | return handled ? IRQ_HANDLED : IRQ_NONE; | 696 | return offset; |
464 | } | 697 | } |
465 | 698 | ||
466 | int omap_request_dma(int dev_id, const char *dev_name, | 699 | /* |
467 | void (* callback)(int lch, u16 ch_status, void *data), | 700 | * Returns current source transfer counting for the given DMA channel. |
468 | void *data, int *dma_ch_out) | 701 | * Can be used to monitor the progress of a transfer inside a block. |
702 | * It must be called with disabled interrupts. | ||
703 | */ | ||
704 | int omap_get_dma_src_addr_counter(int lch) | ||
469 | { | 705 | { |
470 | int ch, free_ch = -1; | 706 | return (dma_addr_t) OMAP_DMA_CSAC_REG(lch); |
471 | unsigned long flags; | ||
472 | struct omap_dma_lch *chan; | ||
473 | |||
474 | spin_lock_irqsave(&dma_chan_lock, flags); | ||
475 | for (ch = 0; ch < dma_chan_count; ch++) { | ||
476 | if (free_ch == -1 && dma_chan[ch].dev_id == -1) { | ||
477 | free_ch = ch; | ||
478 | if (dev_id == 0) | ||
479 | break; | ||
480 | } | ||
481 | } | ||
482 | if (free_ch == -1) { | ||
483 | spin_unlock_irqrestore(&dma_chan_lock, flags); | ||
484 | return -EBUSY; | ||
485 | } | ||
486 | chan = dma_chan + free_ch; | ||
487 | chan->dev_id = dev_id; | ||
488 | clear_lch_regs(free_ch); | ||
489 | spin_unlock_irqrestore(&dma_chan_lock, flags); | ||
490 | |||
491 | chan->dev_id = dev_id; | ||
492 | chan->dev_name = dev_name; | ||
493 | chan->callback = callback; | ||
494 | chan->data = data; | ||
495 | chan->enabled_irqs = OMAP_DMA_TOUT_IRQ | OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ; | ||
496 | |||
497 | if (cpu_is_omap16xx()) { | ||
498 | /* If the sync device is set, configure it dynamically. */ | ||
499 | if (dev_id != 0) { | ||
500 | set_gdma_dev(free_ch + 1, dev_id); | ||
501 | dev_id = free_ch + 1; | ||
502 | } | ||
503 | /* Disable the 1510 compatibility mode and set the sync device | ||
504 | * id. */ | ||
505 | omap_writew(dev_id | (1 << 10), OMAP_DMA_CCR(free_ch)); | ||
506 | } else { | ||
507 | omap_writew(dev_id, OMAP_DMA_CCR(free_ch)); | ||
508 | } | ||
509 | *dma_ch_out = free_ch; | ||
510 | |||
511 | return 0; | ||
512 | } | 707 | } |
513 | 708 | ||
514 | void omap_free_dma(int ch) | 709 | int omap_dma_running(void) |
515 | { | 710 | { |
516 | unsigned long flags; | 711 | int lch; |
517 | 712 | ||
518 | spin_lock_irqsave(&dma_chan_lock, flags); | 713 | /* Check if LCD DMA is running */ |
519 | if (dma_chan[ch].dev_id == -1) { | 714 | if (cpu_is_omap16xx()) |
520 | printk("omap_dma: trying to free nonallocated DMA channel %d\n", ch); | 715 | if (omap_readw(OMAP1610_DMA_LCD_CCR) & OMAP_DMA_CCR_EN) |
521 | spin_unlock_irqrestore(&dma_chan_lock, flags); | 716 | return 1; |
522 | return; | ||
523 | } | ||
524 | dma_chan[ch].dev_id = -1; | ||
525 | spin_unlock_irqrestore(&dma_chan_lock, flags); | ||
526 | 717 | ||
527 | /* Disable all DMA interrupts for the channel. */ | 718 | for (lch = 0; lch < dma_chan_count; lch++) |
528 | omap_writew(0, OMAP_DMA_CICR(ch)); | 719 | if (OMAP_DMA_CCR_REG(lch) & OMAP_DMA_CCR_EN) |
529 | /* Make sure the DMA transfer is stopped. */ | 720 | return 1; |
530 | omap_writew(0, OMAP_DMA_CCR(ch)); | ||
531 | } | ||
532 | 721 | ||
533 | int omap_dma_in_1510_mode(void) | 722 | return 0; |
534 | { | ||
535 | return enable_1510_mode; | ||
536 | } | 723 | } |
537 | 724 | ||
538 | /* | 725 | /* |
@@ -550,7 +737,8 @@ void omap_dma_link_lch (int lch_head, int lch_queue)
550 | 737 | ||
551 | if ((dma_chan[lch_head].dev_id == -1) || | 738 | if ((dma_chan[lch_head].dev_id == -1) || |
552 | (dma_chan[lch_queue].dev_id == -1)) { | 739 | (dma_chan[lch_queue].dev_id == -1)) { |
553 | printk(KERN_ERR "omap_dma: trying to link non requested channels\n"); | 740 | printk(KERN_ERR "omap_dma: trying to link " |
741 | "non requested channels\n"); | ||
554 | dump_stack(); | 742 | dump_stack(); |
555 | } | 743 | } |
556 | 744 | ||
@@ -570,20 +758,149 @@ void omap_dma_unlink_lch (int lch_head, int lch_queue)
570 | 758 | ||
571 | if (dma_chan[lch_head].next_lch != lch_queue || | 759 | if (dma_chan[lch_head].next_lch != lch_queue || |
572 | dma_chan[lch_head].next_lch == -1) { | 760 | dma_chan[lch_head].next_lch == -1) { |
573 | printk(KERN_ERR "omap_dma: trying to unlink non linked channels\n"); | 761 | printk(KERN_ERR "omap_dma: trying to unlink " |
762 | "non linked channels\n"); | ||
574 | dump_stack(); | 763 | dump_stack(); |
575 | } | 764 | } |
576 | 765 | ||
577 | 766 | ||
578 | if ((dma_chan[lch_head].flags & OMAP_DMA_ACTIVE) || | 767 | if ((dma_chan[lch_head].flags & OMAP_DMA_ACTIVE) || |
579 | (dma_chan[lch_head].flags & OMAP_DMA_ACTIVE)) { | 768 | (dma_chan[lch_head].flags & OMAP_DMA_ACTIVE)) { |
580 | printk(KERN_ERR "omap_dma: You need to stop the DMA channels before unlinking\n"); | 769 | printk(KERN_ERR "omap_dma: You need to stop the DMA channels " |
770 | "before unlinking\n"); | ||
581 | dump_stack(); | 771 | dump_stack(); |
582 | } | 772 | } |
583 | 773 | ||
584 | dma_chan[lch_head].next_lch = -1; | 774 | dma_chan[lch_head].next_lch = -1; |
585 | } | 775 | } |
586 | 776 | ||
777 | /*----------------------------------------------------------------------------*/ | ||
778 | |||
779 | #ifdef CONFIG_ARCH_OMAP1 | ||
780 | |||
781 | static int omap1_dma_handle_ch(int ch) | ||
782 | { | ||
783 | u16 csr; | ||
784 | |||
785 | if (enable_1510_mode && ch >= 6) { | ||
786 | csr = dma_chan[ch].saved_csr; | ||
787 | dma_chan[ch].saved_csr = 0; | ||
788 | } else | ||
789 | csr = OMAP_DMA_CSR_REG(ch); | ||
790 | if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) { | ||
791 | dma_chan[ch + 6].saved_csr = csr >> 7; | ||
792 | csr &= 0x7f; | ||
793 | } | ||
794 | if ((csr & 0x3f) == 0) | ||
795 | return 0; | ||
796 | if (unlikely(dma_chan[ch].dev_id == -1)) { | ||
797 | printk(KERN_WARNING "Spurious interrupt from DMA channel " | ||
798 | "%d (CSR %04x)\n", ch, csr); | ||
799 | return 0; | ||
800 | } | ||
801 | if (unlikely(csr & OMAP_DMA_TOUT_IRQ)) | ||
802 | printk(KERN_WARNING "DMA timeout with device %d\n", | ||
803 | dma_chan[ch].dev_id); | ||
804 | if (unlikely(csr & OMAP_DMA_DROP_IRQ)) | ||
805 | printk(KERN_WARNING "DMA synchronization event drop occurred " | ||
806 | "with device %d\n", dma_chan[ch].dev_id); | ||
807 | if (likely(csr & OMAP_DMA_BLOCK_IRQ)) | ||
808 | dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE; | ||
809 | if (likely(dma_chan[ch].callback != NULL)) | ||
810 | dma_chan[ch].callback(ch, csr, dma_chan[ch].data); | ||
811 | return 1; | ||
812 | } | ||
813 | |||
814 | static irqreturn_t omap1_dma_irq_handler(int irq, void *dev_id, | ||
815 | struct pt_regs *regs) | ||
816 | { | ||
817 | int ch = ((int) dev_id) - 1; | ||
818 | int handled = 0; | ||
819 | |||
820 | for (;;) { | ||
821 | int handled_now = 0; | ||
822 | |||
823 | handled_now += omap1_dma_handle_ch(ch); | ||
824 | if (enable_1510_mode && dma_chan[ch + 6].saved_csr) | ||
825 | handled_now += omap1_dma_handle_ch(ch + 6); | ||
826 | if (!handled_now) | ||
827 | break; | ||
828 | handled += handled_now; | ||
829 | } | ||
830 | |||
831 | return handled ? IRQ_HANDLED : IRQ_NONE; | ||
832 | } | ||
833 | |||
834 | #else | ||
835 | #define omap1_dma_irq_handler NULL | ||
836 | #endif | ||
837 | |||
838 | #ifdef CONFIG_ARCH_OMAP2 | ||
839 | |||
840 | static int omap2_dma_handle_ch(int ch) | ||
841 | { | ||
842 | u32 status = OMAP_DMA_CSR_REG(ch); | ||
843 | u32 val; | ||
844 | |||
845 | if (!status) | ||
846 | return 0; | ||
847 | if (unlikely(dma_chan[ch].dev_id == -1)) | ||
848 | return 0; | ||
849 | /* REVISIT: According to 24xx TRM, there's no TOUT_IE */ | ||
850 | if (unlikely(status & OMAP_DMA_TOUT_IRQ)) | ||
851 | printk(KERN_INFO "DMA timeout with device %d\n", | ||
852 | dma_chan[ch].dev_id); | ||
853 | if (unlikely(status & OMAP_DMA_DROP_IRQ)) | ||
854 | printk(KERN_INFO | ||
855 | "DMA synchronization event drop occurred with device " | ||
856 | "%d\n", dma_chan[ch].dev_id); | ||
857 | |||
858 | if (unlikely(status & OMAP2_DMA_TRANS_ERR_IRQ)) | ||
859 | printk(KERN_INFO "DMA transaction error with device %d\n", | ||
860 | dma_chan[ch].dev_id); | ||
861 | |||
862 | OMAP_DMA_CSR_REG(ch) = 0x20; | ||
863 | |||
864 | val = omap_readl(OMAP_DMA4_IRQSTATUS_L0); | ||
865 | /* ch in this function is from 0-31 while in register it is 1-32 */ | ||
866 | val = 1 << (ch); | ||
867 | omap_writel(val, OMAP_DMA4_IRQSTATUS_L0); | ||
868 | |||
869 | if (likely(dma_chan[ch].callback != NULL)) | ||
870 | dma_chan[ch].callback(ch, status, dma_chan[ch].data); | ||
871 | |||
872 | return 0; | ||
873 | } | ||
874 | |||
875 | /* STATUS register count is from 1-32 while our is 0-31 */ | ||
876 | static irqreturn_t omap2_dma_irq_handler(int irq, void *dev_id, | ||
877 | struct pt_regs *regs) | ||
878 | { | ||
879 | u32 val; | ||
880 | int i; | ||
881 | |||
882 | val = omap_readl(OMAP_DMA4_IRQSTATUS_L0); | ||
883 | |||
884 | for (i = 1; i <= OMAP_LOGICAL_DMA_CH_COUNT; i++) { | ||
885 | int active = val & (1 << (i - 1)); | ||
886 | if (active) | ||
887 | omap2_dma_handle_ch(i - 1); | ||
888 | } | ||
889 | |||
890 | return IRQ_HANDLED; | ||
891 | } | ||
892 | |||
893 | static struct irqaction omap24xx_dma_irq = { | ||
894 | .name = "DMA", | ||
895 | .handler = omap2_dma_irq_handler, | ||
896 | .flags = SA_INTERRUPT | ||
897 | }; | ||
898 | |||
899 | #else | ||
900 | static struct irqaction omap24xx_dma_irq; | ||
901 | #endif | ||
902 | |||
903 | /*----------------------------------------------------------------------------*/ | ||
587 | 904 | ||
588 | static struct lcd_dma_info { | 905 | static struct lcd_dma_info { |
589 | spinlock_t lock; | 906 | spinlock_t lock; |
@@ -795,7 +1112,7 @@ static void set_b1_regs(void)
795 | /* Always set the source port as SDRAM for now*/ | 1112 | /* Always set the source port as SDRAM for now*/ |
796 | w &= ~(0x03 << 6); | 1113 | w &= ~(0x03 << 6); |
797 | if (lcd_dma.callback != NULL) | 1114 | if (lcd_dma.callback != NULL) |
798 | w |= 1 << 1; /* Block interrupt enable */ | 1115 | w |= 1 << 1; /* Block interrupt enable */ |
799 | else | 1116 | else |
800 | w &= ~(1 << 1); | 1117 | w &= ~(1 << 1); |
801 | omap_writew(w, OMAP1610_DMA_LCD_CTRL); | 1118 | omap_writew(w, OMAP1610_DMA_LCD_CTRL); |
@@ -814,7 +1131,8 @@ static void set_b1_regs(void)
814 | omap_writew(fi, OMAP1610_DMA_LCD_SRC_FI_B1_L); | 1131 | omap_writew(fi, OMAP1610_DMA_LCD_SRC_FI_B1_L); |
815 | } | 1132 | } |
816 | 1133 | ||
817 | static irqreturn_t lcd_dma_irq_handler(int irq, void *dev_id, struct pt_regs *regs) | 1134 | static irqreturn_t lcd_dma_irq_handler(int irq, void *dev_id, |
1135 | struct pt_regs *regs) | ||
818 | { | 1136 | { |
819 | u16 w; | 1137 | u16 w; |
820 | 1138 | ||
@@ -870,7 +1188,8 @@ void omap_free_lcd_dma(void)
870 | return; | 1188 | return; |
871 | } | 1189 | } |
872 | if (!enable_1510_mode) | 1190 | if (!enable_1510_mode) |
873 | omap_writew(omap_readw(OMAP1610_DMA_LCD_CCR) & ~1, OMAP1610_DMA_LCD_CCR); | 1191 | omap_writew(omap_readw(OMAP1610_DMA_LCD_CCR) & ~1, |
1192 | OMAP1610_DMA_LCD_CCR); | ||
874 | lcd_dma.reserved = 0; | 1193 | lcd_dma.reserved = 0; |
875 | spin_unlock(&lcd_dma.lock); | 1194 | spin_unlock(&lcd_dma.lock); |
876 | } | 1195 | } |
@@ -939,93 +1258,24 @@ void omap_stop_lcd_dma(void)
939 | omap_writew(w, OMAP1610_DMA_LCD_CTRL); | 1258 | omap_writew(w, OMAP1610_DMA_LCD_CTRL); |
940 | } | 1259 | } |
941 | 1260 | ||
942 | /* | 1261 | /*----------------------------------------------------------------------------*/ |
943 | * Clears any DMA state so the DMA engine is ready to restart with new buffers | ||
944 | * through omap_start_dma(). Any buffers in flight are discarded. | ||
945 | */ | ||
946 | void omap_clear_dma(int lch) | ||
947 | { | ||
948 | unsigned long flags; | ||
949 | int status; | ||
950 | |||
951 | local_irq_save(flags); | ||
952 | omap_writew(omap_readw(OMAP_DMA_CCR(lch)) & ~OMAP_DMA_CCR_EN, | ||
953 | OMAP_DMA_CCR(lch)); | ||
954 | status = OMAP_DMA_CSR(lch); /* clear pending interrupts */ | ||
955 | local_irq_restore(flags); | ||
956 | } | ||
957 | |||
958 | /* | ||
959 | * Returns current physical source address for the given DMA channel. | ||
960 | * If the channel is running the caller must disable interrupts prior calling | ||
961 | * this function and process the returned value before re-enabling interrupt to | ||
962 | * prevent races with the interrupt handler. Note that in continuous mode there | ||
963 | * is a chance for CSSA_L register overflow inbetween the two reads resulting | ||
964 | * in incorrect return value. | ||
965 | */ | ||
966 | dma_addr_t omap_get_dma_src_pos(int lch) | ||
967 | { | ||
968 | return (dma_addr_t) (omap_readw(OMAP_DMA_CSSA_L(lch)) | | ||
969 | (omap_readw(OMAP_DMA_CSSA_U(lch)) << 16)); | ||
970 | } | ||
971 | |||
972 | /* | ||
973 | * Returns current physical destination address for the given DMA channel. | ||
974 | * If the channel is running the caller must disable interrupts prior calling | ||
975 | * this function and process the returned value before re-enabling interrupt to | ||
976 | * prevent races with the interrupt handler. Note that in continuous mode there | ||
977 | * is a chance for CDSA_L register overflow inbetween the two reads resulting | ||
978 | * in incorrect return value. | ||
979 | */ | ||
980 | dma_addr_t omap_get_dma_dst_pos(int lch) | ||
981 | { | ||
982 | return (dma_addr_t) (omap_readw(OMAP_DMA_CDSA_L(lch)) | | ||
983 | (omap_readw(OMAP_DMA_CDSA_U(lch)) << 16)); | ||
984 | } | ||
985 | |||
986 | /* | ||
987 | * Returns current source transfer counting for the given DMA channel. | ||
988 | * Can be used to monitor the progress of a transfer inside a block. | ||
989 | * It must be called with disabled interrupts. | ||
990 | */ | ||
991 | int omap_get_dma_src_addr_counter(int lch) | ||
992 | { | ||
993 | return (dma_addr_t) omap_readw(OMAP_DMA_CSAC(lch)); | ||
994 | } | ||
995 | |||
996 | int omap_dma_running(void) | ||
997 | { | ||
998 | int lch; | ||
999 | |||
1000 | /* Check if LCD DMA is running */ | ||
1001 | if (cpu_is_omap16xx()) | ||
1002 | if (omap_readw(OMAP1610_DMA_LCD_CCR) & OMAP_DMA_CCR_EN) | ||
1003 | return 1; | ||
1004 | |||
1005 | for (lch = 0; lch < dma_chan_count; lch++) { | ||
1006 | u16 w; | ||
1007 | |||
1008 | w = omap_readw(OMAP_DMA_CCR(lch)); | ||
1009 | if (w & OMAP_DMA_CCR_EN) | ||
1010 | return 1; | ||
1011 | } | ||
1012 | return 0; | ||
1013 | } | ||
1014 | 1262 | ||
1015 | static int __init omap_init_dma(void) | 1263 | static int __init omap_init_dma(void) |
1016 | { | 1264 | { |
1017 | int ch, r; | 1265 | int ch, r; |
1018 | 1266 | ||
1019 | if (cpu_is_omap1510()) { | 1267 | if (cpu_is_omap15xx()) { |
1020 | printk(KERN_INFO "DMA support for OMAP1510 initialized\n"); | 1268 | printk(KERN_INFO "DMA support for OMAP15xx initialized\n"); |
1021 | dma_chan_count = 9; | 1269 | dma_chan_count = 9; |
1022 | enable_1510_mode = 1; | 1270 | enable_1510_mode = 1; |
1023 | } else if (cpu_is_omap16xx() || cpu_is_omap730()) { | 1271 | } else if (cpu_is_omap16xx() || cpu_is_omap730()) { |
1024 | printk(KERN_INFO "OMAP DMA hardware version %d\n", | 1272 | printk(KERN_INFO "OMAP DMA hardware version %d\n", |
1025 | omap_readw(OMAP_DMA_HW_ID)); | 1273 | omap_readw(OMAP_DMA_HW_ID)); |
1026 | printk(KERN_INFO "DMA capabilities: %08x:%08x:%04x:%04x:%04x\n", | 1274 | printk(KERN_INFO "DMA capabilities: %08x:%08x:%04x:%04x:%04x\n", |
1027 | (omap_readw(OMAP_DMA_CAPS_0_U) << 16) | omap_readw(OMAP_DMA_CAPS_0_L), | 1275 | (omap_readw(OMAP_DMA_CAPS_0_U) << 16) | |
1028 | (omap_readw(OMAP_DMA_CAPS_1_U) << 16) | omap_readw(OMAP_DMA_CAPS_1_L), | 1276 | omap_readw(OMAP_DMA_CAPS_0_L), |
1277 | (omap_readw(OMAP_DMA_CAPS_1_U) << 16) | | ||
1278 | omap_readw(OMAP_DMA_CAPS_1_L), | ||
1029 | omap_readw(OMAP_DMA_CAPS_2), omap_readw(OMAP_DMA_CAPS_3), | 1279 | omap_readw(OMAP_DMA_CAPS_2), omap_readw(OMAP_DMA_CAPS_3), |
1030 | omap_readw(OMAP_DMA_CAPS_4)); | 1280 | omap_readw(OMAP_DMA_CAPS_4)); |
1031 | if (!enable_1510_mode) { | 1281 | if (!enable_1510_mode) { |
@@ -1038,6 +1288,11 @@ static int __init omap_init_dma(void)
1038 | dma_chan_count = 16; | 1288 | dma_chan_count = 16; |
1039 | } else | 1289 | } else |
1040 | dma_chan_count = 9; | 1290 | dma_chan_count = 9; |
1291 | } else if (cpu_is_omap24xx()) { | ||
1292 | u8 revision = omap_readb(OMAP_DMA4_REVISION); | ||
1293 | printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n", | ||
1294 | revision >> 4, revision & 0xf); | ||
1295 | dma_chan_count = OMAP_LOGICAL_DMA_CH_COUNT; | ||
1041 | } else { | 1296 | } else { |
1042 | dma_chan_count = 0; | 1297 | dma_chan_count = 0; |
1043 | return 0; | 1298 | return 0; |
@@ -1049,41 +1304,56 @@ static int __init omap_init_dma(void)
1049 | memset(&dma_chan, 0, sizeof(dma_chan)); | 1304 | memset(&dma_chan, 0, sizeof(dma_chan)); |
1050 | 1305 | ||
1051 | for (ch = 0; ch < dma_chan_count; ch++) { | 1306 | for (ch = 0; ch < dma_chan_count; ch++) { |
1307 | omap_clear_dma(ch); | ||
1052 | dma_chan[ch].dev_id = -1; | 1308 | dma_chan[ch].dev_id = -1; |
1053 | dma_chan[ch].next_lch = -1; | 1309 | dma_chan[ch].next_lch = -1; |
1054 | 1310 | ||
1055 | if (ch >= 6 && enable_1510_mode) | 1311 | if (ch >= 6 && enable_1510_mode) |
1056 | continue; | 1312 | continue; |
1057 | 1313 | ||
1058 | /* request_irq() doesn't like dev_id (ie. ch) being zero, | 1314 | if (cpu_class_is_omap1()) { |
1059 | * so we have to kludge around this. */ | 1315 | /* request_irq() doesn't like dev_id (ie. ch) being |
1060 | r = request_irq(dma_irq[ch], dma_irq_handler, 0, "DMA", | 1316 | * zero, so we have to kludge around this. */ |
1061 | (void *) (ch + 1)); | 1317 | r = request_irq(omap1_dma_irq[ch], |
1318 | omap1_dma_irq_handler, 0, "DMA", | ||
1319 | (void *) (ch + 1)); | ||
1320 | if (r != 0) { | ||
1321 | int i; | ||
1322 | |||
1323 | printk(KERN_ERR "unable to request IRQ %d " | ||
1324 | "for DMA (error %d)\n", | ||
1325 | omap1_dma_irq[ch], r); | ||
1326 | for (i = 0; i < ch; i++) | ||
1327 | free_irq(omap1_dma_irq[i], | ||
1328 | (void *) (i + 1)); | ||
1329 | return r; | ||
1330 | } | ||
1331 | } | ||
1332 | } | ||
1333 | |||
1334 | if (cpu_is_omap24xx()) | ||
1335 | setup_irq(INT_24XX_SDMA_IRQ0, &omap24xx_dma_irq); | ||
1336 | |||
1337 | /* FIXME: Update LCD DMA to work on 24xx */ | ||
1338 | if (cpu_class_is_omap1()) { | ||
1339 | r = request_irq(INT_DMA_LCD, lcd_dma_irq_handler, 0, | ||
1340 | "LCD DMA", NULL); | ||
1062 | if (r != 0) { | 1341 | if (r != 0) { |
1063 | int i; | 1342 | int i; |
1064 | 1343 | ||
1065 | printk(KERN_ERR "unable to request IRQ %d for DMA (error %d)\n", | 1344 | printk(KERN_ERR "unable to request IRQ for LCD DMA " |
1066 | dma_irq[ch], r); | 1345 | "(error %d)\n", r); |
1067 | for (i = 0; i < ch; i++) | 1346 | for (i = 0; i < dma_chan_count; i++) |
1068 | free_irq(dma_irq[i], (void *) (i + 1)); | 1347 | free_irq(omap1_dma_irq[i], (void *) (i + 1)); |
1069 | return r; | 1348 | return r; |
1070 | } | 1349 | } |
1071 | } | 1350 | } |
1072 | r = request_irq(INT_DMA_LCD, lcd_dma_irq_handler, 0, "LCD DMA", NULL); | ||
1073 | if (r != 0) { | ||
1074 | int i; | ||
1075 | 1351 | ||
1076 | printk(KERN_ERR "unable to request IRQ for LCD DMA (error %d)\n", r); | ||
1077 | for (i = 0; i < dma_chan_count; i++) | ||
1078 | free_irq(dma_irq[i], (void *) (i + 1)); | ||
1079 | return r; | ||
1080 | } | ||
1081 | return 0; | 1352 | return 0; |
1082 | } | 1353 | } |
1083 | 1354 | ||
1084 | arch_initcall(omap_init_dma); | 1355 | arch_initcall(omap_init_dma); |
1085 | 1356 | ||
1086 | |||
1087 | EXPORT_SYMBOL(omap_get_dma_src_pos); | 1357 | EXPORT_SYMBOL(omap_get_dma_src_pos); |
1088 | EXPORT_SYMBOL(omap_get_dma_dst_pos); | 1358 | EXPORT_SYMBOL(omap_get_dma_dst_pos); |
1089 | EXPORT_SYMBOL(omap_get_dma_src_addr_counter); | 1359 | EXPORT_SYMBOL(omap_get_dma_src_addr_counter); |
@@ -1109,6 +1379,8 @@ EXPORT_SYMBOL(omap_set_dma_dest_index);
1109 | EXPORT_SYMBOL(omap_set_dma_dest_data_pack); | 1379 | EXPORT_SYMBOL(omap_set_dma_dest_data_pack); |
1110 | EXPORT_SYMBOL(omap_set_dma_dest_burst_mode); | 1380 | EXPORT_SYMBOL(omap_set_dma_dest_burst_mode); |
1111 | 1381 | ||
1382 | EXPORT_SYMBOL(omap_set_dma_params); | ||
1383 | |||
1112 | EXPORT_SYMBOL(omap_dma_link_lch); | 1384 | EXPORT_SYMBOL(omap_dma_link_lch); |
1113 | EXPORT_SYMBOL(omap_dma_unlink_lch); | 1385 | EXPORT_SYMBOL(omap_dma_unlink_lch); |
1114 | 1386 | ||