Diffstat (limited to 'arch/m68k/include/asm/dma_no.h')
-rw-r--r--  arch/m68k/include/asm/dma_no.h  494
1 files changed, 494 insertions, 0 deletions
diff --git a/arch/m68k/include/asm/dma_no.h b/arch/m68k/include/asm/dma_no.h
new file mode 100644
index 00000000000..939a0205621
--- /dev/null
+++ b/arch/m68k/include/asm/dma_no.h
@@ -0,0 +1,494 @@
#ifndef _M68K_DMA_H
#define _M68K_DMA_H 1

//#define DMA_DEBUG 1


#ifdef CONFIG_COLDFIRE
/*
 * ColdFire DMA Model:
 * ColdFire DMA supports two forms of DMA: Single and Dual address. Single
 * address mode emits a source address, and expects that the device will either
 * pick up the data (DMA READ) or source data (DMA WRITE). This implies that
 * the device will place data on the correct byte(s) of the data bus, as the
 * memory transactions are always 32 bits. This implies that only 32 bit
 * devices will find single mode transfers useful. Dual address DMA mode
 * performs two cycles: source read and destination write. ColdFire will
 * align the data so that the device will always get the correct bytes, thus
 * is useful for 8 and 16 bit devices. This is the mode that is supported
 * below.
 *
 * AUG/22/2000 : added support for 32-bit Dual-Address-Mode (K) 2000
 *               Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
 *
 * AUG/25/2000 : added support for 8, 16 and 32-bit Single-Address-Mode (K)2000
 *               Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
 *
 * APR/18/2002 : added proper support for MCF5272 DMA controller.
 *               Arthur Shipkowski (art@videon-central.com)
 */

#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfdma.h>
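
/*
 * Illustrative sketch only (not part of the original header): a typical
 * dual-address transfer setup using the helpers defined later in this file.
 * The channel number, device register address and buffer are hypothetical
 * placeholders; real drivers obtain them from their platform setup.
 */
#if 0
static void example_start_dma_read(void *buf, unsigned int len)
{
        unsigned int chan = 1;                          /* hypothetical channel */

        if (request_dma(chan, "example") != 0)          /* reserve the channel */
                return;

        set_dma_mode(chan, DMA_MODE_READ);              /* I/O to memory, 8 bits */
        set_dma_device_addr(chan, 0xfffe0000);          /* hypothetical device register */
        set_dma_addr(chan, (unsigned int) buf);         /* memory side of the transfer */
        set_dma_count(chan, len);                       /* length in bytes */
        enable_dma(chan);                               /* start accepting requests */
}
#endif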

/*
 * Set the number of DMA channels on ColdFire for the different implementations.
 */
#if defined(CONFIG_M5249) || defined(CONFIG_M5307) || defined(CONFIG_M5407) || \
    defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x)
#define MAX_M68K_DMA_CHANNELS 4
#elif defined(CONFIG_M5272)
#define MAX_M68K_DMA_CHANNELS 1
#elif defined(CONFIG_M532x)
#define MAX_M68K_DMA_CHANNELS 0
#else
#define MAX_M68K_DMA_CHANNELS 2
#endif

extern unsigned int dma_base_addr[MAX_M68K_DMA_CHANNELS];
extern unsigned int dma_device_address[MAX_M68K_DMA_CHANNELS];

#if !defined(CONFIG_M5272)
#define DMA_MODE_WRITE_BIT  0x01  /* Memory/IO to IO/Memory select */
#define DMA_MODE_WORD_BIT   0x02  /* 8 or 16 bit transfers */
#define DMA_MODE_LONG_BIT   0x04  /* or 32 bit transfers */
#define DMA_MODE_SINGLE_BIT 0x08  /* single-address-mode */

/* I/O to memory, 8 bits, mode */
#define DMA_MODE_READ               0
/* memory to I/O, 8 bits, mode */
#define DMA_MODE_WRITE              1
/* I/O to memory, 16 bits, mode */
#define DMA_MODE_READ_WORD          2
/* memory to I/O, 16 bits, mode */
#define DMA_MODE_WRITE_WORD         3
/* I/O to memory, 32 bits, mode */
#define DMA_MODE_READ_LONG          4
/* memory to I/O, 32 bits, mode */
#define DMA_MODE_WRITE_LONG         5
/* I/O to memory, 8 bits, single-address-mode */
#define DMA_MODE_READ_SINGLE        8
/* memory to I/O, 8 bits, single-address-mode */
#define DMA_MODE_WRITE_SINGLE       9
/* I/O to memory, 16 bits, single-address-mode */
#define DMA_MODE_READ_WORD_SINGLE   10
/* memory to I/O, 16 bits, single-address-mode */
#define DMA_MODE_WRITE_WORD_SINGLE  11
/* I/O to memory, 32 bits, single-address-mode */
#define DMA_MODE_READ_LONG_SINGLE   12
/* memory to I/O, 32 bits, single-address-mode */
#define DMA_MODE_WRITE_LONG_SINGLE  13

#else /* CONFIG_M5272 is defined */

/* Source static-address mode */
#define DMA_MODE_SRC_SA_BIT 0x01
/* Two bits to select between all four modes */
#define DMA_MODE_SSIZE_MASK 0x06
/* Offset to shift bits in */
#define DMA_MODE_SSIZE_OFF  0x01
/* Destination static-address mode */
#define DMA_MODE_DES_SA_BIT 0x10
/* Two bits to select between all four modes */
#define DMA_MODE_DSIZE_MASK 0x60
/* Offset to shift bits in */
#define DMA_MODE_DSIZE_OFF  0x05
/* Size modifiers */
#define DMA_MODE_SIZE_LONG  0x00
#define DMA_MODE_SIZE_BYTE  0x01
#define DMA_MODE_SIZE_WORD  0x02
#define DMA_MODE_SIZE_LINE  0x03

/*
 * Aliases to help speed quick ports; these may be suboptimal, however. They
 * do not include the SINGLE mode modifiers since the MCF5272 does not have a
 * mode where the device is in control of its addressing.
 */

/* I/O to memory, 8 bits, mode */
#define DMA_MODE_READ       ((DMA_MODE_SIZE_BYTE << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_BYTE << DMA_MODE_SSIZE_OFF) | DMA_MODE_SRC_SA_BIT)
/* memory to I/O, 8 bits, mode */
#define DMA_MODE_WRITE      ((DMA_MODE_SIZE_BYTE << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_BYTE << DMA_MODE_SSIZE_OFF) | DMA_MODE_DES_SA_BIT)
/* I/O to memory, 16 bits, mode */
#define DMA_MODE_READ_WORD  ((DMA_MODE_SIZE_WORD << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_WORD << DMA_MODE_SSIZE_OFF) | DMA_MODE_SRC_SA_BIT)
/* memory to I/O, 16 bits, mode */
#define DMA_MODE_WRITE_WORD ((DMA_MODE_SIZE_WORD << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_WORD << DMA_MODE_SSIZE_OFF) | DMA_MODE_DES_SA_BIT)
/* I/O to memory, 32 bits, mode */
#define DMA_MODE_READ_LONG  ((DMA_MODE_SIZE_LONG << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_LONG << DMA_MODE_SSIZE_OFF) | DMA_MODE_SRC_SA_BIT)
/* memory to I/O, 32 bits, mode */
#define DMA_MODE_WRITE_LONG ((DMA_MODE_SIZE_LONG << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_LONG << DMA_MODE_SSIZE_OFF) | DMA_MODE_DES_SA_BIT)

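/*
 * Illustrative sketch only: how a mode word can be composed by hand from the
 * size and static-address bits above. This particular combination is
 * equivalent to DMA_MODE_WRITE_WORD; the macro name is hypothetical.
 */
#if 0
#define EXAMPLE_MODE_MEM_TO_DEV_WORD \
        ((DMA_MODE_SIZE_WORD << DMA_MODE_DSIZE_OFF) | \
         (DMA_MODE_SIZE_WORD << DMA_MODE_SSIZE_OFF) | \
         DMA_MODE_DES_SA_BIT)
#endif
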
#endif /* !defined(CONFIG_M5272) */

#if !defined(CONFIG_M5272)
/* enable/disable a specific DMA channel */
static __inline__ void enable_dma(unsigned int dmanr)
{
        volatile unsigned short *dmawp;

#ifdef DMA_DEBUG
        printk("enable_dma(dmanr=%d)\n", dmanr);
#endif

        dmawp = (unsigned short *) dma_base_addr[dmanr];
        dmawp[MCFDMA_DCR] |= MCFDMA_DCR_EEXT;
}

static __inline__ void disable_dma(unsigned int dmanr)
{
        volatile unsigned short *dmawp;
        volatile unsigned char  *dmapb;

#ifdef DMA_DEBUG
        printk("disable_dma(dmanr=%d)\n", dmanr);
#endif

        dmawp = (unsigned short *) dma_base_addr[dmanr];
        dmapb = (unsigned char *) dma_base_addr[dmanr];

        /* Turn off external requests, and stop any DMA in progress */
        dmawp[MCFDMA_DCR] &= ~MCFDMA_DCR_EEXT;
        dmapb[MCFDMA_DSR] = MCFDMA_DSR_DONE;
}

/*
 * Clear the 'DMA Pointer Flip Flop'.
 * Write 0 for LSB/MSB, 1 for MSB/LSB access.
 * Use this once to initialize the FF to a known state.
 * After that, keep track of it. :-)
 * --- In order to do that, the DMA routines below should ---
 * --- only be used while interrupts are disabled! ---
 *
 * This is a NOP for ColdFire. Provide a stub for compatibility.
 */
static __inline__ void clear_dma_ff(unsigned int dmanr)
{
}

/* set mode (above) for a specific DMA channel */
static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
{

        volatile unsigned char  *dmabp;
        volatile unsigned short *dmawp;

#ifdef DMA_DEBUG
        printk("set_dma_mode(dmanr=%d,mode=%d)\n", dmanr, mode);
#endif

        dmabp = (unsigned char *) dma_base_addr[dmanr];
        dmawp = (unsigned short *) dma_base_addr[dmanr];

        // Clear config errors
        dmabp[MCFDMA_DSR] = MCFDMA_DSR_DONE;

        // Set command register
        dmawp[MCFDMA_DCR] =
                MCFDMA_DCR_INT |        // Enable completion irq
                MCFDMA_DCR_CS |         // Force one xfer per request
                MCFDMA_DCR_AA |         // Enable auto alignment
                // single-address-mode
                ((mode & DMA_MODE_SINGLE_BIT) ? MCFDMA_DCR_SAA : 0) |
                // sets s_rw (-> r/w) high if Memory to I/O
                ((mode & DMA_MODE_WRITE_BIT) ? MCFDMA_DCR_S_RW : 0) |
                // Memory to I/O or I/O to Memory
                ((mode & DMA_MODE_WRITE_BIT) ? MCFDMA_DCR_SINC : MCFDMA_DCR_DINC) |
                // 32 bit, 16 bit or 8 bit transfers
                ((mode & DMA_MODE_WORD_BIT) ? MCFDMA_DCR_SSIZE_WORD :
                        ((mode & DMA_MODE_LONG_BIT) ? MCFDMA_DCR_SSIZE_LONG :
                                MCFDMA_DCR_SSIZE_BYTE)) |
                ((mode & DMA_MODE_WORD_BIT) ? MCFDMA_DCR_DSIZE_WORD :
                        ((mode & DMA_MODE_LONG_BIT) ? MCFDMA_DCR_DSIZE_LONG :
                                MCFDMA_DCR_DSIZE_BYTE));

#ifdef DEBUG_DMA
        printk("%s(%d): dmanr=%d DSR[%x]=%x DCR[%x]=%x\n", __FILE__, __LINE__,
                dmanr, (int) &dmabp[MCFDMA_DSR], dmabp[MCFDMA_DSR],
                (int) &dmawp[MCFDMA_DCR], dmawp[MCFDMA_DCR]);
#endif
}

/* Set transfer address for specific DMA channel */
static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
{
        volatile unsigned short *dmawp;
        volatile unsigned int   *dmalp;

#ifdef DMA_DEBUG
        printk("set_dma_addr(dmanr=%d,a=%x)\n", dmanr, a);
#endif

        dmawp = (unsigned short *) dma_base_addr[dmanr];
        dmalp = (unsigned int *) dma_base_addr[dmanr];

        // Determine which address registers are used for memory/device accesses
        if (dmawp[MCFDMA_DCR] & MCFDMA_DCR_SINC) {
                // Source incrementing, must be memory
                dmalp[MCFDMA_SAR] = a;
                // Set dest address, must be device
                dmalp[MCFDMA_DAR] = dma_device_address[dmanr];
        } else {
                // Destination incrementing, must be memory
                dmalp[MCFDMA_DAR] = a;
                // Set source address, must be device
                dmalp[MCFDMA_SAR] = dma_device_address[dmanr];
        }

#ifdef DEBUG_DMA
        printk("%s(%d): dmanr=%d DCR[%x]=%x SAR[%x]=%08x DAR[%x]=%08x\n",
                __FILE__, __LINE__, dmanr, (int) &dmawp[MCFDMA_DCR], dmawp[MCFDMA_DCR],
                (int) &dmalp[MCFDMA_SAR], dmalp[MCFDMA_SAR],
                (int) &dmalp[MCFDMA_DAR], dmalp[MCFDMA_DAR]);
#endif
}

/*
 * Specific for ColdFire - sets device address.
 * Should be called after the mode set call, and before the set DMA address call.
 */
static __inline__ void set_dma_device_addr(unsigned int dmanr, unsigned int a)
{
#ifdef DMA_DEBUG
        printk("set_dma_device_addr(dmanr=%d,a=%x)\n", dmanr, a);
#endif

        dma_device_address[dmanr] = a;
}

/*
 * NOTE 2: "count" represents _bytes_.
 */
static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
{
        volatile unsigned short *dmawp;

#ifdef DMA_DEBUG
        printk("set_dma_count(dmanr=%d,count=%d)\n", dmanr, count);
#endif

        dmawp = (unsigned short *) dma_base_addr[dmanr];
        dmawp[MCFDMA_BCR] = (unsigned short)count;
}

/*
 * Get DMA residue count. After a DMA transfer, this
 * should return zero. Reading this while a DMA transfer is
 * still in progress will return unpredictable results.
 * Otherwise, it returns the number of _bytes_ left to transfer.
 */
static __inline__ int get_dma_residue(unsigned int dmanr)
{
        volatile unsigned short *dmawp;
        unsigned short count;

#ifdef DMA_DEBUG
        printk("get_dma_residue(dmanr=%d)\n", dmanr);
#endif

        dmawp = (unsigned short *) dma_base_addr[dmanr];
        count = dmawp[MCFDMA_BCR];
        return((int) count);
}
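
/*
 * Illustrative sketch only: draining a transfer by polling the residue.
 * A real driver would normally rely on the completion interrupt enabled by
 * set_dma_mode() rather than busy-waiting; the channel number is a
 * hypothetical placeholder.
 */
#if 0
static void example_wait_for_dma(unsigned int chan)
{
        while (get_dma_residue(chan) != 0)
                /* spin until the byte count register reaches zero */ ;
        disable_dma(chan);
}
#endif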
#else /* CONFIG_M5272 is defined */

/*
 * The MCF5272 DMA controller is very different from the controller defined above
 * in terms of register mapping. For instance, with the exception of the 16-bit
 * interrupt register (IRQ#85, for reference), all of the registers are 32-bit.
 *
 * The big difference, however, is the lack of device-requested DMA. All modes
 * are dual address transfer, and there is no 'device' setup or direction bit.
 * You can DMA between a device and memory, between memory and memory, or even between
 * two devices directly, with any combination of incrementing and non-incrementing
 * addresses you choose. This makes it harder to distinguish the 'device address'
 * set up by set_dma_device_addr from the memory address set up by set_dma_addr.
 *
 * Therefore, there are two options. One is to use set_dma_addr and set_dma_device_addr,
 * which will act exactly as above -- it will look to see if the source is set to
 * autoincrement, and if so it will make the source use the set_dma_addr value and the
 * destination the set_dma_device_addr value. Otherwise the source will be set to the
 * set_dma_device_addr value and the destination will get the set_dma_addr value.
 *
 * The other is to use the provided set_dma_src_addr and set_dma_dest_addr functions
 * and make it explicit. Depending on what you're doing, one of these two should work
 * for you, but don't mix them in the same transfer setup.
 */
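
/*
 * Illustrative sketch only: "option one" from the note above on the MCF5272,
 * using set_dma_addr()/set_dma_device_addr(). The peripheral register address
 * and buffer are hypothetical placeholders. The mode marks the destination as
 * static, so set_dma_addr() routes the buffer to the incrementing (memory)
 * side and the device address to the other side.
 */
#if 0
static void example_m5272_mem_to_dev(void *buf, unsigned int len)
{
        unsigned int chan = 0;                          /* the MCF5272 has one channel */

        set_dma_mode(chan, DMA_MODE_WRITE_WORD);        /* memory to I/O, 16 bits */
        set_dma_device_addr(chan, 0x10000844);          /* hypothetical peripheral register */
        set_dma_addr(chan, (unsigned int) buf);
        set_dma_count(chan, len);
        enable_dma(chan);
}
#endif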

/* enable/disable a specific DMA channel */
static __inline__ void enable_dma(unsigned int dmanr)
{
        volatile unsigned int *dmalp;

#ifdef DMA_DEBUG
        printk("enable_dma(dmanr=%d)\n", dmanr);
#endif

        dmalp = (unsigned int *) dma_base_addr[dmanr];
        dmalp[MCFDMA_DMR] |= MCFDMA_DMR_EN;
}

static __inline__ void disable_dma(unsigned int dmanr)
{
        volatile unsigned int *dmalp;

#ifdef DMA_DEBUG
        printk("disable_dma(dmanr=%d)\n", dmanr);
#endif

        dmalp = (unsigned int *) dma_base_addr[dmanr];

        /* Turn off external requests, and stop any DMA in progress */
        dmalp[MCFDMA_DMR] &= ~MCFDMA_DMR_EN;
        dmalp[MCFDMA_DMR] |= MCFDMA_DMR_RESET;
}

/*
 * Clear the 'DMA Pointer Flip Flop'.
 * Write 0 for LSB/MSB, 1 for MSB/LSB access.
 * Use this once to initialize the FF to a known state.
 * After that, keep track of it. :-)
 * --- In order to do that, the DMA routines below should ---
 * --- only be used while interrupts are disabled! ---
 *
 * This is a NOP for ColdFire. Provide a stub for compatibility.
 */
static __inline__ void clear_dma_ff(unsigned int dmanr)
{
}

/* set mode (above) for a specific DMA channel */
static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
{

        volatile unsigned int   *dmalp;
        volatile unsigned short *dmawp;

#ifdef DMA_DEBUG
        printk("set_dma_mode(dmanr=%d,mode=%d)\n", dmanr, mode);
#endif
        dmalp = (unsigned int *) dma_base_addr[dmanr];
        dmawp = (unsigned short *) dma_base_addr[dmanr];

        // Clear config errors
        dmalp[MCFDMA_DMR] |= MCFDMA_DMR_RESET;

        // Set command register
        dmalp[MCFDMA_DMR] =
                MCFDMA_DMR_RQM_DUAL |   // Mandatory Request Mode setting
                MCFDMA_DMR_DSTT_SD |    // Set up addressing types; set to supervisor-data.
                MCFDMA_DMR_SRCT_SD |    // Set up addressing types; set to supervisor-data.
                // source static-address-mode
                ((mode & DMA_MODE_SRC_SA_BIT) ? MCFDMA_DMR_SRCM_SA : MCFDMA_DMR_SRCM_IA) |
                // dest static-address-mode
                ((mode & DMA_MODE_DES_SA_BIT) ? MCFDMA_DMR_DSTM_SA : MCFDMA_DMR_DSTM_IA) |
                // burst, 32 bit, 16 bit or 8 bit transfers are separately configurable on the MCF5272
                (((mode & DMA_MODE_DSIZE_MASK) >> DMA_MODE_DSIZE_OFF) << MCFDMA_DMR_DSTS_OFF) |
                (((mode & DMA_MODE_SSIZE_MASK) >> DMA_MODE_SSIZE_OFF) << MCFDMA_DMR_SRCS_OFF);

        dmawp[MCFDMA_DIR] |= MCFDMA_DIR_ASCEN;  /* Enable completion interrupts */

#ifdef DEBUG_DMA
        printk("%s(%d): dmanr=%d DMR[%x]=%x DIR[%x]=%x\n", __FILE__, __LINE__,
                dmanr, (int) &dmalp[MCFDMA_DMR], dmalp[MCFDMA_DMR],
                (int) &dmawp[MCFDMA_DIR], dmawp[MCFDMA_DIR]);
#endif
}

/* Set transfer address for specific DMA channel */
static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
{
        volatile unsigned int *dmalp;

#ifdef DMA_DEBUG
        printk("set_dma_addr(dmanr=%d,a=%x)\n", dmanr, a);
#endif

        dmalp = (unsigned int *) dma_base_addr[dmanr];

        // Determine which address registers are used for memory/device accesses
        if (dmalp[MCFDMA_DMR] & MCFDMA_DMR_SRCM) {
                // Source incrementing, must be memory
                dmalp[MCFDMA_DSAR] = a;
                // Set dest address, must be device
                dmalp[MCFDMA_DDAR] = dma_device_address[dmanr];
        } else {
                // Destination incrementing, must be memory
                dmalp[MCFDMA_DDAR] = a;
                // Set source address, must be device
                dmalp[MCFDMA_DSAR] = dma_device_address[dmanr];
        }

#ifdef DEBUG_DMA
        printk("%s(%d): dmanr=%d DMR[%x]=%x SAR[%x]=%08x DAR[%x]=%08x\n",
                __FILE__, __LINE__, dmanr, (int) &dmalp[MCFDMA_DMR], dmalp[MCFDMA_DMR],
                (int) &dmalp[MCFDMA_DSAR], dmalp[MCFDMA_DSAR],
                (int) &dmalp[MCFDMA_DDAR], dmalp[MCFDMA_DDAR]);
#endif
}

/*
 * Specific for ColdFire - sets device address.
 * Should be called after the mode set call, and before the set DMA address call.
 */
static __inline__ void set_dma_device_addr(unsigned int dmanr, unsigned int a)
{
#ifdef DMA_DEBUG
        printk("set_dma_device_addr(dmanr=%d,a=%x)\n", dmanr, a);
#endif

        dma_device_address[dmanr] = a;
}

/*
 * NOTE 2: "count" represents _bytes_.
 *
 * NOTE 3: Although the register is 32 bits wide, "count" is at most a 24-bit value.
 */
static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
{
        volatile unsigned int *dmalp;

#ifdef DMA_DEBUG
        printk("set_dma_count(dmanr=%d,count=%d)\n", dmanr, count);
#endif

        dmalp = (unsigned int *) dma_base_addr[dmanr];
        dmalp[MCFDMA_DBCR] = count;
}

/*
 * Get DMA residue count. After a DMA transfer, this
 * should return zero. Reading this while a DMA transfer is
 * still in progress will return unpredictable results.
 * Otherwise, it returns the number of _bytes_ left to transfer.
 */
static __inline__ int get_dma_residue(unsigned int dmanr)
{
        volatile unsigned int *dmalp;
        unsigned int count;

#ifdef DMA_DEBUG
        printk("get_dma_residue(dmanr=%d)\n", dmanr);
#endif

        dmalp = (unsigned int *) dma_base_addr[dmanr];
        count = dmalp[MCFDMA_DBCR];
        return(count);
}

#endif /* !defined(CONFIG_M5272) */
#endif /* CONFIG_COLDFIRE */

#define MAX_DMA_CHANNELS 8

/* Don't define MAX_DMA_ADDRESS; it's useless on the m68k/coldfire and any
   occurrence should be flagged as an error. */
/* under 2.4 it is actually needed by the new bootmem allocator */
#define MAX_DMA_ADDRESS PAGE_OFFSET

/* These are in kernel/dma.c: */
extern int request_dma(unsigned int dmanr, const char *device_id);  /* reserve a DMA channel */
extern void free_dma(unsigned int dmanr);                           /* release it again */

#endif /* _M68K_DMA_H */