Diffstat (limited to 'arch/m68k/include/asm/dma.h')
-rw-r--r--  arch/m68k/include/asm/dma.h  492
1 files changed, 489 insertions, 3 deletions
diff --git a/arch/m68k/include/asm/dma.h b/arch/m68k/include/asm/dma.h
index b82e660cf1c2..6fbdfe895104 100644
--- a/arch/m68k/include/asm/dma.h
+++ b/arch/m68k/include/asm/dma.h
@@ -1,5 +1,491 @@
-#ifdef __uClinux__
-#include "dma_no.h"
+#ifndef _M68K_DMA_H
+#define _M68K_DMA_H 1
+
+#ifdef CONFIG_COLDFIRE
+/*
+ * ColdFire DMA Model:
+ * ColdFire DMA supports two forms of DMA: Single and Dual address. Single
+ * address mode emits a source address, and expects that the device will either
+ * pick up the data (DMA READ) or source data (DMA WRITE). This implies that
+ * the device will place data on the correct byte(s) of the data bus, as the
+ * memory transactions are always 32 bits. This implies that only 32 bit
+ * devices will find single mode transfers useful. Dual address DMA mode
+ * performs two cycles: source read and destination write. ColdFire will
+ * align the data so that the device will always get the correct bytes, thus
+ * is useful for 8 and 16 bit devices. This is the mode that is supported
+ * below.
+ *
+ * AUG/22/2000 : added support for 32-bit Dual-Address-Mode (K) 2000
+ *               Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
+ *
+ * AUG/25/2000 : added support for 8, 16 and 32-bit Single-Address-Mode (K)2000
+ *               Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
+ *
+ * APR/18/2002 : added proper support for MCF5272 DMA controller.
+ *               Arthur Shipkowski (art@videon-central.com)
+ */
+
+#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
+#include <asm/mcfdma.h>
+
+/*
+ * Set number of channels of DMA on ColdFire for different implementations.
+ */
+#if defined(CONFIG_M5249) || defined(CONFIG_M5307) || defined(CONFIG_M5407) || \
+    defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x)
+#define MAX_M68K_DMA_CHANNELS 4
+#elif defined(CONFIG_M5272)
+#define MAX_M68K_DMA_CHANNELS 1
+#elif defined(CONFIG_M532x)
+#define MAX_M68K_DMA_CHANNELS 0
 #else
-#include "dma_mm.h"
+#define MAX_M68K_DMA_CHANNELS 2
 #endif
+
+extern unsigned int dma_base_addr[MAX_M68K_DMA_CHANNELS];
+extern unsigned int dma_device_address[MAX_M68K_DMA_CHANNELS];
+
+#if !defined(CONFIG_M5272)
+#define DMA_MODE_WRITE_BIT  0x01  /* Memory/IO to IO/Memory select */
+#define DMA_MODE_WORD_BIT   0x02  /* 8 or 16 bit transfers */
+#define DMA_MODE_LONG_BIT   0x04  /* or 32 bit transfers */
+#define DMA_MODE_SINGLE_BIT 0x08  /* single-address-mode */
+
+/* I/O to memory, 8 bits, mode */
+#define DMA_MODE_READ                0
+/* memory to I/O, 8 bits, mode */
+#define DMA_MODE_WRITE               1
+/* I/O to memory, 16 bits, mode */
+#define DMA_MODE_READ_WORD           2
+/* memory to I/O, 16 bits, mode */
+#define DMA_MODE_WRITE_WORD          3
+/* I/O to memory, 32 bits, mode */
+#define DMA_MODE_READ_LONG           4
+/* memory to I/O, 32 bits, mode */
+#define DMA_MODE_WRITE_LONG          5
+/* I/O to memory, 8 bits, single-address-mode */
+#define DMA_MODE_READ_SINGLE         8
+/* memory to I/O, 8 bits, single-address-mode */
+#define DMA_MODE_WRITE_SINGLE        9
+/* I/O to memory, 16 bits, single-address-mode */
+#define DMA_MODE_READ_WORD_SINGLE    10
+/* memory to I/O, 16 bits, single-address-mode */
+#define DMA_MODE_WRITE_WORD_SINGLE   11
+/* I/O to memory, 32 bits, single-address-mode */
+#define DMA_MODE_READ_LONG_SINGLE    12
+/* memory to I/O, 32 bits, single-address-mode */
+#define DMA_MODE_WRITE_LONG_SINGLE   13
+
+#else /* CONFIG_M5272 is defined */
+
+/* Source static-address mode */
+#define DMA_MODE_SRC_SA_BIT  0x01
+/* Two bits to select between all four modes */
+#define DMA_MODE_SSIZE_MASK  0x06
+/* Offset to shift bits in */
+#define DMA_MODE_SSIZE_OFF   0x01
+/* Destination static-address mode */
+#define DMA_MODE_DES_SA_BIT  0x10
+/* Two bits to select between all four modes */
+#define DMA_MODE_DSIZE_MASK  0x60
+/* Offset to shift bits in */
+#define DMA_MODE_DSIZE_OFF   0x05
+/* Size modifiers */
+#define DMA_MODE_SIZE_LONG   0x00
+#define DMA_MODE_SIZE_BYTE   0x01
+#define DMA_MODE_SIZE_WORD   0x02
+#define DMA_MODE_SIZE_LINE   0x03
+
+/*
+ * Aliases to help speed quick ports; these may be suboptimal, however. They
+ * do not include the SINGLE mode modifiers since the MCF5272 does not have a
+ * mode where the device is in control of its addressing.
+ */
+
+/* I/O to memory, 8 bits, mode */
+#define DMA_MODE_READ       ((DMA_MODE_SIZE_BYTE << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_BYTE << DMA_MODE_SSIZE_OFF) | DMA_MODE_SRC_SA_BIT)
+/* memory to I/O, 8 bits, mode */
+#define DMA_MODE_WRITE      ((DMA_MODE_SIZE_BYTE << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_BYTE << DMA_MODE_SSIZE_OFF) | DMA_MODE_DES_SA_BIT)
+/* I/O to memory, 16 bits, mode */
+#define DMA_MODE_READ_WORD  ((DMA_MODE_SIZE_WORD << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_WORD << DMA_MODE_SSIZE_OFF) | DMA_MODE_SRC_SA_BIT)
+/* memory to I/O, 16 bits, mode */
+#define DMA_MODE_WRITE_WORD ((DMA_MODE_SIZE_WORD << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_WORD << DMA_MODE_SSIZE_OFF) | DMA_MODE_DES_SA_BIT)
+/* I/O to memory, 32 bits, mode */
+#define DMA_MODE_READ_LONG  ((DMA_MODE_SIZE_LONG << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_LONG << DMA_MODE_SSIZE_OFF) | DMA_MODE_SRC_SA_BIT)
+/* memory to I/O, 32 bits, mode */
+#define DMA_MODE_WRITE_LONG ((DMA_MODE_SIZE_LONG << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_LONG << DMA_MODE_SSIZE_OFF) | DMA_MODE_DES_SA_BIT)
118 | |||
119 | #endif /* !defined(CONFIG_M5272) */ | ||
120 | |||
121 | #if !defined(CONFIG_M5272) | ||
122 | /* enable/disable a specific DMA channel */ | ||
123 | static __inline__ void enable_dma(unsigned int dmanr) | ||
124 | { | ||
125 | volatile unsigned short *dmawp; | ||
126 | |||
127 | #ifdef DMA_DEBUG | ||
128 | printk("enable_dma(dmanr=%d)\n", dmanr); | ||
129 | #endif | ||
130 | |||
131 | dmawp = (unsigned short *) dma_base_addr[dmanr]; | ||
132 | dmawp[MCFDMA_DCR] |= MCFDMA_DCR_EEXT; | ||
133 | } | ||
134 | |||
135 | static __inline__ void disable_dma(unsigned int dmanr) | ||
136 | { | ||
137 | volatile unsigned short *dmawp; | ||
138 | volatile unsigned char *dmapb; | ||
139 | |||
140 | #ifdef DMA_DEBUG | ||
141 | printk("disable_dma(dmanr=%d)\n", dmanr); | ||
142 | #endif | ||
143 | |||
144 | dmawp = (unsigned short *) dma_base_addr[dmanr]; | ||
145 | dmapb = (unsigned char *) dma_base_addr[dmanr]; | ||
146 | |||
147 | /* Turn off external requests, and stop any DMA in progress */ | ||
148 | dmawp[MCFDMA_DCR] &= ~MCFDMA_DCR_EEXT; | ||
149 | dmapb[MCFDMA_DSR] = MCFDMA_DSR_DONE; | ||
150 | } | ||
151 | |||
152 | /* | ||
153 | * Clear the 'DMA Pointer Flip Flop'. | ||
154 | * Write 0 for LSB/MSB, 1 for MSB/LSB access. | ||
155 | * Use this once to initialize the FF to a known state. | ||
156 | * After that, keep track of it. :-) | ||
157 | * --- In order to do that, the DMA routines below should --- | ||
158 | * --- only be used while interrupts are disabled! --- | ||
159 | * | ||
160 | * This is a NOP for ColdFire. Provide a stub for compatibility. | ||
161 | */ | ||
162 | static __inline__ void clear_dma_ff(unsigned int dmanr) | ||
163 | { | ||
164 | } | ||
165 | |||
166 | /* set mode (above) for a specific DMA channel */ | ||
167 | static __inline__ void set_dma_mode(unsigned int dmanr, char mode) | ||
168 | { | ||
169 | |||
170 | volatile unsigned char *dmabp; | ||
171 | volatile unsigned short *dmawp; | ||
172 | |||
173 | #ifdef DMA_DEBUG | ||
174 | printk("set_dma_mode(dmanr=%d,mode=%d)\n", dmanr, mode); | ||
175 | #endif | ||
176 | |||
177 | dmabp = (unsigned char *) dma_base_addr[dmanr]; | ||
178 | dmawp = (unsigned short *) dma_base_addr[dmanr]; | ||
179 | |||
180 | /* Clear config errors */ | ||
181 | dmabp[MCFDMA_DSR] = MCFDMA_DSR_DONE; | ||
182 | |||
183 | /* Set command register */ | ||
184 | dmawp[MCFDMA_DCR] = | ||
185 | MCFDMA_DCR_INT | /* Enable completion irq */ | ||
186 | MCFDMA_DCR_CS | /* Force one xfer per request */ | ||
187 | MCFDMA_DCR_AA | /* Enable auto alignment */ | ||
188 | /* single-address-mode */ | ||
189 | ((mode & DMA_MODE_SINGLE_BIT) ? MCFDMA_DCR_SAA : 0) | | ||
+        /* sets s_rw (-> r/w) high if Memory to I/O */
+        ((mode & DMA_MODE_WRITE_BIT) ? MCFDMA_DCR_S_RW : 0) |
+        /* Memory to I/O or I/O to Memory */
+        ((mode & DMA_MODE_WRITE_BIT) ? MCFDMA_DCR_SINC : MCFDMA_DCR_DINC) |
+        /* 32 bit, 16 bit or 8 bit transfers */
+        ((mode & DMA_MODE_WORD_BIT) ? MCFDMA_DCR_SSIZE_WORD :
+            ((mode & DMA_MODE_LONG_BIT) ? MCFDMA_DCR_SSIZE_LONG :
+                MCFDMA_DCR_SSIZE_BYTE)) |
+        ((mode & DMA_MODE_WORD_BIT) ? MCFDMA_DCR_DSIZE_WORD :
+            ((mode & DMA_MODE_LONG_BIT) ? MCFDMA_DCR_DSIZE_LONG :
+                MCFDMA_DCR_DSIZE_BYTE));
+
+#ifdef DEBUG_DMA
+    printk("%s(%d): dmanr=%d DSR[%x]=%x DCR[%x]=%x\n", __FILE__, __LINE__,
+        dmanr, (int) &dmabp[MCFDMA_DSR], dmabp[MCFDMA_DSR],
+        (int) &dmawp[MCFDMA_DCR], dmawp[MCFDMA_DCR]);
+#endif
+}
+
+/* Set transfer address for specific DMA channel */
+static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
+{
+    volatile unsigned short *dmawp;
+    volatile unsigned int   *dmalp;
+
+#ifdef DMA_DEBUG
+    printk("set_dma_addr(dmanr=%d,a=%x)\n", dmanr, a);
+#endif
+
+    dmawp = (unsigned short *) dma_base_addr[dmanr];
+    dmalp = (unsigned int *) dma_base_addr[dmanr];
+
+    /* Determine which address registers are used for memory/device accesses */
+    if (dmawp[MCFDMA_DCR] & MCFDMA_DCR_SINC) {
+        /* Source incrementing, must be memory */
+        dmalp[MCFDMA_SAR] = a;
+        /* Set dest address, must be device */
+        dmalp[MCFDMA_DAR] = dma_device_address[dmanr];
+    } else {
+        /* Destination incrementing, must be memory */
+        dmalp[MCFDMA_DAR] = a;
+        /* Set source address, must be device */
+        dmalp[MCFDMA_SAR] = dma_device_address[dmanr];
+    }
+
+#ifdef DEBUG_DMA
+    printk("%s(%d): dmanr=%d DCR[%x]=%x SAR[%x]=%08x DAR[%x]=%08x\n",
+        __FILE__, __LINE__, dmanr, (int) &dmawp[MCFDMA_DCR], dmawp[MCFDMA_DCR],
+        (int) &dmalp[MCFDMA_SAR], dmalp[MCFDMA_SAR],
+        (int) &dmalp[MCFDMA_DAR], dmalp[MCFDMA_DAR]);
+#endif
+}
+
+/*
+ * Specific for Coldfire - sets device address.
+ * Should be called after the mode set call, and before set DMA address.
+ */
+static __inline__ void set_dma_device_addr(unsigned int dmanr, unsigned int a)
+{
+#ifdef DMA_DEBUG
+    printk("set_dma_device_addr(dmanr=%d,a=%x)\n", dmanr, a);
+#endif
+
+    dma_device_address[dmanr] = a;
+}
+
+/*
+ * NOTE 2: "count" represents _bytes_.
+ */
+static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
+{
+    volatile unsigned short *dmawp;
+
+#ifdef DMA_DEBUG
+    printk("set_dma_count(dmanr=%d,count=%d)\n", dmanr, count);
+#endif
+
+    dmawp = (unsigned short *) dma_base_addr[dmanr];
+    dmawp[MCFDMA_BCR] = (unsigned short)count;
+}
+
+/*
+ * Get DMA residue count. After a DMA transfer, this
+ * should return zero. Reading this while a DMA transfer is
+ * still in progress will return unpredictable results.
+ * Otherwise, it returns the number of _bytes_ left to transfer.
+ */
+static __inline__ int get_dma_residue(unsigned int dmanr)
+{
+    volatile unsigned short *dmawp;
+    unsigned short count;
+
+#ifdef DMA_DEBUG
+    printk("get_dma_residue(dmanr=%d)\n", dmanr);
+#endif
+
+    dmawp = (unsigned short *) dma_base_addr[dmanr];
+    count = dmawp[MCFDMA_BCR];
+    return((int) count);
+}
+#else /* CONFIG_M5272 is defined */
+
+/*
+ * The MCF5272 DMA controller is very different from the controller defined above
+ * in terms of register mapping. For instance, with the exception of the 16-bit
+ * interrupt register (IRQ#85, for reference), all of the registers are 32-bit.
+ *
+ * The big difference, however, is the lack of device-requested DMA. All modes
+ * are dual address transfer, and there is no 'device' setup or direction bit.
+ * You can DMA between a device and memory, between memory and memory, or even between
+ * two devices directly, with any combination of incrementing and non-incrementing
+ * addresses you choose. This puts a crimp in distinguishing the 'device
+ * address' set up by set_dma_device_addr.
+ *
+ * Therefore, there are two options. One is to use set_dma_addr and set_dma_device_addr,
+ * which will act exactly as above -- it will look to see if the source is set to
+ * autoincrement, and if so it will make the source use the set_dma_addr value and the
+ * destination the set_dma_device_addr value. Otherwise the source will be set to the
+ * set_dma_device_addr value and the destination will get the set_dma_addr value.
+ *
+ * The other is to use the provided set_dma_src_addr and set_dma_dest_addr functions
+ * and make it explicit. Depending on what you're doing, one of these two should work
+ * for you, but don't mix them in the same transfer setup.
+ */
314 | |||
315 | /* enable/disable a specific DMA channel */ | ||
316 | static __inline__ void enable_dma(unsigned int dmanr) | ||
317 | { | ||
318 | volatile unsigned int *dmalp; | ||
319 | |||
320 | #ifdef DMA_DEBUG | ||
321 | printk("enable_dma(dmanr=%d)\n", dmanr); | ||
322 | #endif | ||
323 | |||
324 | dmalp = (unsigned int *) dma_base_addr[dmanr]; | ||
325 | dmalp[MCFDMA_DMR] |= MCFDMA_DMR_EN; | ||
326 | } | ||
327 | |||
328 | static __inline__ void disable_dma(unsigned int dmanr) | ||
329 | { | ||
330 | volatile unsigned int *dmalp; | ||
331 | |||
332 | #ifdef DMA_DEBUG | ||
333 | printk("disable_dma(dmanr=%d)\n", dmanr); | ||
334 | #endif | ||
335 | |||
336 | dmalp = (unsigned int *) dma_base_addr[dmanr]; | ||
337 | |||
338 | /* Turn off external requests, and stop any DMA in progress */ | ||
339 | dmalp[MCFDMA_DMR] &= ~MCFDMA_DMR_EN; | ||
340 | dmalp[MCFDMA_DMR] |= MCFDMA_DMR_RESET; | ||
341 | } | ||
342 | |||
343 | /* | ||
344 | * Clear the 'DMA Pointer Flip Flop'. | ||
345 | * Write 0 for LSB/MSB, 1 for MSB/LSB access. | ||
346 | * Use this once to initialize the FF to a known state. | ||
347 | * After that, keep track of it. :-) | ||
348 | * --- In order to do that, the DMA routines below should --- | ||
349 | * --- only be used while interrupts are disabled! --- | ||
350 | * | ||
351 | * This is a NOP for ColdFire. Provide a stub for compatibility. | ||
352 | */ | ||
353 | static __inline__ void clear_dma_ff(unsigned int dmanr) | ||
354 | { | ||
355 | } | ||
356 | |||
357 | /* set mode (above) for a specific DMA channel */ | ||
358 | static __inline__ void set_dma_mode(unsigned int dmanr, char mode) | ||
359 | { | ||
360 | |||
361 | volatile unsigned int *dmalp; | ||
362 | volatile unsigned short *dmawp; | ||
363 | |||
364 | #ifdef DMA_DEBUG | ||
365 | printk("set_dma_mode(dmanr=%d,mode=%d)\n", dmanr, mode); | ||
366 | #endif | ||
367 | dmalp = (unsigned int *) dma_base_addr[dmanr]; | ||
368 | dmawp = (unsigned short *) dma_base_addr[dmanr]; | ||
369 | |||
370 | /* Clear config errors */ | ||
371 | dmalp[MCFDMA_DMR] |= MCFDMA_DMR_RESET; | ||
372 | |||
373 | /* Set command register */ | ||
374 | dmalp[MCFDMA_DMR] = | ||
375 | MCFDMA_DMR_RQM_DUAL | /* Mandatory Request Mode setting */ | ||
376 | MCFDMA_DMR_DSTT_SD | /* Set up addressing types; set to supervisor-data. */ | ||
377 | MCFDMA_DMR_SRCT_SD | /* Set up addressing types; set to supervisor-data. */ | ||
378 | /* source static-address-mode */ | ||
379 | ((mode & DMA_MODE_SRC_SA_BIT) ? MCFDMA_DMR_SRCM_SA : MCFDMA_DMR_SRCM_IA) | | ||
380 | /* dest static-address-mode */ | ||
381 | ((mode & DMA_MODE_DES_SA_BIT) ? MCFDMA_DMR_DSTM_SA : MCFDMA_DMR_DSTM_IA) | | ||
382 | /* burst, 32 bit, 16 bit or 8 bit transfers are separately configurable on the MCF5272 */ | ||
383 | (((mode & DMA_MODE_SSIZE_MASK) >> DMA_MODE_SSIZE_OFF) << MCFDMA_DMR_DSTS_OFF) | | ||
384 | (((mode & DMA_MODE_SSIZE_MASK) >> DMA_MODE_SSIZE_OFF) << MCFDMA_DMR_SRCS_OFF); | ||
385 | |||
386 | dmawp[MCFDMA_DIR] |= MCFDMA_DIR_ASCEN; /* Enable completion interrupts */ | ||
387 | |||
388 | #ifdef DEBUG_DMA | ||
389 | printk("%s(%d): dmanr=%d DMR[%x]=%x DIR[%x]=%x\n", __FILE__, __LINE__, | ||
+        dmanr, (int) &dmalp[MCFDMA_DMR], dmalp[MCFDMA_DMR],
+        (int) &dmawp[MCFDMA_DIR], dmawp[MCFDMA_DIR]);
+#endif
+}
+
+/* Set transfer address for specific DMA channel */
+static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
+{
+    volatile unsigned int *dmalp;
+
+#ifdef DMA_DEBUG
+    printk("set_dma_addr(dmanr=%d,a=%x)\n", dmanr, a);
+#endif
+
+    dmalp = (unsigned int *) dma_base_addr[dmanr];
+
+    /* Determine which address registers are used for memory/device accesses */
+    if (dmalp[MCFDMA_DMR] & MCFDMA_DMR_SRCM) {
+        /* Source incrementing, must be memory */
+        dmalp[MCFDMA_DSAR] = a;
+        /* Set dest address, must be device */
+        dmalp[MCFDMA_DDAR] = dma_device_address[dmanr];
+    } else {
+        /* Destination incrementing, must be memory */
+        dmalp[MCFDMA_DDAR] = a;
+        /* Set source address, must be device */
+        dmalp[MCFDMA_DSAR] = dma_device_address[dmanr];
+    }
+
+#ifdef DEBUG_DMA
+    printk("%s(%d): dmanr=%d DMR[%x]=%x SAR[%x]=%08x DAR[%x]=%08x\n",
+        __FILE__, __LINE__, dmanr, (int) &dmalp[MCFDMA_DMR], dmalp[MCFDMA_DMR],
+        (int) &dmalp[MCFDMA_DSAR], dmalp[MCFDMA_DSAR],
+        (int) &dmalp[MCFDMA_DDAR], dmalp[MCFDMA_DDAR]);
+#endif
+}
+
+/*
+ * Specific for Coldfire - sets device address.
+ * Should be called after the mode set call, and before set DMA address.
+ */
+static __inline__ void set_dma_device_addr(unsigned int dmanr, unsigned int a)
+{
+#ifdef DMA_DEBUG
+    printk("set_dma_device_addr(dmanr=%d,a=%x)\n", dmanr, a);
+#endif
+
+    dma_device_address[dmanr] = a;
+}
+
+/*
+ * NOTE 2: "count" represents _bytes_.
+ *
+ * NOTE 3: While a 32-bit register, "count" is only a maximum 24-bit value.
+ */
+static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
+{
+    volatile unsigned int *dmalp;
+
+#ifdef DMA_DEBUG
+    printk("set_dma_count(dmanr=%d,count=%d)\n", dmanr, count);
+#endif
+
+    dmalp = (unsigned int *) dma_base_addr[dmanr];
+    dmalp[MCFDMA_DBCR] = count;
+}
+
+/*
+ * Get DMA residue count. After a DMA transfer, this
+ * should return zero. Reading this while a DMA transfer is
+ * still in progress will return unpredictable results.
+ * Otherwise, it returns the number of _bytes_ left to transfer.
+ */
+static __inline__ int get_dma_residue(unsigned int dmanr)
+{
+    volatile unsigned int *dmalp;
+    unsigned int count;
+
+#ifdef DMA_DEBUG
+    printk("get_dma_residue(dmanr=%d)\n", dmanr);
+#endif
+
+    dmalp = (unsigned int *) dma_base_addr[dmanr];
+    count = dmalp[MCFDMA_DBCR];
+    return(count);
+}
+
+#endif /* !defined(CONFIG_M5272) */
+#endif /* CONFIG_COLDFIRE */
+
+/* it's useless on the m68k, but unfortunately needed by the new
+   bootmem allocator (but this should do for it) */
+#define MAX_DMA_ADDRESS PAGE_OFFSET
+
+#define MAX_DMA_CHANNELS 8
+
+extern int request_dma(unsigned int dmanr, const char *device_id);  /* reserve a DMA channel */
+extern void free_dma(unsigned int dmanr);                           /* release it again */
+
+#define isa_dma_bridge_buggy (0)
+
+#endif /* _M68K_DMA_H */
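
For readers porting a driver to the API added by this patch, the following is a minimal usage sketch of the dual-address (non-MCF5272) interface. It follows the ordering stated in the set_dma_device_addr comment (mode first, then device address, then memory address). The channel number, device register address, and function name are hypothetical placeholders, not taken from the patch; request_dma()/free_dma() are assumed to return 0 on success, and the buffer address is assumed to be directly DMA-able as on typical nommu ColdFire setups.

/* Sketch only: a 16-bit, device-to-memory transfer on a hypothetical channel. */
#include <asm/dma.h>

#define EXAMPLE_CHANNEL   1           /* hypothetical DMA channel */
#define EXAMPLE_DEV_FIFO  0x10000204  /* hypothetical device data register */

static int example_dma_read(void *buf, unsigned int len)
{
    if (request_dma(EXAMPLE_CHANNEL, "example-dev") != 0)   /* reserve the channel */
        return -1;

    set_dma_mode(EXAMPLE_CHANNEL, DMA_MODE_READ_WORD);      /* I/O -> memory, 16 bits */
    set_dma_device_addr(EXAMPLE_CHANNEL, EXAMPLE_DEV_FIFO); /* after mode, before addr */
    set_dma_addr(EXAMPLE_CHANNEL, (unsigned int) buf);      /* memory side of the transfer */
    set_dma_count(EXAMPLE_CHANNEL, len);                    /* count is in bytes */
    enable_dma(EXAMPLE_CHANNEL);

    /* Wait for completion here: set_dma_mode() enabled the completion
     * interrupt (MCFDMA_DCR_INT), so a real driver would sleep until its
     * irq handler runs rather than poll the residue mid-transfer. */

    disable_dma(EXAMPLE_CHANNEL);
    free_dma(EXAMPLE_CHANNEL);
    return (get_dma_residue(EXAMPLE_CHANNEL) == 0) ? 0 : -1;
}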
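On the MCF5272 side, set_dma_mode() builds the DMR value from a small mode word rather than the fixed DMA_MODE_* numbers used in the dual-address case. The sketch below is an illustration, not code from the patch: it assembles such a mode word by hand from the size and static-address macros defined in the header, for a 16-bit transfer whose source is a non-incrementing device register (equivalent to the header's DMA_MODE_READ_WORD alias).

/* Illustration only: composing an MCF5272 mode word from the macros above. */
#include <asm/dma.h>

static char example_mcf5272_read_word_mode(void)
{
    char mode = 0;

    mode |= DMA_MODE_SIZE_WORD << DMA_MODE_SSIZE_OFF; /* 16-bit source size */
    mode |= DMA_MODE_SIZE_WORD << DMA_MODE_DSIZE_OFF; /* 16-bit destination size */
    mode |= DMA_MODE_SRC_SA_BIT;                      /* source keeps a static address (device register) */
    /* destination SA bit left clear: destination address increments (memory) */

    return mode; /* 0x01 | (0x02 << 1) | (0x02 << 5) == 0x45 */
}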
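Similarly, the dual-address DMA_MODE_* numbers are nothing more than combinations of the four mode bits that set_dma_mode() tests. The compile-time checks below are an illustration using C11 _Static_assert (the header itself contains no such checks), confirming that decomposition from the values defined above.

/* Illustration only: DMA_MODE_* values decompose into the four mode bits. */
#include <asm/dma.h>

#ifndef CONFIG_M5272
_Static_assert(DMA_MODE_WRITE == DMA_MODE_WRITE_BIT, "write = write bit");
_Static_assert(DMA_MODE_READ_WORD == DMA_MODE_WORD_BIT, "read word = word bit");
_Static_assert(DMA_MODE_WRITE_LONG == (DMA_MODE_WRITE_BIT | DMA_MODE_LONG_BIT), "write long");
_Static_assert(DMA_MODE_READ_WORD_SINGLE == (DMA_MODE_WORD_BIT | DMA_MODE_SINGLE_BIT), "read word single");
#endif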