author		Greg Ungerer <gerg@uclinux.org>	2009-06-12 01:11:17 -0400
committer	Greg Ungerer <gerg@uclinux.org>	2009-09-15 19:43:38 -0400
commit		cba89e231f97139dc6013030210624efd1087f68 (patch)
tree		51a1c248e3188f171526b1df88f86b3dc7cd64f0 /arch
parent		6192c1ea0ac5806592c5c9cc2b2b94b0298df02b (diff)
m68k: merge mmu and non-mmu versions of dma.h
The non-mmu version of dma.h contains a lot of ColdFire specific DMA
support, but also all of the base m68k support. So use the non-mmu
version of dma.h for all.
Signed-off-by: Greg Ungerer <gerg@uclinux.org>
Diffstat (limited to 'arch')

 -rw-r--r--	arch/m68k/include/asm/dma.h	492
 -rw-r--r--	arch/m68k/include/asm/dma_mm.h	 16
 -rw-r--r--	arch/m68k/include/asm/dma_no.h	494

 3 files changed, 489 insertions(+), 513 deletions(-)
diff --git a/arch/m68k/include/asm/dma.h b/arch/m68k/include/asm/dma.h
index b82e660cf1c2..3b85f6e6c098 100644
--- a/arch/m68k/include/asm/dma.h
+++ b/arch/m68k/include/asm/dma.h
@@ -1,5 +1,491 @@
-#ifdef __uClinux__
-#include "dma_no.h"
+#ifndef _M68K_DMA_H
+#define _M68K_DMA_H 1
+
+#ifdef CONFIG_COLDFIRE
+/*
+ * ColdFire DMA Model:
+ * ColdFire DMA supports two forms of DMA: Single and Dual address. Single
+ * address mode emits a source address, and expects that the device will either
+ * pick up the data (DMA READ) or source data (DMA WRITE). This implies that
+ * the device will place data on the correct byte(s) of the data bus, as the
+ * memory transactions are always 32 bits. This implies that only 32 bit
+ * devices will find single mode transfers useful. Dual address DMA mode
+ * performs two cycles: source read and destination write. ColdFire will
+ * align the data so that the device will always get the correct bytes, thus
+ * is useful for 8 and 16 bit devices. This is the mode that is supported
+ * below.
+ *
+ * AUG/22/2000 : added support for 32-bit Dual-Address-Mode (K) 2000
+ * Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
+ *
+ * AUG/25/2000 : addad support for 8, 16 and 32-bit Single-Address-Mode (K)2000
+ * Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
+ *
+ * APR/18/2002 : added proper support for MCF5272 DMA controller.
+ * Arthur Shipkowski (art@videon-central.com)
+ */
+
+#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
+#include <asm/mcfdma.h>
+
+/*
+ * Set number of channels of DMA on ColdFire for different implementations.
+ */
+#if defined(CONFIG_M5249) || defined(CONFIG_M5307) || defined(CONFIG_M5407) || \
+	defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x)
+#define MAX_M68K_DMA_CHANNELS 4
+#elif defined(CONFIG_M5272)
+#define MAX_M68K_DMA_CHANNELS 1
+#elif defined(CONFIG_M532x)
+#define MAX_M68K_DMA_CHANNELS 0
 #else
-#include "dma_mm.h"
+#define MAX_M68K_DMA_CHANNELS 2
 #endif
+
+extern unsigned int dma_base_addr[MAX_M68K_DMA_CHANNELS];
+extern unsigned int dma_device_address[MAX_M68K_DMA_CHANNELS];
+
+#if !defined(CONFIG_M5272)
+#define DMA_MODE_WRITE_BIT 0x01   /* Memory/IO to IO/Memory select */
+#define DMA_MODE_WORD_BIT  0x02   /* 8 or 16 bit transfers */
+#define DMA_MODE_LONG_BIT  0x04   /* or 32 bit transfers */
+#define DMA_MODE_SINGLE_BIT 0x08  /* single-address-mode */
+
+/* I/O to memory, 8 bits, mode */
+#define DMA_MODE_READ 0
+/* memory to I/O, 8 bits, mode */
+#define DMA_MODE_WRITE 1
+/* I/O to memory, 16 bits, mode */
+#define DMA_MODE_READ_WORD 2
+/* memory to I/O, 16 bits, mode */
+#define DMA_MODE_WRITE_WORD 3
+/* I/O to memory, 32 bits, mode */
+#define DMA_MODE_READ_LONG 4
+/* memory to I/O, 32 bits, mode */
+#define DMA_MODE_WRITE_LONG 5
+/* I/O to memory, 8 bits, single-address-mode */
+#define DMA_MODE_READ_SINGLE 8
+/* memory to I/O, 8 bits, single-address-mode */
+#define DMA_MODE_WRITE_SINGLE 9
+/* I/O to memory, 16 bits, single-address-mode */
+#define DMA_MODE_READ_WORD_SINGLE 10
+/* memory to I/O, 16 bits, single-address-mode */
+#define DMA_MODE_WRITE_WORD_SINGLE 11
+/* I/O to memory, 32 bits, single-address-mode */
+#define DMA_MODE_READ_LONG_SINGLE 12
+/* memory to I/O, 32 bits, single-address-mode */
+#define DMA_MODE_WRITE_LONG_SINGLE 13
+
+#else /* CONFIG_M5272 is defined */
+
+/* Source static-address mode */
+#define DMA_MODE_SRC_SA_BIT 0x01
+/* Two bits to select between all four modes */
+#define DMA_MODE_SSIZE_MASK 0x06
+/* Offset to shift bits in */
+#define DMA_MODE_SSIZE_OFF 0x01
+/* Destination static-address mode */
+#define DMA_MODE_DES_SA_BIT 0x10
+/* Two bits to select between all four modes */
+#define DMA_MODE_DSIZE_MASK 0x60
+/* Offset to shift bits in */
+#define DMA_MODE_DSIZE_OFF 0x05
+/* Size modifiers */
+#define DMA_MODE_SIZE_LONG 0x00
+#define DMA_MODE_SIZE_BYTE 0x01
+#define DMA_MODE_SIZE_WORD 0x02
+#define DMA_MODE_SIZE_LINE 0x03
+
+/*
+ * Aliases to help speed quick ports; these may be suboptimal, however. They
+ * do not include the SINGLE mode modifiers since the MCF5272 does not have a
+ * mode where the device is in control of its addressing.
+ */
+
+/* I/O to memory, 8 bits, mode */
+#define DMA_MODE_READ ((DMA_MODE_SIZE_BYTE << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_BYTE << DMA_MODE_SSIZE_OFF) | DMA_SRC_SA_BIT)
+/* memory to I/O, 8 bits, mode */
+#define DMA_MODE_WRITE ((DMA_MODE_SIZE_BYTE << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_BYTE << DMA_MODE_SSIZE_OFF) | DMA_DES_SA_BIT)
+/* I/O to memory, 16 bits, mode */
+#define DMA_MODE_READ_WORD ((DMA_MODE_SIZE_WORD << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_WORD << DMA_MODE_SSIZE_OFF) | DMA_SRC_SA_BIT)
+/* memory to I/O, 16 bits, mode */
+#define DMA_MODE_WRITE_WORD ((DMA_MODE_SIZE_WORD << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_WORD << DMA_MODE_SSIZE_OFF) | DMA_DES_SA_BIT)
+/* I/O to memory, 32 bits, mode */
+#define DMA_MODE_READ_LONG ((DMA_MODE_SIZE_LONG << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_LONG << DMA_MODE_SSIZE_OFF) | DMA_SRC_SA_BIT)
+/* memory to I/O, 32 bits, mode */
+#define DMA_MODE_WRITE_LONG ((DMA_MODE_SIZE_LONG << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_LONG << DMA_MODE_SSIZE_OFF) | DMA_DES_SA_BIT)
+
+#endif /* !defined(CONFIG_M5272) */
+
+#if !defined(CONFIG_M5272)
+/* enable/disable a specific DMA channel */
+static __inline__ void enable_dma(unsigned int dmanr)
+{
+	volatile unsigned short *dmawp;
+
+#ifdef DMA_DEBUG
+	printk("enable_dma(dmanr=%d)\n", dmanr);
+#endif
+
+	dmawp = (unsigned short *) dma_base_addr[dmanr];
+	dmawp[MCFDMA_DCR] |= MCFDMA_DCR_EEXT;
+}
+
+static __inline__ void disable_dma(unsigned int dmanr)
+{
+	volatile unsigned short *dmawp;
+	volatile unsigned char *dmapb;
+
+#ifdef DMA_DEBUG
+	printk("disable_dma(dmanr=%d)\n", dmanr);
+#endif
+
+	dmawp = (unsigned short *) dma_base_addr[dmanr];
+	dmapb = (unsigned char *) dma_base_addr[dmanr];
+
+	/* Turn off external requests, and stop any DMA in progress */
+	dmawp[MCFDMA_DCR] &= ~MCFDMA_DCR_EEXT;
+	dmapb[MCFDMA_DSR] = MCFDMA_DSR_DONE;
+}
+
+/*
+ * Clear the 'DMA Pointer Flip Flop'.
+ * Write 0 for LSB/MSB, 1 for MSB/LSB access.
+ * Use this once to initialize the FF to a known state.
+ * After that, keep track of it. :-)
+ * --- In order to do that, the DMA routines below should ---
+ * --- only be used while interrupts are disabled! ---
+ *
+ * This is a NOP for ColdFire. Provide a stub for compatibility.
+ */
+static __inline__ void clear_dma_ff(unsigned int dmanr)
+{
+}
+
+/* set mode (above) for a specific DMA channel */
+static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
+{
+
+	volatile unsigned char *dmabp;
+	volatile unsigned short *dmawp;
+
+#ifdef DMA_DEBUG
+	printk("set_dma_mode(dmanr=%d,mode=%d)\n", dmanr, mode);
+#endif
+
+	dmabp = (unsigned char *) dma_base_addr[dmanr];
+	dmawp = (unsigned short *) dma_base_addr[dmanr];
+
+	// Clear config errors
+	dmabp[MCFDMA_DSR] = MCFDMA_DSR_DONE;
+
+	// Set command register
+	dmawp[MCFDMA_DCR] =
+		MCFDMA_DCR_INT |	// Enable completion irq
+		MCFDMA_DCR_CS |		// Force one xfer per request
+		MCFDMA_DCR_AA |		// Enable auto alignment
+		// single-address-mode
+		((mode & DMA_MODE_SINGLE_BIT) ? MCFDMA_DCR_SAA : 0) |
+		// sets s_rw (-> r/w) high if Memory to I/0
+		((mode & DMA_MODE_WRITE_BIT) ? MCFDMA_DCR_S_RW : 0) |
+		// Memory to I/O or I/O to Memory
+		((mode & DMA_MODE_WRITE_BIT) ? MCFDMA_DCR_SINC : MCFDMA_DCR_DINC) |
+		// 32 bit, 16 bit or 8 bit transfers
+		((mode & DMA_MODE_WORD_BIT) ? MCFDMA_DCR_SSIZE_WORD :
+			((mode & DMA_MODE_LONG_BIT) ? MCFDMA_DCR_SSIZE_LONG :
+				MCFDMA_DCR_SSIZE_BYTE)) |
+		((mode & DMA_MODE_WORD_BIT) ? MCFDMA_DCR_DSIZE_WORD :
+			((mode & DMA_MODE_LONG_BIT) ? MCFDMA_DCR_DSIZE_LONG :
+				MCFDMA_DCR_DSIZE_BYTE));
+
+#ifdef DEBUG_DMA
+	printk("%s(%d): dmanr=%d DSR[%x]=%x DCR[%x]=%x\n", __FILE__, __LINE__,
+		dmanr, (int) &dmabp[MCFDMA_DSR], dmabp[MCFDMA_DSR],
+		(int) &dmawp[MCFDMA_DCR], dmawp[MCFDMA_DCR]);
+#endif
+}
+
+/* Set transfer address for specific DMA channel */
+static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
+{
+	volatile unsigned short *dmawp;
+	volatile unsigned int *dmalp;
+
+#ifdef DMA_DEBUG
+	printk("set_dma_addr(dmanr=%d,a=%x)\n", dmanr, a);
+#endif
+
+	dmawp = (unsigned short *) dma_base_addr[dmanr];
+	dmalp = (unsigned int *) dma_base_addr[dmanr];
+
+	// Determine which address registers are used for memory/device accesses
+	if (dmawp[MCFDMA_DCR] & MCFDMA_DCR_SINC) {
+		// Source incrementing, must be memory
+		dmalp[MCFDMA_SAR] = a;
+		// Set dest address, must be device
+		dmalp[MCFDMA_DAR] = dma_device_address[dmanr];
+	} else {
+		// Destination incrementing, must be memory
+		dmalp[MCFDMA_DAR] = a;
+		// Set source address, must be device
+		dmalp[MCFDMA_SAR] = dma_device_address[dmanr];
+	}
+
+#ifdef DEBUG_DMA
+	printk("%s(%d): dmanr=%d DCR[%x]=%x SAR[%x]=%08x DAR[%x]=%08x\n",
+		__FILE__, __LINE__, dmanr, (int) &dmawp[MCFDMA_DCR], dmawp[MCFDMA_DCR],
+		(int) &dmalp[MCFDMA_SAR], dmalp[MCFDMA_SAR],
+		(int) &dmalp[MCFDMA_DAR], dmalp[MCFDMA_DAR]);
+#endif
+}
+
+/*
+ * Specific for Coldfire - sets device address.
+ * Should be called after the mode set call, and before set DMA address.
+ */
+static __inline__ void set_dma_device_addr(unsigned int dmanr, unsigned int a)
+{
+#ifdef DMA_DEBUG
+	printk("set_dma_device_addr(dmanr=%d,a=%x)\n", dmanr, a);
+#endif
+
+	dma_device_address[dmanr] = a;
+}
+
+/*
+ * NOTE 2: "count" represents _bytes_.
+ */
+static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
+{
+	volatile unsigned short *dmawp;
+
+#ifdef DMA_DEBUG
+	printk("set_dma_count(dmanr=%d,count=%d)\n", dmanr, count);
+#endif
+
+	dmawp = (unsigned short *) dma_base_addr[dmanr];
+	dmawp[MCFDMA_BCR] = (unsigned short)count;
+}
+
+/*
+ * Get DMA residue count. After a DMA transfer, this
+ * should return zero. Reading this while a DMA transfer is
+ * still in progress will return unpredictable results.
+ * Otherwise, it returns the number of _bytes_ left to transfer.
+ */
+static __inline__ int get_dma_residue(unsigned int dmanr)
+{
+	volatile unsigned short *dmawp;
+	unsigned short count;
+
+#ifdef DMA_DEBUG
+	printk("get_dma_residue(dmanr=%d)\n", dmanr);
+#endif
+
+	dmawp = (unsigned short *) dma_base_addr[dmanr];
+	count = dmawp[MCFDMA_BCR];
+	return((int) count);
+}
+#else /* CONFIG_M5272 is defined */
+
+/*
+ * The MCF5272 DMA controller is very different than the controller defined above
+ * in terms of register mapping. For instance, with the exception of the 16-bit
+ * interrupt register (IRQ#85, for reference), all of the registers are 32-bit.
+ *
+ * The big difference, however, is the lack of device-requested DMA. All modes
+ * are dual address transfer, and there is no 'device' setup or direction bit.
+ * You can DMA between a device and memory, between memory and memory, or even between
+ * two devices directly, with any combination of incrementing and non-incrementing
+ * addresses you choose. This puts a crimp in distinguishing between the 'device
+ * address' set up by set_dma_device_addr.
+ *
+ * Therefore, there are two options. One is to use set_dma_addr and set_dma_device_addr,
+ * which will act exactly as above in -- it will look to see if the source is set to
+ * autoincrement, and if so it will make the source use the set_dma_addr value and the
+ * destination the set_dma_device_addr value. Otherwise the source will be set to the
+ * set_dma_device_addr value and the destination will get the set_dma_addr value.
+ *
+ * The other is to use the provided set_dma_src_addr and set_dma_dest_addr functions
+ * and make it explicit. Depending on what you're doing, one of these two should work
+ * for you, but don't mix them in the same transfer setup.
+ */
+
+/* enable/disable a specific DMA channel */
+static __inline__ void enable_dma(unsigned int dmanr)
+{
+	volatile unsigned int *dmalp;
+
+#ifdef DMA_DEBUG
+	printk("enable_dma(dmanr=%d)\n", dmanr);
+#endif
+
+	dmalp = (unsigned int *) dma_base_addr[dmanr];
+	dmalp[MCFDMA_DMR] |= MCFDMA_DMR_EN;
+}
+
+static __inline__ void disable_dma(unsigned int dmanr)
+{
+	volatile unsigned int *dmalp;
+
+#ifdef DMA_DEBUG
+	printk("disable_dma(dmanr=%d)\n", dmanr);
+#endif
+
+	dmalp = (unsigned int *) dma_base_addr[dmanr];
+
+	/* Turn off external requests, and stop any DMA in progress */
+	dmalp[MCFDMA_DMR] &= ~MCFDMA_DMR_EN;
+	dmalp[MCFDMA_DMR] |= MCFDMA_DMR_RESET;
+}
+
+/*
+ * Clear the 'DMA Pointer Flip Flop'.
+ * Write 0 for LSB/MSB, 1 for MSB/LSB access.
+ * Use this once to initialize the FF to a known state.
+ * After that, keep track of it. :-)
+ * --- In order to do that, the DMA routines below should ---
+ * --- only be used while interrupts are disabled! ---
+ *
+ * This is a NOP for ColdFire. Provide a stub for compatibility.
+ */
+static __inline__ void clear_dma_ff(unsigned int dmanr)
+{
+}
+
+/* set mode (above) for a specific DMA channel */
+static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
+{
+
+	volatile unsigned int *dmalp;
+	volatile unsigned short *dmawp;
+
+#ifdef DMA_DEBUG
+	printk("set_dma_mode(dmanr=%d,mode=%d)\n", dmanr, mode);
+#endif
+	dmalp = (unsigned int *) dma_base_addr[dmanr];
+	dmawp = (unsigned short *) dma_base_addr[dmanr];
+
+	// Clear config errors
+	dmalp[MCFDMA_DMR] |= MCFDMA_DMR_RESET;
+
+	// Set command register
+	dmalp[MCFDMA_DMR] =
+		MCFDMA_DMR_RQM_DUAL |	// Mandatory Request Mode setting
+		MCFDMA_DMR_DSTT_SD |	// Set up addressing types; set to supervisor-data.
+		MCFDMA_DMR_SRCT_SD |	// Set up addressing types; set to supervisor-data.
+		// source static-address-mode
+		((mode & DMA_MODE_SRC_SA_BIT) ? MCFDMA_DMR_SRCM_SA : MCFDMA_DMR_SRCM_IA) |
+		// dest static-address-mode
+		((mode & DMA_MODE_DES_SA_BIT) ? MCFDMA_DMR_DSTM_SA : MCFDMA_DMR_DSTM_IA) |
+		// burst, 32 bit, 16 bit or 8 bit transfers are separately configurable on the MCF5272
+		(((mode & DMA_MODE_SSIZE_MASK) >> DMA_MODE_SSIZE_OFF) << MCFDMA_DMR_DSTS_OFF) |
+		(((mode & DMA_MODE_SSIZE_MASK) >> DMA_MODE_SSIZE_OFF) << MCFDMA_DMR_SRCS_OFF);
+
+	dmawp[MCFDMA_DIR] |= MCFDMA_DIR_ASCEN;	/* Enable completion interrupts */
+
+#ifdef DEBUG_DMA
+	printk("%s(%d): dmanr=%d DMR[%x]=%x DIR[%x]=%x\n", __FILE__, __LINE__,
+		dmanr, (int) &dmalp[MCFDMA_DMR], dmabp[MCFDMA_DMR],
+		(int) &dmawp[MCFDMA_DIR], dmawp[MCFDMA_DIR]);
+#endif
+}
+
+/* Set transfer address for specific DMA channel */
+static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
+{
+	volatile unsigned int *dmalp;
+
+#ifdef DMA_DEBUG
+	printk("set_dma_addr(dmanr=%d,a=%x)\n", dmanr, a);
+#endif
+
+	dmalp = (unsigned int *) dma_base_addr[dmanr];
+
+	// Determine which address registers are used for memory/device accesses
+	if (dmalp[MCFDMA_DMR] & MCFDMA_DMR_SRCM) {
+		// Source incrementing, must be memory
+		dmalp[MCFDMA_DSAR] = a;
+		// Set dest address, must be device
+		dmalp[MCFDMA_DDAR] = dma_device_address[dmanr];
+	} else {
+		// Destination incrementing, must be memory
+		dmalp[MCFDMA_DDAR] = a;
+		// Set source address, must be device
+		dmalp[MCFDMA_DSAR] = dma_device_address[dmanr];
+	}
+
+#ifdef DEBUG_DMA
+	printk("%s(%d): dmanr=%d DMR[%x]=%x SAR[%x]=%08x DAR[%x]=%08x\n",
+		__FILE__, __LINE__, dmanr, (int) &dmawp[MCFDMA_DMR], dmawp[MCFDMA_DMR],
+		(int) &dmalp[MCFDMA_DSAR], dmalp[MCFDMA_DSAR],
+		(int) &dmalp[MCFDMA_DDAR], dmalp[MCFDMA_DDAR]);
+#endif
+}
+
+/*
+ * Specific for Coldfire - sets device address.
+ * Should be called after the mode set call, and before set DMA address.
+ */
+static __inline__ void set_dma_device_addr(unsigned int dmanr, unsigned int a)
+{
+#ifdef DMA_DEBUG
+	printk("set_dma_device_addr(dmanr=%d,a=%x)\n", dmanr, a);
+#endif
+
+	dma_device_address[dmanr] = a;
+}
+
+/*
+ * NOTE 2: "count" represents _bytes_.
+ *
+ * NOTE 3: While a 32-bit register, "count" is only a maximum 24-bit value.
+ */
+static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
+{
+	volatile unsigned int *dmalp;
+
+#ifdef DMA_DEBUG
+	printk("set_dma_count(dmanr=%d,count=%d)\n", dmanr, count);
+#endif
+
+	dmalp = (unsigned int *) dma_base_addr[dmanr];
+	dmalp[MCFDMA_DBCR] = count;
+}
+
+/*
+ * Get DMA residue count. After a DMA transfer, this
+ * should return zero. Reading this while a DMA transfer is
+ * still in progress will return unpredictable results.
+ * Otherwise, it returns the number of _bytes_ left to transfer.
+ */
+static __inline__ int get_dma_residue(unsigned int dmanr)
+{
+	volatile unsigned int *dmalp;
+	unsigned int count;
+
+#ifdef DMA_DEBUG
+	printk("get_dma_residue(dmanr=%d)\n", dmanr);
+#endif
+
+	dmalp = (unsigned int *) dma_base_addr[dmanr];
+	count = dmalp[MCFDMA_DBCR];
+	return(count);
+}
+
+#endif /* !defined(CONFIG_M5272) */
+#endif /* CONFIG_COLDFIRE */
+
+/* it's useless on the m68k, but unfortunately needed by the new
+   bootmem allocator (but this should do it for this) */
+#define MAX_DMA_ADDRESS PAGE_OFFSET
+
+#define MAX_DMA_CHANNELS 8
+
+extern int request_dma(unsigned int dmanr, const char * device_id);	/* reserve a DMA channel */
+extern void free_dma(unsigned int dmanr);	/* release it again */
+
+#define isa_dma_bridge_buggy	(0)
+
+#endif /* _M68K_DMA_H */
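
For orientation only — a minimal sketch, not part of the commit, of how a ColdFire peripheral driver might drive the dual-address API declared in the merged header above. The channel number, device FIFO address, driver name and polling loop are hypothetical placeholders; a real driver would normally use the completion interrupt that set_dma_mode() enables rather than busy-waiting.

```c
#include <asm/dma.h>

/* Hypothetical example values -- not taken from the commit. */
#define EXAMPLE_DMA_CHANNEL	1
#define EXAMPLE_DEVICE_FIFO	0x10000844	/* placeholder device register address */

static int example_dma_write(void *buf, unsigned int len)
{
	int err;

	/* Reserve the channel; request_dma()/free_dma() are declared in dma.h. */
	err = request_dma(EXAMPLE_DMA_CHANNEL, "example-dev");
	if (err)
		return err;

	/* Memory to I/O, 16-bit, dual-address mode. The device address is set
	   after the mode and before the memory address, as the header asks. */
	set_dma_mode(EXAMPLE_DMA_CHANNEL, DMA_MODE_WRITE_WORD);
	set_dma_device_addr(EXAMPLE_DMA_CHANNEL, EXAMPLE_DEVICE_FIFO);
	set_dma_addr(EXAMPLE_DMA_CHANNEL, (unsigned int) buf);
	set_dma_count(EXAMPLE_DMA_CHANNEL, len);

	enable_dma(EXAMPLE_DMA_CHANNEL);

	/* Crude busy-wait on the residue count, for illustration only. */
	while (get_dma_residue(EXAMPLE_DMA_CHANNEL) != 0)
		;

	disable_dma(EXAMPLE_DMA_CHANNEL);
	free_dma(EXAMPLE_DMA_CHANNEL);
	return 0;
}
```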
diff --git a/arch/m68k/include/asm/dma_mm.h b/arch/m68k/include/asm/dma_mm.h
deleted file mode 100644
index 4240fbc946f8..000000000000
--- a/arch/m68k/include/asm/dma_mm.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef _M68K_DMA_H
-#define _M68K_DMA_H 1
-
-
-/* it's useless on the m68k, but unfortunately needed by the new
-   bootmem allocator (but this should do it for this) */
-#define MAX_DMA_ADDRESS PAGE_OFFSET
-
-#define MAX_DMA_CHANNELS 8
-
-extern int request_dma(unsigned int dmanr, const char * device_id);	/* reserve a DMA channel */
-extern void free_dma(unsigned int dmanr);	/* release it again */
-
-#define isa_dma_bridge_buggy	(0)
-
-#endif /* _M68K_DMA_H */
diff --git a/arch/m68k/include/asm/dma_no.h b/arch/m68k/include/asm/dma_no.h
deleted file mode 100644
index 939a02056217..000000000000
--- a/arch/m68k/include/asm/dma_no.h
+++ /dev/null
@@ -1,494 +0,0 @@
-#ifndef _M68K_DMA_H
-#define _M68K_DMA_H 1
-
-//#define DMA_DEBUG 1
-
-
-#ifdef CONFIG_COLDFIRE
-/*
- * ColdFire DMA Model:
- * ColdFire DMA supports two forms of DMA: Single and Dual address. Single
- * address mode emits a source address, and expects that the device will either
- * pick up the data (DMA READ) or source data (DMA WRITE). This implies that
- * the device will place data on the correct byte(s) of the data bus, as the
- * memory transactions are always 32 bits. This implies that only 32 bit
- * devices will find single mode transfers useful. Dual address DMA mode
- * performs two cycles: source read and destination write. ColdFire will
- * align the data so that the device will always get the correct bytes, thus
- * is useful for 8 and 16 bit devices. This is the mode that is supported
- * below.
- *
- * AUG/22/2000 : added support for 32-bit Dual-Address-Mode (K) 2000
- * Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
- *
- * AUG/25/2000 : addad support for 8, 16 and 32-bit Single-Address-Mode (K)2000
- * Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
- *
- * APR/18/2002 : added proper support for MCF5272 DMA controller.
- * Arthur Shipkowski (art@videon-central.com)
- */
-
-#include <asm/coldfire.h>
-#include <asm/mcfsim.h>
-#include <asm/mcfdma.h>
-
-/*
- * Set number of channels of DMA on ColdFire for different implementations.
- */
-#if defined(CONFIG_M5249) || defined(CONFIG_M5307) || defined(CONFIG_M5407) || \
-	defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x)
-#define MAX_M68K_DMA_CHANNELS 4
-#elif defined(CONFIG_M5272)
-#define MAX_M68K_DMA_CHANNELS 1
-#elif defined(CONFIG_M532x)
-#define MAX_M68K_DMA_CHANNELS 0
-#else
-#define MAX_M68K_DMA_CHANNELS 2
-#endif
-
-extern unsigned int dma_base_addr[MAX_M68K_DMA_CHANNELS];
-extern unsigned int dma_device_address[MAX_M68K_DMA_CHANNELS];
-
-#if !defined(CONFIG_M5272)
-#define DMA_MODE_WRITE_BIT 0x01   /* Memory/IO to IO/Memory select */
-#define DMA_MODE_WORD_BIT  0x02   /* 8 or 16 bit transfers */
-#define DMA_MODE_LONG_BIT  0x04   /* or 32 bit transfers */
-#define DMA_MODE_SINGLE_BIT 0x08  /* single-address-mode */
-
-/* I/O to memory, 8 bits, mode */
-#define DMA_MODE_READ 0
-/* memory to I/O, 8 bits, mode */
-#define DMA_MODE_WRITE 1
-/* I/O to memory, 16 bits, mode */
-#define DMA_MODE_READ_WORD 2
-/* memory to I/O, 16 bits, mode */
-#define DMA_MODE_WRITE_WORD 3
-/* I/O to memory, 32 bits, mode */
-#define DMA_MODE_READ_LONG 4
-/* memory to I/O, 32 bits, mode */
-#define DMA_MODE_WRITE_LONG 5
-/* I/O to memory, 8 bits, single-address-mode */
-#define DMA_MODE_READ_SINGLE 8
-/* memory to I/O, 8 bits, single-address-mode */
-#define DMA_MODE_WRITE_SINGLE 9
-/* I/O to memory, 16 bits, single-address-mode */
-#define DMA_MODE_READ_WORD_SINGLE 10
-/* memory to I/O, 16 bits, single-address-mode */
-#define DMA_MODE_WRITE_WORD_SINGLE 11
-/* I/O to memory, 32 bits, single-address-mode */
-#define DMA_MODE_READ_LONG_SINGLE 12
-/* memory to I/O, 32 bits, single-address-mode */
-#define DMA_MODE_WRITE_LONG_SINGLE 13
-
-#else /* CONFIG_M5272 is defined */
-
-/* Source static-address mode */
-#define DMA_MODE_SRC_SA_BIT 0x01
-/* Two bits to select between all four modes */
-#define DMA_MODE_SSIZE_MASK 0x06
-/* Offset to shift bits in */
-#define DMA_MODE_SSIZE_OFF 0x01
-/* Destination static-address mode */
-#define DMA_MODE_DES_SA_BIT 0x10
-/* Two bits to select between all four modes */
-#define DMA_MODE_DSIZE_MASK 0x60
-/* Offset to shift bits in */
-#define DMA_MODE_DSIZE_OFF 0x05
-/* Size modifiers */
-#define DMA_MODE_SIZE_LONG 0x00
-#define DMA_MODE_SIZE_BYTE 0x01
-#define DMA_MODE_SIZE_WORD 0x02
-#define DMA_MODE_SIZE_LINE 0x03
-
-/*
- * Aliases to help speed quick ports; these may be suboptimal, however. They
- * do not include the SINGLE mode modifiers since the MCF5272 does not have a
- * mode where the device is in control of its addressing.
- */
-
-/* I/O to memory, 8 bits, mode */
-#define DMA_MODE_READ ((DMA_MODE_SIZE_BYTE << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_BYTE << DMA_MODE_SSIZE_OFF) | DMA_SRC_SA_BIT)
-/* memory to I/O, 8 bits, mode */
-#define DMA_MODE_WRITE ((DMA_MODE_SIZE_BYTE << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_BYTE << DMA_MODE_SSIZE_OFF) | DMA_DES_SA_BIT)
-/* I/O to memory, 16 bits, mode */
-#define DMA_MODE_READ_WORD ((DMA_MODE_SIZE_WORD << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_WORD << DMA_MODE_SSIZE_OFF) | DMA_SRC_SA_BIT)
-/* memory to I/O, 16 bits, mode */
-#define DMA_MODE_WRITE_WORD ((DMA_MODE_SIZE_WORD << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_WORD << DMA_MODE_SSIZE_OFF) | DMA_DES_SA_BIT)
-/* I/O to memory, 32 bits, mode */
-#define DMA_MODE_READ_LONG ((DMA_MODE_SIZE_LONG << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_LONG << DMA_MODE_SSIZE_OFF) | DMA_SRC_SA_BIT)
-/* memory to I/O, 32 bits, mode */
-#define DMA_MODE_WRITE_LONG ((DMA_MODE_SIZE_LONG << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_LONG << DMA_MODE_SSIZE_OFF) | DMA_DES_SA_BIT)
-
-#endif /* !defined(CONFIG_M5272) */
-
-#if !defined(CONFIG_M5272)
-/* enable/disable a specific DMA channel */
-static __inline__ void enable_dma(unsigned int dmanr)
-{
-	volatile unsigned short *dmawp;
-
-#ifdef DMA_DEBUG
-	printk("enable_dma(dmanr=%d)\n", dmanr);
-#endif
-
-	dmawp = (unsigned short *) dma_base_addr[dmanr];
-	dmawp[MCFDMA_DCR] |= MCFDMA_DCR_EEXT;
-}
-
-static __inline__ void disable_dma(unsigned int dmanr)
-{
-	volatile unsigned short *dmawp;
-	volatile unsigned char *dmapb;
-
-#ifdef DMA_DEBUG
-	printk("disable_dma(dmanr=%d)\n", dmanr);
-#endif
-
-	dmawp = (unsigned short *) dma_base_addr[dmanr];
-	dmapb = (unsigned char *) dma_base_addr[dmanr];
-
-	/* Turn off external requests, and stop any DMA in progress */
-	dmawp[MCFDMA_DCR] &= ~MCFDMA_DCR_EEXT;
-	dmapb[MCFDMA_DSR] = MCFDMA_DSR_DONE;
-}
-
-/*
- * Clear the 'DMA Pointer Flip Flop'.
- * Write 0 for LSB/MSB, 1 for MSB/LSB access.
- * Use this once to initialize the FF to a known state.
- * After that, keep track of it. :-)
- * --- In order to do that, the DMA routines below should ---
- * --- only be used while interrupts are disabled! ---
- *
- * This is a NOP for ColdFire. Provide a stub for compatibility.
- */
-static __inline__ void clear_dma_ff(unsigned int dmanr)
-{
-}
-
-/* set mode (above) for a specific DMA channel */
-static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
-{
-
-	volatile unsigned char *dmabp;
-	volatile unsigned short *dmawp;
-
-#ifdef DMA_DEBUG
-	printk("set_dma_mode(dmanr=%d,mode=%d)\n", dmanr, mode);
-#endif
-
-	dmabp = (unsigned char *) dma_base_addr[dmanr];
-	dmawp = (unsigned short *) dma_base_addr[dmanr];
-
-	// Clear config errors
-	dmabp[MCFDMA_DSR] = MCFDMA_DSR_DONE;
-
-	// Set command register
-	dmawp[MCFDMA_DCR] =
-		MCFDMA_DCR_INT |	// Enable completion irq
-		MCFDMA_DCR_CS |		// Force one xfer per request
-		MCFDMA_DCR_AA |		// Enable auto alignment
-		// single-address-mode
-		((mode & DMA_MODE_SINGLE_BIT) ? MCFDMA_DCR_SAA : 0) |
-		// sets s_rw (-> r/w) high if Memory to I/0
-		((mode & DMA_MODE_WRITE_BIT) ? MCFDMA_DCR_S_RW : 0) |
-		// Memory to I/O or I/O to Memory
-		((mode & DMA_MODE_WRITE_BIT) ? MCFDMA_DCR_SINC : MCFDMA_DCR_DINC) |
-		// 32 bit, 16 bit or 8 bit transfers
-		((mode & DMA_MODE_WORD_BIT) ? MCFDMA_DCR_SSIZE_WORD :
-			((mode & DMA_MODE_LONG_BIT) ? MCFDMA_DCR_SSIZE_LONG :
-				MCFDMA_DCR_SSIZE_BYTE)) |
-		((mode & DMA_MODE_WORD_BIT) ? MCFDMA_DCR_DSIZE_WORD :
-			((mode & DMA_MODE_LONG_BIT) ? MCFDMA_DCR_DSIZE_LONG :
-				MCFDMA_DCR_DSIZE_BYTE));
-
-#ifdef DEBUG_DMA
-	printk("%s(%d): dmanr=%d DSR[%x]=%x DCR[%x]=%x\n", __FILE__, __LINE__,
-		dmanr, (int) &dmabp[MCFDMA_DSR], dmabp[MCFDMA_DSR],
-		(int) &dmawp[MCFDMA_DCR], dmawp[MCFDMA_DCR]);
-#endif
-}
-
-/* Set transfer address for specific DMA channel */
-static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
-{
-	volatile unsigned short *dmawp;
-	volatile unsigned int *dmalp;
-
-#ifdef DMA_DEBUG
-	printk("set_dma_addr(dmanr=%d,a=%x)\n", dmanr, a);
-#endif
-
-	dmawp = (unsigned short *) dma_base_addr[dmanr];
-	dmalp = (unsigned int *) dma_base_addr[dmanr];
-
-	// Determine which address registers are used for memory/device accesses
-	if (dmawp[MCFDMA_DCR] & MCFDMA_DCR_SINC) {
-		// Source incrementing, must be memory
-		dmalp[MCFDMA_SAR] = a;
-		// Set dest address, must be device
-		dmalp[MCFDMA_DAR] = dma_device_address[dmanr];
-	} else {
-		// Destination incrementing, must be memory
-		dmalp[MCFDMA_DAR] = a;
-		// Set source address, must be device
-		dmalp[MCFDMA_SAR] = dma_device_address[dmanr];
-	}
-
-#ifdef DEBUG_DMA
-	printk("%s(%d): dmanr=%d DCR[%x]=%x SAR[%x]=%08x DAR[%x]=%08x\n",
-		__FILE__, __LINE__, dmanr, (int) &dmawp[MCFDMA_DCR], dmawp[MCFDMA_DCR],
-		(int) &dmalp[MCFDMA_SAR], dmalp[MCFDMA_SAR],
-		(int) &dmalp[MCFDMA_DAR], dmalp[MCFDMA_DAR]);
-#endif
-}
-
-/*
- * Specific for Coldfire - sets device address.
- * Should be called after the mode set call, and before set DMA address.
- */
-static __inline__ void set_dma_device_addr(unsigned int dmanr, unsigned int a)
-{
-#ifdef DMA_DEBUG
-	printk("set_dma_device_addr(dmanr=%d,a=%x)\n", dmanr, a);
-#endif
-
-	dma_device_address[dmanr] = a;
-}
-
-/*
- * NOTE 2: "count" represents _bytes_.
- */
-static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
-{
-	volatile unsigned short *dmawp;
-
-#ifdef DMA_DEBUG
-	printk("set_dma_count(dmanr=%d,count=%d)\n", dmanr, count);
-#endif
-
-	dmawp = (unsigned short *) dma_base_addr[dmanr];
-	dmawp[MCFDMA_BCR] = (unsigned short)count;
-}
-
-/*
- * Get DMA residue count. After a DMA transfer, this
- * should return zero. Reading this while a DMA transfer is
- * still in progress will return unpredictable results.
- * Otherwise, it returns the number of _bytes_ left to transfer.
- */
-static __inline__ int get_dma_residue(unsigned int dmanr)
-{
-	volatile unsigned short *dmawp;
-	unsigned short count;
-
-#ifdef DMA_DEBUG
-	printk("get_dma_residue(dmanr=%d)\n", dmanr);
-#endif
-
-	dmawp = (unsigned short *) dma_base_addr[dmanr];
-	count = dmawp[MCFDMA_BCR];
-	return((int) count);
-}
-#else /* CONFIG_M5272 is defined */
-
-/*
- * The MCF5272 DMA controller is very different than the controller defined above
- * in terms of register mapping. For instance, with the exception of the 16-bit
- * interrupt register (IRQ#85, for reference), all of the registers are 32-bit.
- *
- * The big difference, however, is the lack of device-requested DMA. All modes
- * are dual address transfer, and there is no 'device' setup or direction bit.
- * You can DMA between a device and memory, between memory and memory, or even between
- * two devices directly, with any combination of incrementing and non-incrementing
- * addresses you choose. This puts a crimp in distinguishing between the 'device
- * address' set up by set_dma_device_addr.
- *
- * Therefore, there are two options. One is to use set_dma_addr and set_dma_device_addr,
- * which will act exactly as above in -- it will look to see if the source is set to
- * autoincrement, and if so it will make the source use the set_dma_addr value and the
- * destination the set_dma_device_addr value. Otherwise the source will be set to the
- * set_dma_device_addr value and the destination will get the set_dma_addr value.
- *
- * The other is to use the provided set_dma_src_addr and set_dma_dest_addr functions
- * and make it explicit. Depending on what you're doing, one of these two should work
- * for you, but don't mix them in the same transfer setup.
- */
-
-/* enable/disable a specific DMA channel */
-static __inline__ void enable_dma(unsigned int dmanr)
-{
-	volatile unsigned int *dmalp;
-
-#ifdef DMA_DEBUG
-	printk("enable_dma(dmanr=%d)\n", dmanr);
-#endif
-
-	dmalp = (unsigned int *) dma_base_addr[dmanr];
-	dmalp[MCFDMA_DMR] |= MCFDMA_DMR_EN;
-}
-
-static __inline__ void disable_dma(unsigned int dmanr)
-{
-	volatile unsigned int *dmalp;
-
-#ifdef DMA_DEBUG
-	printk("disable_dma(dmanr=%d)\n", dmanr);
-#endif
-
-	dmalp = (unsigned int *) dma_base_addr[dmanr];
-
-	/* Turn off external requests, and stop any DMA in progress */
-	dmalp[MCFDMA_DMR] &= ~MCFDMA_DMR_EN;
-	dmalp[MCFDMA_DMR] |= MCFDMA_DMR_RESET;
-}
-
-/*
- * Clear the 'DMA Pointer Flip Flop'.
- * Write 0 for LSB/MSB, 1 for MSB/LSB access.
- * Use this once to initialize the FF to a known state.
- * After that, keep track of it. :-)
- * --- In order to do that, the DMA routines below should ---
- * --- only be used while interrupts are disabled! ---
- *
- * This is a NOP for ColdFire. Provide a stub for compatibility.
- */
-static __inline__ void clear_dma_ff(unsigned int dmanr)
-{
-}
-
-/* set mode (above) for a specific DMA channel */
-static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
-{
-
-	volatile unsigned int *dmalp;
-	volatile unsigned short *dmawp;
-
-#ifdef DMA_DEBUG
-	printk("set_dma_mode(dmanr=%d,mode=%d)\n", dmanr, mode);
-#endif
-	dmalp = (unsigned int *) dma_base_addr[dmanr];
-	dmawp = (unsigned short *) dma_base_addr[dmanr];
-
-	// Clear config errors
-	dmalp[MCFDMA_DMR] |= MCFDMA_DMR_RESET;
-
-	// Set command register
-	dmalp[MCFDMA_DMR] =
-		MCFDMA_DMR_RQM_DUAL |	// Mandatory Request Mode setting
-		MCFDMA_DMR_DSTT_SD |	// Set up addressing types; set to supervisor-data.
-		MCFDMA_DMR_SRCT_SD |	// Set up addressing types; set to supervisor-data.
-		// source static-address-mode
-		((mode & DMA_MODE_SRC_SA_BIT) ? MCFDMA_DMR_SRCM_SA : MCFDMA_DMR_SRCM_IA) |
-		// dest static-address-mode
-		((mode & DMA_MODE_DES_SA_BIT) ? MCFDMA_DMR_DSTM_SA : MCFDMA_DMR_DSTM_IA) |
-		// burst, 32 bit, 16 bit or 8 bit transfers are separately configurable on the MCF5272
-		(((mode & DMA_MODE_SSIZE_MASK) >> DMA_MODE_SSIZE_OFF) << MCFDMA_DMR_DSTS_OFF) |
-		(((mode & DMA_MODE_SSIZE_MASK) >> DMA_MODE_SSIZE_OFF) << MCFDMA_DMR_SRCS_OFF);
-
-	dmawp[MCFDMA_DIR] |= MCFDMA_DIR_ASCEN;	/* Enable completion interrupts */
-
-#ifdef DEBUG_DMA
-	printk("%s(%d): dmanr=%d DMR[%x]=%x DIR[%x]=%x\n", __FILE__, __LINE__,
-		dmanr, (int) &dmalp[MCFDMA_DMR], dmabp[MCFDMA_DMR],
-		(int) &dmawp[MCFDMA_DIR], dmawp[MCFDMA_DIR]);
-#endif
-}
-
-/* Set transfer address for specific DMA channel */
-static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
-{
-	volatile unsigned int *dmalp;
-
-#ifdef DMA_DEBUG
-	printk("set_dma_addr(dmanr=%d,a=%x)\n", dmanr, a);
-#endif
-
-	dmalp = (unsigned int *) dma_base_addr[dmanr];
-
-	// Determine which address registers are used for memory/device accesses
-	if (dmalp[MCFDMA_DMR] & MCFDMA_DMR_SRCM) {
-		// Source incrementing, must be memory
-		dmalp[MCFDMA_DSAR] = a;
-		// Set dest address, must be device
-		dmalp[MCFDMA_DDAR] = dma_device_address[dmanr];
-	} else {
-		// Destination incrementing, must be memory
-		dmalp[MCFDMA_DDAR] = a;
-		// Set source address, must be device
-		dmalp[MCFDMA_DSAR] = dma_device_address[dmanr];
-	}
-
-#ifdef DEBUG_DMA
-	printk("%s(%d): dmanr=%d DMR[%x]=%x SAR[%x]=%08x DAR[%x]=%08x\n",
-		__FILE__, __LINE__, dmanr, (int) &dmawp[MCFDMA_DMR], dmawp[MCFDMA_DMR],
-		(int) &dmalp[MCFDMA_DSAR], dmalp[MCFDMA_DSAR],
-		(int) &dmalp[MCFDMA_DDAR], dmalp[MCFDMA_DDAR]);
-#endif
-}
-
-/*
- * Specific for Coldfire - sets device address.
- * Should be called after the mode set call, and before set DMA address.
- */
-static __inline__ void set_dma_device_addr(unsigned int dmanr, unsigned int a)
-{
-#ifdef DMA_DEBUG
-	printk("set_dma_device_addr(dmanr=%d,a=%x)\n", dmanr, a);
-#endif
-
-	dma_device_address[dmanr] = a;
-}
-
-/*
- * NOTE 2: "count" represents _bytes_.
- *
- * NOTE 3: While a 32-bit register, "count" is only a maximum 24-bit value.
- */
-static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
-{
-	volatile unsigned int *dmalp;
-
-#ifdef DMA_DEBUG
-	printk("set_dma_count(dmanr=%d,count=%d)\n", dmanr, count);
-#endif
-
-	dmalp = (unsigned int *) dma_base_addr[dmanr];
-	dmalp[MCFDMA_DBCR] = count;
-}
-
-/*
- * Get DMA residue count. After a DMA transfer, this
- * should return zero. Reading this while a DMA transfer is
- * still in progress will return unpredictable results.
- * Otherwise, it returns the number of _bytes_ left to transfer.
- */
-static __inline__ int get_dma_residue(unsigned int dmanr)
-{
-	volatile unsigned int *dmalp;
-	unsigned int count;
-
-#ifdef DMA_DEBUG
-	printk("get_dma_residue(dmanr=%d)\n", dmanr);
-#endif
-
-	dmalp = (unsigned int *) dma_base_addr[dmanr];
-	count = dmalp[MCFDMA_DBCR];
-	return(count);
-}
-
-#endif /* !defined(CONFIG_M5272) */
-#endif /* CONFIG_COLDFIRE */
-
-#define MAX_DMA_CHANNELS 8
-
-/* Don't define MAX_DMA_ADDRESS; it's useless on the m68k/coldfire and any
-   occurrence should be flagged as an error. */
-/* under 2.4 it is actually needed by the new bootmem allocator */
-#define MAX_DMA_ADDRESS PAGE_OFFSET
-
-/* These are in kernel/dma.c: */
-extern int request_dma(unsigned int dmanr, const char *device_id);	/* reserve a DMA channel */
-extern void free_dma(unsigned int dmanr);	/* release it again */
-
-#endif /* _M68K_DMA_H */
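
As a rough worked check — my own example, not part of the commit — the MCF5272 mode aliases in this header compose a transfer-size field for each side of the transfer plus a static-address bit. Assuming the bit layout defined above (word size 0x02, source-size offset 0x01, destination-size offset 0x05, destination static-address bit 0x10), the memory-to-I/O 16-bit mode works out to 0x54:

```c
/* Worked expansion using stand-in names; the values mirror the
 * DMA_MODE_SIZE_WORD / *_SSIZE_OFF / *_DSIZE_OFF / *_DES_SA_BIT
 * definitions shown in the header above. */
#define EX_SIZE_WORD	0x02
#define EX_SSIZE_OFF	0x01
#define EX_DSIZE_OFF	0x05
#define EX_DES_SA_BIT	0x10

/* memory to I/O, 16 bits: word size on both sides, destination held static */
_Static_assert(((EX_SIZE_WORD << EX_DSIZE_OFF) |	/* 0x40 */
		(EX_SIZE_WORD << EX_SSIZE_OFF) |	/* 0x04 */
		EX_DES_SA_BIT) == 0x54,			/* 0x10 */
	       "write-word mode composes to 0x54");
```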