author		Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
commit		1da177e4c3f41524e886b7f1b8a0c1fc7321cac2
tree		0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-m68knommu/dma.h
Linux-2.6.12-rc2 (v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'include/asm-m68knommu/dma.h')
-rw-r--r--	include/asm-m68knommu/dma.h	492
1 files changed, 492 insertions, 0 deletions
diff --git a/include/asm-m68knommu/dma.h b/include/asm-m68knommu/dma.h
new file mode 100644
index 000000000000..43e98c96a5c2
--- /dev/null
+++ b/include/asm-m68knommu/dma.h
@@ -0,0 +1,492 @@
#ifndef _M68K_DMA_H
#define _M68K_DMA_H 1

//#define DMA_DEBUG 1

#include <linux/config.h>

#ifdef CONFIG_COLDFIRE
/*
 * ColdFire DMA Model:
 * ColdFire DMA supports two forms of DMA: Single and Dual address. Single
 * address mode emits a source address, and expects that the device will
 * either pick up the data (DMA READ) or source data (DMA WRITE). The device
 * must place data on the correct byte(s) of the data bus, as the memory
 * transactions are always 32 bits, so in practice only 32-bit devices will
 * find single-address transfers useful. Dual address DMA mode performs two
 * cycles: source read and destination write. ColdFire will align the data so
 * that the device always gets the correct bytes, which makes this mode useful
 * for 8 and 16 bit devices. Dual address mode is what is supported below.
 *
 * AUG/22/2000 : added support for 32-bit Dual-Address-Mode (K) 2000
 *               Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
 *
 * AUG/25/2000 : added support for 8, 16 and 32-bit Single-Address-Mode (K) 2000
 *               Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
 *
 * APR/18/2002 : added proper support for the MCF5272 DMA controller.
 *               Arthur Shipkowski (art@videon-central.com)
 */

#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfdma.h>

/*
 * Set number of channels of DMA on ColdFire for different implementations.
 */
#if defined(CONFIG_M5249) || defined(CONFIG_M5307) || defined(CONFIG_M5407)
#define MAX_M68K_DMA_CHANNELS 4
#elif defined(CONFIG_M5272)
#define MAX_M68K_DMA_CHANNELS 1
#else
#define MAX_M68K_DMA_CHANNELS 2
#endif

extern unsigned int dma_base_addr[MAX_M68K_DMA_CHANNELS];
extern unsigned int dma_device_address[MAX_M68K_DMA_CHANNELS];

#if !defined(CONFIG_M5272)
#define DMA_MODE_WRITE_BIT  0x01	/* Memory/IO to IO/Memory select */
#define DMA_MODE_WORD_BIT   0x02	/* 8 or 16 bit transfers */
#define DMA_MODE_LONG_BIT   0x04	/* or 32 bit transfers */
#define DMA_MODE_SINGLE_BIT 0x08	/* single-address-mode */

/* I/O to memory, 8 bits, mode */
#define DMA_MODE_READ 0
/* memory to I/O, 8 bits, mode */
#define DMA_MODE_WRITE 1
/* I/O to memory, 16 bits, mode */
#define DMA_MODE_READ_WORD 2
/* memory to I/O, 16 bits, mode */
#define DMA_MODE_WRITE_WORD 3
/* I/O to memory, 32 bits, mode */
#define DMA_MODE_READ_LONG 4
/* memory to I/O, 32 bits, mode */
#define DMA_MODE_WRITE_LONG 5
/* I/O to memory, 8 bits, single-address-mode */
#define DMA_MODE_READ_SINGLE 8
/* memory to I/O, 8 bits, single-address-mode */
#define DMA_MODE_WRITE_SINGLE 9
/* I/O to memory, 16 bits, single-address-mode */
#define DMA_MODE_READ_WORD_SINGLE 10
/* memory to I/O, 16 bits, single-address-mode */
#define DMA_MODE_WRITE_WORD_SINGLE 11
/* I/O to memory, 32 bits, single-address-mode */
#define DMA_MODE_READ_LONG_SINGLE 12
/* memory to I/O, 32 bits, single-address-mode */
#define DMA_MODE_WRITE_LONG_SINGLE 13

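/*
 * For illustration (this note is not part of the original header): the
 * numeric mode values above are simply OR-combinations of the bit masks
 * defined at the start of this block, for example:
 *
 *	DMA_MODE_WRITE_WORD        == DMA_MODE_WRITE_BIT | DMA_MODE_WORD_BIT    (3)
 *	DMA_MODE_READ_LONG_SINGLE  == DMA_MODE_LONG_BIT | DMA_MODE_SINGLE_BIT   (12)
 *	DMA_MODE_WRITE_LONG_SINGLE == DMA_MODE_WRITE_BIT | DMA_MODE_LONG_BIT
 *	                              | DMA_MODE_SINGLE_BIT                     (13)
 *
 * set_dma_mode() below tests exactly these bits when it programs the DCR.
 */
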
#else /* CONFIG_M5272 is defined */

/* Source static-address mode */
#define DMA_MODE_SRC_SA_BIT 0x01
/* Two bits to select between all four modes */
#define DMA_MODE_SSIZE_MASK 0x06
/* Offset to shift bits in */
#define DMA_MODE_SSIZE_OFF 0x01
/* Destination static-address mode */
#define DMA_MODE_DES_SA_BIT 0x10
/* Two bits to select between all four modes */
#define DMA_MODE_DSIZE_MASK 0x60
/* Offset to shift bits in */
#define DMA_MODE_DSIZE_OFF 0x05
/* Size modifiers */
#define DMA_MODE_SIZE_LONG 0x00
#define DMA_MODE_SIZE_BYTE 0x01
#define DMA_MODE_SIZE_WORD 0x02
#define DMA_MODE_SIZE_LINE 0x03

/*
 * Aliases to help speed quick ports; these may be suboptimal, however. They
 * do not include the SINGLE mode modifiers since the MCF5272 does not have a
 * mode where the device is in control of its addressing.
 */

/* I/O to memory, 8 bits, mode */
#define DMA_MODE_READ ((DMA_MODE_SIZE_BYTE << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_BYTE << DMA_MODE_SSIZE_OFF) | DMA_MODE_SRC_SA_BIT)
/* memory to I/O, 8 bits, mode */
#define DMA_MODE_WRITE ((DMA_MODE_SIZE_BYTE << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_BYTE << DMA_MODE_SSIZE_OFF) | DMA_MODE_DES_SA_BIT)
/* I/O to memory, 16 bits, mode */
#define DMA_MODE_READ_WORD ((DMA_MODE_SIZE_WORD << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_WORD << DMA_MODE_SSIZE_OFF) | DMA_MODE_SRC_SA_BIT)
/* memory to I/O, 16 bits, mode */
#define DMA_MODE_WRITE_WORD ((DMA_MODE_SIZE_WORD << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_WORD << DMA_MODE_SSIZE_OFF) | DMA_MODE_DES_SA_BIT)
/* I/O to memory, 32 bits, mode */
#define DMA_MODE_READ_LONG ((DMA_MODE_SIZE_LONG << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_LONG << DMA_MODE_SSIZE_OFF) | DMA_MODE_SRC_SA_BIT)
/* memory to I/O, 32 bits, mode */
#define DMA_MODE_WRITE_LONG ((DMA_MODE_SIZE_LONG << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_LONG << DMA_MODE_SSIZE_OFF) | DMA_MODE_DES_SA_BIT)
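
/*
 * For illustration (this note is not part of the original header): with the
 * definitions above, DMA_MODE_WRITE, for example, expands to
 *
 *	  (DMA_MODE_SIZE_BYTE << DMA_MODE_DSIZE_OFF)	-> 0x20
 *	| (DMA_MODE_SIZE_BYTE << DMA_MODE_SSIZE_OFF)	-> 0x02
 *	| DMA_MODE_DES_SA_BIT				-> 0x10
 *							 = 0x32
 *
 * i.e. byte-sized source and destination, with the destination (the I/O
 * device) held at a static address while the source (memory) increments.
 */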

#endif /* !defined(CONFIG_M5272) */

#if !defined(CONFIG_M5272)
/* enable/disable a specific DMA channel */
static __inline__ void enable_dma(unsigned int dmanr)
{
	volatile unsigned short *dmawp;

#ifdef DMA_DEBUG
	printk("enable_dma(dmanr=%d)\n", dmanr);
#endif

	dmawp = (unsigned short *) dma_base_addr[dmanr];
	dmawp[MCFDMA_DCR] |= MCFDMA_DCR_EEXT;
}

static __inline__ void disable_dma(unsigned int dmanr)
{
	volatile unsigned short *dmawp;
	volatile unsigned char *dmapb;

#ifdef DMA_DEBUG
	printk("disable_dma(dmanr=%d)\n", dmanr);
#endif

	dmawp = (unsigned short *) dma_base_addr[dmanr];
	dmapb = (unsigned char *) dma_base_addr[dmanr];

	/* Turn off external requests, and stop any DMA in progress */
	dmawp[MCFDMA_DCR] &= ~MCFDMA_DCR_EEXT;
	dmapb[MCFDMA_DSR] = MCFDMA_DSR_DONE;
}

/*
 * Clear the 'DMA Pointer Flip Flop'.
 * Write 0 for LSB/MSB, 1 for MSB/LSB access.
 * Use this once to initialize the FF to a known state.
 * After that, keep track of it. :-)
 * --- In order to do that, the DMA routines below should ---
 * --- only be used while interrupts are disabled! ---
 *
 * This is a NOP for ColdFire. Provide a stub for compatibility.
 */
static __inline__ void clear_dma_ff(unsigned int dmanr)
{
}

/* set mode (above) for a specific DMA channel */
static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
{

	volatile unsigned char *dmabp;
	volatile unsigned short *dmawp;

#ifdef DMA_DEBUG
	printk("set_dma_mode(dmanr=%d,mode=%d)\n", dmanr, mode);
#endif

	dmabp = (unsigned char *) dma_base_addr[dmanr];
	dmawp = (unsigned short *) dma_base_addr[dmanr];

	// Clear config errors
	dmabp[MCFDMA_DSR] = MCFDMA_DSR_DONE;

	// Set command register
	dmawp[MCFDMA_DCR] =
		MCFDMA_DCR_INT |	// Enable completion irq
		MCFDMA_DCR_CS |		// Force one xfer per request
		MCFDMA_DCR_AA |		// Enable auto alignment
		// single-address-mode
		((mode & DMA_MODE_SINGLE_BIT) ? MCFDMA_DCR_SAA : 0) |
		// sets s_rw (-> r/w) high if Memory to I/O
		((mode & DMA_MODE_WRITE_BIT) ? MCFDMA_DCR_S_RW : 0) |
		// Memory to I/O or I/O to Memory
		((mode & DMA_MODE_WRITE_BIT) ? MCFDMA_DCR_SINC : MCFDMA_DCR_DINC) |
		// 32 bit, 16 bit or 8 bit transfers
		((mode & DMA_MODE_WORD_BIT) ? MCFDMA_DCR_SSIZE_WORD :
			((mode & DMA_MODE_LONG_BIT) ? MCFDMA_DCR_SSIZE_LONG :
				MCFDMA_DCR_SSIZE_BYTE)) |
		((mode & DMA_MODE_WORD_BIT) ? MCFDMA_DCR_DSIZE_WORD :
			((mode & DMA_MODE_LONG_BIT) ? MCFDMA_DCR_DSIZE_LONG :
				MCFDMA_DCR_DSIZE_BYTE));

#ifdef DEBUG_DMA
	printk("%s(%d): dmanr=%d DSR[%x]=%x DCR[%x]=%x\n", __FILE__, __LINE__,
		dmanr, (int) &dmabp[MCFDMA_DSR], dmabp[MCFDMA_DSR],
		(int) &dmawp[MCFDMA_DCR], dmawp[MCFDMA_DCR]);
#endif
}

/* Set transfer address for specific DMA channel */
static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
{
	volatile unsigned short *dmawp;
	volatile unsigned int *dmalp;

#ifdef DMA_DEBUG
	printk("set_dma_addr(dmanr=%d,a=%x)\n", dmanr, a);
#endif

	dmawp = (unsigned short *) dma_base_addr[dmanr];
	dmalp = (unsigned int *) dma_base_addr[dmanr];

	// Determine which address registers are used for memory/device accesses
	if (dmawp[MCFDMA_DCR] & MCFDMA_DCR_SINC) {
		// Source incrementing, must be memory
		dmalp[MCFDMA_SAR] = a;
		// Set dest address, must be device
		dmalp[MCFDMA_DAR] = dma_device_address[dmanr];
	} else {
		// Destination incrementing, must be memory
		dmalp[MCFDMA_DAR] = a;
		// Set source address, must be device
		dmalp[MCFDMA_SAR] = dma_device_address[dmanr];
	}

#ifdef DEBUG_DMA
	printk("%s(%d): dmanr=%d DCR[%x]=%x SAR[%x]=%08x DAR[%x]=%08x\n",
		__FILE__, __LINE__, dmanr, (int) &dmawp[MCFDMA_DCR], dmawp[MCFDMA_DCR],
		(int) &dmalp[MCFDMA_SAR], dmalp[MCFDMA_SAR],
		(int) &dmalp[MCFDMA_DAR], dmalp[MCFDMA_DAR]);
#endif
}

/*
 * Specific for ColdFire - sets device address.
 * Should be called after the mode set call, and before set DMA address.
 */
static __inline__ void set_dma_device_addr(unsigned int dmanr, unsigned int a)
{
#ifdef DMA_DEBUG
	printk("set_dma_device_addr(dmanr=%d,a=%x)\n", dmanr, a);
#endif

	dma_device_address[dmanr] = a;
}

/*
 * Note: "count" represents _bytes_.
 */
static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
{
	volatile unsigned short *dmawp;

#ifdef DMA_DEBUG
	printk("set_dma_count(dmanr=%d,count=%d)\n", dmanr, count);
#endif

	dmawp = (unsigned short *) dma_base_addr[dmanr];
	dmawp[MCFDMA_BCR] = (unsigned short)count;
}

/*
 * Get DMA residue count. After a DMA transfer, this
 * should return zero. Reading this while a DMA transfer is
 * still in progress will return unpredictable results.
 * Otherwise, it returns the number of _bytes_ left to transfer.
 */
static __inline__ int get_dma_residue(unsigned int dmanr)
{
	volatile unsigned short *dmawp;
	unsigned short count;

#ifdef DMA_DEBUG
	printk("get_dma_residue(dmanr=%d)\n", dmanr);
#endif

	dmawp = (unsigned short *) dma_base_addr[dmanr];
	count = dmawp[MCFDMA_BCR];
	return((int) count);
}
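
/*
 * Illustrative usage sketch (not part of the original header): a driver for a
 * 16-bit peripheral could program a memory-to-device transfer roughly as
 * follows. The channel number, device register address, buffer and length are
 * hypothetical; the call order follows the comments above (set_dma_mode(),
 * then set_dma_device_addr(), then set_dma_addr()).
 *
 *	#define MYDEV_DMA_CHAN	1		// hypothetical channel
 *	#define MYDEV_DATA_REG	0x10000404	// hypothetical device FIFO address
 *
 *	if (request_dma(MYDEV_DMA_CHAN, "mydev") == 0) {
 *		set_dma_mode(MYDEV_DMA_CHAN, DMA_MODE_WRITE_WORD);
 *		set_dma_device_addr(MYDEV_DMA_CHAN, MYDEV_DATA_REG);
 *		set_dma_addr(MYDEV_DMA_CHAN, (unsigned int) buf);
 *		set_dma_count(MYDEV_DMA_CHAN, len);	// length in bytes
 *		enable_dma(MYDEV_DMA_CHAN);
 *		// ... wait for the transfer to complete, then:
 *		if (get_dma_residue(MYDEV_DMA_CHAN) != 0)
 *			printk("mydev: short DMA transfer\n");
 *		disable_dma(MYDEV_DMA_CHAN);
 *		free_dma(MYDEV_DMA_CHAN);
 *	}
 */
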
#else /* CONFIG_M5272 is defined */

/*
 * The MCF5272 DMA controller is very different from the controller defined
 * above in terms of register mapping. For instance, with the exception of the
 * 16-bit interrupt register (IRQ#85, for reference), all of the registers are
 * 32-bit.
 *
 * The big difference, however, is the lack of device-requested DMA. All modes
 * are dual address transfer, and there is no 'device' setup or direction bit.
 * You can DMA between a device and memory, between memory and memory, or even
 * between two devices directly, with any combination of incrementing and
 * non-incrementing addresses you choose. This makes it awkward to distinguish
 * the 'device address' set up by set_dma_device_addr from the ordinary
 * transfer address.
 *
 * Therefore, there are two options. One is to use set_dma_addr and
 * set_dma_device_addr, which will act exactly as above: it will look to see
 * if the source is set to autoincrement, and if so it will make the source
 * use the set_dma_addr value and the destination the set_dma_device_addr
 * value. Otherwise the source will be set to the set_dma_device_addr value
 * and the destination will get the set_dma_addr value.
 *
 * The other is to use the provided set_dma_src_addr and set_dma_dest_addr
 * functions and make it explicit. Depending on what you're doing, one of
 * these two should work for you, but don't mix them in the same transfer
 * setup.
 */
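
/*
 * Illustrative sketch (not part of the original header): following option one
 * above, a memory-to-device transfer on the MCF5272 is set up with the same
 * call sequence as in the sketch earlier in this file; only the meaning of
 * the mode bits differs. Channel and device register are hypothetical.
 *
 *	set_dma_mode(MYDEV_DMA_CHAN, DMA_MODE_WRITE_WORD);	// dest static (device)
 *	set_dma_device_addr(MYDEV_DMA_CHAN, MYDEV_DATA_REG);
 *	set_dma_addr(MYDEV_DMA_CHAN, (unsigned int) buf);	// source = memory, incrementing
 *	set_dma_count(MYDEV_DMA_CHAN, len);			// bytes, at most 24 bits
 *	enable_dma(MYDEV_DMA_CHAN);
 */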

/* enable/disable a specific DMA channel */
static __inline__ void enable_dma(unsigned int dmanr)
{
	volatile unsigned int *dmalp;

#ifdef DMA_DEBUG
	printk("enable_dma(dmanr=%d)\n", dmanr);
#endif

	dmalp = (unsigned int *) dma_base_addr[dmanr];
	dmalp[MCFDMA_DMR] |= MCFDMA_DMR_EN;
}

static __inline__ void disable_dma(unsigned int dmanr)
{
	volatile unsigned int *dmalp;

#ifdef DMA_DEBUG
	printk("disable_dma(dmanr=%d)\n", dmanr);
#endif

	dmalp = (unsigned int *) dma_base_addr[dmanr];

	/* Turn off external requests, and stop any DMA in progress */
	dmalp[MCFDMA_DMR] &= ~MCFDMA_DMR_EN;
	dmalp[MCFDMA_DMR] |= MCFDMA_DMR_RESET;
}

/*
 * Clear the 'DMA Pointer Flip Flop'.
 * Write 0 for LSB/MSB, 1 for MSB/LSB access.
 * Use this once to initialize the FF to a known state.
 * After that, keep track of it. :-)
 * --- In order to do that, the DMA routines below should ---
 * --- only be used while interrupts are disabled! ---
 *
 * This is a NOP for ColdFire. Provide a stub for compatibility.
 */
static __inline__ void clear_dma_ff(unsigned int dmanr)
{
}

/* set mode (above) for a specific DMA channel */
static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
{

	volatile unsigned int *dmalp;
	volatile unsigned short *dmawp;

#ifdef DMA_DEBUG
	printk("set_dma_mode(dmanr=%d,mode=%d)\n", dmanr, mode);
#endif
	dmalp = (unsigned int *) dma_base_addr[dmanr];
	dmawp = (unsigned short *) dma_base_addr[dmanr];

	// Clear config errors
	dmalp[MCFDMA_DMR] |= MCFDMA_DMR_RESET;

	// Set command register
	dmalp[MCFDMA_DMR] =
		MCFDMA_DMR_RQM_DUAL |	// Mandatory Request Mode setting
		MCFDMA_DMR_DSTT_SD |	// Set up addressing types; set to supervisor-data.
		MCFDMA_DMR_SRCT_SD |	// Set up addressing types; set to supervisor-data.
		// source static-address-mode
		((mode & DMA_MODE_SRC_SA_BIT) ? MCFDMA_DMR_SRCM_SA : MCFDMA_DMR_SRCM_IA) |
		// dest static-address-mode
		((mode & DMA_MODE_DES_SA_BIT) ? MCFDMA_DMR_DSTM_SA : MCFDMA_DMR_DSTM_IA) |
		// burst, 32 bit, 16 bit or 8 bit transfers are separately configurable on the MCF5272
		(((mode & DMA_MODE_SSIZE_MASK) >> DMA_MODE_SSIZE_OFF) << MCFDMA_DMR_DSTS_OFF) |
		(((mode & DMA_MODE_SSIZE_MASK) >> DMA_MODE_SSIZE_OFF) << MCFDMA_DMR_SRCS_OFF);

	dmawp[MCFDMA_DIR] |= MCFDMA_DIR_ASCEN;	/* Enable completion interrupts */

#ifdef DEBUG_DMA
	printk("%s(%d): dmanr=%d DMR[%x]=%x DIR[%x]=%x\n", __FILE__, __LINE__,
		dmanr, (int) &dmalp[MCFDMA_DMR], dmalp[MCFDMA_DMR],
		(int) &dmawp[MCFDMA_DIR], dmawp[MCFDMA_DIR]);
#endif
}

/* Set transfer address for specific DMA channel */
static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
{
	volatile unsigned int *dmalp;

#ifdef DMA_DEBUG
	printk("set_dma_addr(dmanr=%d,a=%x)\n", dmanr, a);
#endif

	dmalp = (unsigned int *) dma_base_addr[dmanr];

	// Determine which address registers are used for memory/device accesses
	if (dmalp[MCFDMA_DMR] & MCFDMA_DMR_SRCM) {
		// Source incrementing, must be memory
		dmalp[MCFDMA_DSAR] = a;
		// Set dest address, must be device
		dmalp[MCFDMA_DDAR] = dma_device_address[dmanr];
	} else {
		// Destination incrementing, must be memory
		dmalp[MCFDMA_DDAR] = a;
		// Set source address, must be device
		dmalp[MCFDMA_DSAR] = dma_device_address[dmanr];
	}

#ifdef DEBUG_DMA
	printk("%s(%d): dmanr=%d DMR[%x]=%x SAR[%x]=%08x DAR[%x]=%08x\n",
		__FILE__, __LINE__, dmanr, (int) &dmalp[MCFDMA_DMR], dmalp[MCFDMA_DMR],
		(int) &dmalp[MCFDMA_DSAR], dmalp[MCFDMA_DSAR],
		(int) &dmalp[MCFDMA_DDAR], dmalp[MCFDMA_DDAR]);
#endif
}

/*
 * Specific for ColdFire - sets device address.
 * Should be called after the mode set call, and before set DMA address.
 */
static __inline__ void set_dma_device_addr(unsigned int dmanr, unsigned int a)
{
#ifdef DMA_DEBUG
	printk("set_dma_device_addr(dmanr=%d,a=%x)\n", dmanr, a);
#endif

	dma_device_address[dmanr] = a;
}

/*
 * Note: "count" represents _bytes_.
 *
 * Note: although the byte count register is 32 bits wide, "count" is limited
 * to a maximum 24-bit value.
 */
static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
{
	volatile unsigned int *dmalp;

#ifdef DMA_DEBUG
	printk("set_dma_count(dmanr=%d,count=%d)\n", dmanr, count);
#endif

	dmalp = (unsigned int *) dma_base_addr[dmanr];
	dmalp[MCFDMA_DBCR] = count;
}

/*
 * Get DMA residue count. After a DMA transfer, this
 * should return zero. Reading this while a DMA transfer is
 * still in progress will return unpredictable results.
 * Otherwise, it returns the number of _bytes_ left to transfer.
 */
static __inline__ int get_dma_residue(unsigned int dmanr)
{
	volatile unsigned int *dmalp;
	unsigned int count;

#ifdef DMA_DEBUG
	printk("get_dma_residue(dmanr=%d)\n", dmanr);
#endif

	dmalp = (unsigned int *) dma_base_addr[dmanr];
	count = dmalp[MCFDMA_DBCR];
	return(count);
}

#endif /* !defined(CONFIG_M5272) */
#endif /* CONFIG_COLDFIRE */

#define MAX_DMA_CHANNELS 8

/* Don't define MAX_DMA_ADDRESS; it's useless on the m68k/coldfire and any
   occurrence should be flagged as an error. */
/* under 2.4 it is actually needed by the new bootmem allocator */
#define MAX_DMA_ADDRESS PAGE_OFFSET

/* These are in kernel/dma.c: */
extern int request_dma(unsigned int dmanr, const char *device_id);	/* reserve a DMA channel */
extern void free_dma(unsigned int dmanr);	/* release it again */

#endif /* _M68K_DMA_H */