Diffstat (limited to 'arch/arm/mach-bcmring/dma.c')
-rw-r--r-- | arch/arm/mach-bcmring/dma.c | 1518 |
1 file changed, 0 insertions, 1518 deletions
diff --git a/arch/arm/mach-bcmring/dma.c b/arch/arm/mach-bcmring/dma.c
deleted file mode 100644
index e5fd241fccdc..000000000000
--- a/arch/arm/mach-bcmring/dma.c
+++ /dev/null
@@ -1,1518 +0,0 @@
1 | /***************************************************************************** | ||
2 | * Copyright 2004 - 2008 Broadcom Corporation. All rights reserved. | ||
3 | * | ||
4 | * Unless you and Broadcom execute a separate written software license | ||
5 | * agreement governing use of this software, this software is licensed to you | ||
6 | * under the terms of the GNU General Public License version 2, available at | ||
7 | * http://www.broadcom.com/licenses/GPLv2.php (the "GPL"). | ||
8 | * | ||
9 | * Notwithstanding the above, under no circumstances may you combine this | ||
10 | * software in any way with any other Broadcom software provided under a | ||
11 | * license other than the GPL, without Broadcom's express prior written | ||
12 | * consent. | ||
13 | *****************************************************************************/ | ||
14 | |||
15 | /****************************************************************************/ | ||
16 | /** | ||
17 | * @file dma.c | ||
18 | * | ||
19 | * @brief Implements the DMA interface. | ||
20 | */ | ||
21 | /****************************************************************************/ | ||
22 | |||
23 | /* ---- Include Files ---------------------------------------------------- */ | ||
24 | |||
25 | #include <linux/module.h> | ||
26 | #include <linux/device.h> | ||
27 | #include <linux/dma-mapping.h> | ||
28 | #include <linux/interrupt.h> | ||
29 | #include <linux/sched.h> | ||
30 | #include <linux/irqreturn.h> | ||
31 | #include <linux/proc_fs.h> | ||
32 | #include <linux/slab.h> | ||
33 | |||
34 | #include <mach/timer.h> | ||
35 | |||
36 | #include <linux/pfn.h> | ||
37 | #include <linux/atomic.h> | ||
38 | #include <mach/dma.h> | ||
39 | |||
40 | /* ---- Public Variables ------------------------------------------------- */ | ||
41 | |||
42 | /* ---- Private Constants and Types -------------------------------------- */ | ||
43 | |||
44 | #define MAKE_HANDLE(controllerIdx, channelIdx) (((controllerIdx) << 4) | (channelIdx)) | ||
45 | |||
46 | #define CONTROLLER_FROM_HANDLE(handle) (((handle) >> 4) & 0x0f) | ||
47 | #define CHANNEL_FROM_HANDLE(handle) ((handle) & 0x0f) | ||
48 | |||
49 | |||
50 | /* ---- Private Variables ------------------------------------------------ */ | ||
51 | |||
52 | static DMA_Global_t gDMA; | ||
53 | static struct proc_dir_entry *gDmaDir; | ||
54 | |||
55 | #include "dma_device.c" | ||
56 | |||
57 | /* ---- Private Function Prototypes -------------------------------------- */ | ||
58 | |||
59 | /* ---- Functions ------------------------------------------------------- */ | ||
60 | |||
61 | /****************************************************************************/ | ||
62 | /** | ||
63 | * Displays information for /proc/dma/channels | ||
64 | */ | ||
65 | /****************************************************************************/ | ||
66 | |||
67 | static int dma_proc_read_channels(char *buf, char **start, off_t offset, | ||
68 | int count, int *eof, void *data) | ||
69 | { | ||
70 | int controllerIdx; | ||
71 | int channelIdx; | ||
72 | int limit = count - 200; | ||
73 | int len = 0; | ||
74 | DMA_Channel_t *channel; | ||
75 | |||
76 | if (down_interruptible(&gDMA.lock) < 0) { | ||
77 | return -ERESTARTSYS; | ||
78 | } | ||
79 | |||
80 | for (controllerIdx = 0; controllerIdx < DMA_NUM_CONTROLLERS; | ||
81 | controllerIdx++) { | ||
82 | for (channelIdx = 0; channelIdx < DMA_NUM_CHANNELS; | ||
83 | channelIdx++) { | ||
84 | if (len >= limit) { | ||
85 | break; | ||
86 | } | ||
87 | |||
88 | channel = | ||
89 | &gDMA.controller[controllerIdx].channel[channelIdx]; | ||
90 | |||
91 | len += | ||
92 | sprintf(buf + len, "%d:%d ", controllerIdx, | ||
93 | channelIdx); | ||
94 | |||
95 | if ((channel->flags & DMA_CHANNEL_FLAG_IS_DEDICATED) != | ||
96 | 0) { | ||
97 | len += | ||
98 | sprintf(buf + len, "Dedicated for %s ", | ||
99 | DMA_gDeviceAttribute[channel-> | ||
100 | devType].name); | ||
101 | } else { | ||
102 | len += sprintf(buf + len, "Shared "); | ||
103 | } | ||
104 | |||
105 | if ((channel->flags & DMA_CHANNEL_FLAG_NO_ISR) != 0) { | ||
106 | len += sprintf(buf + len, "No ISR "); | ||
107 | } | ||
108 | |||
109 | if ((channel->flags & DMA_CHANNEL_FLAG_LARGE_FIFO) != 0) { | ||
110 | len += sprintf(buf + len, "Fifo: 128 "); | ||
111 | } else { | ||
112 | len += sprintf(buf + len, "Fifo: 64 "); | ||
113 | } | ||
114 | |||
115 | if ((channel->flags & DMA_CHANNEL_FLAG_IN_USE) != 0) { | ||
116 | len += | ||
117 | sprintf(buf + len, "InUse by %s", | ||
118 | DMA_gDeviceAttribute[channel-> | ||
119 | devType].name); | ||
120 | #if (DMA_DEBUG_TRACK_RESERVATION) | ||
121 | len += | ||
122 | sprintf(buf + len, " (%s:%d)", | ||
123 | channel->fileName, | ||
124 | channel->lineNum); | ||
125 | #endif | ||
126 | } else { | ||
127 | len += sprintf(buf + len, "Avail "); | ||
128 | } | ||
129 | |||
130 | if (channel->lastDevType != DMA_DEVICE_NONE) { | ||
131 | len += | ||
132 | sprintf(buf + len, "Last use: %s ", | ||
133 | DMA_gDeviceAttribute[channel-> | ||
134 | lastDevType]. | ||
135 | name); | ||
136 | } | ||
137 | |||
138 | len += sprintf(buf + len, "\n"); | ||
139 | } | ||
140 | } | ||
141 | up(&gDMA.lock); | ||
142 | *eof = 1; | ||
143 | |||
144 | return len; | ||
145 | } | ||
146 | |||
147 | /****************************************************************************/ | ||
148 | /** | ||
149 | * Displays information for /proc/dma/devices | ||
150 | */ | ||
151 | /****************************************************************************/ | ||
152 | |||
153 | static int dma_proc_read_devices(char *buf, char **start, off_t offset, | ||
154 | int count, int *eof, void *data) | ||
155 | { | ||
156 | int limit = count - 200; | ||
157 | int len = 0; | ||
158 | int devIdx; | ||
159 | |||
160 | if (down_interruptible(&gDMA.lock) < 0) { | ||
161 | return -ERESTARTSYS; | ||
162 | } | ||
163 | |||
164 | for (devIdx = 0; devIdx < DMA_NUM_DEVICE_ENTRIES; devIdx++) { | ||
165 | DMA_DeviceAttribute_t *devAttr = &DMA_gDeviceAttribute[devIdx]; | ||
166 | |||
167 | if (devAttr->name == NULL) { | ||
168 | continue; | ||
169 | } | ||
170 | |||
171 | if (len >= limit) { | ||
172 | break; | ||
173 | } | ||
174 | |||
175 | len += sprintf(buf + len, "%-12s ", devAttr->name); | ||
176 | |||
177 | if ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) != 0) { | ||
178 | len += | ||
179 | sprintf(buf + len, "Dedicated %d:%d ", | ||
180 | devAttr->dedicatedController, | ||
181 | devAttr->dedicatedChannel); | ||
182 | } else { | ||
183 | len += sprintf(buf + len, "Shared DMA:"); | ||
184 | if ((devAttr->flags & DMA_DEVICE_FLAG_ON_DMA0) != 0) { | ||
185 | len += sprintf(buf + len, "0"); | ||
186 | } | ||
187 | if ((devAttr->flags & DMA_DEVICE_FLAG_ON_DMA1) != 0) { | ||
188 | len += sprintf(buf + len, "1"); | ||
189 | } | ||
190 | len += sprintf(buf + len, " "); | ||
191 | } | ||
192 | if ((devAttr->flags & DMA_DEVICE_FLAG_NO_ISR) != 0) { | ||
193 | len += sprintf(buf + len, "NoISR "); | ||
194 | } | ||
195 | if ((devAttr->flags & DMA_DEVICE_FLAG_ALLOW_LARGE_FIFO) != 0) { | ||
196 | len += sprintf(buf + len, "Allow-128 "); | ||
197 | } | ||
198 | |||
199 | len += | ||
200 | sprintf(buf + len, | ||
201 | "Xfer #: %Lu Ticks: %Lu Bytes: %Lu DescLen: %u\n", | ||
202 | devAttr->numTransfers, devAttr->transferTicks, | ||
203 | devAttr->transferBytes, | ||
204 | devAttr->ring.bytesAllocated); | ||
205 | |||
206 | } | ||
207 | |||
208 | up(&gDMA.lock); | ||
209 | *eof = 1; | ||
210 | |||
211 | return len; | ||
212 | } | ||
213 | |||
214 | /****************************************************************************/ | ||
215 | /** | ||
216 | * Determines if a DMA_Device_t is "valid". | ||
217 | * | ||
218 | * @return | ||
219 | * TRUE - dma device is valid | ||
220 | * FALSE - dma device isn't valid | ||
221 | */ | ||
222 | /****************************************************************************/ | ||
223 | |||
224 | static inline int IsDeviceValid(DMA_Device_t device) | ||
225 | { | ||
226 | return (device >= 0) && (device < DMA_NUM_DEVICE_ENTRIES); | ||
227 | } | ||
228 | |||
229 | /****************************************************************************/ | ||
230 | /** | ||
231 | * Translates a DMA handle into a pointer to a channel. | ||
232 | * | ||
233 | * @return | ||
234 | * non-NULL - pointer to DMA_Channel_t | ||
235 | * NULL - DMA Handle was invalid | ||
236 | */ | ||
237 | /****************************************************************************/ | ||
238 | |||
239 | static inline DMA_Channel_t *HandleToChannel(DMA_Handle_t handle) | ||
240 | { | ||
241 | int controllerIdx; | ||
242 | int channelIdx; | ||
243 | |||
244 | controllerIdx = CONTROLLER_FROM_HANDLE(handle); | ||
245 | channelIdx = CHANNEL_FROM_HANDLE(handle); | ||
246 | |||
247 | if ((controllerIdx >= DMA_NUM_CONTROLLERS) | ||
248 | || (channelIdx >= DMA_NUM_CHANNELS)) { | ||
249 | return NULL; | ||
250 | } | ||
251 | return &gDMA.controller[controllerIdx].channel[channelIdx]; | ||
252 | } | ||
253 | |||
254 | /****************************************************************************/ | ||
255 | /** | ||
256 | * Interrupt handler which is called to process DMA interrupts. | ||
257 | */ | ||
258 | /****************************************************************************/ | ||
259 | |||
260 | static irqreturn_t dma_interrupt_handler(int irq, void *dev_id) | ||
261 | { | ||
262 | DMA_Channel_t *channel; | ||
263 | DMA_DeviceAttribute_t *devAttr; | ||
264 | int irqStatus; | ||
265 | |||
266 | channel = (DMA_Channel_t *) dev_id; | ||
267 | |||
268 | /* Figure out why we were called, and knock down the interrupt */ | ||
269 | |||
270 | irqStatus = dmacHw_getInterruptStatus(channel->dmacHwHandle); | ||
271 | dmacHw_clearInterrupt(channel->dmacHwHandle); | ||
272 | |||
273 | if ((channel->devType < 0) | ||
274 | || (channel->devType >= DMA_NUM_DEVICE_ENTRIES)) { | ||
275 | printk(KERN_ERR "dma_interrupt_handler: Invalid devType: %d\n", | ||
276 | channel->devType); | ||
277 | return IRQ_NONE; | ||
278 | } | ||
279 | devAttr = &DMA_gDeviceAttribute[channel->devType]; | ||
280 | |||
281 | /* Update stats */ | ||
282 | |||
283 | if ((irqStatus & dmacHw_INTERRUPT_STATUS_TRANS) != 0) { | ||
284 | devAttr->transferTicks += | ||
285 | (timer_get_tick_count() - devAttr->transferStartTime); | ||
286 | } | ||
287 | |||
288 | if ((irqStatus & dmacHw_INTERRUPT_STATUS_ERROR) != 0) { | ||
289 | printk(KERN_ERR | ||
290 | "dma_interrupt_handler: devType :%d DMA error (%s)\n", | ||
291 | channel->devType, devAttr->name); | ||
292 | } else { | ||
293 | devAttr->numTransfers++; | ||
294 | devAttr->transferBytes += devAttr->numBytes; | ||
295 | } | ||
296 | |||
297 | /* Call any installed handler */ | ||
298 | |||
299 | if (devAttr->devHandler != NULL) { | ||
300 | devAttr->devHandler(channel->devType, irqStatus, | ||
301 | devAttr->userData); | ||
302 | } | ||
303 | |||
304 | return IRQ_HANDLED; | ||
305 | } | ||
306 | |||
307 | /****************************************************************************/ | ||
308 | /** | ||
309 | * Allocates memory to hold a descriptor ring. The descriptor ring then | ||
310 | * needs to be populated by making one or more calls to | ||
311 | * dma_add_descriptors. | ||
312 | * | ||
313 | * The returned descriptor ring will be automatically initialized. | ||
314 | * | ||
315 | * @return | ||
316 | * 0 Descriptor ring was allocated successfully | ||
317 | * -EINVAL Invalid parameters passed in | ||
318 | * -ENOMEM Unable to allocate memory for the desired number of descriptors. | ||
319 | */ | ||
320 | /****************************************************************************/ | ||
321 | |||
322 | int dma_alloc_descriptor_ring(DMA_DescriptorRing_t *ring, /* Descriptor ring to populate */ | ||
323 | int numDescriptors /* Number of descriptors that need to be allocated. */ | ||
324 | ) { | ||
325 | size_t bytesToAlloc = dmacHw_descriptorLen(numDescriptors); | ||
326 | |||
327 | if ((ring == NULL) || (numDescriptors <= 0)) { | ||
328 | return -EINVAL; | ||
329 | } | ||
330 | |||
331 | ring->physAddr = 0; | ||
332 | ring->descriptorsAllocated = 0; | ||
333 | ring->bytesAllocated = 0; | ||
334 | |||
335 | ring->virtAddr = dma_alloc_writecombine(NULL, | ||
336 | bytesToAlloc, | ||
337 | &ring->physAddr, | ||
338 | GFP_KERNEL); | ||
339 | if (ring->virtAddr == NULL) { | ||
340 | return -ENOMEM; | ||
341 | } | ||
342 | |||
343 | ring->bytesAllocated = bytesToAlloc; | ||
344 | ring->descriptorsAllocated = numDescriptors; | ||
345 | |||
346 | return dma_init_descriptor_ring(ring, numDescriptors); | ||
347 | } | ||
348 | |||
349 | EXPORT_SYMBOL(dma_alloc_descriptor_ring); | ||
350 | |||
351 | /****************************************************************************/ | ||
352 | /** | ||
353 | * Releases the memory which was previously allocated for a descriptor ring. | ||
354 | */ | ||
355 | /****************************************************************************/ | ||
356 | |||
357 | void dma_free_descriptor_ring(DMA_DescriptorRing_t *ring /* Descriptor to release */ | ||
358 | ) { | ||
359 | if (ring->virtAddr != NULL) { | ||
360 | dma_free_writecombine(NULL, | ||
361 | ring->bytesAllocated, | ||
362 | ring->virtAddr, ring->physAddr); | ||
363 | } | ||
364 | |||
365 | ring->bytesAllocated = 0; | ||
366 | ring->descriptorsAllocated = 0; | ||
367 | ring->virtAddr = NULL; | ||
368 | ring->physAddr = 0; | ||
369 | } | ||
370 | |||
371 | EXPORT_SYMBOL(dma_free_descriptor_ring); | ||
372 | |||
373 | /****************************************************************************/ | ||
374 | /** | ||
375 | * Initializes a descriptor ring, so that descriptors can be added to it. | ||
376 | * Once a descriptor ring has been allocated, it may be reinitialized for | ||
377 | * use with additional/different regions of memory. | ||
378 | * | ||
379 | * Note that if 7 descriptors are allocated, it's perfectly acceptable to | ||
380 | * initialize the ring with a smaller number of descriptors. The amount | ||
381 | * of memory allocated for the descriptor ring will not be reduced, and | ||
382 | * the descriptor ring may be reinitialized later. | ||
383 | * | ||
384 | * @return | ||
385 | * 0 Descriptor ring was initialized successfully | ||
386 | * -ENOMEM The descriptor which was passed in has insufficient space | ||
387 | * to hold the desired number of descriptors. | ||
388 | */ | ||
389 | /****************************************************************************/ | ||
390 | |||
391 | int dma_init_descriptor_ring(DMA_DescriptorRing_t *ring, /* Descriptor ring to initialize */ | ||
392 | int numDescriptors /* Number of descriptors to initialize. */ | ||
393 | ) { | ||
394 | if (ring->virtAddr == NULL) { | ||
395 | return -EINVAL; | ||
396 | } | ||
397 | if (dmacHw_initDescriptor(ring->virtAddr, | ||
398 | ring->physAddr, | ||
399 | ring->bytesAllocated, numDescriptors) < 0) { | ||
400 | printk(KERN_ERR | ||
401 | "dma_init_descriptor_ring: dmacHw_initDescriptor failed\n"); | ||
402 | return -ENOMEM; | ||
403 | } | ||
404 | |||
405 | return 0; | ||
406 | } | ||
407 | |||
408 | EXPORT_SYMBOL(dma_init_descriptor_ring); | ||
409 | |||
410 | /****************************************************************************/ | ||
411 | /** | ||
412 | * Determines the number of descriptors which would be required for a | ||
413 | * transfer of the indicated memory region. | ||
414 | * | ||
415 | * This function also needs to know which DMA device this transfer will | ||
416 | * be destined for, so that the appropriate DMA configuration can be retrieved. | ||
417 | * DMA parameters such as transfer width, and whether this is a memory-to-memory | ||
418 | * or memory-to-peripheral transfer, etc. can all affect the actual number of descriptors | ||
419 | * required. | ||
420 | * | ||
421 | * @return | ||
422 | * > 0 Returns the number of descriptors required for the indicated transfer | ||
423 | * -ENODEV - Device handed in is invalid. | ||
424 | * -EINVAL Invalid parameters | ||
425 | * -ENOMEM Memory exhausted | ||
426 | */ | ||
427 | /****************************************************************************/ | ||
428 | |||
429 | int dma_calculate_descriptor_count(DMA_Device_t device, /* DMA Device that this will be associated with */ | ||
430 | dma_addr_t srcData, /* Place to get data to write to device */ | ||
431 | dma_addr_t dstData, /* Pointer to device data address */ | ||
432 | size_t numBytes /* Number of bytes to transfer to the device */ | ||
433 | ) { | ||
434 | int numDescriptors; | ||
435 | DMA_DeviceAttribute_t *devAttr; | ||
436 | |||
437 | if (!IsDeviceValid(device)) { | ||
438 | return -ENODEV; | ||
439 | } | ||
440 | devAttr = &DMA_gDeviceAttribute[device]; | ||
441 | |||
442 | numDescriptors = dmacHw_calculateDescriptorCount(&devAttr->config, | ||
443 | (void *)srcData, | ||
444 | (void *)dstData, | ||
445 | numBytes); | ||
446 | if (numDescriptors < 0) { | ||
447 | printk(KERN_ERR | ||
448 | "dma_calculate_descriptor_count: dmacHw_calculateDescriptorCount failed\n"); | ||
449 | return -EINVAL; | ||
450 | } | ||
451 | |||
452 | return numDescriptors; | ||
453 | } | ||
454 | |||
455 | EXPORT_SYMBOL(dma_calculate_descriptor_count); | ||
456 | |||
457 | /****************************************************************************/ | ||
458 | /** | ||
459 | * Adds a region of memory to the descriptor ring. Note that it may take | ||
460 | * multiple descriptors for each region of memory. It is the caller's | ||
461 | * responsibility to allocate a sufficiently large descriptor ring. | ||
462 | * | ||
463 | * @return | ||
464 | * 0 Descriptors were added successfully | ||
465 | * -ENODEV Device handed in is invalid. | ||
466 | * -EINVAL Invalid parameters | ||
467 | * -ENOMEM Memory exhausted | ||
468 | */ | ||
469 | /****************************************************************************/ | ||
470 | |||
471 | int dma_add_descriptors(DMA_DescriptorRing_t *ring, /* Descriptor ring to add descriptors to */ | ||
472 | DMA_Device_t device, /* DMA Device that descriptors are for */ | ||
473 | dma_addr_t srcData, /* Place to get data (memory or device) */ | ||
474 | dma_addr_t dstData, /* Place to put data (memory or device) */ | ||
475 | size_t numBytes /* Number of bytes to transfer to the device */ | ||
476 | ) { | ||
477 | int rc; | ||
478 | DMA_DeviceAttribute_t *devAttr; | ||
479 | |||
480 | if (!IsDeviceValid(device)) { | ||
481 | return -ENODEV; | ||
482 | } | ||
483 | devAttr = &DMA_gDeviceAttribute[device]; | ||
484 | |||
485 | rc = dmacHw_setDataDescriptor(&devAttr->config, | ||
486 | ring->virtAddr, | ||
487 | (void *)srcData, | ||
488 | (void *)dstData, numBytes); | ||
489 | if (rc < 0) { | ||
490 | printk(KERN_ERR | ||
491 | "dma_add_descriptors: dmacHw_setDataDescriptor failed with code: %d\n", | ||
492 | rc); | ||
493 | return -ENOMEM; | ||
494 | } | ||
495 | |||
496 | return 0; | ||
497 | } | ||
498 | |||
499 | EXPORT_SYMBOL(dma_add_descriptors); | ||
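/****************************************************************************/
/*
 * Illustrative sketch only (not part of the original driver): the manual
 * descriptor-ring workflow described above -- size the ring with
 * dma_calculate_descriptor_count(), allocate it with
 * dma_alloc_descriptor_ring(), populate it with dma_add_descriptors() and
 * hand it to the device with dma_set_device_descriptor_ring().
 * DMA_DEVICE_EXAMPLE and the physical addresses are hypothetical
 * placeholders; real devices are defined in dma_device.c.
 */
/****************************************************************************/

static int example_build_ring(dma_addr_t srcPhys, dma_addr_t dstPhys,
                              size_t numBytes)
{
        DMA_DescriptorRing_t ring;
        int numDescriptors;
        int rc;

        /* Ask how many descriptors this transfer needs for this device. */
        numDescriptors = dma_calculate_descriptor_count(DMA_DEVICE_EXAMPLE,
                                                        srcPhys, dstPhys,
                                                        numBytes);
        if (numDescriptors < 0) {
                return numDescriptors;
        }

        /* Allocate (and implicitly initialize) a ring of that size. */
        rc = dma_alloc_descriptor_ring(&ring, numDescriptors);
        if (rc < 0) {
                return rc;
        }

        /* Describe the memory region; may be repeated for more regions. */
        rc = dma_add_descriptors(&ring, DMA_DEVICE_EXAMPLE,
                                 srcPhys, dstPhys, numBytes);
        if (rc < 0) {
                dma_free_descriptor_ring(&ring);
                return rc;
        }

        /* The device keeps a copy of the ring for later transfers. */
        return dma_set_device_descriptor_ring(DMA_DEVICE_EXAMPLE, &ring);
}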
500 | |||
501 | /****************************************************************************/ | ||
502 | /** | ||
503 | * Sets the descriptor ring associated with a device. | ||
504 | * | ||
505 | * Once set, the descriptor ring will be associated with the device, even | ||
506 | * across channel request/free calls. Passing in a NULL descriptor ring | ||
507 | * will release any descriptor ring currently associated with the device. | ||
508 | * | ||
509 | * Note: If you call dma_transfer, or one of the other dma_alloc_ functions | ||
510 | * the descriptor ring may be released and reallocated. | ||
511 | * | ||
512 | * Note: This function will release the descriptor memory for any current | ||
513 | * descriptor ring associated with this device. | ||
514 | * | ||
515 | * @return | ||
516 | * 0 Descriptors were added successfully | ||
517 | * -ENODEV Device handed in is invalid. | ||
518 | */ | ||
519 | /****************************************************************************/ | ||
520 | |||
521 | int dma_set_device_descriptor_ring(DMA_Device_t device, /* Device to update the descriptor ring for. */ | ||
522 | DMA_DescriptorRing_t *ring /* Descriptor ring to add descriptors to */ | ||
523 | ) { | ||
524 | DMA_DeviceAttribute_t *devAttr; | ||
525 | |||
526 | if (!IsDeviceValid(device)) { | ||
527 | return -ENODEV; | ||
528 | } | ||
529 | devAttr = &DMA_gDeviceAttribute[device]; | ||
530 | |||
531 | /* Free the previously allocated descriptor ring */ | ||
532 | |||
533 | dma_free_descriptor_ring(&devAttr->ring); | ||
534 | |||
535 | if (ring != NULL) { | ||
536 | /* Copy in the new one */ | ||
537 | |||
538 | devAttr->ring = *ring; | ||
539 | } | ||
540 | |||
541 | /* Set things up so that if dma_transfer is called then this descriptor */ | ||
542 | /* ring will get freed. */ | ||
543 | |||
544 | devAttr->prevSrcData = 0; | ||
545 | devAttr->prevDstData = 0; | ||
546 | devAttr->prevNumBytes = 0; | ||
547 | |||
548 | return 0; | ||
549 | } | ||
550 | |||
551 | EXPORT_SYMBOL(dma_set_device_descriptor_ring); | ||
552 | |||
553 | /****************************************************************************/ | ||
554 | /** | ||
555 | * Retrieves the descriptor ring associated with a device. | ||
556 | * | ||
557 | * @return | ||
558 | * 0 Descriptors were added successfully | ||
559 | * -ENODEV Device handed in is invalid. | ||
560 | */ | ||
561 | /****************************************************************************/ | ||
562 | |||
563 | int dma_get_device_descriptor_ring(DMA_Device_t device, /* Device to retrieve the descriptor ring for. */ | ||
564 | DMA_DescriptorRing_t *ring /* Place to store retrieved ring */ | ||
565 | ) { | ||
566 | DMA_DeviceAttribute_t *devAttr; | ||
567 | |||
568 | memset(ring, 0, sizeof(*ring)); | ||
569 | |||
570 | if (!IsDeviceValid(device)) { | ||
571 | return -ENODEV; | ||
572 | } | ||
573 | devAttr = &DMA_gDeviceAttribute[device]; | ||
574 | |||
575 | *ring = devAttr->ring; | ||
576 | |||
577 | return 0; | ||
578 | } | ||
579 | |||
580 | EXPORT_SYMBOL(dma_get_device_descriptor_ring); | ||
581 | |||
582 | /****************************************************************************/ | ||
583 | /** | ||
584 | * Configures a DMA channel. | ||
585 | * | ||
586 | * @return | ||
587 | * >= 0 - Initialization was successful. | ||
588 | * | ||
589 | * -EBUSY - Device is currently being used. | ||
590 | * -ENODEV - Device handed in is invalid. | ||
591 | */ | ||
592 | /****************************************************************************/ | ||
593 | |||
594 | static int ConfigChannel(DMA_Handle_t handle) | ||
595 | { | ||
596 | DMA_Channel_t *channel; | ||
597 | DMA_DeviceAttribute_t *devAttr; | ||
598 | int controllerIdx; | ||
599 | |||
600 | channel = HandleToChannel(handle); | ||
601 | if (channel == NULL) { | ||
602 | return -ENODEV; | ||
603 | } | ||
604 | devAttr = &DMA_gDeviceAttribute[channel->devType]; | ||
605 | controllerIdx = CONTROLLER_FROM_HANDLE(handle); | ||
606 | |||
607 | if ((devAttr->flags & DMA_DEVICE_FLAG_PORT_PER_DMAC) != 0) { | ||
608 | if (devAttr->config.transferType == | ||
609 | dmacHw_TRANSFER_TYPE_MEM_TO_PERIPHERAL) { | ||
610 | devAttr->config.dstPeripheralPort = | ||
611 | devAttr->dmacPort[controllerIdx]; | ||
612 | } else if (devAttr->config.transferType == | ||
613 | dmacHw_TRANSFER_TYPE_PERIPHERAL_TO_MEM) { | ||
614 | devAttr->config.srcPeripheralPort = | ||
615 | devAttr->dmacPort[controllerIdx]; | ||
616 | } | ||
617 | } | ||
618 | |||
619 | if (dmacHw_configChannel(channel->dmacHwHandle, &devAttr->config) != 0) { | ||
620 | printk(KERN_ERR "ConfigChannel: dmacHw_configChannel failed\n"); | ||
621 | return -EIO; | ||
622 | } | ||
623 | |||
624 | return 0; | ||
625 | } | ||
626 | |||
627 | /****************************************************************************/ | ||
628 | /** | ||
629 | * Initializes all of the data structures associated with the DMA. | ||
630 | * @return | ||
631 | * >= 0 - Initialization was successful. | ||
632 | * | ||
633 | * -EBUSY - Device is currently being used. | ||
634 | * -ENODEV - Device handed in is invalid. | ||
635 | */ | ||
636 | /****************************************************************************/ | ||
637 | |||
638 | int dma_init(void) | ||
639 | { | ||
640 | int rc = 0; | ||
641 | int controllerIdx; | ||
642 | int channelIdx; | ||
643 | DMA_Device_t devIdx; | ||
644 | DMA_Channel_t *channel; | ||
645 | DMA_Handle_t dedicatedHandle; | ||
646 | |||
647 | memset(&gDMA, 0, sizeof(gDMA)); | ||
648 | |||
649 | sema_init(&gDMA.lock, 0); | ||
650 | init_waitqueue_head(&gDMA.freeChannelQ); | ||
651 | |||
652 | /* Initialize the Hardware */ | ||
653 | |||
654 | dmacHw_initDma(); | ||
655 | |||
656 | /* Start off by marking all of the DMA channels as shared. */ | ||
657 | |||
658 | for (controllerIdx = 0; controllerIdx < DMA_NUM_CONTROLLERS; | ||
659 | controllerIdx++) { | ||
660 | for (channelIdx = 0; channelIdx < DMA_NUM_CHANNELS; | ||
661 | channelIdx++) { | ||
662 | channel = | ||
663 | &gDMA.controller[controllerIdx].channel[channelIdx]; | ||
664 | |||
665 | channel->flags = 0; | ||
666 | channel->devType = DMA_DEVICE_NONE; | ||
667 | channel->lastDevType = DMA_DEVICE_NONE; | ||
668 | |||
669 | #if (DMA_DEBUG_TRACK_RESERVATION) | ||
670 | channel->fileName = ""; | ||
671 | channel->lineNum = 0; | ||
672 | #endif | ||
673 | |||
674 | channel->dmacHwHandle = | ||
675 | dmacHw_getChannelHandle(dmacHw_MAKE_CHANNEL_ID | ||
676 | (controllerIdx, | ||
677 | channelIdx)); | ||
678 | dmacHw_initChannel(channel->dmacHwHandle); | ||
679 | } | ||
680 | } | ||
681 | |||
682 | /* Record any special attributes that channels may have */ | ||
683 | |||
684 | gDMA.controller[0].channel[0].flags |= DMA_CHANNEL_FLAG_LARGE_FIFO; | ||
685 | gDMA.controller[0].channel[1].flags |= DMA_CHANNEL_FLAG_LARGE_FIFO; | ||
686 | gDMA.controller[1].channel[0].flags |= DMA_CHANNEL_FLAG_LARGE_FIFO; | ||
687 | gDMA.controller[1].channel[1].flags |= DMA_CHANNEL_FLAG_LARGE_FIFO; | ||
688 | |||
689 | /* Now walk through and record the dedicated channels. */ | ||
690 | |||
691 | for (devIdx = 0; devIdx < DMA_NUM_DEVICE_ENTRIES; devIdx++) { | ||
692 | DMA_DeviceAttribute_t *devAttr = &DMA_gDeviceAttribute[devIdx]; | ||
693 | |||
694 | if (((devAttr->flags & DMA_DEVICE_FLAG_NO_ISR) != 0) | ||
695 | && ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) == 0)) { | ||
696 | printk(KERN_ERR | ||
697 | "DMA Device: %s Can only request NO_ISR for dedicated devices\n", | ||
698 | devAttr->name); | ||
699 | rc = -EINVAL; | ||
700 | goto out; | ||
701 | } | ||
702 | |||
703 | if ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) != 0) { | ||
704 | /* This is a dedicated device. Mark the channel as being reserved. */ | ||
705 | |||
706 | if (devAttr->dedicatedController >= DMA_NUM_CONTROLLERS) { | ||
707 | printk(KERN_ERR | ||
708 | "DMA Device: %s DMA Controller %d is out of range\n", | ||
709 | devAttr->name, | ||
710 | devAttr->dedicatedController); | ||
711 | rc = -EINVAL; | ||
712 | goto out; | ||
713 | } | ||
714 | |||
715 | if (devAttr->dedicatedChannel >= DMA_NUM_CHANNELS) { | ||
716 | printk(KERN_ERR | ||
717 | "DMA Device: %s DMA Channel %d is out of range\n", | ||
718 | devAttr->name, | ||
719 | devAttr->dedicatedChannel); | ||
720 | rc = -EINVAL; | ||
721 | goto out; | ||
722 | } | ||
723 | |||
724 | dedicatedHandle = | ||
725 | MAKE_HANDLE(devAttr->dedicatedController, | ||
726 | devAttr->dedicatedChannel); | ||
727 | channel = HandleToChannel(dedicatedHandle); | ||
728 | |||
729 | if ((channel->flags & DMA_CHANNEL_FLAG_IS_DEDICATED) != | ||
730 | 0) { | ||
731 | printk | ||
732 | ("DMA Device: %s attempting to use same DMA Controller:Channel (%d:%d) as %s\n", | ||
733 | devAttr->name, | ||
734 | devAttr->dedicatedController, | ||
735 | devAttr->dedicatedChannel, | ||
736 | DMA_gDeviceAttribute[channel->devType]. | ||
737 | name); | ||
738 | rc = -EBUSY; | ||
739 | goto out; | ||
740 | } | ||
741 | |||
742 | channel->flags |= DMA_CHANNEL_FLAG_IS_DEDICATED; | ||
743 | channel->devType = devIdx; | ||
744 | |||
745 | if (devAttr->flags & DMA_DEVICE_FLAG_NO_ISR) { | ||
746 | channel->flags |= DMA_CHANNEL_FLAG_NO_ISR; | ||
747 | } | ||
748 | |||
749 | /* For dedicated channels, we can go ahead and configure the DMA channel now */ | ||
750 | /* as well. */ | ||
751 | |||
752 | ConfigChannel(dedicatedHandle); | ||
753 | } | ||
754 | } | ||
755 | |||
756 | /* Go through and register the interrupt handlers */ | ||
757 | |||
758 | for (controllerIdx = 0; controllerIdx < DMA_NUM_CONTROLLERS; | ||
759 | controllerIdx++) { | ||
760 | for (channelIdx = 0; channelIdx < DMA_NUM_CHANNELS; | ||
761 | channelIdx++) { | ||
762 | channel = | ||
763 | &gDMA.controller[controllerIdx].channel[channelIdx]; | ||
764 | |||
765 | if ((channel->flags & DMA_CHANNEL_FLAG_NO_ISR) == 0) { | ||
766 | snprintf(channel->name, sizeof(channel->name), | ||
767 | "dma %d:%d %s", controllerIdx, | ||
768 | channelIdx, | ||
769 | channel->devType == | ||
770 | DMA_DEVICE_NONE ? "" : | ||
771 | DMA_gDeviceAttribute[channel->devType]. | ||
772 | name); | ||
773 | |||
774 | rc = | ||
775 | request_irq(IRQ_DMA0C0 + | ||
776 | (controllerIdx * | ||
777 | DMA_NUM_CHANNELS) + | ||
778 | channelIdx, | ||
779 | dma_interrupt_handler, | ||
780 | IRQF_DISABLED, channel->name, | ||
781 | channel); | ||
782 | if (rc != 0) { | ||
783 | printk(KERN_ERR | ||
784 | "request_irq for IRQ_DMA%dC%d failed\n", | ||
785 | controllerIdx, channelIdx); | ||
786 | } | ||
787 | } | ||
788 | } | ||
789 | } | ||
790 | |||
791 | /* Create /proc/dma/channels and /proc/dma/devices */ | ||
792 | |||
793 | gDmaDir = proc_mkdir("dma", NULL); | ||
794 | |||
795 | if (gDmaDir == NULL) { | ||
796 | printk(KERN_ERR "Unable to create /proc/dma\n"); | ||
797 | } else { | ||
798 | create_proc_read_entry("channels", 0, gDmaDir, | ||
799 | dma_proc_read_channels, NULL); | ||
800 | create_proc_read_entry("devices", 0, gDmaDir, | ||
801 | dma_proc_read_devices, NULL); | ||
802 | } | ||
803 | |||
804 | out: | ||
805 | |||
806 | up(&gDMA.lock); | ||
807 | |||
808 | return rc; | ||
809 | } | ||
810 | |||
811 | /****************************************************************************/ | ||
812 | /** | ||
813 | * Reserves a channel for use with @a dev. If the device is set up to use | ||
814 | * a shared channel, then this function will block until a free channel | ||
815 | * becomes available. | ||
816 | * | ||
817 | * @return | ||
818 | * >= 0 - A valid DMA Handle. | ||
819 | * -EBUSY - Device is currently being used. | ||
820 | * -ENODEV - Device handed in is invalid. | ||
821 | */ | ||
822 | /****************************************************************************/ | ||
823 | |||
824 | #if (DMA_DEBUG_TRACK_RESERVATION) | ||
825 | DMA_Handle_t dma_request_channel_dbg | ||
826 | (DMA_Device_t dev, const char *fileName, int lineNum) | ||
827 | #else | ||
828 | DMA_Handle_t dma_request_channel(DMA_Device_t dev) | ||
829 | #endif | ||
830 | { | ||
831 | DMA_Handle_t handle; | ||
832 | DMA_DeviceAttribute_t *devAttr; | ||
833 | DMA_Channel_t *channel; | ||
834 | int controllerIdx; | ||
835 | int controllerIdx2; | ||
836 | int channelIdx; | ||
837 | |||
838 | if (down_interruptible(&gDMA.lock) < 0) { | ||
839 | return -ERESTARTSYS; | ||
840 | } | ||
841 | |||
842 | if ((dev < 0) || (dev >= DMA_NUM_DEVICE_ENTRIES)) { | ||
843 | handle = -ENODEV; | ||
844 | goto out; | ||
845 | } | ||
846 | devAttr = &DMA_gDeviceAttribute[dev]; | ||
847 | |||
848 | #if (DMA_DEBUG_TRACK_RESERVATION) | ||
849 | { | ||
850 | char *s; | ||
851 | |||
852 | s = strrchr(fileName, '/'); | ||
853 | if (s != NULL) { | ||
854 | fileName = s + 1; | ||
855 | } | ||
856 | } | ||
857 | #endif | ||
858 | if ((devAttr->flags & DMA_DEVICE_FLAG_IN_USE) != 0) { | ||
859 | /* This device has already been requested and not been freed */ | ||
860 | |||
861 | printk(KERN_ERR "%s: device %s is already requested\n", | ||
862 | __func__, devAttr->name); | ||
863 | handle = -EBUSY; | ||
864 | goto out; | ||
865 | } | ||
866 | |||
867 | if ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) != 0) { | ||
868 | /* This device has a dedicated channel. */ | ||
869 | |||
870 | channel = | ||
871 | &gDMA.controller[devAttr->dedicatedController]. | ||
872 | channel[devAttr->dedicatedChannel]; | ||
873 | if ((channel->flags & DMA_CHANNEL_FLAG_IN_USE) != 0) { | ||
874 | handle = -EBUSY; | ||
875 | goto out; | ||
876 | } | ||
877 | |||
878 | channel->flags |= DMA_CHANNEL_FLAG_IN_USE; | ||
879 | devAttr->flags |= DMA_DEVICE_FLAG_IN_USE; | ||
880 | |||
881 | #if (DMA_DEBUG_TRACK_RESERVATION) | ||
882 | channel->fileName = fileName; | ||
883 | channel->lineNum = lineNum; | ||
884 | #endif | ||
885 | handle = | ||
886 | MAKE_HANDLE(devAttr->dedicatedController, | ||
887 | devAttr->dedicatedChannel); | ||
888 | goto out; | ||
889 | } | ||
890 | |||
891 | /* This device needs to use one of the shared channels. */ | ||
892 | |||
893 | handle = DMA_INVALID_HANDLE; | ||
894 | while (handle == DMA_INVALID_HANDLE) { | ||
895 | /* Scan through the shared channels and see if one is available */ | ||
896 | |||
897 | for (controllerIdx2 = 0; controllerIdx2 < DMA_NUM_CONTROLLERS; | ||
898 | controllerIdx2++) { | ||
899 | /* Check to see if we should try on controller 1 first. */ | ||
900 | |||
901 | controllerIdx = controllerIdx2; | ||
902 | if ((devAttr-> | ||
903 | flags & DMA_DEVICE_FLAG_ALLOC_DMA1_FIRST) != 0) { | ||
904 | controllerIdx = 1 - controllerIdx; | ||
905 | } | ||
906 | |||
907 | /* See if the device is available on the controller being tested */ | ||
908 | |||
909 | if ((devAttr-> | ||
910 | flags & (DMA_DEVICE_FLAG_ON_DMA0 << controllerIdx)) | ||
911 | != 0) { | ||
912 | for (channelIdx = 0; | ||
913 | channelIdx < DMA_NUM_CHANNELS; | ||
914 | channelIdx++) { | ||
915 | channel = | ||
916 | &gDMA.controller[controllerIdx]. | ||
917 | channel[channelIdx]; | ||
918 | |||
919 | if (((channel-> | ||
920 | flags & | ||
921 | DMA_CHANNEL_FLAG_IS_DEDICATED) == | ||
922 | 0) | ||
923 | && | ||
924 | ((channel-> | ||
925 | flags & DMA_CHANNEL_FLAG_IN_USE) | ||
926 | == 0)) { | ||
927 | if (((channel-> | ||
928 | flags & | ||
929 | DMA_CHANNEL_FLAG_LARGE_FIFO) | ||
930 | != 0) | ||
931 | && | ||
932 | ((devAttr-> | ||
933 | flags & | ||
934 | DMA_DEVICE_FLAG_ALLOW_LARGE_FIFO) | ||
935 | == 0)) { | ||
936 | /* This channel is a large fifo - don't tie it up */ | ||
937 | /* with devices that we don't want using it. */ | ||
938 | |||
939 | continue; | ||
940 | } | ||
941 | |||
942 | channel->flags |= | ||
943 | DMA_CHANNEL_FLAG_IN_USE; | ||
944 | channel->devType = dev; | ||
945 | devAttr->flags |= | ||
946 | DMA_DEVICE_FLAG_IN_USE; | ||
947 | |||
948 | #if (DMA_DEBUG_TRACK_RESERVATION) | ||
949 | channel->fileName = fileName; | ||
950 | channel->lineNum = lineNum; | ||
951 | #endif | ||
952 | handle = | ||
953 | MAKE_HANDLE(controllerIdx, | ||
954 | channelIdx); | ||
955 | |||
956 | /* Now that we've reserved the channel - we can go ahead and configure it */ | ||
957 | |||
958 | if (ConfigChannel(handle) != 0) { | ||
959 | handle = -EIO; | ||
960 | printk(KERN_ERR | ||
961 | "dma_request_channel: ConfigChannel failed\n"); | ||
962 | } | ||
963 | goto out; | ||
964 | } | ||
965 | } | ||
966 | } | ||
967 | } | ||
968 | |||
969 | /* No channels are currently available. Let's wait for one to free up. */ | ||
970 | |||
971 | { | ||
972 | DEFINE_WAIT(wait); | ||
973 | |||
974 | prepare_to_wait(&gDMA.freeChannelQ, &wait, | ||
975 | TASK_INTERRUPTIBLE); | ||
976 | up(&gDMA.lock); | ||
977 | schedule(); | ||
978 | finish_wait(&gDMA.freeChannelQ, &wait); | ||
979 | |||
980 | if (signal_pending(current)) { | ||
981 | /* We don't currently hold gDMA.lock, so we return directly */ | ||
982 | |||
983 | return -ERESTARTSYS; | ||
984 | } | ||
985 | } | ||
986 | |||
987 | if (down_interruptible(&gDMA.lock)) { | ||
988 | return -ERESTARTSYS; | ||
989 | } | ||
990 | } | ||
991 | |||
992 | out: | ||
993 | up(&gDMA.lock); | ||
994 | |||
995 | return handle; | ||
996 | } | ||
997 | |||
998 | /* Create both _dbg and non _dbg functions for modules. */ | ||
999 | |||
1000 | #if (DMA_DEBUG_TRACK_RESERVATION) | ||
1001 | #undef dma_request_channel | ||
1002 | DMA_Handle_t dma_request_channel(DMA_Device_t dev) | ||
1003 | { | ||
1004 | return dma_request_channel_dbg(dev, __FILE__, __LINE__); | ||
1005 | } | ||
1006 | |||
1007 | EXPORT_SYMBOL(dma_request_channel_dbg); | ||
1008 | #endif | ||
1009 | EXPORT_SYMBOL(dma_request_channel); | ||
1010 | |||
1011 | /****************************************************************************/ | ||
1012 | /** | ||
1013 | * Frees a previously allocated DMA Handle. | ||
1014 | */ | ||
1015 | /****************************************************************************/ | ||
1016 | |||
1017 | int dma_free_channel(DMA_Handle_t handle /* DMA handle. */ | ||
1018 | ) { | ||
1019 | int rc = 0; | ||
1020 | DMA_Channel_t *channel; | ||
1021 | DMA_DeviceAttribute_t *devAttr; | ||
1022 | |||
1023 | if (down_interruptible(&gDMA.lock) < 0) { | ||
1024 | return -ERESTARTSYS; | ||
1025 | } | ||
1026 | |||
1027 | channel = HandleToChannel(handle); | ||
1028 | if (channel == NULL) { | ||
1029 | rc = -EINVAL; | ||
1030 | goto out; | ||
1031 | } | ||
1032 | |||
1033 | devAttr = &DMA_gDeviceAttribute[channel->devType]; | ||
1034 | |||
1035 | if ((channel->flags & DMA_CHANNEL_FLAG_IS_DEDICATED) == 0) { | ||
1036 | channel->lastDevType = channel->devType; | ||
1037 | channel->devType = DMA_DEVICE_NONE; | ||
1038 | } | ||
1039 | channel->flags &= ~DMA_CHANNEL_FLAG_IN_USE; | ||
1040 | devAttr->flags &= ~DMA_DEVICE_FLAG_IN_USE; | ||
1041 | |||
1042 | out: | ||
1043 | up(&gDMA.lock); | ||
1044 | |||
1045 | wake_up_interruptible(&gDMA.freeChannelQ); | ||
1046 | |||
1047 | return rc; | ||
1048 | } | ||
1049 | |||
1050 | EXPORT_SYMBOL(dma_free_channel); | ||
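/****************************************************************************/
/*
 * Illustrative sketch only (not part of the original driver): the usual
 * reserve/release pairing around the two calls above.  DMA_DEVICE_EXAMPLE
 * is a hypothetical placeholder.  For a shared device dma_request_channel()
 * may sleep until another user frees a channel, so it must not be called
 * from atomic context.
 */
/****************************************************************************/

static int example_with_channel(void)
{
        DMA_Handle_t handle;

        handle = dma_request_channel(DMA_DEVICE_EXAMPLE);
        if (handle < 0) {
                /* -EBUSY, -ENODEV or -ERESTARTSYS */
                return handle;
        }

        /* ... queue one or more transfers using 'handle' here ... */

        dma_free_channel(handle);

        return 0;
}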
1051 | |||
1052 | /****************************************************************************/ | ||
1053 | /** | ||
1054 | * Determines if a given device has been configured to use a shared | ||
1055 | * channel. | ||
1056 | * | ||
1057 | * @return | ||
1058 | * 0 Device uses a dedicated channel | ||
1059 | * > 0 Device uses a shared channel | ||
1060 | * < 0 Error code | ||
1061 | */ | ||
1062 | /****************************************************************************/ | ||
1063 | |||
1064 | int dma_device_is_channel_shared(DMA_Device_t device /* Device to check. */ | ||
1065 | ) { | ||
1066 | DMA_DeviceAttribute_t *devAttr; | ||
1067 | |||
1068 | if (!IsDeviceValid(device)) { | ||
1069 | return -ENODEV; | ||
1070 | } | ||
1071 | devAttr = &DMA_gDeviceAttribute[device]; | ||
1072 | |||
1073 | return ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) == 0); | ||
1074 | } | ||
1075 | |||
1076 | EXPORT_SYMBOL(dma_device_is_channel_shared); | ||
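/****************************************************************************/
/*
 * Illustrative sketch only (not part of the original driver): using the
 * query above to refuse devices whose channel request might sleep.  The
 * -EAGAIN policy is a hypothetical choice; only a dedicated channel avoids
 * the wait-for-a-free-channel loop inside dma_request_channel().
 */
/****************************************************************************/

static DMA_Handle_t example_request_if_dedicated(DMA_Device_t dev)
{
        int shared = dma_device_is_channel_shared(dev);

        if (shared < 0) {
                /* Invalid device */
                return shared;
        }

        if (shared > 0) {
                /* Shared channel: requesting it could block. */
                return -EAGAIN;
        }

        return dma_request_channel(dev);
}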
1077 | |||
1078 | /****************************************************************************/ | ||
1079 | /** | ||
1080 | * Allocates buffers for the descriptors. This is normally done automatically | ||
1081 | * but needs to be done explicitly when initiating a DMA from interrupt | ||
1082 | * context. | ||
1083 | * | ||
1084 | * @return | ||
1085 | * 0 Descriptors were allocated successfully | ||
1086 | * -EINVAL Invalid device type for this kind of transfer | ||
1087 | * (i.e. the device is _MEM_TO_DEV and not _DEV_TO_MEM) | ||
1088 | * -ENOMEM Memory exhausted | ||
1089 | */ | ||
1090 | /****************************************************************************/ | ||
1091 | |||
1092 | int dma_alloc_descriptors(DMA_Handle_t handle, /* DMA Handle */ | ||
1093 | dmacHw_TRANSFER_TYPE_e transferType, /* Type of transfer being performed */ | ||
1094 | dma_addr_t srcData, /* Place to get data to write to device */ | ||
1095 | dma_addr_t dstData, /* Pointer to device data address */ | ||
1096 | size_t numBytes /* Number of bytes to transfer to the device */ | ||
1097 | ) { | ||
1098 | DMA_Channel_t *channel; | ||
1099 | DMA_DeviceAttribute_t *devAttr; | ||
1100 | int numDescriptors; | ||
1101 | size_t ringBytesRequired; | ||
1102 | int rc = 0; | ||
1103 | |||
1104 | channel = HandleToChannel(handle); | ||
1105 | if (channel == NULL) { | ||
1106 | return -ENODEV; | ||
1107 | } | ||
1108 | |||
1109 | devAttr = &DMA_gDeviceAttribute[channel->devType]; | ||
1110 | |||
1111 | if (devAttr->config.transferType != transferType) { | ||
1112 | return -EINVAL; | ||
1113 | } | ||
1114 | |||
1115 | /* Figure out how many descriptors we need. */ | ||
1116 | |||
1117 | /* printk("srcData: 0x%08x dstData: 0x%08x, numBytes: %d\n", */ | ||
1118 | /* srcData, dstData, numBytes); */ | ||
1119 | |||
1120 | numDescriptors = dmacHw_calculateDescriptorCount(&devAttr->config, | ||
1121 | (void *)srcData, | ||
1122 | (void *)dstData, | ||
1123 | numBytes); | ||
1124 | if (numDescriptors < 0) { | ||
1125 | printk(KERN_ERR "%s: dmacHw_calculateDescriptorCount failed\n", | ||
1126 | __func__); | ||
1127 | return -EINVAL; | ||
1128 | } | ||
1129 | |||
1130 | /* Check to see if we can reuse the existing descriptor ring, or if we need to allocate */ | ||
1131 | /* a new one. */ | ||
1132 | |||
1133 | ringBytesRequired = dmacHw_descriptorLen(numDescriptors); | ||
1134 | |||
1135 | /* printk("ringBytesRequired: %d\n", ringBytesRequired); */ | ||
1136 | |||
1137 | if (ringBytesRequired > devAttr->ring.bytesAllocated) { | ||
1138 | /* Make sure that this code path is never taken from interrupt context. */ | ||
1139 | /* It's OK for an interrupt to initiate a DMA transfer, but the descriptor */ | ||
1140 | /* allocation needs to have already been done. */ | ||
1141 | |||
1142 | might_sleep(); | ||
1143 | |||
1144 | /* Free the old descriptor ring and allocate a new one. */ | ||
1145 | |||
1146 | dma_free_descriptor_ring(&devAttr->ring); | ||
1147 | |||
1148 | /* And allocate a new one. */ | ||
1149 | |||
1150 | rc = | ||
1151 | dma_alloc_descriptor_ring(&devAttr->ring, | ||
1152 | numDescriptors); | ||
1153 | if (rc < 0) { | ||
1154 | printk(KERN_ERR | ||
1155 | "%s: dma_alloc_descriptor_ring(%d) failed\n", | ||
1156 | __func__, numDescriptors); | ||
1157 | return rc; | ||
1158 | } | ||
1159 | /* Setup the descriptor for this transfer */ | ||
1160 | |||
1161 | if (dmacHw_initDescriptor(devAttr->ring.virtAddr, | ||
1162 | devAttr->ring.physAddr, | ||
1163 | devAttr->ring.bytesAllocated, | ||
1164 | numDescriptors) < 0) { | ||
1165 | printk(KERN_ERR "%s: dmacHw_initDescriptor failed\n", | ||
1166 | __func__); | ||
1167 | return -EINVAL; | ||
1168 | } | ||
1169 | } else { | ||
1170 | /* We've already got enough ring buffer allocated. All we need to do is reset */ | ||
1171 | /* any control information, just in case the previous DMA was stopped. */ | ||
1172 | |||
1173 | dmacHw_resetDescriptorControl(devAttr->ring.virtAddr); | ||
1174 | } | ||
1175 | |||
1176 | /* dma_alloc/free both set the prevSrc/DstData to 0. If they happen to be the same */ | ||
1177 | /* as last time, then we don't need to call setDataDescriptor again. */ | ||
1178 | |||
1179 | if (dmacHw_setDataDescriptor(&devAttr->config, | ||
1180 | devAttr->ring.virtAddr, | ||
1181 | (void *)srcData, | ||
1182 | (void *)dstData, numBytes) < 0) { | ||
1183 | printk(KERN_ERR "%s: dmacHw_setDataDescriptor failed\n", | ||
1184 | __func__); | ||
1185 | return -EINVAL; | ||
1186 | } | ||
1187 | |||
1188 | /* Remember the critical information for this transfer so that we can eliminate */ | ||
1189 | /* another call to dma_alloc_descriptors if the caller reuses the same buffers */ | ||
1190 | |||
1191 | devAttr->prevSrcData = srcData; | ||
1192 | devAttr->prevDstData = dstData; | ||
1193 | devAttr->prevNumBytes = numBytes; | ||
1194 | |||
1195 | return 0; | ||
1196 | } | ||
1197 | |||
1198 | EXPORT_SYMBOL(dma_alloc_descriptors); | ||
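/****************************************************************************/
/*
 * Illustrative sketch only (not part of the original driver): pre-allocating
 * the descriptors from process context so that the transfer itself can later
 * be started from interrupt context with dma_start_transfer().  The handle,
 * addresses and the assumption that the device is configured as
 * MEM_TO_PERIPHERAL are hypothetical.
 */
/****************************************************************************/

static int example_prepare_for_irq_start(DMA_Handle_t handle,
                                         dma_addr_t srcPhys,
                                         dma_addr_t devFifoPhys,
                                         size_t numBytes)
{
        /* May sleep, so it has to happen before entering atomic context. */
        return dma_alloc_descriptors(handle,
                                     dmacHw_TRANSFER_TYPE_MEM_TO_PERIPHERAL,
                                     srcPhys, devFifoPhys, numBytes);
}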
1199 | |||
1200 | /****************************************************************************/ | ||
1201 | /** | ||
1202 | * Allocates and sets up descriptors for a double buffered circular buffer. | ||
1203 | * | ||
1204 | * This is primarily intended to be used for things like the ingress samples | ||
1205 | * from a microphone. | ||
1206 | * | ||
1207 | * @return | ||
1208 | * > 0 Number of descriptors actually allocated. | ||
1209 | * -EINVAL Invalid device type for this kind of transfer | ||
1210 | * (i.e. the device is _MEM_TO_DEV and not _DEV_TO_MEM) | ||
1211 | * -ENOMEM Memory exhausted | ||
1212 | */ | ||
1213 | /****************************************************************************/ | ||
1214 | |||
1215 | int dma_alloc_double_dst_descriptors(DMA_Handle_t handle, /* DMA Handle */ | ||
1216 | dma_addr_t srcData, /* Physical address of source data */ | ||
1217 | dma_addr_t dstData1, /* Physical address of first destination buffer */ | ||
1218 | dma_addr_t dstData2, /* Physical address of second destination buffer */ | ||
1219 | size_t numBytes /* Number of bytes in each destination buffer */ | ||
1220 | ) { | ||
1221 | DMA_Channel_t *channel; | ||
1222 | DMA_DeviceAttribute_t *devAttr; | ||
1223 | int numDst1Descriptors; | ||
1224 | int numDst2Descriptors; | ||
1225 | int numDescriptors; | ||
1226 | size_t ringBytesRequired; | ||
1227 | int rc = 0; | ||
1228 | |||
1229 | channel = HandleToChannel(handle); | ||
1230 | if (channel == NULL) { | ||
1231 | return -ENODEV; | ||
1232 | } | ||
1233 | |||
1234 | devAttr = &DMA_gDeviceAttribute[channel->devType]; | ||
1235 | |||
1236 | /* Figure out how many descriptors we need. */ | ||
1237 | |||
1238 | /* printk("srcData: 0x%08x dstData: 0x%08x, numBytes: %d\n", */ | ||
1239 | /* srcData, dstData, numBytes); */ | ||
1240 | |||
1241 | numDst1Descriptors = | ||
1242 | dmacHw_calculateDescriptorCount(&devAttr->config, (void *)srcData, | ||
1243 | (void *)dstData1, numBytes); | ||
1244 | if (numDst1Descriptors < 0) { | ||
1245 | return -EINVAL; | ||
1246 | } | ||
1247 | numDst2Descriptors = | ||
1248 | dmacHw_calculateDescriptorCount(&devAttr->config, (void *)srcData, | ||
1249 | (void *)dstData2, numBytes); | ||
1250 | if (numDst2Descriptors < 0) { | ||
1251 | return -EINVAL; | ||
1252 | } | ||
1253 | numDescriptors = numDst1Descriptors + numDst2Descriptors; | ||
1254 | /* printk("numDescriptors: %d\n", numDescriptors); */ | ||
1255 | |||
1256 | /* Check to see if we can reuse the existing descriptor ring, or if we need to allocate */ | ||
1257 | /* a new one. */ | ||
1258 | |||
1259 | ringBytesRequired = dmacHw_descriptorLen(numDescriptors); | ||
1260 | |||
1261 | /* printk("ringBytesRequired: %d\n", ringBytesRequired); */ | ||
1262 | |||
1263 | if (ringBytesRequired > devAttr->ring.bytesAllocated) { | ||
1264 | /* Make sure that this code path is never taken from interrupt context. */ | ||
1265 | /* It's OK for an interrupt to initiate a DMA transfer, but the descriptor */ | ||
1266 | /* allocation needs to have already been done. */ | ||
1267 | |||
1268 | might_sleep(); | ||
1269 | |||
1270 | /* Free the old descriptor ring and allocate a new one. */ | ||
1271 | |||
1272 | dma_free_descriptor_ring(&devAttr->ring); | ||
1273 | |||
1274 | /* And allocate a new one. */ | ||
1275 | |||
1276 | rc = | ||
1277 | dma_alloc_descriptor_ring(&devAttr->ring, | ||
1278 | numDescriptors); | ||
1279 | if (rc < 0) { | ||
1280 | printk(KERN_ERR | ||
1281 | "%s: dma_alloc_descriptor_ring(%d) failed\n", | ||
1282 | __func__, ringBytesRequired); | ||
1283 | return rc; | ||
1284 | } | ||
1285 | } | ||
1286 | |||
1287 | /* Setup the descriptor for this transfer. Since this function is used with */ | ||
1288 | /* CONTINUOUS DMA operations, we need to reinitialize every time, otherwise */ | ||
1289 | /* setDataDescriptor will keep trying to append onto the end. */ | ||
1290 | |||
1291 | if (dmacHw_initDescriptor(devAttr->ring.virtAddr, | ||
1292 | devAttr->ring.physAddr, | ||
1293 | devAttr->ring.bytesAllocated, | ||
1294 | numDescriptors) < 0) { | ||
1295 | printk(KERN_ERR "%s: dmacHw_initDescriptor failed\n", __func__); | ||
1296 | return -EINVAL; | ||
1297 | } | ||
1298 | |||
1299 | /* dma_alloc/free both set the prevSrc/DstData to 0. If they happen to be the same */ | ||
1300 | /* as last time, then we don't need to call setDataDescriptor again. */ | ||
1301 | |||
1302 | if (dmacHw_setDataDescriptor(&devAttr->config, | ||
1303 | devAttr->ring.virtAddr, | ||
1304 | (void *)srcData, | ||
1305 | (void *)dstData1, numBytes) < 0) { | ||
1306 | printk(KERN_ERR "%s: dmacHw_setDataDescriptor 1 failed\n", | ||
1307 | __func__); | ||
1308 | return -EINVAL; | ||
1309 | } | ||
1310 | if (dmacHw_setDataDescriptor(&devAttr->config, | ||
1311 | devAttr->ring.virtAddr, | ||
1312 | (void *)srcData, | ||
1313 | (void *)dstData2, numBytes) < 0) { | ||
1314 | printk(KERN_ERR "%s: dmacHw_setDataDescriptor 2 failed\n", | ||
1315 | __func__); | ||
1316 | return -EINVAL; | ||
1317 | } | ||
1318 | |||
1319 | /* Callers should use dma_start_transfer rather than dma_transfer_xxx, so we */ | ||
1320 | /* don't try to keep the 'prev' variables up to date. */ | ||
1321 | |||
1322 | devAttr->prevSrcData = 0; | ||
1323 | devAttr->prevDstData = 0; | ||
1324 | devAttr->prevNumBytes = 0; | ||
1325 | |||
1326 | return numDescriptors; | ||
1327 | } | ||
1328 | |||
1329 | EXPORT_SYMBOL(dma_alloc_double_dst_descriptors); | ||
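/****************************************************************************/
/*
 * Illustrative sketch only (not part of the original driver): starting a
 * double-buffered ingress stream as described above.  The handle, the
 * peripheral FIFO address and the two buffer addresses are hypothetical
 * placeholders.
 */
/****************************************************************************/

static int example_start_double_buffer(DMA_Handle_t handle,
                                       dma_addr_t fifoPhys,
                                       dma_addr_t buf1Phys,
                                       dma_addr_t buf2Phys,
                                       size_t bufBytes)
{
        int rc;

        /* Build descriptors that alternate between the two buffers. */
        rc = dma_alloc_double_dst_descriptors(handle, fifoPhys,
                                              buf1Phys, buf2Phys, bufBytes);
        if (rc < 0) {
                return rc;
        }

        /* Descriptors are already set up, so start the transfer directly. */
        return dma_start_transfer(handle);
}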
1330 | |||
1331 | /****************************************************************************/ | ||
1332 | /** | ||
1333 | * Initiates a transfer when the descriptors have already been setup. | ||
1334 | * | ||
1335 | * This is a special case, and normally, the dma_transfer_xxx functions should | ||
1336 | * be used. | ||
1337 | * | ||
1338 | * @return | ||
1339 | * 0 Transfer was started successfully | ||
1340 | * -ENODEV Invalid handle | ||
1341 | */ | ||
1342 | /****************************************************************************/ | ||
1343 | |||
1344 | int dma_start_transfer(DMA_Handle_t handle) | ||
1345 | { | ||
1346 | DMA_Channel_t *channel; | ||
1347 | DMA_DeviceAttribute_t *devAttr; | ||
1348 | |||
1349 | channel = HandleToChannel(handle); | ||
1350 | if (channel == NULL) { | ||
1351 | return -ENODEV; | ||
1352 | } | ||
1353 | devAttr = &DMA_gDeviceAttribute[channel->devType]; | ||
1354 | |||
1355 | dmacHw_initiateTransfer(channel->dmacHwHandle, &devAttr->config, | ||
1356 | devAttr->ring.virtAddr); | ||
1357 | |||
1358 | /* Since we got this far, everything went successfully */ | ||
1359 | |||
1360 | return 0; | ||
1361 | } | ||
1362 | |||
1363 | EXPORT_SYMBOL(dma_start_transfer); | ||
1364 | |||
1365 | /****************************************************************************/ | ||
1366 | /** | ||
1367 | * Stops a previously started DMA transfer. | ||
1368 | * | ||
1369 | * @return | ||
1370 | * 0 Transfer was stopped successfully | ||
1371 | * -ENODEV Invalid handle | ||
1372 | */ | ||
1373 | /****************************************************************************/ | ||
1374 | |||
1375 | int dma_stop_transfer(DMA_Handle_t handle) | ||
1376 | { | ||
1377 | DMA_Channel_t *channel; | ||
1378 | |||
1379 | channel = HandleToChannel(handle); | ||
1380 | if (channel == NULL) { | ||
1381 | return -ENODEV; | ||
1382 | } | ||
1383 | |||
1384 | dmacHw_stopTransfer(channel->dmacHwHandle); | ||
1385 | |||
1386 | return 0; | ||
1387 | } | ||
1388 | |||
1389 | EXPORT_SYMBOL(dma_stop_transfer); | ||
1390 | |||
1391 | /****************************************************************************/ | ||
1392 | /** | ||
1393 | * Waits for a DMA to complete by polling. This function is only intended | ||
1394 | * to be used for testing. Interrupts should be used for most DMA operations. | ||
1395 | */ | ||
1396 | /****************************************************************************/ | ||
1397 | |||
1398 | int dma_wait_transfer_done(DMA_Handle_t handle) | ||
1399 | { | ||
1400 | DMA_Channel_t *channel; | ||
1401 | dmacHw_TRANSFER_STATUS_e status; | ||
1402 | |||
1403 | channel = HandleToChannel(handle); | ||
1404 | if (channel == NULL) { | ||
1405 | return -ENODEV; | ||
1406 | } | ||
1407 | |||
1408 | while ((status = | ||
1409 | dmacHw_transferCompleted(channel->dmacHwHandle)) == | ||
1410 | dmacHw_TRANSFER_STATUS_BUSY) { | ||
1411 | ; | ||
1412 | } | ||
1413 | |||
1414 | if (status == dmacHw_TRANSFER_STATUS_ERROR) { | ||
1415 | printk(KERN_ERR "%s: DMA transfer failed\n", __func__); | ||
1416 | return -EIO; | ||
1417 | } | ||
1418 | return 0; | ||
1419 | } | ||
1420 | |||
1421 | EXPORT_SYMBOL(dma_wait_transfer_done); | ||
1422 | |||
1423 | /****************************************************************************/ | ||
1424 | /** | ||
1425 | * Initiates a DMA, allocating the descriptors as required. | ||
1426 | * | ||
1427 | * @return | ||
1428 | * 0 Transfer was started successfully | ||
1429 | * -EINVAL Invalid device type for this kind of transfer | ||
1430 | * (i.e. the device is _DEV_TO_MEM and not _MEM_TO_DEV) | ||
1431 | */ | ||
1432 | /****************************************************************************/ | ||
1433 | |||
1434 | int dma_transfer(DMA_Handle_t handle, /* DMA Handle */ | ||
1435 | dmacHw_TRANSFER_TYPE_e transferType, /* Type of transfer being performed */ | ||
1436 | dma_addr_t srcData, /* Place to get data to write to device */ | ||
1437 | dma_addr_t dstData, /* Pointer to device data address */ | ||
1438 | size_t numBytes /* Number of bytes to transfer to the device */ | ||
1439 | ) { | ||
1440 | DMA_Channel_t *channel; | ||
1441 | DMA_DeviceAttribute_t *devAttr; | ||
1442 | int rc = 0; | ||
1443 | |||
1444 | channel = HandleToChannel(handle); | ||
1445 | if (channel == NULL) { | ||
1446 | return -ENODEV; | ||
1447 | } | ||
1448 | |||
1449 | devAttr = &DMA_gDeviceAttribute[channel->devType]; | ||
1450 | |||
1451 | if (devAttr->config.transferType != transferType) { | ||
1452 | return -EINVAL; | ||
1453 | } | ||
1454 | |||
1455 | /* We keep track of the information about the previous request for this */ | ||
1456 | /* device, and if the attributes match, then we can use the descriptors we set up */ | ||
1457 | /* the last time, and not have to reinitialize everything. */ | ||
1458 | |||
1459 | { | ||
1460 | rc = | ||
1461 | dma_alloc_descriptors(handle, transferType, srcData, | ||
1462 | dstData, numBytes); | ||
1463 | if (rc != 0) { | ||
1464 | return rc; | ||
1465 | } | ||
1466 | } | ||
1467 | |||
1468 | /* And kick off the transfer */ | ||
1469 | |||
1470 | devAttr->numBytes = numBytes; | ||
1471 | devAttr->transferStartTime = timer_get_tick_count(); | ||
1472 | |||
1473 | dmacHw_initiateTransfer(channel->dmacHwHandle, &devAttr->config, | ||
1474 | devAttr->ring.virtAddr); | ||
1475 | |||
1476 | /* Since we got this far, everything went successfully */ | ||
1477 | |||
1478 | return 0; | ||
1479 | } | ||
1480 | |||
1481 | EXPORT_SYMBOL(dma_transfer); | ||
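/****************************************************************************/
/*
 * Illustrative sketch only (not part of the original driver): a one-shot
 * memory-to-peripheral transfer followed by the polling wait, which the
 * comment above reserves for testing.  DMA_DEVICE_EXAMPLE, the source
 * buffer and the peripheral FIFO address are hypothetical placeholders,
 * and the device is assumed to be configured for MEM_TO_PERIPHERAL.
 */
/****************************************************************************/

static int example_mem_to_dev(dma_addr_t srcPhys, dma_addr_t devFifoPhys,
                              size_t numBytes)
{
        DMA_Handle_t handle;
        int rc;

        handle = dma_request_channel(DMA_DEVICE_EXAMPLE);
        if (handle < 0) {
                return handle;
        }

        rc = dma_transfer(handle, dmacHw_TRANSFER_TYPE_MEM_TO_PERIPHERAL,
                          srcPhys, devFifoPhys, numBytes);
        if (rc == 0) {
                /* Polling wait - test code only; real users take the IRQ. */
                rc = dma_wait_transfer_done(handle);
        }

        dma_free_channel(handle);

        return rc;
}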
1482 | |||
1483 | /****************************************************************************/ | ||
1484 | /** | ||
1485 | * Set the callback function which will be called when a transfer completes. | ||
1486 | * If a NULL callback function is set, then no callback will occur. | ||
1487 | * | ||
1488 | * @note @a devHandler will be called from IRQ context. | ||
1489 | * | ||
1490 | * @return | ||
1491 | * 0 - Success | ||
1492 | * -ENODEV - Device handed in is invalid. | ||
1493 | */ | ||
1494 | /****************************************************************************/ | ||
1495 | |||
1496 | int dma_set_device_handler(DMA_Device_t dev, /* Device to set the callback for. */ | ||
1497 | DMA_DeviceHandler_t devHandler, /* Function to call when the DMA completes */ | ||
1498 | void *userData /* Pointer which will be passed to devHandler. */ | ||
1499 | ) { | ||
1500 | DMA_DeviceAttribute_t *devAttr; | ||
1501 | unsigned long flags; | ||
1502 | |||
1503 | if (!IsDeviceValid(dev)) { | ||
1504 | return -ENODEV; | ||
1505 | } | ||
1506 | devAttr = &DMA_gDeviceAttribute[dev]; | ||
1507 | |||
1508 | local_irq_save(flags); | ||
1509 | |||
1510 | devAttr->userData = userData; | ||
1511 | devAttr->devHandler = devHandler; | ||
1512 | |||
1513 | local_irq_restore(flags); | ||
1514 | |||
1515 | return 0; | ||
1516 | } | ||
1517 | |||
1518 | EXPORT_SYMBOL(dma_set_device_handler); | ||
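/****************************************************************************/
/*
 * Illustrative sketch only (not part of the original driver): installing a
 * completion callback.  DMA_DEVICE_EXAMPLE is a hypothetical placeholder,
 * and the handler prototype below is inferred from the call made in
 * dma_interrupt_handler() above; the authoritative DMA_DeviceHandler_t
 * typedef is expected to live in <mach/dma.h>.
 */
/****************************************************************************/

static void example_dma_done(DMA_Device_t dev, int reason, void *userData)
{
        atomic_t *pending = userData;

        /* Runs in IRQ context, so just record that the transfer finished. */
        atomic_set(pending, 0);
}

static int example_install_handler(atomic_t *pending)
{
        atomic_set(pending, 1);

        return dma_set_device_handler(DMA_DEVICE_EXAMPLE,
                                      example_dma_done, pending);
}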