Diffstat (limited to 'arch/arm/mach-bcmring/dma.c')
-rw-r--r--  arch/arm/mach-bcmring/dma.c  2329
1 file changed, 2329 insertions, 0 deletions

diff --git a/arch/arm/mach-bcmring/dma.c b/arch/arm/mach-bcmring/dma.c
new file mode 100644
index 00000000000..0ca00050666
--- /dev/null
+++ b/arch/arm/mach-bcmring/dma.c
@@ -0,0 +1,2329 @@
/*****************************************************************************
* Copyright 2004 - 2008 Broadcom Corporation.  All rights reserved.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available at
* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a
* license other than the GPL, without Broadcom's express prior written
* consent.
*****************************************************************************/

/****************************************************************************/
/**
*   @file   dma.c
*
*   @brief  Implements the DMA interface.
*/
/****************************************************************************/

/* ---- Include Files ---------------------------------------------------- */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>

#include <mach/timer.h>

#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/atomic.h>
#include <mach/dma.h>

/* I don't quite understand why dc4 fails when this is set to 1 and DMA is enabled */
/* especially since dc4 doesn't use kmalloc'd memory. */

#define ALLOW_MAP_OF_KMALLOC_MEMORY 0

/* ---- Public Variables ------------------------------------------------- */

/* ---- Private Constants and Types -------------------------------------- */

#define MAKE_HANDLE(controllerIdx, channelIdx) (((controllerIdx) << 4) | (channelIdx))

#define CONTROLLER_FROM_HANDLE(handle) (((handle) >> 4) & 0x0f)
#define CHANNEL_FROM_HANDLE(handle) ((handle) & 0x0f)

#define DMA_MAP_DEBUG 0

#if DMA_MAP_DEBUG
#define DMA_MAP_PRINT(fmt, args...) printk("%s: " fmt, __func__, ## args)
#else
#define DMA_MAP_PRINT(fmt, args...)
#endif
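
/*
* Editor's note - a worked example of the handle encoding above (not part
* of the original source): the controller index lives in bits 7:4 of the
* handle and the channel index in bits 3:0, so for controller 1, channel 2:
*
*     DMA_Handle_t handle = MAKE_HANDLE(1, 2);    handle == 0x12
*     CONTROLLER_FROM_HANDLE(handle)              == 1
*     CHANNEL_FROM_HANDLE(handle)                 == 2
*/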
61 | |||
62 | /* ---- Private Variables ------------------------------------------------ */ | ||
63 | |||
64 | static DMA_Global_t gDMA; | ||
65 | static struct proc_dir_entry *gDmaDir; | ||
66 | |||
67 | static atomic_t gDmaStatMemTypeKmalloc = ATOMIC_INIT(0); | ||
68 | static atomic_t gDmaStatMemTypeVmalloc = ATOMIC_INIT(0); | ||
69 | static atomic_t gDmaStatMemTypeUser = ATOMIC_INIT(0); | ||
70 | static atomic_t gDmaStatMemTypeCoherent = ATOMIC_INIT(0); | ||
71 | |||
72 | #include "dma_device.c" | ||
73 | |||
74 | /* ---- Private Function Prototypes -------------------------------------- */ | ||
75 | |||
76 | /* ---- Functions ------------------------------------------------------- */ | ||
77 | |||
78 | /****************************************************************************/ | ||
79 | /** | ||
80 | * Displays information for /proc/dma/mem-type | ||
81 | */ | ||
82 | /****************************************************************************/ | ||
83 | |||
84 | static int dma_proc_read_mem_type(char *buf, char **start, off_t offset, | ||
85 | int count, int *eof, void *data) | ||
86 | { | ||
87 | int len = 0; | ||
88 | |||
89 | len += sprintf(buf + len, "dma_map_mem statistics\n"); | ||
90 | len += | ||
91 | sprintf(buf + len, "coherent: %d\n", | ||
92 | atomic_read(&gDmaStatMemTypeCoherent)); | ||
93 | len += | ||
94 | sprintf(buf + len, "kmalloc: %d\n", | ||
95 | atomic_read(&gDmaStatMemTypeKmalloc)); | ||
96 | len += | ||
97 | sprintf(buf + len, "vmalloc: %d\n", | ||
98 | atomic_read(&gDmaStatMemTypeVmalloc)); | ||
99 | len += | ||
100 | sprintf(buf + len, "user: %d\n", | ||
101 | atomic_read(&gDmaStatMemTypeUser)); | ||
102 | |||
103 | return len; | ||
104 | } | ||
105 | |||
106 | /****************************************************************************/ | ||
107 | /** | ||
108 | * Displays information for /proc/dma/channels | ||
109 | */ | ||
110 | /****************************************************************************/ | ||
111 | |||
112 | static int dma_proc_read_channels(char *buf, char **start, off_t offset, | ||
113 | int count, int *eof, void *data) | ||
114 | { | ||
115 | int controllerIdx; | ||
116 | int channelIdx; | ||
117 | int limit = count - 200; | ||
118 | int len = 0; | ||
119 | DMA_Channel_t *channel; | ||
120 | |||
121 | if (down_interruptible(&gDMA.lock) < 0) { | ||
122 | return -ERESTARTSYS; | ||
123 | } | ||
124 | |||
125 | for (controllerIdx = 0; controllerIdx < DMA_NUM_CONTROLLERS; | ||
126 | controllerIdx++) { | ||
127 | for (channelIdx = 0; channelIdx < DMA_NUM_CHANNELS; | ||
128 | channelIdx++) { | ||
129 | if (len >= limit) { | ||
130 | break; | ||
131 | } | ||
132 | |||
133 | channel = | ||
134 | &gDMA.controller[controllerIdx].channel[channelIdx]; | ||
135 | |||
136 | len += | ||
137 | sprintf(buf + len, "%d:%d ", controllerIdx, | ||
138 | channelIdx); | ||
139 | |||
140 | if ((channel->flags & DMA_CHANNEL_FLAG_IS_DEDICATED) != | ||
141 | 0) { | ||
142 | len += | ||
143 | sprintf(buf + len, "Dedicated for %s ", | ||
144 | DMA_gDeviceAttribute[channel-> | ||
145 | devType].name); | ||
146 | } else { | ||
147 | len += sprintf(buf + len, "Shared "); | ||
148 | } | ||
149 | |||
150 | if ((channel->flags & DMA_CHANNEL_FLAG_NO_ISR) != 0) { | ||
151 | len += sprintf(buf + len, "No ISR "); | ||
152 | } | ||
153 | |||
154 | if ((channel->flags & DMA_CHANNEL_FLAG_LARGE_FIFO) != 0) { | ||
155 | len += sprintf(buf + len, "Fifo: 128 "); | ||
156 | } else { | ||
157 | len += sprintf(buf + len, "Fifo: 64 "); | ||
158 | } | ||
159 | |||
160 | if ((channel->flags & DMA_CHANNEL_FLAG_IN_USE) != 0) { | ||
161 | len += | ||
162 | sprintf(buf + len, "InUse by %s", | ||
163 | DMA_gDeviceAttribute[channel-> | ||
164 | devType].name); | ||
165 | #if (DMA_DEBUG_TRACK_RESERVATION) | ||
166 | len += | ||
167 | sprintf(buf + len, " (%s:%d)", | ||
168 | channel->fileName, | ||
169 | channel->lineNum); | ||
170 | #endif | ||
171 | } else { | ||
172 | len += sprintf(buf + len, "Avail "); | ||
173 | } | ||
174 | |||
175 | if (channel->lastDevType != DMA_DEVICE_NONE) { | ||
176 | len += | ||
177 | sprintf(buf + len, "Last use: %s ", | ||
178 | DMA_gDeviceAttribute[channel-> | ||
179 | lastDevType]. | ||
180 | name); | ||
181 | } | ||
182 | |||
183 | len += sprintf(buf + len, "\n"); | ||
184 | } | ||
185 | } | ||
186 | up(&gDMA.lock); | ||
187 | *eof = 1; | ||
188 | |||
189 | return len; | ||
190 | } | ||
191 | |||
192 | /****************************************************************************/ | ||
193 | /** | ||
194 | * Displays information for /proc/dma/devices | ||
195 | */ | ||
196 | /****************************************************************************/ | ||
197 | |||
198 | static int dma_proc_read_devices(char *buf, char **start, off_t offset, | ||
199 | int count, int *eof, void *data) | ||
200 | { | ||
201 | int limit = count - 200; | ||
202 | int len = 0; | ||
203 | int devIdx; | ||
204 | |||
205 | if (down_interruptible(&gDMA.lock) < 0) { | ||
206 | return -ERESTARTSYS; | ||
207 | } | ||
208 | |||
209 | for (devIdx = 0; devIdx < DMA_NUM_DEVICE_ENTRIES; devIdx++) { | ||
210 | DMA_DeviceAttribute_t *devAttr = &DMA_gDeviceAttribute[devIdx]; | ||
211 | |||
212 | if (devAttr->name == NULL) { | ||
213 | continue; | ||
214 | } | ||
215 | |||
216 | if (len >= limit) { | ||
217 | break; | ||
218 | } | ||
219 | |||
220 | len += sprintf(buf + len, "%-12s ", devAttr->name); | ||
221 | |||
222 | if ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) != 0) { | ||
223 | len += | ||
224 | sprintf(buf + len, "Dedicated %d:%d ", | ||
225 | devAttr->dedicatedController, | ||
226 | devAttr->dedicatedChannel); | ||
227 | } else { | ||
228 | len += sprintf(buf + len, "Shared DMA:"); | ||
229 | if ((devAttr->flags & DMA_DEVICE_FLAG_ON_DMA0) != 0) { | ||
230 | len += sprintf(buf + len, "0"); | ||
231 | } | ||
232 | if ((devAttr->flags & DMA_DEVICE_FLAG_ON_DMA1) != 0) { | ||
233 | len += sprintf(buf + len, "1"); | ||
234 | } | ||
235 | len += sprintf(buf + len, " "); | ||
236 | } | ||
237 | if ((devAttr->flags & DMA_DEVICE_FLAG_NO_ISR) != 0) { | ||
238 | len += sprintf(buf + len, "NoISR "); | ||
239 | } | ||
240 | if ((devAttr->flags & DMA_DEVICE_FLAG_ALLOW_LARGE_FIFO) != 0) { | ||
241 | len += sprintf(buf + len, "Allow-128 "); | ||
242 | } | ||
243 | |||
244 | len += | ||
245 | sprintf(buf + len, | ||
246 | "Xfer #: %Lu Ticks: %Lu Bytes: %Lu DescLen: %u\n", | ||
247 | devAttr->numTransfers, devAttr->transferTicks, | ||
248 | devAttr->transferBytes, | ||
249 | devAttr->ring.bytesAllocated); | ||
250 | |||
251 | } | ||
252 | |||
253 | up(&gDMA.lock); | ||
254 | *eof = 1; | ||
255 | |||
256 | return len; | ||
257 | } | ||
258 | |||
259 | /****************************************************************************/ | ||
260 | /** | ||
261 | * Determines if a DMA_Device_t is "valid". | ||
262 | * | ||
263 | * @return | ||
264 | * TRUE - dma device is valid | ||
265 | * FALSE - dma device isn't valid | ||
266 | */ | ||
267 | /****************************************************************************/ | ||
268 | |||
269 | static inline int IsDeviceValid(DMA_Device_t device) | ||
270 | { | ||
271 | return (device >= 0) && (device < DMA_NUM_DEVICE_ENTRIES); | ||
272 | } | ||
273 | |||
274 | /****************************************************************************/ | ||
275 | /** | ||
276 | * Translates a DMA handle into a pointer to a channel. | ||
277 | * | ||
278 | * @return | ||
279 | * non-NULL - pointer to DMA_Channel_t | ||
280 | * NULL - DMA Handle was invalid | ||
281 | */ | ||
282 | /****************************************************************************/ | ||
283 | |||
284 | static inline DMA_Channel_t *HandleToChannel(DMA_Handle_t handle) | ||
285 | { | ||
286 | int controllerIdx; | ||
287 | int channelIdx; | ||
288 | |||
289 | controllerIdx = CONTROLLER_FROM_HANDLE(handle); | ||
290 | channelIdx = CHANNEL_FROM_HANDLE(handle); | ||
291 | |||
292 | if ((controllerIdx > DMA_NUM_CONTROLLERS) | ||
293 | || (channelIdx > DMA_NUM_CHANNELS)) { | ||
294 | return NULL; | ||
295 | } | ||
296 | return &gDMA.controller[controllerIdx].channel[channelIdx]; | ||
297 | } | ||
298 | |||
299 | /****************************************************************************/ | ||
300 | /** | ||
301 | * Interrupt handler which is called to process DMA interrupts. | ||
302 | */ | ||
303 | /****************************************************************************/ | ||
304 | |||
305 | static irqreturn_t dma_interrupt_handler(int irq, void *dev_id) | ||
306 | { | ||
307 | DMA_Channel_t *channel; | ||
308 | DMA_DeviceAttribute_t *devAttr; | ||
309 | int irqStatus; | ||
310 | |||
311 | channel = (DMA_Channel_t *) dev_id; | ||
312 | |||
313 | /* Figure out why we were called, and knock down the interrupt */ | ||
314 | |||
315 | irqStatus = dmacHw_getInterruptStatus(channel->dmacHwHandle); | ||
316 | dmacHw_clearInterrupt(channel->dmacHwHandle); | ||
317 | |||
318 | if ((channel->devType < 0) | ||
319 | || (channel->devType > DMA_NUM_DEVICE_ENTRIES)) { | ||
320 | printk(KERN_ERR "dma_interrupt_handler: Invalid devType: %d\n", | ||
321 | channel->devType); | ||
322 | return IRQ_NONE; | ||
323 | } | ||
324 | devAttr = &DMA_gDeviceAttribute[channel->devType]; | ||
325 | |||
326 | /* Update stats */ | ||
327 | |||
328 | if ((irqStatus & dmacHw_INTERRUPT_STATUS_TRANS) != 0) { | ||
329 | devAttr->transferTicks += | ||
330 | (timer_get_tick_count() - devAttr->transferStartTime); | ||
331 | } | ||
332 | |||
333 | if ((irqStatus & dmacHw_INTERRUPT_STATUS_ERROR) != 0) { | ||
334 | printk(KERN_ERR | ||
335 | "dma_interrupt_handler: devType :%d DMA error (%s)\n", | ||
336 | channel->devType, devAttr->name); | ||
337 | } else { | ||
338 | devAttr->numTransfers++; | ||
339 | devAttr->transferBytes += devAttr->numBytes; | ||
340 | } | ||
341 | |||
342 | /* Call any installed handler */ | ||
343 | |||
344 | if (devAttr->devHandler != NULL) { | ||
345 | devAttr->devHandler(channel->devType, irqStatus, | ||
346 | devAttr->userData); | ||
347 | } | ||
348 | |||
349 | return IRQ_HANDLED; | ||
350 | } | ||
351 | |||
352 | /****************************************************************************/ | ||
353 | /** | ||
354 | * Allocates memory to hold a descriptor ring. The descriptor ring then | ||
355 | * needs to be populated by making one or more calls to | ||
356 | * dna_add_descriptors. | ||
357 | * | ||
358 | * The returned descriptor ring will be automatically initialized. | ||
359 | * | ||
360 | * @return | ||
361 | * 0 Descriptor ring was allocated successfully | ||
362 | * -EINVAL Invalid parameters passed in | ||
363 | * -ENOMEM Unable to allocate memory for the desired number of descriptors. | ||
364 | */ | ||
365 | /****************************************************************************/ | ||
366 | |||
367 | int dma_alloc_descriptor_ring(DMA_DescriptorRing_t *ring, /* Descriptor ring to populate */ | ||
368 | int numDescriptors /* Number of descriptors that need to be allocated. */ | ||
369 | ) { | ||
370 | size_t bytesToAlloc = dmacHw_descriptorLen(numDescriptors); | ||
371 | |||
372 | if ((ring == NULL) || (numDescriptors <= 0)) { | ||
373 | return -EINVAL; | ||
374 | } | ||
375 | |||
376 | ring->physAddr = 0; | ||
377 | ring->descriptorsAllocated = 0; | ||
378 | ring->bytesAllocated = 0; | ||
379 | |||
380 | ring->virtAddr = dma_alloc_writecombine(NULL, | ||
381 | bytesToAlloc, | ||
382 | &ring->physAddr, | ||
383 | GFP_KERNEL); | ||
384 | if (ring->virtAddr == NULL) { | ||
385 | return -ENOMEM; | ||
386 | } | ||
387 | |||
388 | ring->bytesAllocated = bytesToAlloc; | ||
389 | ring->descriptorsAllocated = numDescriptors; | ||
390 | |||
391 | return dma_init_descriptor_ring(ring, numDescriptors); | ||
392 | } | ||
393 | |||
394 | EXPORT_SYMBOL(dma_alloc_descriptor_ring); | ||
395 | |||
396 | /****************************************************************************/ | ||
397 | /** | ||
398 | * Releases the memory which was previously allocated for a descriptor ring. | ||
399 | */ | ||
400 | /****************************************************************************/ | ||
401 | |||
402 | void dma_free_descriptor_ring(DMA_DescriptorRing_t *ring /* Descriptor to release */ | ||
403 | ) { | ||
404 | if (ring->virtAddr != NULL) { | ||
405 | dma_free_writecombine(NULL, | ||
406 | ring->bytesAllocated, | ||
407 | ring->virtAddr, ring->physAddr); | ||
408 | } | ||
409 | |||
410 | ring->bytesAllocated = 0; | ||
411 | ring->descriptorsAllocated = 0; | ||
412 | ring->virtAddr = NULL; | ||
413 | ring->physAddr = 0; | ||
414 | } | ||
415 | |||
416 | EXPORT_SYMBOL(dma_free_descriptor_ring); | ||
417 | |||
418 | /****************************************************************************/ | ||
419 | /** | ||
420 | * Initializes a descriptor ring, so that descriptors can be added to it. | ||
421 | * Once a descriptor ring has been allocated, it may be reinitialized for | ||
422 | * use with additional/different regions of memory. | ||
423 | * | ||
424 | * Note that if 7 descriptors are allocated, it's perfectly acceptable to | ||
425 | * initialize the ring with a smaller number of descriptors. The amount | ||
426 | * of memory allocated for the descriptor ring will not be reduced, and | ||
427 | * the descriptor ring may be reinitialized later | ||
428 | * | ||
429 | * @return | ||
430 | * 0 Descriptor ring was initialized successfully | ||
431 | * -ENOMEM The descriptor which was passed in has insufficient space | ||
432 | * to hold the desired number of descriptors. | ||
433 | */ | ||
434 | /****************************************************************************/ | ||
435 | |||
436 | int dma_init_descriptor_ring(DMA_DescriptorRing_t *ring, /* Descriptor ring to initialize */ | ||
437 | int numDescriptors /* Number of descriptors to initialize. */ | ||
438 | ) { | ||
439 | if (ring->virtAddr == NULL) { | ||
440 | return -EINVAL; | ||
441 | } | ||
442 | if (dmacHw_initDescriptor(ring->virtAddr, | ||
443 | ring->physAddr, | ||
444 | ring->bytesAllocated, numDescriptors) < 0) { | ||
445 | printk(KERN_ERR | ||
446 | "dma_init_descriptor_ring: dmacHw_initDescriptor failed\n"); | ||
447 | return -ENOMEM; | ||
448 | } | ||
449 | |||
450 | return 0; | ||
451 | } | ||
452 | |||
453 | EXPORT_SYMBOL(dma_init_descriptor_ring); | ||
454 | |||
455 | /****************************************************************************/ | ||
456 | /** | ||
457 | * Determines the number of descriptors which would be required for a | ||
458 | * transfer of the indicated memory region. | ||
459 | * | ||
460 | * This function also needs to know which DMA device this transfer will | ||
461 | * be destined for, so that the appropriate DMA configuration can be retrieved. | ||
462 | * DMA parameters such as transfer width, and whether this is a memory-to-memory | ||
463 | * or memory-to-peripheral, etc can all affect the actual number of descriptors | ||
464 | * required. | ||
465 | * | ||
466 | * @return | ||
467 | * > 0 Returns the number of descriptors required for the indicated transfer | ||
468 | * -ENODEV - Device handed in is invalid. | ||
469 | * -EINVAL Invalid parameters | ||
470 | * -ENOMEM Memory exhausted | ||
471 | */ | ||
472 | /****************************************************************************/ | ||
473 | |||
474 | int dma_calculate_descriptor_count(DMA_Device_t device, /* DMA Device that this will be associated with */ | ||
475 | dma_addr_t srcData, /* Place to get data to write to device */ | ||
476 | dma_addr_t dstData, /* Pointer to device data address */ | ||
477 | size_t numBytes /* Number of bytes to transfer to the device */ | ||
478 | ) { | ||
479 | int numDescriptors; | ||
480 | DMA_DeviceAttribute_t *devAttr; | ||
481 | |||
482 | if (!IsDeviceValid(device)) { | ||
483 | return -ENODEV; | ||
484 | } | ||
485 | devAttr = &DMA_gDeviceAttribute[device]; | ||
486 | |||
487 | numDescriptors = dmacHw_calculateDescriptorCount(&devAttr->config, | ||
488 | (void *)srcData, | ||
489 | (void *)dstData, | ||
490 | numBytes); | ||
491 | if (numDescriptors < 0) { | ||
492 | printk(KERN_ERR | ||
493 | "dma_calculate_descriptor_count: dmacHw_calculateDescriptorCount failed\n"); | ||
494 | return -EINVAL; | ||
495 | } | ||
496 | |||
497 | return numDescriptors; | ||
498 | } | ||
499 | |||
500 | EXPORT_SYMBOL(dma_calculate_descriptor_count); | ||
501 | |||
502 | /****************************************************************************/ | ||
503 | /** | ||
504 | * Adds a region of memory to the descriptor ring. Note that it may take | ||
505 | * multiple descriptors for each region of memory. It is the callers | ||
506 | * responsibility to allocate a sufficiently large descriptor ring. | ||
507 | * | ||
508 | * @return | ||
509 | * 0 Descriptors were added successfully | ||
510 | * -ENODEV Device handed in is invalid. | ||
511 | * -EINVAL Invalid parameters | ||
512 | * -ENOMEM Memory exhausted | ||
513 | */ | ||
514 | /****************************************************************************/ | ||
515 | |||
516 | int dma_add_descriptors(DMA_DescriptorRing_t *ring, /* Descriptor ring to add descriptors to */ | ||
517 | DMA_Device_t device, /* DMA Device that descriptors are for */ | ||
518 | dma_addr_t srcData, /* Place to get data (memory or device) */ | ||
519 | dma_addr_t dstData, /* Place to put data (memory or device) */ | ||
520 | size_t numBytes /* Number of bytes to transfer to the device */ | ||
521 | ) { | ||
522 | int rc; | ||
523 | DMA_DeviceAttribute_t *devAttr; | ||
524 | |||
525 | if (!IsDeviceValid(device)) { | ||
526 | return -ENODEV; | ||
527 | } | ||
528 | devAttr = &DMA_gDeviceAttribute[device]; | ||
529 | |||
530 | rc = dmacHw_setDataDescriptor(&devAttr->config, | ||
531 | ring->virtAddr, | ||
532 | (void *)srcData, | ||
533 | (void *)dstData, numBytes); | ||
534 | if (rc < 0) { | ||
535 | printk(KERN_ERR | ||
536 | "dma_add_descriptors: dmacHw_setDataDescriptor failed with code: %d\n", | ||
537 | rc); | ||
538 | return -ENOMEM; | ||
539 | } | ||
540 | |||
541 | return 0; | ||
542 | } | ||
543 | |||
544 | EXPORT_SYMBOL(dma_add_descriptors); | ||
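
/*
* Editor's sketch (not part of the original source) of the descriptor-ring
* workflow built from the functions above; DMA_DEVICE_MEM_TO_MEM and the
* physical addresses are illustrative placeholders:
*
*     DMA_DescriptorRing_t ring;
*     int count;
*
*     count = dma_calculate_descriptor_count(DMA_DEVICE_MEM_TO_MEM,
*                                            srcPhys, dstPhys, numBytes);
*     if (count > 0 && dma_alloc_descriptor_ring(&ring, count) == 0) {
*         if (dma_add_descriptors(&ring, DMA_DEVICE_MEM_TO_MEM,
*                                 srcPhys, dstPhys, numBytes) == 0) {
*             dma_set_device_descriptor_ring(DMA_DEVICE_MEM_TO_MEM, &ring);
*         }
*     }
*/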
545 | |||
546 | /****************************************************************************/ | ||
547 | /** | ||
548 | * Sets the descriptor ring associated with a device. | ||
549 | * | ||
550 | * Once set, the descriptor ring will be associated with the device, even | ||
551 | * across channel request/free calls. Passing in a NULL descriptor ring | ||
552 | * will release any descriptor ring currently associated with the device. | ||
553 | * | ||
554 | * Note: If you call dma_transfer, or one of the other dma_alloc_ functions | ||
555 | * the descriptor ring may be released and reallocated. | ||
556 | * | ||
557 | * Note: This function will release the descriptor memory for any current | ||
558 | * descriptor ring associated with this device. | ||
559 | * | ||
560 | * @return | ||
561 | * 0 Descriptors were added successfully | ||
562 | * -ENODEV Device handed in is invalid. | ||
563 | */ | ||
564 | /****************************************************************************/ | ||
565 | |||
566 | int dma_set_device_descriptor_ring(DMA_Device_t device, /* Device to update the descriptor ring for. */ | ||
567 | DMA_DescriptorRing_t *ring /* Descriptor ring to add descriptors to */ | ||
568 | ) { | ||
569 | DMA_DeviceAttribute_t *devAttr; | ||
570 | |||
571 | if (!IsDeviceValid(device)) { | ||
572 | return -ENODEV; | ||
573 | } | ||
574 | devAttr = &DMA_gDeviceAttribute[device]; | ||
575 | |||
576 | /* Free the previously allocated descriptor ring */ | ||
577 | |||
578 | dma_free_descriptor_ring(&devAttr->ring); | ||
579 | |||
580 | if (ring != NULL) { | ||
581 | /* Copy in the new one */ | ||
582 | |||
583 | devAttr->ring = *ring; | ||
584 | } | ||
585 | |||
586 | /* Set things up so that if dma_transfer is called then this descriptor */ | ||
587 | /* ring will get freed. */ | ||
588 | |||
589 | devAttr->prevSrcData = 0; | ||
590 | devAttr->prevDstData = 0; | ||
591 | devAttr->prevNumBytes = 0; | ||
592 | |||
593 | return 0; | ||
594 | } | ||
595 | |||
596 | EXPORT_SYMBOL(dma_set_device_descriptor_ring); | ||
597 | |||
598 | /****************************************************************************/ | ||
599 | /** | ||
600 | * Retrieves the descriptor ring associated with a device. | ||
601 | * | ||
602 | * @return | ||
603 | * 0 Descriptors were added successfully | ||
604 | * -ENODEV Device handed in is invalid. | ||
605 | */ | ||
606 | /****************************************************************************/ | ||
607 | |||
608 | int dma_get_device_descriptor_ring(DMA_Device_t device, /* Device to retrieve the descriptor ring for. */ | ||
609 | DMA_DescriptorRing_t *ring /* Place to store retrieved ring */ | ||
610 | ) { | ||
611 | DMA_DeviceAttribute_t *devAttr; | ||
612 | |||
613 | memset(ring, 0, sizeof(*ring)); | ||
614 | |||
615 | if (!IsDeviceValid(device)) { | ||
616 | return -ENODEV; | ||
617 | } | ||
618 | devAttr = &DMA_gDeviceAttribute[device]; | ||
619 | |||
620 | *ring = devAttr->ring; | ||
621 | |||
622 | return 0; | ||
623 | } | ||
624 | |||
625 | EXPORT_SYMBOL(dma_get_device_descriptor_ring); | ||
626 | |||
627 | /****************************************************************************/ | ||
628 | /** | ||
629 | * Configures a DMA channel. | ||
630 | * | ||
631 | * @return | ||
632 | * >= 0 - Initialization was successful. | ||
633 | * | ||
634 | * -EBUSY - Device is currently being used. | ||
635 | * -ENODEV - Device handed in is invalid. | ||
636 | */ | ||
637 | /****************************************************************************/ | ||
638 | |||
639 | static int ConfigChannel(DMA_Handle_t handle) | ||
640 | { | ||
641 | DMA_Channel_t *channel; | ||
642 | DMA_DeviceAttribute_t *devAttr; | ||
643 | int controllerIdx; | ||
644 | |||
645 | channel = HandleToChannel(handle); | ||
646 | if (channel == NULL) { | ||
647 | return -ENODEV; | ||
648 | } | ||
649 | devAttr = &DMA_gDeviceAttribute[channel->devType]; | ||
650 | controllerIdx = CONTROLLER_FROM_HANDLE(handle); | ||
651 | |||
652 | if ((devAttr->flags & DMA_DEVICE_FLAG_PORT_PER_DMAC) != 0) { | ||
653 | if (devAttr->config.transferType == | ||
654 | dmacHw_TRANSFER_TYPE_MEM_TO_PERIPHERAL) { | ||
655 | devAttr->config.dstPeripheralPort = | ||
656 | devAttr->dmacPort[controllerIdx]; | ||
657 | } else if (devAttr->config.transferType == | ||
658 | dmacHw_TRANSFER_TYPE_PERIPHERAL_TO_MEM) { | ||
659 | devAttr->config.srcPeripheralPort = | ||
660 | devAttr->dmacPort[controllerIdx]; | ||
661 | } | ||
662 | } | ||
663 | |||
664 | if (dmacHw_configChannel(channel->dmacHwHandle, &devAttr->config) != 0) { | ||
665 | printk(KERN_ERR "ConfigChannel: dmacHw_configChannel failed\n"); | ||
666 | return -EIO; | ||
667 | } | ||
668 | |||
669 | return 0; | ||
670 | } | ||
671 | |||
672 | /****************************************************************************/ | ||
673 | /** | ||
674 | * Initializes all of the data structures associated with the DMA. | ||
675 | * @return | ||
676 | * >= 0 - Initialization was successful. | ||
677 | * | ||
678 | * -EBUSY - Device is currently being used. | ||
679 | * -ENODEV - Device handed in is invalid. | ||
680 | */ | ||
681 | /****************************************************************************/ | ||
682 | |||
683 | int dma_init(void) | ||
684 | { | ||
685 | int rc = 0; | ||
686 | int controllerIdx; | ||
687 | int channelIdx; | ||
688 | DMA_Device_t devIdx; | ||
689 | DMA_Channel_t *channel; | ||
690 | DMA_Handle_t dedicatedHandle; | ||
691 | |||
692 | memset(&gDMA, 0, sizeof(gDMA)); | ||
693 | |||
694 | sema_init(&gDMA.lock, 0); | ||
695 | init_waitqueue_head(&gDMA.freeChannelQ); | ||
696 | |||
697 | /* Initialize the Hardware */ | ||
698 | |||
699 | dmacHw_initDma(); | ||
700 | |||
701 | /* Start off by marking all of the DMA channels as shared. */ | ||
702 | |||
703 | for (controllerIdx = 0; controllerIdx < DMA_NUM_CONTROLLERS; | ||
704 | controllerIdx++) { | ||
705 | for (channelIdx = 0; channelIdx < DMA_NUM_CHANNELS; | ||
706 | channelIdx++) { | ||
707 | channel = | ||
708 | &gDMA.controller[controllerIdx].channel[channelIdx]; | ||
709 | |||
710 | channel->flags = 0; | ||
711 | channel->devType = DMA_DEVICE_NONE; | ||
712 | channel->lastDevType = DMA_DEVICE_NONE; | ||
713 | |||
714 | #if (DMA_DEBUG_TRACK_RESERVATION) | ||
715 | channel->fileName = ""; | ||
716 | channel->lineNum = 0; | ||
717 | #endif | ||
718 | |||
719 | channel->dmacHwHandle = | ||
720 | dmacHw_getChannelHandle(dmacHw_MAKE_CHANNEL_ID | ||
721 | (controllerIdx, | ||
722 | channelIdx)); | ||
723 | dmacHw_initChannel(channel->dmacHwHandle); | ||
724 | } | ||
725 | } | ||
726 | |||
727 | /* Record any special attributes that channels may have */ | ||
728 | |||
729 | gDMA.controller[0].channel[0].flags |= DMA_CHANNEL_FLAG_LARGE_FIFO; | ||
730 | gDMA.controller[0].channel[1].flags |= DMA_CHANNEL_FLAG_LARGE_FIFO; | ||
731 | gDMA.controller[1].channel[0].flags |= DMA_CHANNEL_FLAG_LARGE_FIFO; | ||
732 | gDMA.controller[1].channel[1].flags |= DMA_CHANNEL_FLAG_LARGE_FIFO; | ||
733 | |||
734 | /* Now walk through and record the dedicated channels. */ | ||
735 | |||
736 | for (devIdx = 0; devIdx < DMA_NUM_DEVICE_ENTRIES; devIdx++) { | ||
737 | DMA_DeviceAttribute_t *devAttr = &DMA_gDeviceAttribute[devIdx]; | ||
738 | |||
739 | if (((devAttr->flags & DMA_DEVICE_FLAG_NO_ISR) != 0) | ||
740 | && ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) == 0)) { | ||
741 | printk(KERN_ERR | ||
742 | "DMA Device: %s Can only request NO_ISR for dedicated devices\n", | ||
743 | devAttr->name); | ||
744 | rc = -EINVAL; | ||
745 | goto out; | ||
746 | } | ||
747 | |||
748 | if ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) != 0) { | ||
749 | /* This is a dedicated device. Mark the channel as being reserved. */ | ||
750 | |||
751 | if (devAttr->dedicatedController >= DMA_NUM_CONTROLLERS) { | ||
752 | printk(KERN_ERR | ||
753 | "DMA Device: %s DMA Controller %d is out of range\n", | ||
754 | devAttr->name, | ||
755 | devAttr->dedicatedController); | ||
756 | rc = -EINVAL; | ||
757 | goto out; | ||
758 | } | ||
759 | |||
760 | if (devAttr->dedicatedChannel >= DMA_NUM_CHANNELS) { | ||
761 | printk(KERN_ERR | ||
762 | "DMA Device: %s DMA Channel %d is out of range\n", | ||
763 | devAttr->name, | ||
764 | devAttr->dedicatedChannel); | ||
765 | rc = -EINVAL; | ||
766 | goto out; | ||
767 | } | ||
768 | |||
769 | dedicatedHandle = | ||
770 | MAKE_HANDLE(devAttr->dedicatedController, | ||
771 | devAttr->dedicatedChannel); | ||
772 | channel = HandleToChannel(dedicatedHandle); | ||
773 | |||
774 | if ((channel->flags & DMA_CHANNEL_FLAG_IS_DEDICATED) != | ||
775 | 0) { | ||
776 | printk | ||
777 | ("DMA Device: %s attempting to use same DMA Controller:Channel (%d:%d) as %s\n", | ||
778 | devAttr->name, | ||
779 | devAttr->dedicatedController, | ||
780 | devAttr->dedicatedChannel, | ||
781 | DMA_gDeviceAttribute[channel->devType]. | ||
782 | name); | ||
783 | rc = -EBUSY; | ||
784 | goto out; | ||
785 | } | ||
786 | |||
787 | channel->flags |= DMA_CHANNEL_FLAG_IS_DEDICATED; | ||
788 | channel->devType = devIdx; | ||
789 | |||
790 | if (devAttr->flags & DMA_DEVICE_FLAG_NO_ISR) { | ||
791 | channel->flags |= DMA_CHANNEL_FLAG_NO_ISR; | ||
792 | } | ||
793 | |||
794 | /* For dedicated channels, we can go ahead and configure the DMA channel now */ | ||
795 | /* as well. */ | ||
796 | |||
797 | ConfigChannel(dedicatedHandle); | ||
798 | } | ||
799 | } | ||
800 | |||
801 | /* Go through and register the interrupt handlers */ | ||
802 | |||
803 | for (controllerIdx = 0; controllerIdx < DMA_NUM_CONTROLLERS; | ||
804 | controllerIdx++) { | ||
805 | for (channelIdx = 0; channelIdx < DMA_NUM_CHANNELS; | ||
806 | channelIdx++) { | ||
807 | channel = | ||
808 | &gDMA.controller[controllerIdx].channel[channelIdx]; | ||
809 | |||
810 | if ((channel->flags & DMA_CHANNEL_FLAG_NO_ISR) == 0) { | ||
811 | snprintf(channel->name, sizeof(channel->name), | ||
812 | "dma %d:%d %s", controllerIdx, | ||
813 | channelIdx, | ||
814 | channel->devType == | ||
815 | DMA_DEVICE_NONE ? "" : | ||
816 | DMA_gDeviceAttribute[channel->devType]. | ||
817 | name); | ||
818 | |||
819 | rc = | ||
820 | request_irq(IRQ_DMA0C0 + | ||
821 | (controllerIdx * | ||
822 | DMA_NUM_CHANNELS) + | ||
823 | channelIdx, | ||
824 | dma_interrupt_handler, | ||
825 | IRQF_DISABLED, channel->name, | ||
826 | channel); | ||
827 | if (rc != 0) { | ||
828 | printk(KERN_ERR | ||
829 | "request_irq for IRQ_DMA%dC%d failed\n", | ||
830 | controllerIdx, channelIdx); | ||
831 | } | ||
832 | } | ||
833 | } | ||
834 | } | ||
835 | |||
836 | /* Create /proc/dma/channels and /proc/dma/devices */ | ||
837 | |||
838 | gDmaDir = proc_mkdir("dma", NULL); | ||
839 | |||
840 | if (gDmaDir == NULL) { | ||
841 | printk(KERN_ERR "Unable to create /proc/dma\n"); | ||
842 | } else { | ||
843 | create_proc_read_entry("channels", 0, gDmaDir, | ||
844 | dma_proc_read_channels, NULL); | ||
845 | create_proc_read_entry("devices", 0, gDmaDir, | ||
846 | dma_proc_read_devices, NULL); | ||
847 | create_proc_read_entry("mem-type", 0, gDmaDir, | ||
848 | dma_proc_read_mem_type, NULL); | ||
849 | } | ||
850 | |||
851 | out: | ||
852 | |||
853 | up(&gDMA.lock); | ||
854 | |||
855 | return rc; | ||
856 | } | ||
857 | |||
858 | /****************************************************************************/ | ||
859 | /** | ||
860 | * Reserves a channel for use with @a dev. If the device is setup to use | ||
861 | * a shared channel, then this function will block until a free channel | ||
862 | * becomes available. | ||
863 | * | ||
864 | * @return | ||
865 | * >= 0 - A valid DMA Handle. | ||
866 | * -EBUSY - Device is currently being used. | ||
867 | * -ENODEV - Device handed in is invalid. | ||
868 | */ | ||
869 | /****************************************************************************/ | ||
870 | |||
871 | #if (DMA_DEBUG_TRACK_RESERVATION) | ||
872 | DMA_Handle_t dma_request_channel_dbg | ||
873 | (DMA_Device_t dev, const char *fileName, int lineNum) | ||
874 | #else | ||
875 | DMA_Handle_t dma_request_channel(DMA_Device_t dev) | ||
876 | #endif | ||
877 | { | ||
878 | DMA_Handle_t handle; | ||
879 | DMA_DeviceAttribute_t *devAttr; | ||
880 | DMA_Channel_t *channel; | ||
881 | int controllerIdx; | ||
882 | int controllerIdx2; | ||
883 | int channelIdx; | ||
884 | |||
885 | if (down_interruptible(&gDMA.lock) < 0) { | ||
886 | return -ERESTARTSYS; | ||
887 | } | ||
888 | |||
889 | if ((dev < 0) || (dev >= DMA_NUM_DEVICE_ENTRIES)) { | ||
890 | handle = -ENODEV; | ||
891 | goto out; | ||
892 | } | ||
893 | devAttr = &DMA_gDeviceAttribute[dev]; | ||
894 | |||
895 | #if (DMA_DEBUG_TRACK_RESERVATION) | ||
896 | { | ||
897 | char *s; | ||
898 | |||
899 | s = strrchr(fileName, '/'); | ||
900 | if (s != NULL) { | ||
901 | fileName = s + 1; | ||
902 | } | ||
903 | } | ||
904 | #endif | ||
905 | if ((devAttr->flags & DMA_DEVICE_FLAG_IN_USE) != 0) { | ||
906 | /* This device has already been requested and not been freed */ | ||
907 | |||
908 | printk(KERN_ERR "%s: device %s is already requested\n", | ||
909 | __func__, devAttr->name); | ||
910 | handle = -EBUSY; | ||
911 | goto out; | ||
912 | } | ||
913 | |||
914 | if ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) != 0) { | ||
915 | /* This device has a dedicated channel. */ | ||
916 | |||
917 | channel = | ||
918 | &gDMA.controller[devAttr->dedicatedController]. | ||
919 | channel[devAttr->dedicatedChannel]; | ||
920 | if ((channel->flags & DMA_CHANNEL_FLAG_IN_USE) != 0) { | ||
921 | handle = -EBUSY; | ||
922 | goto out; | ||
923 | } | ||
924 | |||
925 | channel->flags |= DMA_CHANNEL_FLAG_IN_USE; | ||
926 | devAttr->flags |= DMA_DEVICE_FLAG_IN_USE; | ||
927 | |||
928 | #if (DMA_DEBUG_TRACK_RESERVATION) | ||
929 | channel->fileName = fileName; | ||
930 | channel->lineNum = lineNum; | ||
931 | #endif | ||
932 | handle = | ||
933 | MAKE_HANDLE(devAttr->dedicatedController, | ||
934 | devAttr->dedicatedChannel); | ||
935 | goto out; | ||
936 | } | ||
937 | |||
938 | /* This device needs to use one of the shared channels. */ | ||
939 | |||
940 | handle = DMA_INVALID_HANDLE; | ||
941 | while (handle == DMA_INVALID_HANDLE) { | ||
942 | /* Scan through the shared channels and see if one is available */ | ||
943 | |||
944 | for (controllerIdx2 = 0; controllerIdx2 < DMA_NUM_CONTROLLERS; | ||
945 | controllerIdx2++) { | ||
946 | /* Check to see if we should try on controller 1 first. */ | ||
947 | |||
948 | controllerIdx = controllerIdx2; | ||
949 | if ((devAttr-> | ||
950 | flags & DMA_DEVICE_FLAG_ALLOC_DMA1_FIRST) != 0) { | ||
951 | controllerIdx = 1 - controllerIdx; | ||
952 | } | ||
953 | |||
954 | /* See if the device is available on the controller being tested */ | ||
955 | |||
956 | if ((devAttr-> | ||
957 | flags & (DMA_DEVICE_FLAG_ON_DMA0 << controllerIdx)) | ||
958 | != 0) { | ||
959 | for (channelIdx = 0; | ||
960 | channelIdx < DMA_NUM_CHANNELS; | ||
961 | channelIdx++) { | ||
962 | channel = | ||
963 | &gDMA.controller[controllerIdx]. | ||
964 | channel[channelIdx]; | ||
965 | |||
966 | if (((channel-> | ||
967 | flags & | ||
968 | DMA_CHANNEL_FLAG_IS_DEDICATED) == | ||
969 | 0) | ||
970 | && | ||
971 | ((channel-> | ||
972 | flags & DMA_CHANNEL_FLAG_IN_USE) | ||
973 | == 0)) { | ||
974 | if (((channel-> | ||
975 | flags & | ||
976 | DMA_CHANNEL_FLAG_LARGE_FIFO) | ||
977 | != 0) | ||
978 | && | ||
979 | ((devAttr-> | ||
980 | flags & | ||
981 | DMA_DEVICE_FLAG_ALLOW_LARGE_FIFO) | ||
982 | == 0)) { | ||
983 | /* This channel is a large fifo - don't tie it up */ | ||
984 | /* with devices that we don't want using it. */ | ||
985 | |||
986 | continue; | ||
987 | } | ||
988 | |||
989 | channel->flags |= | ||
990 | DMA_CHANNEL_FLAG_IN_USE; | ||
991 | channel->devType = dev; | ||
992 | devAttr->flags |= | ||
993 | DMA_DEVICE_FLAG_IN_USE; | ||
994 | |||
995 | #if (DMA_DEBUG_TRACK_RESERVATION) | ||
996 | channel->fileName = fileName; | ||
997 | channel->lineNum = lineNum; | ||
998 | #endif | ||
999 | handle = | ||
1000 | MAKE_HANDLE(controllerIdx, | ||
1001 | channelIdx); | ||
1002 | |||
1003 | /* Now that we've reserved the channel - we can go ahead and configure it */ | ||
1004 | |||
1005 | if (ConfigChannel(handle) != 0) { | ||
1006 | handle = -EIO; | ||
1007 | printk(KERN_ERR | ||
1008 | "dma_request_channel: ConfigChannel failed\n"); | ||
1009 | } | ||
1010 | goto out; | ||
1011 | } | ||
1012 | } | ||
1013 | } | ||
1014 | } | ||
1015 | |||
1016 | /* No channels are currently available. Let's wait for one to free up. */ | ||
1017 | |||
1018 | { | ||
1019 | DEFINE_WAIT(wait); | ||
1020 | |||
1021 | prepare_to_wait(&gDMA.freeChannelQ, &wait, | ||
1022 | TASK_INTERRUPTIBLE); | ||
1023 | up(&gDMA.lock); | ||
1024 | schedule(); | ||
1025 | finish_wait(&gDMA.freeChannelQ, &wait); | ||
1026 | |||
1027 | if (signal_pending(current)) { | ||
1028 | /* We don't currently hold gDMA.lock, so we return directly */ | ||
1029 | |||
1030 | return -ERESTARTSYS; | ||
1031 | } | ||
1032 | } | ||
1033 | |||
1034 | if (down_interruptible(&gDMA.lock)) { | ||
1035 | return -ERESTARTSYS; | ||
1036 | } | ||
1037 | } | ||
1038 | |||
1039 | out: | ||
1040 | up(&gDMA.lock); | ||
1041 | |||
1042 | return handle; | ||
1043 | } | ||
1044 | |||
1045 | /* Create both _dbg and non _dbg functions for modules. */ | ||
1046 | |||
1047 | #if (DMA_DEBUG_TRACK_RESERVATION) | ||
1048 | #undef dma_request_channel | ||
1049 | DMA_Handle_t dma_request_channel(DMA_Device_t dev) | ||
1050 | { | ||
1051 | return dma_request_channel_dbg(dev, __FILE__, __LINE__); | ||
1052 | } | ||
1053 | |||
1054 | EXPORT_SYMBOL(dma_request_channel_dbg); | ||
1055 | #endif | ||
1056 | EXPORT_SYMBOL(dma_request_channel); | ||
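
/*
* Editor's sketch (not part of the original source): a typical reserve/use/
* release sequence. Requesting a shared channel can block waiting for a free
* channel, so it must not be done from atomic context. DMA_DEVICE_MEM_TO_MEM
* stands in for any valid DMA_Device_t:
*
*     DMA_Handle_t handle = dma_request_channel(DMA_DEVICE_MEM_TO_MEM);
*
*     if (handle >= 0) {
*         ... perform transfers using the handle ...
*         dma_free_channel(handle);
*     }
*/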
1057 | |||
1058 | /****************************************************************************/ | ||
1059 | /** | ||
1060 | * Frees a previously allocated DMA Handle. | ||
1061 | */ | ||
1062 | /****************************************************************************/ | ||
1063 | |||
1064 | int dma_free_channel(DMA_Handle_t handle /* DMA handle. */ | ||
1065 | ) { | ||
1066 | int rc = 0; | ||
1067 | DMA_Channel_t *channel; | ||
1068 | DMA_DeviceAttribute_t *devAttr; | ||
1069 | |||
1070 | if (down_interruptible(&gDMA.lock) < 0) { | ||
1071 | return -ERESTARTSYS; | ||
1072 | } | ||
1073 | |||
1074 | channel = HandleToChannel(handle); | ||
1075 | if (channel == NULL) { | ||
1076 | rc = -EINVAL; | ||
1077 | goto out; | ||
1078 | } | ||
1079 | |||
1080 | devAttr = &DMA_gDeviceAttribute[channel->devType]; | ||
1081 | |||
1082 | if ((channel->flags & DMA_CHANNEL_FLAG_IS_DEDICATED) == 0) { | ||
1083 | channel->lastDevType = channel->devType; | ||
1084 | channel->devType = DMA_DEVICE_NONE; | ||
1085 | } | ||
1086 | channel->flags &= ~DMA_CHANNEL_FLAG_IN_USE; | ||
1087 | devAttr->flags &= ~DMA_DEVICE_FLAG_IN_USE; | ||
1088 | |||
1089 | out: | ||
1090 | up(&gDMA.lock); | ||
1091 | |||
1092 | wake_up_interruptible(&gDMA.freeChannelQ); | ||
1093 | |||
1094 | return rc; | ||
1095 | } | ||
1096 | |||
1097 | EXPORT_SYMBOL(dma_free_channel); | ||
1098 | |||
1099 | /****************************************************************************/ | ||
1100 | /** | ||
1101 | * Determines if a given device has been configured as using a shared | ||
1102 | * channel. | ||
1103 | * | ||
1104 | * @return | ||
1105 | * 0 Device uses a dedicated channel | ||
1106 | * > zero Device uses a shared channel | ||
1107 | * < zero Error code | ||
1108 | */ | ||
1109 | /****************************************************************************/ | ||
1110 | |||
1111 | int dma_device_is_channel_shared(DMA_Device_t device /* Device to check. */ | ||
1112 | ) { | ||
1113 | DMA_DeviceAttribute_t *devAttr; | ||
1114 | |||
1115 | if (!IsDeviceValid(device)) { | ||
1116 | return -ENODEV; | ||
1117 | } | ||
1118 | devAttr = &DMA_gDeviceAttribute[device]; | ||
1119 | |||
1120 | return ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) == 0); | ||
1121 | } | ||
1122 | |||
1123 | EXPORT_SYMBOL(dma_device_is_channel_shared); | ||
1124 | |||
1125 | /****************************************************************************/ | ||
1126 | /** | ||
1127 | * Allocates buffers for the descriptors. This is normally done automatically | ||
1128 | * but needs to be done explicitly when initiating a dma from interrupt | ||
1129 | * context. | ||
1130 | * | ||
1131 | * @return | ||
1132 | * 0 Descriptors were allocated successfully | ||
1133 | * -EINVAL Invalid device type for this kind of transfer | ||
1134 | * (i.e. the device is _MEM_TO_DEV and not _DEV_TO_MEM) | ||
1135 | * -ENOMEM Memory exhausted | ||
1136 | */ | ||
1137 | /****************************************************************************/ | ||
1138 | |||
1139 | int dma_alloc_descriptors(DMA_Handle_t handle, /* DMA Handle */ | ||
1140 | dmacHw_TRANSFER_TYPE_e transferType, /* Type of transfer being performed */ | ||
1141 | dma_addr_t srcData, /* Place to get data to write to device */ | ||
1142 | dma_addr_t dstData, /* Pointer to device data address */ | ||
1143 | size_t numBytes /* Number of bytes to transfer to the device */ | ||
1144 | ) { | ||
1145 | DMA_Channel_t *channel; | ||
1146 | DMA_DeviceAttribute_t *devAttr; | ||
1147 | int numDescriptors; | ||
1148 | size_t ringBytesRequired; | ||
1149 | int rc = 0; | ||
1150 | |||
1151 | channel = HandleToChannel(handle); | ||
1152 | if (channel == NULL) { | ||
1153 | return -ENODEV; | ||
1154 | } | ||
1155 | |||
1156 | devAttr = &DMA_gDeviceAttribute[channel->devType]; | ||
1157 | |||
1158 | if (devAttr->config.transferType != transferType) { | ||
1159 | return -EINVAL; | ||
1160 | } | ||
1161 | |||
1162 | /* Figure out how many descriptors we need. */ | ||
1163 | |||
1164 | /* printk("srcData: 0x%08x dstData: 0x%08x, numBytes: %d\n", */ | ||
1165 | /* srcData, dstData, numBytes); */ | ||
1166 | |||
1167 | numDescriptors = dmacHw_calculateDescriptorCount(&devAttr->config, | ||
1168 | (void *)srcData, | ||
1169 | (void *)dstData, | ||
1170 | numBytes); | ||
1171 | if (numDescriptors < 0) { | ||
1172 | printk(KERN_ERR "%s: dmacHw_calculateDescriptorCount failed\n", | ||
1173 | __func__); | ||
1174 | return -EINVAL; | ||
1175 | } | ||
1176 | |||
1177 | /* Check to see if we can reuse the existing descriptor ring, or if we need to allocate */ | ||
1178 | /* a new one. */ | ||
1179 | |||
1180 | ringBytesRequired = dmacHw_descriptorLen(numDescriptors); | ||
1181 | |||
1182 | /* printk("ringBytesRequired: %d\n", ringBytesRequired); */ | ||
1183 | |||
1184 | if (ringBytesRequired > devAttr->ring.bytesAllocated) { | ||
1185 | /* Make sure that this code path is never taken from interrupt context. */ | ||
1186 | /* It's OK for an interrupt to initiate a DMA transfer, but the descriptor */ | ||
1187 | /* allocation needs to have already been done. */ | ||
1188 | |||
1189 | might_sleep(); | ||
1190 | |||
1191 | /* Free the old descriptor ring and allocate a new one. */ | ||
1192 | |||
1193 | dma_free_descriptor_ring(&devAttr->ring); | ||
1194 | |||
1195 | /* And allocate a new one. */ | ||
1196 | |||
1197 | rc = | ||
1198 | dma_alloc_descriptor_ring(&devAttr->ring, | ||
1199 | numDescriptors); | ||
1200 | if (rc < 0) { | ||
1201 | printk(KERN_ERR | ||
1202 | "%s: dma_alloc_descriptor_ring(%d) failed\n", | ||
1203 | __func__, numDescriptors); | ||
1204 | return rc; | ||
1205 | } | ||
1206 | /* Setup the descriptor for this transfer */ | ||
1207 | |||
1208 | if (dmacHw_initDescriptor(devAttr->ring.virtAddr, | ||
1209 | devAttr->ring.physAddr, | ||
1210 | devAttr->ring.bytesAllocated, | ||
1211 | numDescriptors) < 0) { | ||
1212 | printk(KERN_ERR "%s: dmacHw_initDescriptor failed\n", | ||
1213 | __func__); | ||
1214 | return -EINVAL; | ||
1215 | } | ||
1216 | } else { | ||
1217 | /* We've already got enough ring buffer allocated. All we need to do is reset */ | ||
1218 | /* any control information, just in case the previous DMA was stopped. */ | ||
1219 | |||
1220 | dmacHw_resetDescriptorControl(devAttr->ring.virtAddr); | ||
1221 | } | ||
1222 | |||
1223 | /* dma_alloc/free both set the prevSrc/DstData to 0. If they happen to be the same */ | ||
1224 | /* as last time, then we don't need to call setDataDescriptor again. */ | ||
1225 | |||
1226 | if (dmacHw_setDataDescriptor(&devAttr->config, | ||
1227 | devAttr->ring.virtAddr, | ||
1228 | (void *)srcData, | ||
1229 | (void *)dstData, numBytes) < 0) { | ||
1230 | printk(KERN_ERR "%s: dmacHw_setDataDescriptor failed\n", | ||
1231 | __func__); | ||
1232 | return -EINVAL; | ||
1233 | } | ||
1234 | |||
1235 | /* Remember the critical information for this transfer so that we can eliminate */ | ||
1236 | /* another call to dma_alloc_descriptors if the caller reuses the same buffers */ | ||
1237 | |||
1238 | devAttr->prevSrcData = srcData; | ||
1239 | devAttr->prevDstData = dstData; | ||
1240 | devAttr->prevNumBytes = numBytes; | ||
1241 | |||
1242 | return 0; | ||
1243 | } | ||
1244 | |||
1245 | EXPORT_SYMBOL(dma_alloc_descriptors); | ||
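
/*
* Editor's sketch (not part of the original source): to start a DMA from
* interrupt context, do the (possibly sleeping) descriptor allocation ahead
* of time from process context, so that only the start has to run in the
* ISR. devFifoPhys and bufPhys are placeholder addresses:
*
*     In process context:
*         dma_alloc_descriptors(handle, dmacHw_TRANSFER_TYPE_PERIPHERAL_TO_MEM,
*                               devFifoPhys, bufPhys, numBytes);
*
*     Later, from the interrupt handler:
*         dma_start_transfer(handle);
*/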
1246 | |||
1247 | /****************************************************************************/ | ||
1248 | /** | ||
1249 | * Allocates and sets up descriptors for a double buffered circular buffer. | ||
1250 | * | ||
1251 | * This is primarily intended to be used for things like the ingress samples | ||
1252 | * from a microphone. | ||
1253 | * | ||
1254 | * @return | ||
1255 | * > 0 Number of descriptors actually allocated. | ||
1256 | * -EINVAL Invalid device type for this kind of transfer | ||
1257 | * (i.e. the device is _MEM_TO_DEV and not _DEV_TO_MEM) | ||
1258 | * -ENOMEM Memory exhausted | ||
1259 | */ | ||
1260 | /****************************************************************************/ | ||
1261 | |||
1262 | int dma_alloc_double_dst_descriptors(DMA_Handle_t handle, /* DMA Handle */ | ||
1263 | dma_addr_t srcData, /* Physical address of source data */ | ||
1264 | dma_addr_t dstData1, /* Physical address of first destination buffer */ | ||
1265 | dma_addr_t dstData2, /* Physical address of second destination buffer */ | ||
1266 | size_t numBytes /* Number of bytes in each destination buffer */ | ||
1267 | ) { | ||
1268 | DMA_Channel_t *channel; | ||
1269 | DMA_DeviceAttribute_t *devAttr; | ||
1270 | int numDst1Descriptors; | ||
1271 | int numDst2Descriptors; | ||
1272 | int numDescriptors; | ||
1273 | size_t ringBytesRequired; | ||
1274 | int rc = 0; | ||
1275 | |||
1276 | channel = HandleToChannel(handle); | ||
1277 | if (channel == NULL) { | ||
1278 | return -ENODEV; | ||
1279 | } | ||
1280 | |||
1281 | devAttr = &DMA_gDeviceAttribute[channel->devType]; | ||
1282 | |||
1283 | /* Figure out how many descriptors we need. */ | ||
1284 | |||
1285 | /* printk("srcData: 0x%08x dstData: 0x%08x, numBytes: %d\n", */ | ||
1286 | /* srcData, dstData, numBytes); */ | ||
1287 | |||
1288 | numDst1Descriptors = | ||
1289 | dmacHw_calculateDescriptorCount(&devAttr->config, (void *)srcData, | ||
1290 | (void *)dstData1, numBytes); | ||
1291 | if (numDst1Descriptors < 0) { | ||
1292 | return -EINVAL; | ||
1293 | } | ||
1294 | numDst2Descriptors = | ||
1295 | dmacHw_calculateDescriptorCount(&devAttr->config, (void *)srcData, | ||
1296 | (void *)dstData2, numBytes); | ||
1297 | if (numDst2Descriptors < 0) { | ||
1298 | return -EINVAL; | ||
1299 | } | ||
1300 | numDescriptors = numDst1Descriptors + numDst2Descriptors; | ||
1301 | /* printk("numDescriptors: %d\n", numDescriptors); */ | ||
1302 | |||
1303 | /* Check to see if we can reuse the existing descriptor ring, or if we need to allocate */ | ||
1304 | /* a new one. */ | ||
1305 | |||
1306 | ringBytesRequired = dmacHw_descriptorLen(numDescriptors); | ||
1307 | |||
1308 | /* printk("ringBytesRequired: %d\n", ringBytesRequired); */ | ||
1309 | |||
1310 | if (ringBytesRequired > devAttr->ring.bytesAllocated) { | ||
1311 | /* Make sure that this code path is never taken from interrupt context. */ | ||
1312 | /* It's OK for an interrupt to initiate a DMA transfer, but the descriptor */ | ||
1313 | /* allocation needs to have already been done. */ | ||
1314 | |||
1315 | might_sleep(); | ||
1316 | |||
1317 | /* Free the old descriptor ring and allocate a new one. */ | ||
1318 | |||
1319 | dma_free_descriptor_ring(&devAttr->ring); | ||
1320 | |||
1321 | /* And allocate a new one. */ | ||
1322 | |||
1323 | rc = | ||
1324 | dma_alloc_descriptor_ring(&devAttr->ring, | ||
1325 | numDescriptors); | ||
1326 | if (rc < 0) { | ||
1327 | printk(KERN_ERR | ||
1328 | "%s: dma_alloc_descriptor_ring(%d) failed\n", | ||
1329 | __func__, ringBytesRequired); | ||
1330 | return rc; | ||
1331 | } | ||
1332 | } | ||
1333 | |||
1334 | /* Setup the descriptor for this transfer. Since this function is used with */ | ||
1335 | /* CONTINUOUS DMA operations, we need to reinitialize every time, otherwise */ | ||
1336 | /* setDataDescriptor will keep trying to append onto the end. */ | ||
1337 | |||
1338 | if (dmacHw_initDescriptor(devAttr->ring.virtAddr, | ||
1339 | devAttr->ring.physAddr, | ||
1340 | devAttr->ring.bytesAllocated, | ||
1341 | numDescriptors) < 0) { | ||
1342 | printk(KERN_ERR "%s: dmacHw_initDescriptor failed\n", __func__); | ||
1343 | return -EINVAL; | ||
1344 | } | ||
1345 | |||
1346 | /* dma_alloc/free both set the prevSrc/DstData to 0. If they happen to be the same */ | ||
1347 | /* as last time, then we don't need to call setDataDescriptor again. */ | ||
1348 | |||
1349 | if (dmacHw_setDataDescriptor(&devAttr->config, | ||
1350 | devAttr->ring.virtAddr, | ||
1351 | (void *)srcData, | ||
1352 | (void *)dstData1, numBytes) < 0) { | ||
1353 | printk(KERN_ERR "%s: dmacHw_setDataDescriptor 1 failed\n", | ||
1354 | __func__); | ||
1355 | return -EINVAL; | ||
1356 | } | ||
1357 | if (dmacHw_setDataDescriptor(&devAttr->config, | ||
1358 | devAttr->ring.virtAddr, | ||
1359 | (void *)srcData, | ||
1360 | (void *)dstData2, numBytes) < 0) { | ||
1361 | printk(KERN_ERR "%s: dmacHw_setDataDescriptor 2 failed\n", | ||
1362 | __func__); | ||
1363 | return -EINVAL; | ||
1364 | } | ||
1365 | |||
1366 | /* You should use dma_start_transfer rather than dma_transfer_xxx so we don't */ | ||
1367 | /* try to make the 'prev' variables right. */ | ||
1368 | |||
1369 | devAttr->prevSrcData = 0; | ||
1370 | devAttr->prevDstData = 0; | ||
1371 | devAttr->prevNumBytes = 0; | ||
1372 | |||
1373 | return numDescriptors; | ||
1374 | } | ||
1375 | |||
1376 | EXPORT_SYMBOL(dma_alloc_double_dst_descriptors); | ||
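
/*
* Editor's sketch (not part of the original source): double-buffered capture,
* e.g. microphone ingress. micFifoPhys, buf1Phys/buf2Phys and bytesPerHalf
* are placeholder names; each filled half-buffer can then be drained from
* the callback installed with dma_set_device_handler:
*
*     numDesc = dma_alloc_double_dst_descriptors(handle, micFifoPhys,
*                                                buf1Phys, buf2Phys,
*                                                bytesPerHalf);
*     if (numDesc > 0)
*         dma_start_transfer(handle);
*/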
1377 | |||
1378 | /****************************************************************************/ | ||
1379 | /** | ||
1380 | * Initiates a transfer when the descriptors have already been setup. | ||
1381 | * | ||
1382 | * This is a special case, and normally, the dma_transfer_xxx functions should | ||
1383 | * be used. | ||
1384 | * | ||
1385 | * @return | ||
1386 | * 0 Transfer was started successfully | ||
1387 | * -ENODEV Invalid handle | ||
1388 | */ | ||
1389 | /****************************************************************************/ | ||
1390 | |||
1391 | int dma_start_transfer(DMA_Handle_t handle) | ||
1392 | { | ||
1393 | DMA_Channel_t *channel; | ||
1394 | DMA_DeviceAttribute_t *devAttr; | ||
1395 | |||
1396 | channel = HandleToChannel(handle); | ||
1397 | if (channel == NULL) { | ||
1398 | return -ENODEV; | ||
1399 | } | ||
1400 | devAttr = &DMA_gDeviceAttribute[channel->devType]; | ||
1401 | |||
1402 | dmacHw_initiateTransfer(channel->dmacHwHandle, &devAttr->config, | ||
1403 | devAttr->ring.virtAddr); | ||
1404 | |||
1405 | /* Since we got this far, everything went successfully */ | ||
1406 | |||
1407 | return 0; | ||
1408 | } | ||
1409 | |||
1410 | EXPORT_SYMBOL(dma_start_transfer); | ||
1411 | |||
1412 | /****************************************************************************/ | ||
1413 | /** | ||
1414 | * Stops a previously started DMA transfer. | ||
1415 | * | ||
1416 | * @return | ||
1417 | * 0 Transfer was stopped successfully | ||
1418 | * -ENODEV Invalid handle | ||
1419 | */ | ||
1420 | /****************************************************************************/ | ||
1421 | |||
1422 | int dma_stop_transfer(DMA_Handle_t handle) | ||
1423 | { | ||
1424 | DMA_Channel_t *channel; | ||
1425 | |||
1426 | channel = HandleToChannel(handle); | ||
1427 | if (channel == NULL) { | ||
1428 | return -ENODEV; | ||
1429 | } | ||
1430 | |||
1431 | dmacHw_stopTransfer(channel->dmacHwHandle); | ||
1432 | |||
1433 | return 0; | ||
1434 | } | ||
1435 | |||
1436 | EXPORT_SYMBOL(dma_stop_transfer); | ||
1437 | |||
1438 | /****************************************************************************/ | ||
1439 | /** | ||
1440 | * Waits for a DMA to complete by polling. This function is only intended | ||
1441 | * to be used for testing. Interrupts should be used for most DMA operations. | ||
1442 | */ | ||
1443 | /****************************************************************************/ | ||
1444 | |||
1445 | int dma_wait_transfer_done(DMA_Handle_t handle) | ||
1446 | { | ||
1447 | DMA_Channel_t *channel; | ||
1448 | dmacHw_TRANSFER_STATUS_e status; | ||
1449 | |||
1450 | channel = HandleToChannel(handle); | ||
1451 | if (channel == NULL) { | ||
1452 | return -ENODEV; | ||
1453 | } | ||
1454 | |||
1455 | while ((status = | ||
1456 | dmacHw_transferCompleted(channel->dmacHwHandle)) == | ||
1457 | dmacHw_TRANSFER_STATUS_BUSY) { | ||
1458 | ; | ||
1459 | } | ||
1460 | |||
1461 | if (status == dmacHw_TRANSFER_STATUS_ERROR) { | ||
1462 | printk(KERN_ERR "%s: DMA transfer failed\n", __func__); | ||
1463 | return -EIO; | ||
1464 | } | ||
1465 | return 0; | ||
1466 | } | ||
1467 | |||
1468 | EXPORT_SYMBOL(dma_wait_transfer_done); | ||
1469 | |||
1470 | /****************************************************************************/ | ||
1471 | /** | ||
1472 | * Initiates a DMA, allocating the descriptors as required. | ||
1473 | * | ||
1474 | * @return | ||
1475 | * 0 Transfer was started successfully | ||
1476 | * -EINVAL Invalid device type for this kind of transfer | ||
1477 | * (i.e. the device is _DEV_TO_MEM and not _MEM_TO_DEV) | ||
1478 | */ | ||
1479 | /****************************************************************************/ | ||
1480 | |||
1481 | int dma_transfer(DMA_Handle_t handle, /* DMA Handle */ | ||
1482 | dmacHw_TRANSFER_TYPE_e transferType, /* Type of transfer being performed */ | ||
1483 | dma_addr_t srcData, /* Place to get data to write to device */ | ||
1484 | dma_addr_t dstData, /* Pointer to device data address */ | ||
1485 | size_t numBytes /* Number of bytes to transfer to the device */ | ||
1486 | ) { | ||
1487 | DMA_Channel_t *channel; | ||
1488 | DMA_DeviceAttribute_t *devAttr; | ||
1489 | int rc = 0; | ||
1490 | |||
1491 | channel = HandleToChannel(handle); | ||
1492 | if (channel == NULL) { | ||
1493 | return -ENODEV; | ||
1494 | } | ||
1495 | |||
1496 | devAttr = &DMA_gDeviceAttribute[channel->devType]; | ||
1497 | |||
1498 | if (devAttr->config.transferType != transferType) { | ||
1499 | return -EINVAL; | ||
1500 | } | ||
1501 | |||
1502 | /* We keep track of the information about the previous request for this */ | ||
1503 | /* device, and if the attributes match, then we can use the descriptors we setup */ | ||
1504 | /* the last time, and not have to reinitialize everything. */ | ||
1505 | |||
1506 | { | ||
1507 | rc = | ||
1508 | dma_alloc_descriptors(handle, transferType, srcData, | ||
1509 | dstData, numBytes); | ||
1510 | if (rc != 0) { | ||
1511 | return rc; | ||
1512 | } | ||
1513 | } | ||
1514 | |||
1515 | /* And kick off the transfer */ | ||
1516 | |||
1517 | devAttr->numBytes = numBytes; | ||
1518 | devAttr->transferStartTime = timer_get_tick_count(); | ||
1519 | |||
1520 | dmacHw_initiateTransfer(channel->dmacHwHandle, &devAttr->config, | ||
1521 | devAttr->ring.virtAddr); | ||
1522 | |||
1523 | /* Since we got this far, everything went successfully */ | ||
1524 | |||
1525 | return 0; | ||
1526 | } | ||
1527 | |||
1528 | EXPORT_SYMBOL(dma_transfer); | ||
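
/*
* Editor's sketch (not part of the original source): a one-shot transfer.
* The transferType must match the direction the device was configured for,
* or dma_transfer returns -EINVAL. srcPhys and devFifoPhys are placeholders:
*
*     rc = dma_transfer(handle, dmacHw_TRANSFER_TYPE_MEM_TO_PERIPHERAL,
*                       srcPhys, devFifoPhys, numBytes);
*     if (rc == 0)
*         rc = dma_wait_transfer_done(handle);
*
* dma_wait_transfer_done polls and is meant for testing; production code
* should install a completion callback with dma_set_device_handler instead.
*/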
1529 | |||
1530 | /****************************************************************************/ | ||
1531 | /** | ||
1532 | * Set the callback function which will be called when a transfer completes. | ||
1533 | * If a NULL callback function is set, then no callback will occur. | ||
1534 | * | ||
1535 | * @note @a devHandler will be called from IRQ context. | ||
1536 | * | ||
1537 | * @return | ||
1538 | * 0 - Success | ||
1539 | * -ENODEV - The device handle passed in is invalid. | ||
1540 | */ | ||
1541 | /****************************************************************************/ | ||
1542 | |||
1543 | int dma_set_device_handler(DMA_Device_t dev, /* Device to set the callback for. */ | ||
1544 | DMA_DeviceHandler_t devHandler, /* Function to call when the DMA completes */ | ||
1545 | void *userData /* Pointer which will be passed to devHandler. */ | ||
1546 | ) { | ||
1547 | DMA_DeviceAttribute_t *devAttr; | ||
1548 | unsigned long flags; | ||
1549 | |||
1550 | if (!IsDeviceValid(dev)) { | ||
1551 | return -ENODEV; | ||
1552 | } | ||
1553 | devAttr = &DMA_gDeviceAttribute[dev]; | ||
1554 | |||
1555 | local_irq_save(flags); | ||
1556 | |||
1557 | devAttr->userData = userData; | ||
1558 | devAttr->devHandler = devHandler; | ||
1559 | |||
1560 | local_irq_restore(flags); | ||
1561 | |||
1562 | return 0; | ||
1563 | } | ||
1564 | |||
1565 | EXPORT_SYMBOL(dma_set_device_handler); | ||
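/*
 * Illustrative sketch (not part of the original driver): a typical
 * completion handler. The (dev, reason, userData) signature is an
 * assumption based on this driver's DMA_DeviceHandler_t; since the handler
 * runs in IRQ context it must not sleep, so it signals a waiter instead:
 *
 *	static void my_dma_handler(DMA_Device_t dev, int reason, void *userData)
 *	{
 *		struct completion *done = userData;
 *
 *		complete(done);
 *	}
 *
 *	...
 *	rc = dma_set_device_handler(dev, my_dma_handler, &state->done);
 */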
1566 | |||
1567 | /****************************************************************************/ | ||
1568 | /** | ||
1569 | * Initializes a memory mapping structure | ||
1570 | */ | ||
1571 | /****************************************************************************/ | ||
1572 | |||
1573 | int dma_init_mem_map(DMA_MemMap_t *memMap) | ||
1574 | { | ||
1575 | memset(memMap, 0, sizeof(*memMap)); | ||
1576 | |||
1577 | sema_init(&memMap->lock, 1); | ||
1578 | |||
1579 | return 0; | ||
1580 | } | ||
1581 | |||
1582 | EXPORT_SYMBOL(dma_init_mem_map); | ||
1583 | |||
1584 | /****************************************************************************/ | ||
1585 | /** | ||
1586 | * Releases any memory currently being held by a memory mapping structure. | ||
1587 | */ | ||
1588 | /****************************************************************************/ | ||
1589 | |||
1590 | int dma_term_mem_map(DMA_MemMap_t *memMap) | ||
1591 | { | ||
1592 | down(&memMap->lock); /* Just being paranoid */ | ||
1593 | |||
1594 | /* Free up any allocated memory */ | ||
1595 | |||
1596 | up(&memMap->lock); | ||
1597 | memset(memMap, 0, sizeof(*memMap)); | ||
1598 | |||
1599 | return 0; | ||
1600 | } | ||
1601 | |||
1602 | EXPORT_SYMBOL(dma_term_mem_map); | ||
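/*
 * Illustrative sketch (not part of the original driver): the init/term pair
 * brackets the lifetime of a map, typically once per device instance:
 *
 *	dma_init_mem_map(&state->memMap);	at probe/open time
 *	...any number of map/unmap cycles...
 *	dma_term_mem_map(&state->memMap);	at remove/close, with no map active
 */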
1603 | |||
1604 | /****************************************************************************/ | ||
1605 | /** | ||
1606 | * Looks at a memory address and categorizes it. | ||
1607 | * | ||
1608 | * @return One of the values from the DMA_MemType_t enumeration. | ||
1609 | */ | ||
1610 | /****************************************************************************/ | ||
1611 | |||
1612 | DMA_MemType_t dma_mem_type(void *addr) | ||
1613 | { | ||
1614 | unsigned long addrVal = (unsigned long)addr; | ||
1615 | |||
1616 | if (addrVal >= VMALLOC_END) { | ||
1617 | /* NOTE: DMA virtual memory space starts at 0xFFxxxxxx */ | ||
1618 | |||
1619 | /* dma_alloc_xxx pages are physically and virtually contiguous */ | ||
1620 | |||
1621 | return DMA_MEM_TYPE_DMA; | ||
1622 | } | ||
1623 | |||
1624 | /* Technically, we could add one more classification. Addresses between VMALLOC_END */ | ||
1625 | /* and the beginning of the DMA virtual address could be considered to be I/O space. */ | ||
1626 | /* Right now, nobody cares about this particular classification, so we ignore it. */ | ||
1627 | |||
1628 | if (is_vmalloc_addr(addr)) { | ||
1629 | /* Address comes from the vmalloc'd region. Pages are virtually */ | ||
1630 | /* contiguous but NOT physically contiguous */ | ||
1631 | |||
1632 | return DMA_MEM_TYPE_VMALLOC; | ||
1633 | } | ||
1634 | |||
1635 | if (addrVal >= PAGE_OFFSET) { | ||
1636 | /* PAGE_OFFSET is typically 0xC0000000 */ | ||
1637 | |||
1638 | /* kmalloc'd pages are physically contiguous */ | ||
1639 | |||
1640 | return DMA_MEM_TYPE_KMALLOC; | ||
1641 | } | ||
1642 | |||
1643 | return DMA_MEM_TYPE_USER; | ||
1644 | } | ||
1645 | |||
1646 | EXPORT_SYMBOL(dma_mem_type); | ||
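/*
 * Illustrative sketch (not part of the original driver): how the checks
 * above classify typical allocations, assuming the usual 3G/1G split with
 * PAGE_OFFSET at 0xC0000000:
 *
 *	dma_mem_type(kmalloc(len, GFP_KERNEL))   -> DMA_MEM_TYPE_KMALLOC
 *	dma_mem_type(vmalloc(len))               -> DMA_MEM_TYPE_VMALLOC
 *	dma_mem_type(coherentVirtAddr)           -> DMA_MEM_TYPE_DMA
 *	dma_mem_type(userSpacePointer)           -> DMA_MEM_TYPE_USER
 *
 * where coherentVirtAddr came from dma_alloc_coherent() and therefore lives
 * above VMALLOC_END.
 */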
1647 | |||
1648 | /****************************************************************************/ | ||
1649 | /** | ||
1650 | * Looks at a memory address and determines if we support DMA'ing to/from | ||
1651 | * that type of memory. | ||
1652 | * | ||
1653 | * @return boolean - | ||
1654 | * non-zero means DMA is supported for this type of memory | ||
1655 | * zero means DMA is not supported | ||
1656 | */ | ||
1657 | /****************************************************************************/ | ||
1658 | |||
1659 | int dma_mem_supports_dma(void *addr) | ||
1660 | { | ||
1661 | DMA_MemType_t memType = dma_mem_type(addr); | ||
1662 | |||
1663 | return (memType == DMA_MEM_TYPE_DMA) | ||
1664 | #if ALLOW_MAP_OF_KMALLOC_MEMORY | ||
1665 | || (memType == DMA_MEM_TYPE_KMALLOC) | ||
1666 | #endif | ||
1667 | || (memType == DMA_MEM_TYPE_USER); | ||
1668 | } | ||
1669 | |||
1670 | EXPORT_SYMBOL(dma_mem_supports_dma); | ||
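/*
 * Illustrative sketch (not part of the original driver): a cheap guard
 * before building a map; with ALLOW_MAP_OF_KMALLOC_MEMORY left at 0, this
 * rejects kmalloc'd and vmalloc'd buffers up front:
 *
 *	if (!dma_mem_supports_dma(buf))
 *		return -EINVAL;
 */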
1671 | |||
1672 | /****************************************************************************/ | ||
1673 | /** | ||
1674 | * Begins the process of mapping memory for a DMA. Regions are then | ||
1675 | * added with dma_map_add_region(). | ||
1676 | * @return 0 on success, -EBUSY if the map is already in use. | ||
1677 | */ | ||
1678 | /****************************************************************************/ | ||
1679 | |||
1680 | int dma_map_start(DMA_MemMap_t *memMap, /* Stores state information about the map */ | ||
1681 | enum dma_data_direction dir /* Direction that the mapping will be going */ | ||
1682 | ) { | ||
1683 | int rc; | ||
1684 | |||
1685 | down(&memMap->lock); | ||
1686 | |||
1687 | DMA_MAP_PRINT("memMap: %p\n", memMap); | ||
1688 | |||
1689 | if (memMap->inUse) { | ||
1690 | printk(KERN_ERR "%s: memory map %p is already being used\n", | ||
1691 | __func__, memMap); | ||
1692 | rc = -EBUSY; | ||
1693 | goto out; | ||
1694 | } | ||
1695 | |||
1696 | memMap->inUse = 1; | ||
1697 | memMap->dir = dir; | ||
1698 | memMap->numRegionsUsed = 0; | ||
1699 | |||
1700 | rc = 0; | ||
1701 | |||
1702 | out: | ||
1703 | |||
1704 | DMA_MAP_PRINT("returning %d", rc); | ||
1705 | |||
1706 | up(&memMap->lock); | ||
1707 | |||
1708 | return rc; | ||
1709 | } | ||
1710 | |||
1711 | EXPORT_SYMBOL(dma_map_start); | ||
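/*
 * Illustrative sketch (not part of the original driver): the start/add
 * pattern gathers several discontiguous buffers into one map (see also the
 * single-region dma_map_mem() wrapper further down). Error handling is
 * elided; hdrBuf/payloadBuf are placeholder buffers:
 *
 *	rc = dma_map_start(&memMap, DMA_TO_DEVICE);
 *	rc = dma_map_add_region(&memMap, hdrBuf, hdrLen);
 *	rc = dma_map_add_region(&memMap, payloadBuf, payloadLen);
 *	...build descriptors and run the transfer...
 *	dma_unmap(&memMap, 0);
 */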
1712 | |||
1713 | /****************************************************************************/ | ||
1714 | /** | ||
1715 | * Adds a segment of memory to a memory map. Each segment is both | ||
1716 | * physically and virtually contiguous. | ||
1717 | * | ||
1718 | * @return 0 on success, error code otherwise. | ||
1719 | */ | ||
1720 | /****************************************************************************/ | ||
1721 | |||
1722 | static int dma_map_add_segment(DMA_MemMap_t *memMap, /* Stores state information about the map */ | ||
1723 | DMA_Region_t *region, /* Region that the segment belongs to */ | ||
1724 | void *virtAddr, /* Virtual address of the segment being added */ | ||
1725 | dma_addr_t physAddr, /* Physical address of the segment being added */ | ||
1726 | size_t numBytes /* Number of bytes of the segment being added */ | ||
1727 | ) { | ||
1728 | DMA_Segment_t *segment; | ||
1729 | |||
1730 | DMA_MAP_PRINT("memMap:%p va:%p pa:0x%x #:%d\n", memMap, virtAddr, | ||
1731 | physAddr, numBytes); | ||
1732 | |||
1733 | /* Sanity check */ | ||
1734 | |||
1735 | if (((unsigned long)virtAddr < (unsigned long)region->virtAddr) | ||
1736 | || ((unsigned long)virtAddr + numBytes) > | ||
1737 | ((unsigned long)region->virtAddr + region->numBytes)) { | ||
1738 | printk(KERN_ERR | ||
1739 | "%s: virtAddr %p is outside region @ %p len: %zu\n", | ||
1740 | __func__, virtAddr, region->virtAddr, region->numBytes); | ||
1741 | return -EINVAL; | ||
1742 | } | ||
1743 | |||
1744 | if (region->numSegmentsUsed > 0) { | ||
1745 | /* Check to see if this segment is physically contiguous with the previous one */ | ||
1746 | |||
1747 | segment = ®ion->segment[region->numSegmentsUsed - 1]; | ||
1748 | |||
1749 | if ((segment->physAddr + segment->numBytes) == physAddr) { | ||
1750 | /* It is - just add on to the end */ | ||
1751 | |||
1752 | DMA_MAP_PRINT("appending %d bytes to last segment\n", | ||
1753 | numBytes); | ||
1754 | |||
1755 | segment->numBytes += numBytes; | ||
1756 | |||
1757 | return 0; | ||
1758 | } | ||
1759 | } | ||
1760 | |||
1761 | /* Reallocate to hold more segments, if required. */ | ||
1762 | |||
1763 | if (region->numSegmentsUsed >= region->numSegmentsAllocated) { | ||
1764 | DMA_Segment_t *newSegment; | ||
1765 | size_t oldSize = | ||
1766 | region->numSegmentsAllocated * sizeof(*newSegment); | ||
1767 | int newAlloc = region->numSegmentsAllocated + 4; | ||
1768 | size_t newSize = newAlloc * sizeof(*newSegment); | ||
1769 | |||
1770 | newSegment = kmalloc(newSize, GFP_KERNEL); | ||
1771 | if (newSegment == NULL) { | ||
1772 | return -ENOMEM; | ||
1773 | } | ||
1774 | memcpy(newSegment, region->segment, oldSize); | ||
1775 | memset(&((uint8_t *) newSegment)[oldSize], 0, | ||
1776 | newSize - oldSize); | ||
1777 | kfree(region->segment); | ||
1778 | |||
1779 | region->numSegmentsAllocated = newAlloc; | ||
1780 | region->segment = newSegment; | ||
1781 | } | ||
1782 | |||
1783 | segment = ®ion->segment[region->numSegmentsUsed]; | ||
1784 | region->numSegmentsUsed++; | ||
1785 | |||
1786 | segment->virtAddr = virtAddr; | ||
1787 | segment->physAddr = physAddr; | ||
1788 | segment->numBytes = numBytes; | ||
1789 | |||
1790 | DMA_MAP_PRINT("returning success\n"); | ||
1791 | |||
1792 | return 0; | ||
1793 | } | ||
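/*
 * Worked example (illustrative): if one locked user page maps to physical
 * 0x10001000 and the next one to 0x10002000, the second call hits the
 * "physically contiguous" test above and simply grows the previous segment
 * by PAGE_SIZE instead of consuming a new segment entry, which in turn
 * means fewer DMA descriptors later on.
 */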
1794 | |||
1795 | /****************************************************************************/ | ||
1796 | /** | ||
1797 | * Adds a region of memory to a memory map. Each region is virtually | ||
1798 | * contiguous, but not necessarily physically contiguous. | ||
1799 | * | ||
1800 | * @return 0 on success, error code otherwise. | ||
1801 | */ | ||
1802 | /****************************************************************************/ | ||
1803 | |||
1804 | int dma_map_add_region(DMA_MemMap_t *memMap, /* Stores state information about the map */ | ||
1805 | void *mem, /* Virtual address that we want to get a map of */ | ||
1806 | size_t numBytes /* Number of bytes being mapped */ | ||
1807 | ) { | ||
1808 | unsigned long addr = (unsigned long)mem; | ||
1809 | unsigned int offset; | ||
1810 | int rc = 0; | ||
1811 | DMA_Region_t *region; | ||
1812 | dma_addr_t physAddr; | ||
1813 | |||
1814 | down(&memMap->lock); | ||
1815 | |||
1816 | DMA_MAP_PRINT("memMap:%p va:%p #:%d\n", memMap, mem, numBytes); | ||
1817 | |||
1818 | if (!memMap->inUse) { | ||
1819 | printk(KERN_ERR "%s: Make sure you call dma_map_start first\n", | ||
1820 | __func__); | ||
1821 | rc = -EINVAL; | ||
1822 | goto out; | ||
1823 | } | ||
1824 | |||
1825 | /* Reallocate to hold more regions. */ | ||
1826 | |||
1827 | if (memMap->numRegionsUsed >= memMap->numRegionsAllocated) { | ||
1828 | DMA_Region_t *newRegion; | ||
1829 | size_t oldSize = | ||
1830 | memMap->numRegionsAllocated * sizeof(*newRegion); | ||
1831 | int newAlloc = memMap->numRegionsAllocated + 4; | ||
1832 | size_t newSize = newAlloc * sizeof(*newRegion); | ||
1833 | |||
1834 | newRegion = kmalloc(newSize, GFP_KERNEL); | ||
1835 | if (newRegion == NULL) { | ||
1836 | rc = -ENOMEM; | ||
1837 | goto out; | ||
1838 | } | ||
1839 | memcpy(newRegion, memMap->region, oldSize); | ||
1840 | memset(&((uint8_t *) newRegion)[oldSize], 0, newSize - oldSize); | ||
1841 | |||
1842 | kfree(memMap->region); | ||
1843 | |||
1844 | memMap->numRegionsAllocated = newAlloc; | ||
1845 | memMap->region = newRegion; | ||
1846 | } | ||
1847 | |||
1848 | region = &memMap->region[memMap->numRegionsUsed]; | ||
1849 | memMap->numRegionsUsed++; | ||
1850 | |||
1851 | offset = addr & ~PAGE_MASK; | ||
1852 | |||
1853 | region->memType = dma_mem_type(mem); | ||
1854 | region->virtAddr = mem; | ||
1855 | region->numBytes = numBytes; | ||
1856 | region->numSegmentsUsed = 0; | ||
1857 | region->numLockedPages = 0; | ||
1858 | region->lockedPages = NULL; | ||
1859 | |||
1860 | switch (region->memType) { | ||
1861 | case DMA_MEM_TYPE_VMALLOC: | ||
1862 | { | ||
1863 | atomic_inc(&gDmaStatMemTypeVmalloc); | ||
1864 | |||
1865 | /* printk(KERN_ERR "%s: vmalloc'd pages are not supported\n", __func__); */ | ||
1866 | |||
1867 | /* vmalloc'd pages are not physically contiguous */ | ||
1868 | |||
1869 | rc = -EINVAL; | ||
1870 | break; | ||
1871 | } | ||
1872 | |||
1873 | case DMA_MEM_TYPE_KMALLOC: | ||
1874 | { | ||
1875 | atomic_inc(&gDmaStatMemTypeKmalloc); | ||
1876 | |||
1877 | /* kmalloc'd pages are physically contiguous, so they'll have exactly */ | ||
1878 | /* one segment */ | ||
1879 | |||
1880 | #if ALLOW_MAP_OF_KMALLOC_MEMORY | ||
1881 | physAddr = | ||
1882 | dma_map_single(NULL, mem, numBytes, memMap->dir); | ||
1883 | rc = dma_map_add_segment(memMap, region, mem, physAddr, | ||
1884 | numBytes); | ||
1885 | #else | ||
1886 | rc = -EINVAL; | ||
1887 | #endif | ||
1888 | break; | ||
1889 | } | ||
1890 | |||
1891 | case DMA_MEM_TYPE_DMA: | ||
1892 | { | ||
1893 | /* dma_alloc_xxx pages are physically contiguous */ | ||
1894 | |||
1895 | atomic_inc(&gDmaStatMemTypeCoherent); | ||
1896 | |||
1897 | physAddr = (vmalloc_to_pfn(mem) << PAGE_SHIFT) + offset; | ||
1898 | |||
1899 | dma_sync_single_for_cpu(NULL, physAddr, numBytes, | ||
1900 | memMap->dir); | ||
1901 | rc = dma_map_add_segment(memMap, region, mem, physAddr, | ||
1902 | numBytes); | ||
1903 | break; | ||
1904 | } | ||
1905 | |||
1906 | case DMA_MEM_TYPE_USER: | ||
1907 | { | ||
1908 | size_t firstPageOffset; | ||
1909 | size_t firstPageSize; | ||
1910 | struct page **pages; | ||
1911 | struct task_struct *userTask; | ||
1912 | |||
1913 | atomic_inc(&gDmaStatMemTypeUser); | ||
1914 | |||
1915 | #if 1 | ||
1916 | /* If the pages are user pages, then the dma_mem_map_set_user_task function */ | ||
1917 | /* must have been previously called. */ | ||
1918 | |||
1919 | if (memMap->userTask == NULL) { | ||
1920 | printk(KERN_ERR | ||
1921 | "%s: must call dma_mem_map_set_user_task when using user-mode memory\n", | ||
1922 | __func__); | ||
1923 | /* Don't return with memMap->lock still held. */ | ||
1924 | rc = -EINVAL; | ||
1925 | break; | ||
1926 | } | ||
1925 | |||
1926 | /* User pages need to be locked. */ | ||
1927 | |||
1928 | firstPageOffset = | ||
1929 | (unsigned long)region->virtAddr & (PAGE_SIZE - 1); | ||
1930 | firstPageSize = PAGE_SIZE - firstPageOffset; | ||
1931 | |||
1932 | region->numLockedPages = (firstPageOffset | ||
1933 | + region->numBytes + | ||
1934 | PAGE_SIZE - 1) / PAGE_SIZE; | ||
1935 | pages = | ||
1936 | kmalloc(region->numLockedPages * | ||
1937 | sizeof(struct page *), GFP_KERNEL); | ||
1938 | |||
1939 | if (pages == NULL) { | ||
1940 | region->numLockedPages = 0; | ||
1941 | /* Don't return with memMap->lock still held. */ | ||
1942 | rc = -ENOMEM; | ||
1943 | break; | ||
1944 | } | ||
1943 | |||
1944 | userTask = memMap->userTask; | ||
1945 | |||
1946 | down_read(&userTask->mm->mmap_sem); | ||
1947 | rc = get_user_pages(userTask, /* task */ | ||
1948 | userTask->mm, /* mm */ | ||
1949 | (unsigned long)region->virtAddr, /* start */ | ||
1950 | region->numLockedPages, /* len */ | ||
1951 | memMap->dir == DMA_FROM_DEVICE, /* write */ | ||
1952 | 0, /* force */ | ||
1953 | pages, /* pages (array of pointers to page) */ | ||
1954 | NULL); /* vmas */ | ||
1955 | up_read(&userTask->mm->mmap_sem); | ||
1956 | |||
1957 | if (rc != region->numLockedPages) { | ||
1958 | int pageIdx; | ||
1959 | |||
1960 | /* Release any pages that did get locked. */ | ||
1961 | for (pageIdx = 0; pageIdx < rc; pageIdx++) { | ||
1962 | page_cache_release(pages[pageIdx]); | ||
1963 | } | ||
1964 | kfree(pages); | ||
1965 | region->numLockedPages = 0; | ||
1966 | |||
1967 | if (rc >= 0) { | ||
1968 | rc = -EINVAL; | ||
1969 | } | ||
1970 | } else { | ||
1965 | uint8_t *virtAddr = region->virtAddr; | ||
1966 | size_t bytesRemaining; | ||
1967 | int pageIdx; | ||
1968 | |||
1969 | rc = 0; /* Since get_user_pages returns +ve number */ | ||
1970 | |||
1971 | region->lockedPages = pages; | ||
1972 | |||
1973 | /* We've locked the user pages. Now we need to walk them and figure */ | ||
1974 | /* out the physical addresses. */ | ||
1975 | |||
1976 | /* The first page may be partial */ | ||
1977 | |||
1978 | dma_map_add_segment(memMap, region, virtAddr, | ||
1979 | PFN_PHYS(page_to_pfn(pages[0])) + | ||
1980 | firstPageOffset, | ||
1981 | firstPageSize); | ||
1985 | |||
1986 | virtAddr += firstPageSize; | ||
1987 | bytesRemaining = | ||
1988 | region->numBytes - firstPageSize; | ||
1989 | |||
1990 | for (pageIdx = 1; | ||
1991 | pageIdx < region->numLockedPages; | ||
1992 | pageIdx++) { | ||
1993 | size_t bytesThisPage = | ||
1994 | (bytesRemaining > PAGE_SIZE ? | ||
1995 | PAGE_SIZE : bytesRemaining); | ||
1996 | |||
1997 | DMA_MAP_PRINT("pageIdx:%d pages[pageIdx]=%p pfn=%lu phys=%#lx\n", | ||
1998 | pageIdx, pages[pageIdx], | ||
1999 | page_to_pfn(pages[pageIdx]), | ||
2000 | (unsigned long)PFN_PHYS(page_to_pfn(pages[pageIdx]))); | ||
2004 | |||
2005 | dma_map_add_segment(memMap, region, virtAddr, | ||
2006 | PFN_PHYS(page_to_pfn(pages[pageIdx])), | ||
2007 | bytesThisPage); | ||
2012 | |||
2013 | virtAddr += bytesThisPage; | ||
2014 | bytesRemaining -= bytesThisPage; | ||
2015 | } | ||
2016 | } | ||
2017 | #else | ||
2018 | printk(KERN_ERR | ||
2019 | "%s: User mode pages are not yet supported\n", | ||
2020 | __func__); | ||
2021 | |||
2022 | /* user pages are not physically contiguous */ | ||
2023 | |||
2024 | rc = -EINVAL; | ||
2025 | #endif | ||
2026 | break; | ||
2027 | } | ||
2028 | |||
2029 | default: | ||
2030 | { | ||
2031 | printk(KERN_ERR "%s: Unsupported memory type: %d\n", | ||
2032 | __func__, region->memType); | ||
2033 | |||
2034 | rc = -EINVAL; | ||
2035 | break; | ||
2036 | } | ||
2037 | } | ||
2038 | |||
2039 | if (rc != 0) { | ||
2040 | memMap->numRegionsUsed--; | ||
2041 | } | ||
2042 | |||
2043 | out: | ||
2044 | |||
2045 | DMA_MAP_PRINT("returning %d\n", rc); | ||
2046 | |||
2047 | up(&memMap->lock); | ||
2048 | |||
2049 | return rc; | ||
2050 | } | ||
2051 | |||
2052 | EXPORT_SYMBOL(dma_map_add_region); | ||
2053 | |||
2054 | /****************************************************************************/ | ||
2055 | /** | ||
2056 | * Maps in a memory region such that it can be used for performing a DMA. | ||
2057 | * | ||
2058 | * @return 0 on success, error code otherwise. | ||
2059 | */ | ||
2060 | /****************************************************************************/ | ||
2061 | |||
2062 | int dma_map_mem(DMA_MemMap_t *memMap, /* Stores state information about the map */ | ||
2063 | void *mem, /* Virtual address that we want to get a map of */ | ||
2064 | size_t numBytes, /* Number of bytes being mapped */ | ||
2065 | enum dma_data_direction dir /* Direction that the mapping will be going */ | ||
2066 | ) { | ||
2067 | int rc; | ||
2068 | |||
2069 | rc = dma_map_start(memMap, dir); | ||
2070 | if (rc == 0) { | ||
2071 | rc = dma_map_add_region(memMap, mem, numBytes); | ||
2072 | if (rc < 0) { | ||
2073 | /* Since the add fails, this function will fail, and the caller won't */ | ||
2074 | /* call unmap, so we need to do it here. */ | ||
2075 | |||
2076 | dma_unmap(memMap, 0); | ||
2077 | } | ||
2078 | } | ||
2079 | |||
2080 | return rc; | ||
2081 | } | ||
2082 | |||
2083 | EXPORT_SYMBOL(dma_map_mem); | ||
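/*
 * Illustrative sketch (not part of the original driver): the common
 * single-buffer case, using a buffer type that dma_mem_supports_dma()
 * accepts:
 *
 *	rc = dma_map_mem(&memMap, buf, numBytes, DMA_TO_DEVICE);
 *	if (rc == 0) {
 *		...build the descriptor ring and run the transfer...
 *		dma_unmap(&memMap, 0);
 *	}
 */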
2084 | |||
2085 | /****************************************************************************/ | ||
2086 | /** | ||
2087 | * Sets up a descriptor ring for a given memory map. | ||
2088 | * | ||
2089 | * It is assumed that the descriptor ring has already been initialized, and | ||
2090 | * this routine will only reallocate a new descriptor ring if the existing | ||
2091 | * one is too small. | ||
2092 | * | ||
2093 | * @return 0 on success, error code otherwise. | ||
2094 | */ | ||
2095 | /****************************************************************************/ | ||
2096 | |||
2097 | int dma_map_create_descriptor_ring(DMA_Device_t dev, /* DMA device (where the ring is stored) */ | ||
2098 | DMA_MemMap_t *memMap, /* Memory map that will be used */ | ||
2099 | dma_addr_t devPhysAddr /* Physical address of device */ | ||
2100 | ) { | ||
2101 | int rc; | ||
2102 | int numDescriptors; | ||
2103 | DMA_DeviceAttribute_t *devAttr; | ||
2104 | DMA_Region_t *region; | ||
2105 | DMA_Segment_t *segment; | ||
2106 | dma_addr_t srcPhysAddr; | ||
2107 | dma_addr_t dstPhysAddr; | ||
2108 | int regionIdx; | ||
2109 | int segmentIdx; | ||
2110 | |||
2111 | devAttr = &DMA_gDeviceAttribute[dev]; | ||
2112 | |||
2113 | down(&memMap->lock); | ||
2114 | |||
2115 | /* Figure out how many descriptors we need */ | ||
2116 | |||
2117 | numDescriptors = 0; | ||
2118 | for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) { | ||
2119 | region = &memMap->region[regionIdx]; | ||
2120 | |||
2121 | for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed; | ||
2122 | segmentIdx++) { | ||
2123 | segment = ®ion->segment[segmentIdx]; | ||
2124 | |||
2125 | if (memMap->dir == DMA_TO_DEVICE) { | ||
2126 | srcPhysAddr = segment->physAddr; | ||
2127 | dstPhysAddr = devPhysAddr; | ||
2128 | } else { | ||
2129 | srcPhysAddr = devPhysAddr; | ||
2130 | dstPhysAddr = segment->physAddr; | ||
2131 | } | ||
2132 | |||
2133 | rc = dma_calculate_descriptor_count(dev, srcPhysAddr, | ||
2134 | dstPhysAddr, | ||
2135 | segment->numBytes); | ||
2138 | if (rc < 0) { | ||
2139 | printk(KERN_ERR | ||
2140 | "%s: dma_calculate_descriptor_count failed: %d\n", | ||
2141 | __func__, rc); | ||
2142 | goto out; | ||
2143 | } | ||
2144 | numDescriptors += rc; | ||
2145 | } | ||
2146 | } | ||
2147 | |||
2148 | /* Adjust the size of the ring, if it isn't big enough */ | ||
2149 | |||
2150 | if (numDescriptors > devAttr->ring.descriptorsAllocated) { | ||
2151 | dma_free_descriptor_ring(&devAttr->ring); | ||
2152 | rc = dma_alloc_descriptor_ring(&devAttr->ring, | ||
2153 | numDescriptors); | ||
2155 | if (rc < 0) { | ||
2156 | printk(KERN_ERR | ||
2157 | "%s: dma_alloc_descriptor_ring failed: %d\n", | ||
2158 | __func__, rc); | ||
2159 | goto out; | ||
2160 | } | ||
2161 | } else { | ||
2162 | rc = dma_init_descriptor_ring(&devAttr->ring, | ||
2163 | numDescriptors); | ||
2165 | if (rc < 0) { | ||
2166 | printk(KERN_ERR | ||
2167 | "%s: dma_init_descriptor_ring failed: %d\n", | ||
2168 | __func__, rc); | ||
2169 | goto out; | ||
2170 | } | ||
2171 | } | ||
2172 | |||
2173 | /* Populate the descriptors */ | ||
2174 | |||
2175 | for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) { | ||
2176 | region = &memMap->region[regionIdx]; | ||
2177 | |||
2178 | for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed; | ||
2179 | segmentIdx++) { | ||
2180 | segment = ®ion->segment[segmentIdx]; | ||
2181 | |||
2182 | if (memMap->dir == DMA_TO_DEVICE) { | ||
2183 | srcPhysAddr = segment->physAddr; | ||
2184 | dstPhysAddr = devPhysAddr; | ||
2185 | } else { | ||
2186 | srcPhysAddr = devPhysAddr; | ||
2187 | dstPhysAddr = segment->physAddr; | ||
2188 | } | ||
2189 | |||
2190 | rc = dma_add_descriptors(&devAttr->ring, dev, | ||
2191 | srcPhysAddr, dstPhysAddr, | ||
2192 | segment->numBytes); | ||
2194 | if (rc < 0) { | ||
2195 | printk(KERN_ERR | ||
2196 | "%s: dma_add_descriptors failed: %d\n", | ||
2197 | __func__, rc); | ||
2198 | goto out; | ||
2199 | } | ||
2200 | } | ||
2201 | } | ||
2202 | |||
2203 | rc = 0; | ||
2204 | |||
2205 | out: | ||
2206 | |||
2207 | up(&memMap->lock); | ||
2208 | return rc; | ||
2209 | } | ||
2210 | |||
2211 | EXPORT_SYMBOL(dma_map_create_descriptor_ring); | ||
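/*
 * Illustrative sketch (not part of the original driver): gluing a map to a
 * peripheral transfer. DMA_DEVICE_FOO and devFifoPhysAddr are placeholders
 * for the real device enum and its FIFO bus address; the transfer itself is
 * then started on a channel owned by that device:
 *
 *	rc = dma_map_mem(&memMap, buf, numBytes, DMA_TO_DEVICE);
 *	if (rc == 0)
 *		rc = dma_map_create_descriptor_ring(DMA_DEVICE_FOO, &memMap,
 *						    devFifoPhysAddr);
 */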
2212 | |||
2213 | /****************************************************************************/ | ||
2214 | /** | ||
2215 | * Unmaps a memory region previously mapped with dma_map_mem() or | ||
2216 | * dma_map_start()/dma_map_add_region(), releasing any locked user pages. | ||
2217 | * @return 0 on success, error code otherwise. | ||
2218 | */ | ||
2219 | /****************************************************************************/ | ||
2220 | |||
2221 | int dma_unmap(DMA_MemMap_t *memMap, /* Stores state information about the map */ | ||
2222 | int dirtied /* non-zero if any of the pages were modified */ | ||
2223 | ) { | ||
2224 | |||
2225 | int rc = 0; | ||
2226 | int regionIdx; | ||
2227 | int segmentIdx; | ||
2228 | DMA_Region_t *region; | ||
2229 | DMA_Segment_t *segment; | ||
2230 | |||
2231 | down(&memMap->lock); | ||
2232 | |||
2233 | for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) { | ||
2234 | region = &memMap->region[regionIdx]; | ||
2235 | |||
2236 | for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed; | ||
2237 | segmentIdx++) { | ||
2238 | segment = ®ion->segment[segmentIdx]; | ||
2239 | |||
2240 | switch (region->memType) { | ||
2241 | case DMA_MEM_TYPE_VMALLOC: | ||
2242 | { | ||
2243 | printk(KERN_ERR | ||
2244 | "%s: vmalloc'd pages are not yet supported\n", | ||
2245 | __func__); | ||
2246 | rc = -EINVAL; | ||
2247 | goto out; | ||
2248 | } | ||
2249 | |||
2250 | case DMA_MEM_TYPE_KMALLOC: | ||
2251 | { | ||
2252 | #if ALLOW_MAP_OF_KMALLOC_MEMORY | ||
2253 | dma_unmap_single(NULL, | ||
2254 | segment->physAddr, | ||
2255 | segment->numBytes, | ||
2256 | memMap->dir); | ||
2257 | #endif | ||
2258 | break; | ||
2259 | } | ||
2260 | |||
2261 | case DMA_MEM_TYPE_DMA: | ||
2262 | { | ||
2263 | dma_sync_single_for_cpu(NULL, | ||
2264 | segment->physAddr, | ||
2265 | segment->numBytes, | ||
2266 | memMap->dir); | ||
2269 | break; | ||
2270 | } | ||
2271 | |||
2272 | case DMA_MEM_TYPE_USER: | ||
2273 | { | ||
2274 | /* Nothing to do here. */ | ||
2275 | |||
2276 | break; | ||
2277 | } | ||
2278 | |||
2279 | default: | ||
2280 | { | ||
2281 | printk(KERN_ERR | ||
2282 | "%s: Unsupported memory type: %d\n", | ||
2283 | __func__, region->memType); | ||
2284 | rc = -EINVAL; | ||
2285 | goto out; | ||
2286 | } | ||
2287 | } | ||
2288 | |||
2289 | segment->virtAddr = NULL; | ||
2290 | segment->physAddr = 0; | ||
2291 | segment->numBytes = 0; | ||
2292 | } | ||
2293 | |||
2294 | if (region->numLockedPages > 0) { | ||
2295 | int pageIdx; | ||
2296 | |||
2297 | /* Some user pages were locked. We need to go and unlock them now. */ | ||
2298 | |||
2299 | for (pageIdx = 0; pageIdx < region->numLockedPages; | ||
2300 | pageIdx++) { | ||
2301 | struct page *page = | ||
2302 | region->lockedPages[pageIdx]; | ||
2303 | |||
2304 | if (memMap->dir == DMA_FROM_DEVICE) { | ||
2305 | SetPageDirty(page); | ||
2306 | } | ||
2307 | page_cache_release(page); | ||
2308 | } | ||
2309 | kfree(region->lockedPages); | ||
2310 | region->numLockedPages = 0; | ||
2311 | region->lockedPages = NULL; | ||
2312 | } | ||
2313 | |||
2314 | region->memType = DMA_MEM_TYPE_NONE; | ||
2315 | region->virtAddr = NULL; | ||
2316 | region->numBytes = 0; | ||
2317 | region->numSegmentsUsed = 0; | ||
2318 | } | ||
2319 | memMap->userTask = NULL; | ||
2320 | memMap->numRegionsUsed = 0; | ||
2321 | memMap->inUse = 0; | ||
2322 | |||
2323 | out: | ||
2324 | up(&memMap->lock); | ||
2325 | |||
2326 | return rc; | ||
2327 | } | ||
2328 | |||
2329 | EXPORT_SYMBOL(dma_unmap); | ||
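/*
 * Illustrative sketch (not part of the original driver): tearing down a
 * device-to-memory mapping of user pages. dma_mem_map_set_user_task() must
 * have been called first (its exact signature is assumed here), and note
 * that this implementation keys SetPageDirty() off the DMA_FROM_DEVICE
 * direction rather than the dirtied argument, though callers should still
 * pass 1 when the device wrote to the buffer:
 *
 *	dma_mem_map_set_user_task(&memMap, current);
 *	rc = dma_map_mem(&memMap, userBuf, numBytes, DMA_FROM_DEVICE);
 *	...DMA from the device into the buffer...
 *	dma_unmap(&memMap, 1);
 */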