Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--  arch/arm/include/asm/device.h       |   1
-rw-r--r--  arch/arm/include/asm/dma-mapping.h  | 196
 2 files changed, 28 insertions(+), 169 deletions(-)
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index 7aa368003b05..6e2cb0ee770d 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -7,6 +7,7 @@
 #define ASMARM_DEVICE_H
 
 struct dev_archdata {
+	struct dma_map_ops *dma_ops;
 #ifdef CONFIG_DMABOUNCE
 	struct dmabounce_device_info *dmabounce;
 #endif
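
The one-line device.h change above is what enables per-device DMA operations: each struct device can now carry its own dma_map_ops pointer in archdata. A rough sketch of how platform code might use this follows (a sketch only: set_dma_ops() and arm_dma_ops come from the dma-mapping.h hunk below, while my_bounce_ops and my_machine_setup() are hypothetical names invented for this example):

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical custom ops table, e.g. filled in by an IOMMU or bounce-buffer
 * implementation; devices without one keep using the default arm_dma_ops. */
static struct dma_map_ops my_bounce_ops;

static void my_machine_setup(struct device *dev)
{
	/* Route all dma_* calls for this one device through my_bounce_ops. */
	set_dma_ops(dev, &my_bounce_ops);
}
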
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index eeddbe201e24..6725a08a5c21 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -11,6 +11,27 @@
 #include <asm/memory.h>
 
 #define DMA_ERROR_CODE	(~0)
+extern struct dma_map_ops arm_dma_ops;
+
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+	if (dev && dev->archdata.dma_ops)
+		return dev->archdata.dma_ops;
+	return &arm_dma_ops;
+}
+
+static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
+{
+	BUG_ON(!dev);
+	dev->archdata.dma_ops = ops;
+}
+
+#include <asm-generic/dma-mapping-common.h>
+
+static inline int dma_set_mask(struct device *dev, u64 mask)
+{
+	return get_dma_ops(dev)->set_dma_mask(dev, mask);
+}
 
 #ifdef __arch_page_to_dma
 #error Please update to __arch_pfn_to_dma
@@ -119,7 +140,6 @@ static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
 
 extern int dma_supported(struct device *, u64);
 extern int dma_set_mask(struct device *, u64);
-
 /*
  * DMA errors are defined by all-bits-set in the DMA address.
  */
@@ -297,179 +317,17 @@ static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
 }
 #endif /* CONFIG_DMABOUNCE */
 
-/**
- * dma_map_single - map a single buffer for streaming DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @cpu_addr: CPU direct mapped address of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Ensure that any data held in the cache is appropriately discarded
- * or written back.
- *
- * The device owns this memory once this call has completed.  The CPU
- * can regain ownership by calling dma_unmap_single() or
- * dma_sync_single_for_cpu().
- */
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-		size_t size, enum dma_data_direction dir)
-{
-	unsigned long offset;
-	struct page *page;
-	dma_addr_t addr;
-
-	BUG_ON(!virt_addr_valid(cpu_addr));
-	BUG_ON(!virt_addr_valid(cpu_addr + size - 1));
-	BUG_ON(!valid_dma_direction(dir));
-
-	page = virt_to_page(cpu_addr);
-	offset = (unsigned long)cpu_addr & ~PAGE_MASK;
-	addr = __dma_map_page(dev, page, offset, size, dir);
-	debug_dma_map_page(dev, page, offset, size, dir, addr, true);
-
-	return addr;
-}
-
-/**
- * dma_map_page - map a portion of a page for streaming DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @page: page that buffer resides in
- * @offset: offset into page for start of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Ensure that any data held in the cache is appropriately discarded
- * or written back.
- *
- * The device owns this memory once this call has completed.  The CPU
- * can regain ownership by calling dma_unmap_page().
- */
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size, enum dma_data_direction dir)
-{
-	dma_addr_t addr;
-
-	BUG_ON(!valid_dma_direction(dir));
-
-	addr = __dma_map_page(dev, page, offset, size, dir);
-	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
-
-	return addr;
-}
-
-/**
- * dma_unmap_single - unmap a single buffer previously mapped
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @size: size of buffer (same as passed to dma_map_single)
- * @dir: DMA transfer direction (same as passed to dma_map_single)
- *
- * Unmap a single streaming mode DMA translation.  The handle and size
- * must match what was provided in the previous dma_map_single() call.
- * All other usages are undefined.
- *
- * After this call, reads by the CPU to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir)
-{
-	debug_dma_unmap_page(dev, handle, size, dir, true);
-	__dma_unmap_page(dev, handle, size, dir);
-}
-
-/**
- * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @size: size of buffer (same as passed to dma_map_page)
- * @dir: DMA transfer direction (same as passed to dma_map_page)
- *
- * Unmap a page streaming mode DMA translation.  The handle and size
- * must match what was provided in the previous dma_map_page() call.
- * All other usages are undefined.
- *
- * After this call, reads by the CPU to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir)
-{
-	debug_dma_unmap_page(dev, handle, size, dir, false);
-	__dma_unmap_page(dev, handle, size, dir);
-}
-
-
-static inline void dma_sync_single_for_cpu(struct device *dev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	BUG_ON(!valid_dma_direction(dir));
-
-	debug_dma_sync_single_for_cpu(dev, handle, size, dir);
-
-	if (!dmabounce_sync_for_cpu(dev, handle, size, dir))
-		return;
-
-	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	BUG_ON(!valid_dma_direction(dir));
-
-	debug_dma_sync_single_for_device(dev, handle, size, dir);
-
-	if (!dmabounce_sync_for_device(dev, handle, size, dir))
-		return;
-
-	__dma_single_cpu_to_dev(dma_to_virt(dev, handle), size, dir);
-}
-
-/**
- * dma_sync_single_range_for_cpu
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @offset: offset of region to start sync
- * @size: size of region to sync
- * @dir: DMA transfer direction (same as passed to dma_map_single)
- *
- * Make physical memory consistent for a single streaming mode DMA
- * translation after a transfer.
- *
- * If you perform a dma_map_single() but wish to interrogate the
- * buffer using the cpu, yet do not wish to teardown the PCI dma
- * mapping, you must call this function before doing so.  At the
- * next point you give the PCI dma address back to the card, you
- * must first the perform a dma_sync_for_device, and then the
- * device again owns the buffer.
- */
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
-		dma_addr_t handle, unsigned long offset, size_t size,
-		enum dma_data_direction dir)
-{
-	dma_sync_single_for_cpu(dev, handle + offset, size, dir);
-}
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
-		dma_addr_t handle, unsigned long offset, size_t size,
-		enum dma_data_direction dir)
-{
-	dma_sync_single_for_device(dev, handle + offset, size, dir);
-}
-
 /*
  * The scatter list versions of the above methods.
  */
-extern int dma_map_sg(struct device *, struct scatterlist *, int,
-		enum dma_data_direction);
-extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
-		enum dma_data_direction);
-extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
-		enum dma_data_direction);
-extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
-		enum dma_data_direction);
-
+extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
+		enum dma_data_direction, struct dma_attrs *attrs);
+extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
+		enum dma_data_direction, struct dma_attrs *attrs);
+extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
+		enum dma_data_direction);
+extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
+		enum dma_data_direction);
 
 #endif /* __KERNEL__ */
 #endif
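
With the open-coded dma_map_single()/dma_unmap_page()/dma_sync_* inlines removed above, the wrappers from asm-generic/dma-mapping-common.h take over and every streaming-DMA call is dispatched through the ops table returned by get_dma_ops(). A minimal sketch of that dispatch pattern follows (simplified and renamed for illustration; the real generic header also passes dma_attrs through and calls the dma-debug hooks):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Sketch of what the generic wrapper now does instead of the removed
 * ARM-specific inline: look up the per-device (or default) ops table
 * and call its map_page hook. */
static inline dma_addr_t example_map_single(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);	/* arm_dma_ops unless overridden */

	return ops->map_page(dev, virt_to_page(ptr), offset_in_page(ptr),
			     size, dir, NULL);
}

Because cache maintenance and bounce handling now live behind arm_dma_ops (or a device-specific ops table installed with set_dma_ops()), the header no longer needs the DMABOUNCE-aware inlines that this patch deletes.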