Diffstat (limited to 'arch/arm/include/asm/dma-mapping.h')
-rw-r--r--  arch/arm/include/asm/dma-mapping.h | 93
1 file changed, 69 insertions(+), 24 deletions(-)
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index c568da7dcae4..4fff837363ed 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -5,24 +5,29 @@
 
 #include <linux/mm_types.h>
 #include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
 
 #include <asm-generic/dma-coherent.h>
 #include <asm/memory.h>
 
+#ifdef __arch_page_to_dma
+#error Please update to __arch_pfn_to_dma
+#endif
+
 /*
- * page_to_dma/dma_to_virt/virt_to_dma are architecture private functions
- * used internally by the DMA-mapping API to provide DMA addresses. They
- * must not be used by drivers.
+ * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
+ * functions used internally by the DMA-mapping API to provide DMA
+ * addresses. They must not be used by drivers.
  */
-#ifndef __arch_page_to_dma
-static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
+#ifndef __arch_pfn_to_dma
+static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
 {
-        return (dma_addr_t)__pfn_to_bus(page_to_pfn(page));
+        return (dma_addr_t)__pfn_to_bus(pfn);
 }
 
-static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
+static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
 {
-        return pfn_to_page(__bus_to_pfn(addr));
+        return __bus_to_pfn(addr);
 }
 
 static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
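[Note: the hunk above swaps the page-based private helpers for pfn-based ones. The relationship between the two forms is visible later in this patch, where __dma_map_page computes pfn_to_dma(dev, page_to_pfn(page)) + offset. A minimal sketch of the equivalence; the example_* names are made up for illustration and are not part of the patch:]

/*
 * Illustration only -- the old page-based helpers are the new
 * pfn-based ones composed with page_to_pfn()/pfn_to_page().
 */
static inline dma_addr_t example_page_to_dma(struct device *dev,
                struct page *page)
{
        return pfn_to_dma(dev, page_to_pfn(page));
}

static inline struct page *example_dma_to_page(struct device *dev,
                dma_addr_t addr)
{
        return pfn_to_page(dma_to_pfn(dev, addr));
}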
@@ -35,14 +40,14 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
         return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
 }
 #else
-static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
+static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
 {
-        return __arch_page_to_dma(dev, page);
+        return __arch_pfn_to_dma(dev, pfn);
 }
 
-static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
+static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
 {
-        return __arch_dma_to_page(dev, addr);
+        return __arch_dma_to_pfn(dev, addr);
 }
 
 static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
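[Note: the #else branch above is taken when a platform's bus addresses are offset from physical addresses and it supplies its own macros. A hypothetical <mach/memory.h> fragment under that assumption; the name EXAMPLE_BUS_OFFSET and its value are invented for illustration:]

/* Hypothetical platform override -- names and offset are assumed. */
#define EXAMPLE_BUS_OFFSET      UL(0x80000000)

#define __arch_pfn_to_dma(dev, pfn)     \
        ((dma_addr_t)__pfn_to_phys(pfn) - EXAMPLE_BUS_OFFSET)
#define __arch_dma_to_pfn(dev, addr)    \
        __phys_to_pfn((addr) + EXAMPLE_BUS_OFFSET)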
@@ -293,13 +298,13 @@ extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
 /*
  * The DMA API, implemented by dmabounce.c. See below for descriptions.
  */
-extern dma_addr_t dma_map_single(struct device *, void *, size_t,
+extern dma_addr_t __dma_map_single(struct device *, void *, size_t,
                 enum dma_data_direction);
-extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
+extern void __dma_unmap_single(struct device *, dma_addr_t, size_t,
                 enum dma_data_direction);
-extern dma_addr_t dma_map_page(struct device *, struct page *,
+extern dma_addr_t __dma_map_page(struct device *, struct page *,
                 unsigned long, size_t, enum dma_data_direction);
-extern void dma_unmap_page(struct device *, dma_addr_t, size_t,
+extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
                 enum dma_data_direction);
 
 /*
@@ -323,6 +328,34 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
 }
 
 
+static inline dma_addr_t __dma_map_single(struct device *dev, void *cpu_addr,
+                size_t size, enum dma_data_direction dir)
+{
+        __dma_single_cpu_to_dev(cpu_addr, size, dir);
+        return virt_to_dma(dev, cpu_addr);
+}
+
+static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
+                unsigned long offset, size_t size, enum dma_data_direction dir)
+{
+        __dma_page_cpu_to_dev(page, offset, size, dir);
+        return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+}
+
+static inline void __dma_unmap_single(struct device *dev, dma_addr_t handle,
+                size_t size, enum dma_data_direction dir)
+{
+        __dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
+}
+
+static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
+                size_t size, enum dma_data_direction dir)
+{
+        __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
+                handle & ~PAGE_MASK, size, dir);
+}
+#endif /* CONFIG_DMABOUNCE */
+
 /**
  * dma_map_single - map a single buffer for streaming DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
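[Note: the __dma_* inlines added above are the !CONFIG_DMABOUNCE counterparts of the extern declarations renamed in the earlier hunk; either way, the public wrappers below funnel through them. A rough driver-side usage sketch, where dev, page and len are assumed to exist and error handling is elided:]

/* Usage sketch only -- not part of the patch. */
dma_addr_t handle;

handle = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
/* ... program the device with 'handle' and run the transfer ... */
dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);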
@@ -340,11 +373,16 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
 static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
                 size_t size, enum dma_data_direction dir)
 {
+        dma_addr_t addr;
+
         BUG_ON(!valid_dma_direction(dir));
 
-        __dma_single_cpu_to_dev(cpu_addr, size, dir);
+        addr = __dma_map_single(dev, cpu_addr, size, dir);
+        debug_dma_map_page(dev, virt_to_page(cpu_addr),
+                (unsigned long)cpu_addr & ~PAGE_MASK, size,
+                dir, addr, true);
 
-        return virt_to_dma(dev, cpu_addr);
+        return addr;
 }
 
 /**
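[Note: with CONFIG_DMA_API_DEBUG enabled, the debug_dma_map_page() call above records every mapping together with the returned handle; dma_map_single() is reported as a single mapping via the final 'true' argument. The driver-visible API is unchanged, as in this sketch with assumed dev, buf and size:]

/* Driver view stays the same -- sketch only. */
dma_addr_t dma;

dma = dma_map_single(dev, buf, size, DMA_TO_DEVICE);
/* ... DMA runs from 'dma' ... */
dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);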
@@ -364,11 +402,14 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                 unsigned long offset, size_t size, enum dma_data_direction dir)
 {
+        dma_addr_t addr;
+
         BUG_ON(!valid_dma_direction(dir));
 
-        __dma_page_cpu_to_dev(page, offset, size, dir);
+        addr = __dma_map_page(dev, page, offset, size, dir);
+        debug_dma_map_page(dev, page, offset, size, dir, addr, false);
 
-        return page_to_dma(dev, page) + offset;
+        return addr;
 }
 
 /**
@@ -388,7 +429,8 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
                 size_t size, enum dma_data_direction dir)
 {
-        __dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
+        debug_dma_unmap_page(dev, handle, size, dir, true);
+        __dma_unmap_single(dev, handle, size, dir);
 }
 
 /**
@@ -408,10 +450,9 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
 static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
                 size_t size, enum dma_data_direction dir)
 {
-        __dma_page_dev_to_cpu(dma_to_page(dev, handle), handle & ~PAGE_MASK,
-                size, dir);
+        debug_dma_unmap_page(dev, handle, size, dir, false);
+        __dma_unmap_page(dev, handle, size, dir);
 }
-#endif /* CONFIG_DMABOUNCE */
 
 /**
  * dma_sync_single_range_for_cpu
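[Note: both unmap paths now notify dma-debug before undoing the CPU/device handover, so with CONFIG_DMA_API_DEBUG a mismatched map/unmap pair is flagged at unmap time. Sketch of the kind of bug this catches, with an assumed dev and buf:]

/* With CONFIG_DMA_API_DEBUG this provokes a warning: the unmap size
 * does not match what was recorded when the buffer was mapped. */
dma_addr_t dma = dma_map_single(dev, buf, 4096, DMA_TO_DEVICE);

dma_unmap_single(dev, dma, 2048, DMA_TO_DEVICE);        /* dma-debug complains */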
@@ -437,6 +478,8 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
 {
         BUG_ON(!valid_dma_direction(dir));
 
+        debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);
+
         if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
                 return;
 
@@ -449,6 +492,8 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
 {
         BUG_ON(!valid_dma_direction(dir));
 
+        debug_dma_sync_single_for_device(dev, handle + offset, size, dir);
+
         if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
                 return;
 
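[Note: the two sync hooks above report a CPU's temporary access to part of a mapped buffer. A common pattern is a driver peeking at just a packet header inside a longer DMA_FROM_DEVICE mapping; sketch with assumed dev, handle and hdr_len:]

/* Sketch: inspect the first 'hdr_len' bytes of a device-owned buffer. */
dma_sync_single_range_for_cpu(dev, handle, 0, hdr_len, DMA_FROM_DEVICE);
/* ... CPU reads the header ... */
dma_sync_single_range_for_device(dev, handle, 0, hdr_len, DMA_FROM_DEVICE);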