author	Russell King <rmk+kernel@arm.linux.org.uk>	2009-11-24 11:27:17 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2010-02-15 10:22:11 -0500
commit	4ea0d7371e808628d11154b0d44140b70f05b998 (patch)
tree	7673b2fc850475a587144a02c9f568a2831f0c2f
parent	18eabe2347ae7a11b3db768695913724166dfb0e (diff)
ARM: dma-mapping: push buffer ownership down into dma-mapping.c
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Tested-By: Santosh Shilimkar <santosh.shilimkar@ti.com>
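The "buffer ownership" notion this patch pushes down into dma-mapping.c is the usual streaming-DMA contract: a driver hands a buffer to the device with dma_map_*() and reclaims it with dma_unmap_*(). As a minimal sketch of those two transitions from the driver side (the mydev_* names are hypothetical and not part of this patch):

/* Driver-side view of DMA buffer ownership -- illustrative only. */
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/errno.h>

/* hypothetical helper that would program the device's DMA engine */
static void mydev_kick_dma(dma_addr_t handle, size_t len)
{
}

static int mydev_start_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* CPU -> device: caches are cleaned so the device sees current data */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* the device now owns the buffer; the CPU must not touch it */
	mydev_kick_dma(handle, len);
	return 0;
}

static void mydev_tx_done(struct device *dev, dma_addr_t handle, size_t len)
{
	/* device -> CPU: ownership returns to the CPU */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
}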
-rw-r--r--	arch/arm/include/asm/dma-mapping.h	39
-rw-r--r--	arch/arm/mm/dma-mapping.c	34
2 files changed, 55 insertions, 18 deletions
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index e850f5c1607b..256ee1c9f51a 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -57,46 +57,57 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
 #endif
 
 /*
- * Private support functions: these are not part of the API and are
- * liable to change. Drivers must not use these.
- */
-extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
-extern void dma_cache_maint_page(struct page *page, unsigned long offset,
-	size_t size, int rw);
-
-/*
  * The DMA API is built upon the notion of "buffer ownership". A buffer
  * is either exclusively owned by the CPU (and therefore may be accessed
  * by it) or exclusively owned by the DMA device. These helper functions
  * represent the transitions between these two ownership states.
  *
- * As above, these are private support functions and not part of the API.
- * Drivers must not use these.
+ * Note, however, that on later ARMs, this notion does not work due to
+ * speculative prefetches. We model our approach on the assumption that
+ * the CPU does do speculative prefetches, which means we clean caches
+ * before transfers and delay cache invalidation until transfer completion.
+ *
+ * Private support functions: these are not part of the API and are
+ * liable to change. Drivers must not use these.
  */
 static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
 		enum dma_data_direction dir)
 {
+	extern void ___dma_single_cpu_to_dev(const void *, size_t,
+		enum dma_data_direction);
+
 	if (!arch_is_coherent())
-		dma_cache_maint(kaddr, size, dir);
+		___dma_single_cpu_to_dev(kaddr, size, dir);
 }
 
 static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
 		enum dma_data_direction dir)
 {
-	/* nothing to do */
+	extern void ___dma_single_dev_to_cpu(const void *, size_t,
+		enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_single_dev_to_cpu(kaddr, size, dir);
 }
 
 static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
 	size_t size, enum dma_data_direction dir)
 {
+	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
+		size_t, enum dma_data_direction);
+
 	if (!arch_is_coherent())
-		dma_cache_maint_page(page, off, size, dir);
+		___dma_page_cpu_to_dev(page, off, size, dir);
 }
 
 static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
 	size_t size, enum dma_data_direction dir)
 {
-	/* nothing to do */
+	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
+		size_t, enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_page_dev_to_cpu(page, off, size, dir);
 }
 
 /*
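Two details of the new header are worth noting. The ___dma_* declarations now live inside the inline function bodies, so the private symbols are never visible at header scope and drivers including the header cannot even name them. A standalone sketch of that function-scope-extern pattern (plain C, hypothetical names, not kernel code):

#include <stdio.h>

/* What a header would expose: backend_op is declared only inside the
 * inline body, so the header alone does not make it nameable at file
 * scope by its includers. */
static inline void frontend_op(void)
{
	extern void backend_op(void);	/* function-scope declaration */
	backend_op();
}

/* In the kernel the definition lives in a separate file
 * (arch/arm/mm/dma-mapping.c); it sits here only to keep the
 * sketch self-contained and runnable. */
void backend_op(void)
{
	puts("private backend");
}

int main(void)
{
	frontend_op();
	return 0;
}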
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index a316c9459526..bbf87880b915 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -404,7 +404,7 @@ EXPORT_SYMBOL(dma_free_coherent);
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
-void dma_cache_maint(const void *start, size_t size, int direction)
+static void dma_cache_maint(const void *start, size_t size, int direction)
 {
 	void (*inner_op)(const void *, const void *);
 	void (*outer_op)(unsigned long, unsigned long);
@@ -431,7 +431,20 @@ void dma_cache_maint(const void *start, size_t size, int direction)
 	inner_op(start, start + size);
 	outer_op(__pa(start), __pa(start) + size);
 }
-EXPORT_SYMBOL(dma_cache_maint);
+
+void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
+	enum dma_data_direction dir)
+{
+	dma_cache_maint(kaddr, size, dir);
+}
+EXPORT_SYMBOL(___dma_single_cpu_to_dev);
+
+void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
+	enum dma_data_direction dir)
+{
+	/* nothing to do */
+}
+EXPORT_SYMBOL(___dma_single_dev_to_cpu);
 
 static void dma_cache_maint_contiguous(struct page *page, unsigned long offset,
 	size_t size, int direction)
@@ -474,7 +487,7 @@ static void dma_cache_maint_contiguous(struct page *page, unsigned long offset,
 	outer_op(paddr, paddr + size);
 }
 
-void dma_cache_maint_page(struct page *page, unsigned long offset,
+static void dma_cache_maint_page(struct page *page, unsigned long offset,
 	size_t size, int dir)
 {
 	/*
@@ -499,7 +512,20 @@ void dma_cache_maint_page(struct page *page, unsigned long offset,
 		left -= len;
 	} while (left);
 }
-EXPORT_SYMBOL(dma_cache_maint_page);
+
+void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	dma_cache_maint_page(page, off, size, dir);
+}
+EXPORT_SYMBOL(___dma_page_cpu_to_dev);
+
+void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	/* nothing to do */
+}
+EXPORT_SYMBOL(___dma_page_dev_to_cpu);
 
 /**
  * dma_map_sg - map a set of SG buffers for streaming mode DMA
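The section cuts off inside the dma_map_sg() kernel-doc. For context, a hypothetical caller of the scatterlist API whose per-segment ownership transitions these helpers ultimately implement (mydev_write_desc is a made-up stand-in for programming a hardware descriptor, not part of this patch):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

/* hypothetical: would write one hardware DMA descriptor */
static void mydev_write_desc(int i, dma_addr_t addr, unsigned int len)
{
}

static int mydev_map_and_queue(struct device *dev, struct scatterlist *sgl,
	int nents)
{
	struct scatterlist *sg;
	int i, mapped;

	/* CPU -> device for every segment; entries may be coalesced */
	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (mapped == 0)
		return -ENOMEM;

	for_each_sg(sgl, sg, mapped, i)
		mydev_write_desc(i, sg_dma_address(sg), sg_dma_len(sg));

	return mapped;
}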