diff options
author | Russell King <rmk+kernel@arm.linux.org.uk> | 2009-11-24 11:27:17 -0500 |
---|---|---|
committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2010-02-15 10:22:11 -0500 |
commit | 4ea0d7371e808628d11154b0d44140b70f05b998 (patch) | |
tree | 7673b2fc850475a587144a02c9f568a2831f0c2f /arch/arm/include | |
parent | 18eabe2347ae7a11b3db768695913724166dfb0e (diff) |
ARM: dma-mapping: push buffer ownership down into dma-mapping.c
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Tested-By: Santosh Shilimkar <santosh.shilimkar@ti.com>
Diffstat (limited to 'arch/arm/include')
-rw-r--r-- | arch/arm/include/asm/dma-mapping.h | 39 |
1 file changed, 25 insertions, 14 deletions
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index e850f5c1607b..256ee1c9f51a 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h | |||
@@ -57,46 +57,57 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr) | |||
57 | #endif | 57 | #endif |
58 | 58 | ||
59 | /* | 59 | /* |
60 | * Private support functions: these are not part of the API and are | ||
61 | * liable to change. Drivers must not use these. | ||
62 | */ | ||
63 | extern void dma_cache_maint(const void *kaddr, size_t size, int rw); | ||
64 | extern void dma_cache_maint_page(struct page *page, unsigned long offset, | ||
65 | size_t size, int rw); | ||
66 | |||
67 | /* | ||
68 | * The DMA API is built upon the notion of "buffer ownership". A buffer | 60 | * The DMA API is built upon the notion of "buffer ownership". A buffer |
69 | * is either exclusively owned by the CPU (and therefore may be accessed | 61 | * is either exclusively owned by the CPU (and therefore may be accessed |
70 | * by it) or exclusively owned by the DMA device. These helper functions | 62 | * by it) or exclusively owned by the DMA device. These helper functions |
71 | * represent the transitions between these two ownership states. | 63 | * represent the transitions between these two ownership states. |
72 | * | 64 | * |
73 | * As above, these are private support functions and not part of the API. | 65 | * Note, however, that on later ARMs, this notion does not work due to |
74 | * Drivers must not use these. | 66 | * speculative prefetches. We model our approach on the assumption that |
67 | * the CPU does do speculative prefetches, which means we clean caches | ||
68 | * before transfers and delay cache invalidation until transfer completion. | ||
69 | * | ||
70 | * Private support functions: these are not part of the API and are | ||
71 | * liable to change. Drivers must not use these. | ||
75 | */ | 72 | */ |
76 | static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size, | 73 | static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size, |
77 | enum dma_data_direction dir) | 74 | enum dma_data_direction dir) |
78 | { | 75 | { |
76 | extern void ___dma_single_cpu_to_dev(const void *, size_t, | ||
77 | enum dma_data_direction); | ||
78 | |||
79 | if (!arch_is_coherent()) | 79 | if (!arch_is_coherent()) |
80 | dma_cache_maint(kaddr, size, dir); | 80 | ___dma_single_cpu_to_dev(kaddr, size, dir); |
81 | } | 81 | } |
82 | 82 | ||
83 | static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size, | 83 | static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size, |
84 | enum dma_data_direction dir) | 84 | enum dma_data_direction dir) |
85 | { | 85 | { |
86 | /* nothing to do */ | 86 | extern void ___dma_single_dev_to_cpu(const void *, size_t, |
87 | enum dma_data_direction); | ||
88 | |||
89 | if (!arch_is_coherent()) | ||
90 | ___dma_single_dev_to_cpu(kaddr, size, dir); | ||
87 | } | 91 | } |
88 | 92 | ||
89 | static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off, | 93 | static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off, |
90 | size_t size, enum dma_data_direction dir) | 94 | size_t size, enum dma_data_direction dir) |
91 | { | 95 | { |
96 | extern void ___dma_page_cpu_to_dev(struct page *, unsigned long, | ||
97 | size_t, enum dma_data_direction); | ||
98 | |||
92 | if (!arch_is_coherent()) | 99 | if (!arch_is_coherent()) |
93 | dma_cache_maint_page(page, off, size, dir); | 100 | ___dma_page_cpu_to_dev(page, off, size, dir); |
94 | } | 101 | } |
95 | 102 | ||
96 | static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off, | 103 | static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off, |
97 | size_t size, enum dma_data_direction dir) | 104 | size_t size, enum dma_data_direction dir) |
98 | { | 105 | { |
99 | /* nothing to do */ | 106 | extern void ___dma_page_dev_to_cpu(struct page *, unsigned long, |
107 | size_t, enum dma_data_direction); | ||
108 | |||
109 | if (!arch_is_coherent()) | ||
110 | ___dma_page_dev_to_cpu(page, off, size, dir); | ||
100 | } | 111 | } |
101 | 112 | ||
102 | /* | 113 | /* |