Diffstat (limited to 'include/asm-sh64')

-rw-r--r--  include/asm-sh64/dma-mapping.h |  6
-rw-r--r--  include/asm-sh64/io.h          | 48
2 files changed, 5 insertions, 49 deletions
diff --git a/include/asm-sh64/dma-mapping.h b/include/asm-sh64/dma-mapping.h
index de4309960207..e661857f98dc 100644
--- a/include/asm-sh64/dma-mapping.h
+++ b/include/asm-sh64/dma-mapping.h
@@ -42,7 +42,11 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 				  enum dma_data_direction dir)
 {
-	dma_cache_wback_inv((unsigned long)vaddr, size);
+	unsigned long s = (unsigned long) vaddr & L1_CACHE_ALIGN_MASK;
+	unsigned long e = ((unsigned long) vaddr + size) & L1_CACHE_ALIGN_MASK;
+
+	for (; s <= e; s += L1_CACHE_BYTES)
+		asm volatile ("ocbp %0, 0" : : "r" (s));
 }
 
 static inline dma_addr_t dma_map_single(struct device *dev,
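For readers unfamiliar with SH-5 cache management: the new dma_cache_sync() body above walks the buffer one L1 cache line at a time and issues an ocbp (operand cache block purge, i.e. write back then invalidate) for each line. Below is a minimal user-space sketch of that line-walking pattern only; the 32-byte line size, the alignment-mask definition and the purge_line() helper are illustrative assumptions, not taken from the kernel headers.

#include <stddef.h>
#include <stdint.h>

/* Illustrative values only; the real ones come from the sh64 cache header. */
#define L1_CACHE_BYTES      32UL
#define L1_CACHE_ALIGN_MASK (~(L1_CACHE_BYTES - 1))

/* Stand-in for: asm volatile ("ocbp %0, 0" : : "r" (line)); */
static void purge_line(uintptr_t line)
{
	(void)line;
}

/* Same pattern as the new dma_cache_sync() body: round the start address
 * down to a line boundary, round the end address down as well, and purge
 * every line up to and including the one holding the end address. */
static void cache_purge_range(void *vaddr, size_t size)
{
	uintptr_t s = (uintptr_t)vaddr & L1_CACHE_ALIGN_MASK;
	uintptr_t e = ((uintptr_t)vaddr + size) & L1_CACHE_ALIGN_MASK;

	for (; s <= e; s += L1_CACHE_BYTES)
		purge_line(s);
}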
diff --git a/include/asm-sh64/io.h b/include/asm-sh64/io.h
index 3de3ad99f457..7bd7314d38c2 100644
--- a/include/asm-sh64/io.h
+++ b/include/asm-sh64/io.h
@@ -182,54 +182,6 @@ unsigned long onchip_remap(unsigned long addr, unsigned long size, const char* n
 extern void onchip_unmap(unsigned long vaddr);
 
 /*
- * The caches on some architectures aren't dma-coherent and have need to
- * handle this in software. There are three types of operations that
- * can be applied to dma buffers.
- *
- *  - dma_cache_wback_inv(start, size) makes caches and RAM coherent by
- *    writing the content of the caches back to memory, if necessary.
- *    The function also invalidates the affected part of the caches as
- *    necessary before DMA transfers from outside to memory.
- *  - dma_cache_inv(start, size) invalidates the affected parts of the
- *    caches. Dirty lines of the caches may be written back or simply
- *    be discarded. This operation is necessary before dma operations
- *    to the memory.
- *  - dma_cache_wback(start, size) writes back any dirty lines but does
- *    not invalidate the cache. This can be used before DMA reads from
- *    memory,
- */
-
-static __inline__ void dma_cache_wback_inv (unsigned long start, unsigned long size)
-{
-	unsigned long s = start & L1_CACHE_ALIGN_MASK;
-	unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;
-
-	for (; s <= e; s += L1_CACHE_BYTES)
-		asm volatile ("ocbp %0, 0" : : "r" (s));
-}
-
-static __inline__ void dma_cache_inv (unsigned long start, unsigned long size)
-{
-	// Note that caller has to be careful with overzealous
-	// invalidation should there be partial cache lines at the extremities
-	// of the specified range
-	unsigned long s = start & L1_CACHE_ALIGN_MASK;
-	unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;
-
-	for (; s <= e; s += L1_CACHE_BYTES)
-		asm volatile ("ocbi %0, 0" : : "r" (s));
-}
-
-static __inline__ void dma_cache_wback (unsigned long start, unsigned long size)
-{
-	unsigned long s = start & L1_CACHE_ALIGN_MASK;
-	unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;
-
-	for (; s <= e; s += L1_CACHE_BYTES)
-		asm volatile ("ocbwb %0, 0" : : "r" (s));
-}
-
-/*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
  * access
  */
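The comment block removed from io.h documents the semantics of the three dropped helpers: dma_cache_wback_inv() purged lines with ocbp, dma_cache_inv() invalidated them with ocbi, and dma_cache_wback() wrote them back with ocbwb. The fragments below are a hedged sketch, not taken from any in-tree driver, of how those semantics map onto calls before DMA in each direction, and of the single consolidated entry point left after this change; they assume the pre-change and post-change asm-sh64 headers respectively.

/* Hypothetical driver fragments, for illustration only.  The old-style
 * helpers come from the pre-change <asm/io.h>; the new-style call uses the
 * dma_cache_sync() prototype shown in the dma-mapping.h hunk above. */

/* Before this change: pick the flush flavour per transfer direction. */
static void old_style_device_read(void *buf, size_t len)
{
	/* Device will read 'buf' from RAM: write dirty lines back first. */
	dma_cache_wback((unsigned long)buf, len);
	/* ... start DMA from memory to device ... */
}

static void old_style_device_write(void *buf, size_t len)
{
	/* Device will write into 'buf': drop stale lines so the CPU
	 * re-reads the freshly DMA'd data from RAM afterwards. */
	dma_cache_inv((unsigned long)buf, len);
	/* ... start DMA from device to memory ... */
}

/* After this change: one entry point; it purges (writes back and then
 * invalidates) the affected lines, which is safe for either direction. */
static void new_style(struct device *dev, void *buf, size_t len,
		      enum dma_data_direction dir)
{
	dma_cache_sync(dev, buf, len, dir);
}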