| author | Russell King <rmk@dyn-67.arm.linux.org.uk> | 2007-10-09 09:17:01 -0400 |
|---|---|---|
| committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2007-10-12 18:43:45 -0400 |
| commit | 84aa462e2c2cd1b921f6b8e283f8d41666e02e8e (patch) | |
| tree | 9fd1eb1df658b44f706c6b1768b335b73774fc4b /include/asm-arm/dma-mapping.h | |
| parent | 353ba84acdd551c737ac71577322393fceb969f0 (diff) | |
[ARM] Rename consistent_sync() as dma_cache_maint()
consistent_sync() is used to handle the cache maintenance issues with
DMA operations. Since we've now removed the misuse of this function
from the two MTD drivers, rename it to prevent future misuse.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
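The intended call path is that drivers never invoke dma_cache_maint() directly: they go through the dma-mapping API, which performs the cache maintenance only when the platform is not DMA-coherent. A minimal sketch of that usage follows, with a hypothetical driver function and buffer (example_dma_transfer and its arguments are illustrative, not part of this commit):

```c
#include <linux/device.h>
#include <linux/dma-mapping.h>

/*
 * Illustrative only: hand a kernel buffer to a device for a single
 * outbound transfer.  All cache maintenance happens inside the dma_*
 * calls, which end up in dma_cache_maint() on non-coherent ARM; the
 * driver never calls the maintenance helper itself.
 */
static int example_dma_transfer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* Map for device access; cleans the CPU caches as needed. */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* ... program the device with 'handle' and wait for completion ... */

	/* Release the mapping once the device has finished with it. */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);

	return 0;
}
```

The map/unmap pair is enough when the buffer is handed over once; buffers that stay mapped use the dma_sync_* helpers touched by the hunks below.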
Diffstat (limited to 'include/asm-arm/dma-mapping.h')
-rw-r--r-- | include/asm-arm/dma-mapping.h | 14 |
1 file changed, 7 insertions, 7 deletions
diff --git a/include/asm-arm/dma-mapping.h b/include/asm-arm/dma-mapping.h
index c8b5d0db0cf0..678134bf2475 100644
--- a/include/asm-arm/dma-mapping.h
+++ b/include/asm-arm/dma-mapping.h
@@ -17,7 +17,7 @@
  * platforms with CONFIG_DMABOUNCE.
  * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
  */
-extern void consistent_sync(const void *kaddr, size_t size, int rw);
+extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
 
 /*
  * Return whether the given device DMA address mask can be supported
@@ -165,7 +165,7 @@ dma_map_single(struct device *dev, void *cpu_addr, size_t size,
 		enum dma_data_direction dir)
 {
 	if (!arch_is_coherent())
-		consistent_sync(cpu_addr, size, dir);
+		dma_cache_maint(cpu_addr, size, dir);
 
 	return virt_to_dma(dev, (unsigned long)cpu_addr);
 }
@@ -278,7 +278,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		virt = page_address(sg->page) + sg->offset;
 
 		if (!arch_is_coherent())
-			consistent_sync(virt, sg->length, dir);
+			dma_cache_maint(virt, sg->length, dir);
 	}
 
 	return nents;
@@ -334,7 +334,7 @@ dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
 			enum dma_data_direction dir)
 {
 	if (!arch_is_coherent())
-		consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
+		dma_cache_maint((void *)dma_to_virt(dev, handle), size, dir);
 }
 
 static inline void
@@ -342,7 +342,7 @@ dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
 			enum dma_data_direction dir)
 {
 	if (!arch_is_coherent())
-		consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
+		dma_cache_maint((void *)dma_to_virt(dev, handle), size, dir);
 }
 #else
 extern void dma_sync_single_for_cpu(struct device*, dma_addr_t, size_t, enum dma_data_direction);
@@ -373,7 +373,7 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
 	for (i = 0; i < nents; i++, sg++) {
 		char *virt = page_address(sg->page) + sg->offset;
 		if (!arch_is_coherent())
-			consistent_sync(virt, sg->length, dir);
+			dma_cache_maint(virt, sg->length, dir);
 	}
 }
 
@@ -386,7 +386,7 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
 	for (i = 0; i < nents; i++, sg++) {
 		char *virt = page_address(sg->page) + sg->offset;
 		if (!arch_is_coherent())
-			consistent_sync(virt, sg->length, dir);
+			dma_cache_maint(virt, sg->length, dir);
 	}
 }
 #else