aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRussell King <rmk@dyn-67.arm.linux.org.uk>2007-10-09 09:17:01 -0400
committerRussell King <rmk+kernel@arm.linux.org.uk>2007-10-12 18:43:45 -0400
commit84aa462e2c2cd1b921f6b8e283f8d41666e02e8e (patch)
tree9fd1eb1df658b44f706c6b1768b335b73774fc4b
parent353ba84acdd551c737ac71577322393fceb969f0 (diff)
[ARM] Rename consistent_sync() as dma_cache_maint()
consistent_sync() is used to handle the cache maintenance issues with
DMA operations.  Since we've now removed the misuse of this function
from the two MTD drivers, rename it to prevent future mis-use.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-rw-r--r--arch/arm/common/dmabounce.c4
-rw-r--r--arch/arm/mm/consistent.c4
-rw-r--r--include/asm-arm/dma-mapping.h14
3 files changed, 11 insertions, 11 deletions
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index b36b1e8a105d..44ab0dad4035 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -263,7 +263,7 @@ map_single(struct device *dev, void *ptr, size_t size,
 		 * We don't need to sync the DMA buffer since
 		 * it was allocated via the coherent allocators.
 		 */
-		consistent_sync(ptr, size, dir);
+		dma_cache_maint(ptr, size, dir);
 	}

 	return dma_addr;
@@ -383,7 +383,7 @@ sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		 * via the coherent allocators.
 		 */
 	} else {
-		consistent_sync(dma_to_virt(dev, dma_addr), size, dir);
+		dma_cache_maint(dma_to_virt(dev, dma_addr), size, dir);
 	}
 }

diff --git a/arch/arm/mm/consistent.c b/arch/arm/mm/consistent.c
index 1f9f94f9af4b..cefdf2f9f26e 100644
--- a/arch/arm/mm/consistent.c
+++ b/arch/arm/mm/consistent.c
@@ -481,7 +481,7 @@ core_initcall(consistent_init);
  * platforms with CONFIG_DMABOUNCE.
  * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
  */
-void consistent_sync(const void *start, size_t size, int direction)
+void dma_cache_maint(const void *start, size_t size, int direction)
 {
 	const void *end = start + size;

@@ -504,4 +504,4 @@ void consistent_sync(const void *start, size_t size, int direction)
 		BUG();
 	}
 }
-EXPORT_SYMBOL(consistent_sync);
+EXPORT_SYMBOL(dma_cache_maint);
diff --git a/include/asm-arm/dma-mapping.h b/include/asm-arm/dma-mapping.h
index c8b5d0db0cf0..678134bf2475 100644
--- a/include/asm-arm/dma-mapping.h
+++ b/include/asm-arm/dma-mapping.h
@@ -17,7 +17,7 @@
  * platforms with CONFIG_DMABOUNCE.
  * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
  */
-extern void consistent_sync(const void *kaddr, size_t size, int rw);
+extern void dma_cache_maint(const void *kaddr, size_t size, int rw);

 /*
  * Return whether the given device DMA address mask can be supported
@@ -165,7 +165,7 @@ dma_map_single(struct device *dev, void *cpu_addr, size_t size,
 	       enum dma_data_direction dir)
 {
 	if (!arch_is_coherent())
-		consistent_sync(cpu_addr, size, dir);
+		dma_cache_maint(cpu_addr, size, dir);

 	return virt_to_dma(dev, (unsigned long)cpu_addr);
 }
@@ -278,7 +278,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		virt = page_address(sg->page) + sg->offset;

 		if (!arch_is_coherent())
-			consistent_sync(virt, sg->length, dir);
+			dma_cache_maint(virt, sg->length, dir);
 	}

 	return nents;
@@ -334,7 +334,7 @@ dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
 			 enum dma_data_direction dir)
 {
 	if (!arch_is_coherent())
-		consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
+		dma_cache_maint((void *)dma_to_virt(dev, handle), size, dir);
 }

 static inline void
@@ -342,7 +342,7 @@ dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
 			    enum dma_data_direction dir)
 {
 	if (!arch_is_coherent())
-		consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
+		dma_cache_maint((void *)dma_to_virt(dev, handle), size, dir);
 }
 #else
 extern void dma_sync_single_for_cpu(struct device*, dma_addr_t, size_t, enum dma_data_direction);
@@ -373,7 +373,7 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
 	for (i = 0; i < nents; i++, sg++) {
 		char *virt = page_address(sg->page) + sg->offset;
 		if (!arch_is_coherent())
-			consistent_sync(virt, sg->length, dir);
+			dma_cache_maint(virt, sg->length, dir);
 	}
 }

@@ -386,7 +386,7 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
 	for (i = 0; i < nents; i++, sg++) {
 		char *virt = page_address(sg->page) + sg->offset;
 		if (!arch_is_coherent())
-			consistent_sync(virt, sg->length, dir);
+			dma_cache_maint(virt, sg->length, dir);
 	}
 }
 #else