author		Russell King <rmk@dyn-67.arm.linux.org.uk>	2008-09-29 14:50:59 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2008-09-30 06:01:36 -0400
commit		309dbbabee7b19e003e1ba4b98f43d28f390a84e (patch)
tree		ba748b84c0573f5eb151a581e333b95010576521
parent		0e18b5d7c6339311f1e32e7b186ae3556c5b6d33 (diff)
[ARM] dma: don't touch cache on dma_*_for_cpu()
As per the dma_unmap_* calls, we don't touch the cache when a DMA buffer transitions from device to CPU ownership. Presently, no problems have been identified with speculative cache prefetching, which in itself is a new feature in later architectures. We may have to revisit the DMA API later for these architectures anyway.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
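To make the ownership transitions concrete, here is a minimal, hedged sketch of the driver-side pattern the dma_sync_single_for_cpu()/dma_sync_single_for_device() pair implements; example_rx_poll(), dev, buf, buf_dma and BUF_SIZE are illustrative assumptions, not part of this patch:

/*
 * Hedged sketch (not from the patch): the buffer-ownership protocol that
 * the streaming DMA sync calls implement for a device writing into memory.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>

#define BUF_SIZE 2048	/* illustrative buffer size */

static void example_rx_poll(struct device *dev, void *buf, dma_addr_t buf_dma)
{
	/*
	 * Device -> CPU ownership transition: after this call the CPU may
	 * read the buffer.  With this patch, ARM no longer performs cache
	 * maintenance here; the invalidate was already done when the buffer
	 * was handed to the device.
	 */
	dma_sync_single_for_cpu(dev, buf_dma, BUF_SIZE, DMA_FROM_DEVICE);

	/* ... CPU inspects the received data in buf ... */

	/*
	 * CPU -> device ownership transition: any required cache maintenance
	 * happens here, before the device may DMA into the buffer again.
	 */
	dma_sync_single_for_device(dev, buf_dma, BUF_SIZE, DMA_FROM_DEVICE);
}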
-rw-r--r--	arch/arm/include/asm/dma-mapping.h	6
-rw-r--r--	arch/arm/mm/dma-mapping.c		8
2 files changed, 3 insertions(+), 11 deletions(-)
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 2544a087c213..ad62020763f1 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -376,11 +376,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
 {
 	BUG_ON(!valid_dma_direction(dir));
 
-	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
-		return;
-
-	if (!arch_is_coherent())
-		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
+	dmabounce_sync_for_cpu(dev, handle, offset, size, dir);
 }
 
 static inline void dma_sync_single_range_for_device(struct device *dev,
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 0e28cf33f7dd..67960017dc8f 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -585,12 +585,8 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 	int i;
 
 	for_each_sg(sg, s, nents, i) {
-		if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
-					    sg_dma_len(s), dir))
-			continue;
-
-		if (!arch_is_coherent())
-			dma_cache_maint(sg_virt(s), s->length, dir);
+		dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
+				       sg_dma_len(s), dir);
 	}
 }
 EXPORT_SYMBOL(dma_sync_sg_for_cpu);
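For reference, a hedged sketch of how a driver would exercise the dma_sync_sg_for_cpu() path changed above; example_process_sg() and its arguments are assumptions made for illustration, not code from this patch:

/*
 * Hedged sketch: scatterlist variant of the same ownership protocol.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/printk.h>
#include <linux/scatterlist.h>

static void example_process_sg(struct device *dev, struct scatterlist *sgl,
			       int nents)
{
	struct scatterlist *sg;
	int i;

	/* Device -> CPU: make the DMA'd data visible to the CPU. */
	dma_sync_sg_for_cpu(dev, sgl, nents, DMA_FROM_DEVICE);

	for_each_sg(sgl, sg, nents, i) {
		/* CPU reads each segment via its kernel virtual address. */
		pr_debug("segment %d: %u bytes at %p\n",
			 i, sg_dma_len(sg), sg_virt(sg));
	}

	/* CPU -> device: hand the buffers back for the next transfer. */
	dma_sync_sg_for_device(dev, sgl, nents, DMA_FROM_DEVICE);
}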