author		Russell King <rmk+kernel@arm.linux.org.uk>	2009-10-31 12:52:16 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2010-02-15 10:22:25 -0500
commit		2ffe2da3e71652d4f4cae19539b5c78c2a239136 (patch)
tree		1b69404360a47369c858e54643bab6836015ddbd /arch/arm/mm/dma-mapping.c
parent		702b94bff3c50542a6e4ab9a4f4cef093262fe65 (diff)
ARM: dma-mapping: fix for speculative prefetching
ARMv6 and ARMv7 CPUs can perform speculative prefetching, which makes DMA cache coherency handling slightly more interesting. Rather than being able to rely upon the CPU not accessing the DMA buffer until DMA has completed, we now must expect that the cache could be loaded with possibly stale data from the DMA buffer.

Where DMA involves data being transferred to the device, we clean the cache before handing it over for DMA; otherwise we invalidate the buffer to get rid of potential writebacks. On DMA completion, if data was transferred from the device, we invalidate the buffer to get rid of any stale speculative prefetches.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Tested-By: Santosh Shilimkar <santosh.shilimkar@ti.com>
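For context, the ___dma_* helpers changed below are not called by drivers directly; they sit underneath the streaming DMA mapping API (dma_map_*/dma_unmap_*/dma_sync_*). The following is a minimal, hypothetical driver sketch, not part of this patch, with the device, buffer, and function name invented for illustration, showing the path that exercises the new code for a device-to-memory transfer:

/* Hypothetical example: receive data from a device into 'buf'.
 * On ARM, dma_map_single() with DMA_FROM_DEVICE reaches
 * ___dma_single_cpu_to_dev(), which now invalidates the buffer;
 * dma_unmap_single() reaches ___dma_single_dev_to_cpu(), which
 * invalidates again to discard any speculative prefetches made
 * while the DMA was in flight.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int example_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... program the device with 'handle' and wait for the DMA to complete ... */

	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);

	/* only after unmapping is it safe for the CPU to read 'buf' */
	return 0;
}

A driver that keeps a long-lived mapping and reuses the buffer would instead call dma_sync_single_for_device()/dma_sync_single_for_cpu() around each transfer, which go through the same cache maintenance paths.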
Diffstat (limited to 'arch/arm/mm/dma-mapping.c')
-rw-r--r--	arch/arm/mm/dma-mapping.c	68
1 file changed, 30 insertions(+), 38 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index efa8efa33f5e..64daef2173bd 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -404,34 +404,22 @@ EXPORT_SYMBOL(dma_free_coherent);
  * platforms with CONFIG_DMABOUNCE.
  * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
  */
-static void dma_cache_maint(const void *start, size_t size, int direction)
-{
-	void (*outer_op)(unsigned long, unsigned long);
-
-	switch (direction) {
-	case DMA_FROM_DEVICE:		/* invalidate only */
-		outer_op = outer_inv_range;
-		break;
-	case DMA_TO_DEVICE:		/* writeback only */
-		outer_op = outer_clean_range;
-		break;
-	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
-		outer_op = outer_flush_range;
-		break;
-	default:
-		BUG();
-	}
-
-	outer_op(__pa(start), __pa(start) + size);
-}
-
 void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
 	enum dma_data_direction dir)
 {
+	unsigned long paddr;
+
 	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
 
 	dmac_map_area(kaddr, size, dir);
-	dma_cache_maint(kaddr, size, dir);
+
+	paddr = __pa(kaddr);
+	if (dir == DMA_FROM_DEVICE) {
+		outer_inv_range(paddr, paddr + size);
+	} else {
+		outer_clean_range(paddr, paddr + size);
+	}
+	/* FIXME: non-speculating: flush on bidirectional mappings? */
 }
 EXPORT_SYMBOL(___dma_single_cpu_to_dev);
 
@@ -440,6 +428,13 @@ void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
 {
 	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
 
+	/* FIXME: non-speculating: not required */
+	/* don't bother invalidating if DMA to device */
+	if (dir != DMA_TO_DEVICE) {
+		unsigned long paddr = __pa(kaddr);
+		outer_inv_range(paddr, paddr + size);
+	}
+
 	dmac_unmap_area(kaddr, size, dir);
 }
 EXPORT_SYMBOL(___dma_single_dev_to_cpu);
@@ -487,32 +482,29 @@ void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
 		size_t size, enum dma_data_direction dir)
 {
 	unsigned long paddr;
-	void (*outer_op)(unsigned long, unsigned long);
-
-	switch (direction) {
-	case DMA_FROM_DEVICE:		/* invalidate only */
-		outer_op = outer_inv_range;
-		break;
-	case DMA_TO_DEVICE:		/* writeback only */
-		outer_op = outer_clean_range;
-		break;
-	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
-		outer_op = outer_flush_range;
-		break;
-	default:
-		BUG();
-	}
 
 	dma_cache_maint_page(page, off, size, dir, dmac_map_area);
 
 	paddr = page_to_phys(page) + off;
-	outer_op(paddr, paddr + size);
+	if (dir == DMA_FROM_DEVICE) {
+		outer_inv_range(paddr, paddr + size);
+	} else {
+		outer_clean_range(paddr, paddr + size);
+	}
+	/* FIXME: non-speculating: flush on bidirectional mappings? */
 }
 EXPORT_SYMBOL(___dma_page_cpu_to_dev);
 
 void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
 		size_t size, enum dma_data_direction dir)
 {
+	unsigned long paddr = page_to_phys(page) + off;
+
+	/* FIXME: non-speculating: not required */
+	/* don't bother invalidating if DMA to device */
+	if (dir != DMA_TO_DEVICE)
+		outer_inv_range(paddr, paddr + size);
+
 	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
 }
 EXPORT_SYMBOL(___dma_page_dev_to_cpu);