author     Russell King <rmk@dyn-67.arm.linux.org.uk>    2009-03-12 13:03:48 -0400
committer  Russell King <rmk+kernel@arm.linux.org.uk>    2009-03-12 19:09:09 -0400
commit     1522ac3ec95ff0230e7aa516f86b674fdf72866c (patch)
tree       77444039536e70b3e9fbb38f686104cb5054aba3 /arch/arm/mm/dma-mapping.c
parent     305b07680f6c6a7e59f996c5bd85f009caff5bb1 (diff)
[ARM] Fix virtual to physical translation macro corner cases
The current use of these macros works well when the conversion is
entirely linear.  In this case, we can be assured that the following
holds true:

	__va(p + s) - s = __va(p)

However, this is not always the case, especially when there is a
non-linear conversion (eg, when there is a 3.5GB hole in memory.)  In
this case, if 's' is the size of the region (eg, PAGE_SIZE) and 'p' is
the final page, the above is most definitely not true.

So, we must ensure that __va() and __pa() are only used with valid
kernel direct mapped RAM addresses.  This patch tweaks the code to
achieve this.

Tested-by: Charles Moschel <fred99@carolina.rr.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
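To make the corner case concrete, here is a minimal userspace sketch.
toy_pa() and the two-bank layout are invented stand-ins for __pa() and a
memory hole, not kernel code: translating the one-past-the-end address
lands on the far side of the hole, while adding the size to the
translated start gives the intended physical end of the range.

#include <stdio.h>

/*
 * Toy non-linear virt->phys map (hypothetical layout):
 *   bank 0: virt [0x000, 0x800)  -> phys [0x0000, 0x0800)
 *   bank 1: virt [0x800, 0x1000) -> phys [0x2000, 0x2800)
 */
static unsigned long toy_pa(unsigned long va)
{
	return va < 0x800 ? va : va - 0x800 + 0x2000;
}

int main(void)
{
	/* the final 0x100-byte region of bank 0 */
	unsigned long start = 0x700, size = 0x100;

	/* start + size is one past the bank, so it translates across the hole */
	printf("pa(start + size) = 0x%lx\n", toy_pa(start + size)); /* 0x2000 */

	/* offsetting the translated start stays within the correct bank */
	printf("pa(start) + size = 0x%lx\n", toy_pa(start) + size); /* 0x800 */
	return 0;
}

This is exactly why the patch below checks start + size - 1 (the last
byte that is actually part of the region) and computes the outer-cache
range as __pa(start) + size rather than __pa(end).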
Diffstat (limited to 'arch/arm/mm/dma-mapping.c')
-rw-r--r--  arch/arm/mm/dma-mapping.c | 20 ++++++++++++--------
1 file changed, 12 insertions(+), 8 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 310e479309ef..f1ef5613ccd4 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -490,26 +490,30 @@ core_initcall(consistent_init);
  */
 void dma_cache_maint(const void *start, size_t size, int direction)
 {
-	const void *end = start + size;
+	void (*inner_op)(const void *, const void *);
+	void (*outer_op)(unsigned long, unsigned long);
 
-	BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(end - 1));
+	BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(start + size - 1));
 
 	switch (direction) {
 	case DMA_FROM_DEVICE:		/* invalidate only */
-		dmac_inv_range(start, end);
-		outer_inv_range(__pa(start), __pa(end));
+		inner_op = dmac_inv_range;
+		outer_op = outer_inv_range;
 		break;
 	case DMA_TO_DEVICE:		/* writeback only */
-		dmac_clean_range(start, end);
-		outer_clean_range(__pa(start), __pa(end));
+		inner_op = dmac_clean_range;
+		outer_op = outer_clean_range;
 		break;
 	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
-		dmac_flush_range(start, end);
-		outer_flush_range(__pa(start), __pa(end));
+		inner_op = dmac_flush_range;
+		outer_op = outer_flush_range;
 		break;
 	default:
 		BUG();
 	}
+
+	inner_op(start, start + size);
+	outer_op(__pa(start), __pa(start) + size);
 }
 EXPORT_SYMBOL(dma_cache_maint);
 
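For context, a hedged sketch of how this helper is reached in kernels of
this era; the caller below is a reconstruction of the streaming-DMA map
path (sketch_dma_map_single() is an illustrative name, and its details
are assumptions, not part of this commit):

/* Sketch only: modelled on dma_map_single() of this era; exact details
 * may differ. The point is that cpu_addr must be a kernel direct-mapped
 * address, which the tightened BUG_ON() in dma_cache_maint() now checks
 * at both ends of the range. */
static inline dma_addr_t sketch_dma_map_single(struct device *dev,
		void *cpu_addr, size_t size, enum dma_data_direction dir)
{
	if (!arch_is_coherent())
		dma_cache_maint(cpu_addr, size, dir);
	return virt_to_dma(dev, cpu_addr);
}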