author		Nicolas Pitre <nico@cam.org>	2009-03-12 22:52:09 -0400
committer	Nicolas Pitre <nico@cam.org>	2009-03-15 21:01:21 -0400
commit		43377453af83b8ff8c1c731da1508bd6b84ebfea (patch)
tree		42a55f4d1856ced05d9e21f8619005e8b76694c3 /arch/arm/mm/dma-mapping.c
parent		3297e760776af18a26bf30046cbaaae2e730c5c2 (diff)
[ARM] introduce dma_cache_maint_page()
This is a helper to be used by the DMA mapping API to handle cache
maintenance for memory identified by a page structure instead of a
virtual address. Those pages may or may not be highmem pages, and
when they're highmem pages, they may or may not be virtually mapped.
When they're not mapped then there is no L1 cache to worry about.
But even in that case the L2 cache must be processed since unmapped
highmem pages can still be L2 cached.

Signed-off-by: Nicolas Pitre <nico@marvell.com>
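To make the splitting arithmetic concrete, the following standalone userspace sketch reproduces the loop that dma_cache_maint_page() uses in the diff below. It is illustration only, not kernel code: PAGE_SIZE, the 3000-byte starting offset, and the 10000-byte length are invented example values, and the PageHighMem() check is omitted (the real loop splits only highmem pages and hands a lowmem range to dma_cache_maint_contiguous() in a single call).

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL	/* example value; ARM commonly uses 4K pages */

int main(void)
{
	unsigned long page = 0;		/* index standing in for a struct page pointer */
	unsigned long offset = 3000;	/* sg offset into the first page */
	size_t left = 10000;		/* sg length in bytes */

	do {
		size_t len = left;
		/* Clamp each chunk to the end of the current page, as the
		 * kernel loop does for highmem pages. */
		if (len + offset > PAGE_SIZE) {
			if (offset >= PAGE_SIZE) {
				page += offset / PAGE_SIZE;
				offset %= PAGE_SIZE;
			}
			len = PAGE_SIZE - offset;
		}
		/* The kernel would call dma_cache_maint_contiguous() here. */
		printf("page %lu: offset %lu, len %zu\n", page, offset, len);
		offset = 0;
		page++;
		left -= len;
	} while (left);

	return 0;
}

Running it prints four chunks of 1096, 4096, 4096 and 712 bytes, each confined to a single page and summing to the original 10000 bytes, which is exactly the granularity dma_cache_maint_contiguous() needs in order to kmap one highmem page at a time.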
Diffstat (limited to 'arch/arm/mm/dma-mapping.c')
-rw-r--r--	arch/arm/mm/dma-mapping.c | 72
1 file changed, 71 insertions(+), 1 deletion(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index f1ef5613ccd4..510c179b0ac8 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -19,6 +19,7 @@
 #include <linux/dma-mapping.h>
 
 #include <asm/memory.h>
+#include <asm/highmem.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/sizes.h>
@@ -517,6 +518,74 @@ void dma_cache_maint(const void *start, size_t size, int direction)
 }
 EXPORT_SYMBOL(dma_cache_maint);
 
+static void dma_cache_maint_contiguous(struct page *page, unsigned long offset,
+				       size_t size, int direction)
+{
+	void *vaddr;
+	unsigned long paddr;
+	void (*inner_op)(const void *, const void *);
+	void (*outer_op)(unsigned long, unsigned long);
+
+	switch (direction) {
+	case DMA_FROM_DEVICE:		/* invalidate only */
+		inner_op = dmac_inv_range;
+		outer_op = outer_inv_range;
+		break;
+	case DMA_TO_DEVICE:		/* writeback only */
+		inner_op = dmac_clean_range;
+		outer_op = outer_clean_range;
+		break;
+	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
+		inner_op = dmac_flush_range;
+		outer_op = outer_flush_range;
+		break;
+	default:
+		BUG();
+	}
+
+	if (!PageHighMem(page)) {
+		vaddr = page_address(page) + offset;
+		inner_op(vaddr, vaddr + size);
+	} else {
+		vaddr = kmap_high_get(page);
+		if (vaddr) {
+			vaddr += offset;
+			inner_op(vaddr, vaddr + size);
+			kunmap_high(page);
+		}
+	}
+
+	paddr = page_to_phys(page) + offset;
+	outer_op(paddr, paddr + size);
+}
+
+void dma_cache_maint_page(struct page *page, unsigned long offset,
+			  size_t size, int dir)
+{
+	/*
+	 * A single sg entry may refer to multiple physically contiguous
+	 * pages.  But we still need to process highmem pages individually.
+	 * If highmem is not configured then the bulk of this loop gets
+	 * optimized out.
+	 */
+	size_t left = size;
+	do {
+		size_t len = left;
+		if (PageHighMem(page) && len + offset > PAGE_SIZE) {
+			if (offset >= PAGE_SIZE) {
+				page += offset / PAGE_SIZE;
+				offset %= PAGE_SIZE;
+			}
+			len = PAGE_SIZE - offset;
+		}
+		dma_cache_maint_contiguous(page, offset, len, dir);
+		offset = 0;
+		page++;
+		left -= len;
+	} while (left);
+}
+EXPORT_SYMBOL(dma_cache_maint_page);
+
 /**
  * dma_map_sg - map a set of SG buffers for streaming mode DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -614,7 +683,8 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 			continue;
 
 		if (!arch_is_coherent())
-			dma_cache_maint(sg_virt(s), s->length, dir);
+			dma_cache_maint_page(sg_page(s), s->offset,
+					     s->length, dir);
 	}
 }
 EXPORT_SYMBOL(dma_sync_sg_for_device);
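For context on the dma_sync_sg_for_device() change above: sg_virt(s) expands to page_address(sg_page(s)) + s->offset, and page_address() returns NULL for a highmem page that has no current kernel mapping, so the old virtual-address-based call could not be trusted for highmem scatterlist entries. A minimal userspace model of that failure mode follows; struct fake_page and all of its values are invented for illustration and are not the kernel API.

#include <stdio.h>

/* Invented stand-in for the kernel's struct page, for illustration only. */
struct fake_page {
	int highmem;	/* models PageHighMem(page) */
	void *kvaddr;	/* kernel mapping while kmapped, else NULL */
};

/* Models page_address(): lowmem pages always have a linear-map address,
 * highmem pages have one only while they are kmapped. */
static void *fake_page_address(const struct fake_page *p)
{
	if (!p->highmem)
		return (void *)0x1000;	/* pretend linear-map address */
	return p->kvaddr;
}

int main(void)
{
	struct fake_page unmapped_highmem = { 1, NULL };

	/* What an sg_virt()-style lookup would effectively compute: */
	void *vaddr = fake_page_address(&unmapped_highmem);
	if (!vaddr)
		printf("no kernel mapping: cannot do virtual-address cache "
		       "maintenance; a page-based helper is required\n");
	return 0;
}

Passing sg_page(s) and s->offset instead lets dma_cache_maint_page() decide per page whether an L1 pass is possible via kmap_high_get(), while the L2 pass always proceeds on physical addresses.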