author	Nicolas Pitre <nico@cam.org>	2009-03-12 22:52:09 -0400
committer	Nicolas Pitre <nico@cam.org>	2009-03-15 21:01:21 -0400
commit	43377453af83b8ff8c1c731da1508bd6b84ebfea (patch)
tree	42a55f4d1856ced05d9e21f8619005e8b76694c3 /arch
parent	3297e760776af18a26bf30046cbaaae2e730c5c2 (diff)
[ARM] introduce dma_cache_maint_page()
This is a helper to be used by the DMA mapping API to handle cache maintenance for memory identified by a page structure instead of a virtual address. Those pages may or may not be highmem pages, and when they're highmem pages, they may or may not be virtually mapped. When they're not mapped then there is no L1 cache to worry about. But even in that case the L2 cache must be processed since unmapped highmem pages can still be L2 cached.

Signed-off-by: Nicolas Pitre <nico@marvell.com>
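For orientation only (not part of the commit): a minimal driver-side sketch of the path this helper serves. The device pointer, BUF_LEN and the function name are hypothetical. The point is that dma_map_page() on a non-coherent system now reaches dma_cache_maint_page() instead of dereferencing page_address(), which can be NULL for an unmapped highmem page.

/* Hypothetical usage sketch -- "dev", BUF_LEN and the function name are
 * illustrative only and not part of the patch. */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

#define BUF_LEN 1024

static int example_map_highmem_page(struct device *dev)
{
	/* May come from highmem, i.e. without a permanent kernel mapping. */
	struct page *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
	dma_addr_t handle;

	if (!page)
		return -ENOMEM;

	/*
	 * Non-coherent systems now do the cache maintenance via
	 * dma_cache_maint_page(), so no kernel virtual address of the
	 * page is needed here.
	 */
	handle = dma_map_page(dev, page, 0, BUF_LEN, DMA_FROM_DEVICE);

	/* ... let the device fill the buffer, wait for completion ... */

	dma_unmap_page(dev, handle, BUF_LEN, DMA_FROM_DEVICE);

	/* The CPU can now read the data through a temporary mapping,
	 * e.g. kmap(page) ... kunmap(page). */

	__free_page(page);
	return 0;
}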
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/include/asm/dma-mapping.h	4
-rw-r--r--	arch/arm/include/asm/highmem.h	3
-rw-r--r--	arch/arm/mm/dma-mapping.c	72
3 files changed, 77 insertions, 2 deletions
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 22cb14ec3438..59fa762e9c66 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -57,6 +57,8 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
  * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
  */
 extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
+extern void dma_cache_maint_page(struct page *page, unsigned long offset,
+		size_t size, int rw);
 
 /*
  * Return whether the given device DMA address mask can be supported
@@ -316,7 +318,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 	BUG_ON(!valid_dma_direction(dir));
 
 	if (!arch_is_coherent())
-		dma_cache_maint(page_address(page) + offset, size, dir);
+		dma_cache_maint_page(page, offset, size, dir);
 
 	return page_to_dma(dev, page) + offset;
 }
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 023d5b374544..7f36d00600b4 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -15,7 +15,10 @@
 
 extern pte_t *pkmap_page_table;
 
+#define ARCH_NEEDS_KMAP_HIGH_GET
+
 extern void *kmap_high(struct page *page);
+extern void *kmap_high_get(struct page *page);
 extern void kunmap_high(struct page *page);
 
 extern void *kmap(struct page *page);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index f1ef5613ccd4..510c179b0ac8 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -19,6 +19,7 @@
 #include <linux/dma-mapping.h>
 
 #include <asm/memory.h>
+#include <asm/highmem.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/sizes.h>
@@ -517,6 +518,74 @@ void dma_cache_maint(const void *start, size_t size, int direction)
 }
 EXPORT_SYMBOL(dma_cache_maint);
 
+static void dma_cache_maint_contiguous(struct page *page, unsigned long offset,
+				       size_t size, int direction)
+{
+	void *vaddr;
+	unsigned long paddr;
+	void (*inner_op)(const void *, const void *);
+	void (*outer_op)(unsigned long, unsigned long);
+
+	switch (direction) {
+	case DMA_FROM_DEVICE:		/* invalidate only */
+		inner_op = dmac_inv_range;
+		outer_op = outer_inv_range;
+		break;
+	case DMA_TO_DEVICE:		/* writeback only */
+		inner_op = dmac_clean_range;
+		outer_op = outer_clean_range;
+		break;
+	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
+		inner_op = dmac_flush_range;
+		outer_op = outer_flush_range;
+		break;
+	default:
+		BUG();
+	}
+
+	if (!PageHighMem(page)) {
+		vaddr = page_address(page) + offset;
+		inner_op(vaddr, vaddr + size);
+	} else {
+		vaddr = kmap_high_get(page);
+		if (vaddr) {
+			vaddr += offset;
+			inner_op(vaddr, vaddr + size);
+			kunmap_high(page);
+		}
+	}
+
+	paddr = page_to_phys(page) + offset;
+	outer_op(paddr, paddr + size);
+}
+
+void dma_cache_maint_page(struct page *page, unsigned long offset,
+			  size_t size, int dir)
+{
+	/*
+	 * A single sg entry may refer to multiple physically contiguous
+	 * pages.  But we still need to process highmem pages individually.
+	 * If highmem is not configured then the bulk of this loop gets
+	 * optimized out.
+	 */
+	size_t left = size;
+	do {
+		size_t len = left;
+		if (PageHighMem(page) && len + offset > PAGE_SIZE) {
+			if (offset >= PAGE_SIZE) {
+				page += offset / PAGE_SIZE;
+				offset %= PAGE_SIZE;
+			}
+			len = PAGE_SIZE - offset;
+		}
+		dma_cache_maint_contiguous(page, offset, len, dir);
+		offset = 0;
+		page++;
+		left -= len;
+	} while (left);
+}
+EXPORT_SYMBOL(dma_cache_maint_page);
+
 /**
  * dma_map_sg - map a set of SG buffers for streaming mode DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -614,7 +683,8 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 			continue;
 
 		if (!arch_is_coherent())
-			dma_cache_maint(sg_virt(s), s->length, dir);
+			dma_cache_maint_page(sg_page(s), s->offset,
+					     s->length, dir);
 	}
 }
 EXPORT_SYMBOL(dma_sync_sg_for_device);
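
For illustration only (not part of the commit): a hypothetical scatterlist caller; "example_sync_sg", the device pointer and the allocation order are invented for the sketch. A single sg entry covering several physically contiguous pages is exactly the case the loop in dma_cache_maint_page() splits into per-page operations when highmem pages are involved.

/* Hypothetical caller sketch, not part of the patch. */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>

static int example_sync_sg(struct device *dev)
{
	unsigned int order = 2;			/* 4 physically contiguous pages */
	struct page *pages = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, order);
	struct scatterlist sg;
	int nents;

	if (!pages)
		return -ENOMEM;

	sg_init_table(&sg, 1);
	/* One sg entry spanning all four pages. */
	sg_set_page(&sg, pages, PAGE_SIZE << order, 0);

	nents = dma_map_sg(dev, &sg, 1, DMA_FROM_DEVICE);
	if (!nents) {
		__free_pages(pages, order);
		return -ENOMEM;
	}

	/* ... device writes into the buffer ... */

	dma_sync_sg_for_cpu(dev, &sg, nents, DMA_FROM_DEVICE);
	/* ... CPU examines the data ... */

	/*
	 * Give the buffer back to the device: on non-coherent ARM this path
	 * now uses dma_cache_maint_page(sg_page(s), s->offset, s->length, dir),
	 * processing highmem pages one at a time.
	 */
	dma_sync_sg_for_device(dev, &sg, nents, DMA_FROM_DEVICE);
	/* ... device writes again ... */

	dma_unmap_sg(dev, &sg, nents, DMA_FROM_DEVICE);
	__free_pages(pages, order);
	return 0;
}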