about summary refs log tree commit diff stats
path: root/arch/arm/include/asm/dma-mapping.h
diff options
context:
space:
mode:
authorNicolas Pitre <nico@cam.org>2009-03-12 22:52:09 -0400
committerNicolas Pitre <nico@cam.org>2009-03-15 21:01:21 -0400
commit43377453af83b8ff8c1c731da1508bd6b84ebfea (patch)
tree42a55f4d1856ced05d9e21f8619005e8b76694c3 /arch/arm/include/asm/dma-mapping.h
parent3297e760776af18a26bf30046cbaaae2e730c5c2 (diff)
[ARM] introduce dma_cache_maint_page()
This is a helper to be used by the DMA mapping API to handle cache maintenance for memory identified by a page structure instead of a virtual address. Those pages may or may not be highmem pages, and when they're highmem pages, they may or may not be virtually mapped. When they're not mapped then there is no L1 cache to worry about. But even in that case the L2 cache must be processed since unmapped highmem pages can still be L2 cached. Signed-off-by: Nicolas Pitre <nico@marvell.com>
Diffstat (limited to 'arch/arm/include/asm/dma-mapping.h')
-rw-r--r--  arch/arm/include/asm/dma-mapping.h  4
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 22cb14ec3438..59fa762e9c66 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -57,6 +57,8 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
  * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
  */
 extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
+extern void dma_cache_maint_page(struct page *page, unsigned long offset,
+	size_t size, int rw);
 
 /*
  * Return whether the given device DMA address mask can be supported
@@ -316,7 +318,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 	BUG_ON(!valid_dma_direction(dir));
 
 	if (!arch_is_coherent())
-		dma_cache_maint(page_address(page) + offset, size, dir);
+		dma_cache_maint_page(page, offset, size, dir);
 
 	return page_to_dma(dev, page) + offset;
 }