author		James Bottomley <James.Bottomley@suse.de>	2010-01-25 12:42:20 -0500
committer	James Bottomley <James.Bottomley@suse.de>	2010-01-25 12:42:20 -0500
commit		9df5f74194871ebd0e51ef5ad2eca5084acaaaba (patch)
tree		e167b9ec3a7948e0706754de4a303dc018ec9817 /include/linux/highmem.h
parent		6b7b284958d47b77d06745b36bc7f36dab769d9b (diff)
mm: add coherence API for DMA to vmalloc/vmap areas
On Virtually Indexed architectures (which don't do automatic alias
resolution in their caches), we have to flush via the correct
virtual address to prepare pages for DMA. On some architectures
(like arm) we cannot prevent the CPU from doing data move-in along
the alias (and thus returning stale read data), so we have to
introduce not only a flush API to push dirty cache lines out, but also
an invalidate API to kill inconsistent cache lines that may have moved
in before DMA changed the data.
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
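
As a usage illustration (not part of this commit), a hypothetical driver doing
DMA to a vmalloc/vmap'd buffer would be expected to call the new helpers
roughly as sketched below: flush before handing the buffer to the device,
invalidate before the CPU reads data the device wrote. The function names and
surrounding structure are invented for the example; only
flush_kernel_vmap_range() and invalidate_kernel_vmap_range() come from this
patch.

/*
 * Hypothetical driver fragment (not part of this commit) sketching the
 * intended calling convention for the new helpers; buffer setup and the
 * actual DMA mapping/start are assumed to happen elsewhere.
 */
#include <linux/highmem.h>

static void example_prepare_vmap_buffer_for_device(void *vaddr, int size)
{
	/* Push dirty cache lines for the vmap alias out to memory so the
	 * device reads current data, not whatever is still in the cache. */
	flush_kernel_vmap_range(vaddr, size);
	/* ... start DMA to the device ... */
}

static void example_complete_device_to_vmap_buffer(void *vaddr, int size)
{
	/* Kill cache lines the CPU may have speculatively moved in along
	 * the alias while the device was writing, so subsequent reads see
	 * the DMA'd data rather than stale lines. */
	invalidate_kernel_vmap_range(vaddr, size);
	/* ... CPU may now read the buffer through vaddr ... */
}

On architectures that need no such maintenance, these calls resolve to the
empty inline stubs added to include/linux/highmem.h by this patch.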
Diffstat (limited to 'include/linux/highmem.h')
-rw-r--r--	include/linux/highmem.h	6
1 file changed, 6 insertions, 0 deletions
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 211ff4497269..adfe1013b2bd 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -17,6 +17,12 @@ static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page
 static inline void flush_kernel_dcache_page(struct page *page)
 {
 }
+static inline void flush_kernel_vmap_range(void *vaddr, int size)
+{
+}
+static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
+{
+}
 #endif
 
 #include <asm/kmap_types.h>