 Documentation/cachetlb.txt           | 24 ++++++++++++++++++++++++
 arch/arm/include/asm/cacheflush.h    | 10 ++++++++++
 arch/parisc/include/asm/cacheflush.h | 12 ++++++++++++
 arch/sh/include/asm/cacheflush.h     |  8 ++++++++
 fs/xfs/linux-2.6/xfs_buf.c           | 30 +++++++++++++++++++++++++++++-
 include/linux/highmem.h              |  6 ++++++
 6 files changed, 89 insertions(+), 1 deletion(-)
diff --git a/Documentation/cachetlb.txt b/Documentation/cachetlb.txt
index da42ab414c48..b231414bb8bc 100644
--- a/Documentation/cachetlb.txt
+++ b/Documentation/cachetlb.txt
@@ -377,3 +377,27 @@ maps this page at its virtual address.
 All the functionality of flush_icache_page can be implemented in
 flush_dcache_page and update_mmu_cache. In 2.7 the hope is to
 remove this interface completely.
+
+The final category of APIs is for I/O to deliberately aliased address
+ranges inside the kernel.  Such aliases are set up by use of the
+vmap/vmalloc API.  Since kernel I/O goes via physical pages, the I/O
+subsystem assumes that the user mapping and kernel offset mapping are
+the only aliases.  This isn't true for vmap aliases, so anything in
+the kernel trying to do I/O to vmap areas must manually manage
+coherency.  It must do this by flushing the vmap range before doing
+I/O and invalidating it after the I/O returns.
+
+  void flush_kernel_vmap_range(void *vaddr, int size)
+       flushes the kernel cache for a given virtual address range in
+       the vmap area.  This is to make sure that any data the kernel
+       modified in the vmap range is made visible to the physical
+       page.  The design is to make this area safe to perform I/O on.
+       Note that this API does *not* also flush the offset map alias
+       of the area.
+
+  void invalidate_kernel_vmap_range(void *vaddr, int size) invalidates
+       the cache for a given virtual address range in the vmap area
+       which prevents the processor from making the cache stale by
+       speculatively reading data while the I/O was occurring to the
+       physical pages.  This is only necessary for data reads into the
+       vmap area.
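
To make the documented contract concrete, here is a minimal sketch of a
kernel-side caller, assuming a hypothetical do_device_io_to_pages() as a
stand-in for the actual I/O submission (the example is illustrative and
not part of this patch):

#include <linux/highmem.h>      /* flush/invalidate_kernel_vmap_range() */
#include <linux/vmalloc.h>      /* vmap()/vunmap() */

/*
 * Illustration only: bracket I/O through a vmap alias as the text
 * above requires, flushing the alias before the I/O starts and
 * invalidating it once the I/O has returned, so that lines the CPU
 * speculated in meanwhile cannot go stale.
 */
static int example_io_via_vmap(struct page **pages, int nr_pages)
{
        void *vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
        int size = nr_pages * PAGE_SIZE;
        int error;

        if (!vaddr)
                return -ENOMEM;

        /* make CPU stores through the alias visible to the physical pages */
        flush_kernel_vmap_range(vaddr, size);

        error = do_device_io_to_pages(pages, nr_pages); /* hypothetical */

        /* drop whatever the CPU pulled into the cache during the transfer */
        invalidate_kernel_vmap_range(vaddr, size);

        vunmap(vaddr);
        return error;
}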
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 8113bb5fb66e..5fe4a2ad7fa3 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -447,6 +447,16 @@ static inline void __flush_icache_all(void)
447 : "r" (0)); 447 : "r" (0));
448#endif 448#endif
449} 449}
450static inline void flush_kernel_vmap_range(void *addr, int size)
451{
452 if ((cache_is_vivt() || cache_is_vipt_aliasing()))
453 __cpuc_flush_dcache_area(addr, (size_t)size);
454}
455static inline void invalidate_kernel_vmap_range(void *addr, int size)
456{
457 if ((cache_is_vivt() || cache_is_vipt_aliasing()))
458 __cpuc_flush_dcache_area(addr, (size_t)size);
459}
450 460
451#define ARCH_HAS_FLUSH_ANON_PAGE 461#define ARCH_HAS_FLUSH_ANON_PAGE
452static inline void flush_anon_page(struct vm_area_struct *vma, 462static inline void flush_anon_page(struct vm_area_struct *vma,
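
(Both ARM helpers are conditional on cache_is_vivt() || cache_is_vipt_aliasing():
on a non-aliasing VIPT or PIPT data cache the vmap alias and the physical page
can never disagree, so the calls reduce to a runtime test that fails. Note also
that ARM implements invalidate as a full flush, a conservative superset of
invalidation.)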
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 7a73b615c23d..477277739da5 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -38,6 +38,18 @@ void flush_cache_mm(struct mm_struct *mm);
 
 #define flush_kernel_dcache_range(start,size) \
 	flush_kernel_dcache_range_asm((start), (start)+(size));
+/* vmap range flushes and invalidates.  Architecturally, we don't need
+ * the invalidate, because the CPU should refuse to speculate once an
+ * area has been flushed, so invalidate is left empty */
+static inline void flush_kernel_vmap_range(void *vaddr, int size)
+{
+	unsigned long start = (unsigned long)vaddr;
+
+	flush_kernel_dcache_range_asm(start, start + size);
+}
+static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
+{
+}
 
 #define flush_cache_vmap(start, end)		flush_cache_all()
 #define flush_cache_vunmap(start, end)		flush_cache_all()
diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h
index dda96eb3e7c0..da3ebec921a7 100644
--- a/arch/sh/include/asm/cacheflush.h
+++ b/arch/sh/include/asm/cacheflush.h
@@ -63,6 +63,14 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
 	if (boot_cpu_data.dcache.n_aliases && PageAnon(page))
 		__flush_anon_page(page, vmaddr);
 }
+static inline void flush_kernel_vmap_range(void *addr, int size)
+{
+	__flush_wback_region(addr, size);
+}
+static inline void invalidate_kernel_vmap_range(void *addr, int size)
+{
+	__flush_invalidate_region(addr, size);
+}
 
 #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
 static inline void flush_kernel_dcache_page(struct page *page)
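
(Unlike the ARM version, sh distinguishes the two operations:
flush_kernel_vmap_range() writes dirty lines back via __flush_wback_region(),
while invalidate_kernel_vmap_range() uses __flush_invalidate_region(), which,
as the name suggests, discards the lines rather than merely writing them back.)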
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 77b8be81c769..6f3ebb634b8b 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -76,6 +76,27 @@ struct workqueue_struct *xfsconvertd_workqueue;
 #define xfs_buf_deallocate(bp) \
 	kmem_zone_free(xfs_buf_zone, (bp));
 
+static inline int
+xfs_buf_is_vmapped(
+	struct xfs_buf	*bp)
+{
+	/*
+	 * Return true if the buffer is vmapped.
+	 *
+	 * The XBF_MAPPED flag is set if the buffer should be mapped, but the
+	 * code is clever enough to know it doesn't have to map a single page,
+	 * so the check has to be both for XBF_MAPPED and bp->b_page_count > 1.
+	 */
+	return (bp->b_flags & XBF_MAPPED) && bp->b_page_count > 1;
+}
+
+static inline int
+xfs_buf_vmap_len(
+	struct xfs_buf	*bp)
+{
+	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
+}
+
 /*
  * Page Region interfaces.
  *
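
(As a quick check of xfs_buf_vmap_len() with made-up numbers: a vmapped buffer
spanning three pages with a 4096-byte PAGE_SIZE and b_offset = 512 yields
(3 * 4096) - 512 = 11776 bytes, i.e. the length from bp->b_addr, which already
points b_offset bytes into the first page, to the end of the last page.)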
@@ -314,7 +335,7 @@ xfs_buf_free(
 	if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
 		uint		i;
 
-		if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
+		if (xfs_buf_is_vmapped(bp))
 			free_address(bp->b_addr - bp->b_offset);
 
 		for (i = 0; i < bp->b_page_count; i++) {
@@ -1107,6 +1128,9 @@ xfs_buf_bio_end_io(
 
 	xfs_buf_ioerror(bp, -error);
 
+	if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
+		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
+
 	do {
 		struct page	*page = bvec->bv_page;
 
@@ -1216,6 +1240,10 @@ next_chunk:
 
 submit_io:
 	if (likely(bio->bi_size)) {
+		if (xfs_buf_is_vmapped(bp)) {
+			flush_kernel_vmap_range(bp->b_addr,
+						xfs_buf_vmap_len(bp));
+		}
 		submit_bio(rw, bio);
 		if (size)
 			goto next_chunk;
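
Note the asymmetry the two xfs hunks above introduce: the flush before
submit_bio() runs for every vmapped buffer, reads included, since on an
aliasing cache a dirty line written back in the middle of the transfer could
clobber freshly DMA'd data; the invalidate runs only in the completion path,
and only for successful reads, because that is the one case where the physical
pages have changed underneath the kernel's vmap alias.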
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index ab2cc20e21a5..74152c08ad07 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -17,6 +17,12 @@ static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page
 static inline void flush_kernel_dcache_page(struct page *page)
 {
 }
+static inline void flush_kernel_vmap_range(void *vaddr, int size)
+{
+}
+static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
+{
+}
 #endif
 
 #include <asm/kmap_types.h>
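
Since these generic stubs are empty, callers such as the xfs code above can use
the two functions unconditionally; only architectures that override them (arm,
parisc and sh in this patch) pay any cost. Judging from the surrounding
context, the stubs sit under the same ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE guard
as the flush_kernel_dcache_page() stub just above them.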