author	James Bottomley <James.Bottomley@HansenPartnership.com>	2011-01-20 13:54:18 -0500
committer	James Bottomley <James.Bottomley@suse.de>	2011-02-09 12:22:21 -0500
commit	8e1964a98920100f113ad26f78220ea706dbfa2b (patch)
tree	eff1ff22212732ee39eab6919123886aea3fa1d4 /arch/parisc
parent	100b33c8bd8a3235fd0b7948338d6cbb3db3c63d (diff)
[PARISC] fix vmap flush/invalidate
On parisc, we never implemented invalidate_kernel_vmap_range() because it was unnecessary for the xfs use case. However, we do need to implement an invalidate for the opposite use case (which occurred in a recent NFS change) where the user wants to read through the vmap range and write via the kernel address.

There's an additional complexity to this in that if the page has no userspace mappings, it might have dirty cache lines in the kernel (indicated by the PG_dcache_dirty bit). In order to get full coherency, we need to flush these pages through the kernel mapping before invalidating the vmap range.

Signed-off-by: James Bottomley <James.Bottomley@suse.de>
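To make the use case concrete, here is a minimal caller-side sketch; the 'pages', 'nr' and 'alias' names are invented for illustration and are not from the patch. Data is written via the per-page kernel addresses (e.g. by DMA or a memcpy() to page_address()) and then read back through a vmap() alias of the same pages, so the alias must be invalidated before the reads:

	struct page **pages;	/* hypothetical: pages backing the buffer */
	unsigned int nr;	/* hypothetical: number of pages */
	void *alias = vmap(pages, nr, VM_MAP, PAGE_KERNEL);

	/* ... data is written via page_address(pages[i]) or by DMA ... */

	/* drop stale cache lines held for the alias before reading it */
	invalidate_kernel_vmap_range(alias, nr << PAGE_SHIFT);

	/* reads through 'alias' now observe the kernel-address writes */
	vunmap(alias);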
Diffstat (limited to 'arch/parisc')
-rw-r--r--	arch/parisc/include/asm/cacheflush.h	24
1 file changed, 17 insertions(+), 7 deletions(-)
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index f388a85bba11..7344e1d304af 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -37,6 +37,13 @@ void flush_cache_all_local(void);
 void flush_cache_all(void);
 void flush_cache_mm(struct mm_struct *mm);
 
+#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
+void flush_kernel_dcache_page_addr(void *addr);
+static inline void flush_kernel_dcache_page(struct page *page)
+{
+	flush_kernel_dcache_page_addr(page_address(page));
+}
+
 #define flush_kernel_dcache_range(start,size) \
 	flush_kernel_dcache_range_asm((start), (start)+(size));
 /* vmap range flushes and invalidates.  Architecturally, we don't need
@@ -50,6 +57,16 @@ static inline void flush_kernel_vmap_range(void *vaddr, int size)
 }
 static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
 {
+	unsigned long start = (unsigned long)vaddr;
+	void *cursor = vaddr;
+
+	for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) {
+		struct page *page = vmalloc_to_page(cursor);
+
+		if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+			flush_kernel_dcache_page(page);
+	}
+	flush_kernel_dcache_range_asm(start, start + size);
 }
 
 #define flush_cache_vmap(start, end)		flush_cache_all()
@@ -98,13 +115,6 @@ flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vma
 	flush_user_dcache_page(vmaddr);
 }
 
-#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
-void flush_kernel_dcache_page_addr(void *addr);
-static inline void flush_kernel_dcache_page(struct page *page)
-{
-	flush_kernel_dcache_page_addr(page_address(page));
-}
-
 #ifdef CONFIG_DEBUG_RODATA
 void mark_rodata_ro(void);
 #endif
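Note on the hunk layout: the ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE block in the first hunk is not new code; it is the block removed by the last hunk, moved earlier in the header so that flush_kernel_dcache_page() is in scope for the new inline invalidate_kernel_vmap_range() that calls it. For the opposite data direction (write through the vmap alias, then read via the kernel page addresses), the pre-existing flush_kernel_vmap_range() shown in the second hunk's context is the matching call; a sketch of that pairing, using the same invented names as above:

	/* hypothetical write-side counterpart; 'alias' and 'nr' as above */
	memset(alias, 0, nr << PAGE_SHIFT);	/* write through the alias */
	flush_kernel_vmap_range(alias, nr << PAGE_SHIFT);
	/* reads via page_address(pages[i]) now see the zeroes */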