about summary refs log tree commit diff stats
path: root/arch/parisc/include/asm/cacheflush.h
diff options
context:
space:
mode:
Diffstat (limited to 'arch/parisc/include/asm/cacheflush.h')
-rw-r--r--  arch/parisc/include/asm/cacheflush.h  44
1 file changed, 29 insertions, 15 deletions
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index dba11aedce1b..da601dd34c05 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -3,6 +3,7 @@
3 3
4#include <linux/mm.h> 4#include <linux/mm.h>
5#include <linux/uaccess.h> 5#include <linux/uaccess.h>
6#include <asm/tlbflush.h>
6 7
7/* The usual comment is "Caches aren't brain-dead on the <architecture>". 8/* The usual comment is "Caches aren't brain-dead on the <architecture>".
8 * Unfortunately, that doesn't apply to PA-RISC. */ 9 * Unfortunately, that doesn't apply to PA-RISC. */
@@ -26,8 +27,6 @@ void flush_user_dcache_range_asm(unsigned long, unsigned long);
26void flush_kernel_dcache_range_asm(unsigned long, unsigned long); 27void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
27void flush_kernel_dcache_page_asm(void *); 28void flush_kernel_dcache_page_asm(void *);
28void flush_kernel_icache_page(void *); 29void flush_kernel_icache_page(void *);
29void flush_user_dcache_page(unsigned long);
30void flush_user_icache_page(unsigned long);
31void flush_user_dcache_range(unsigned long, unsigned long); 30void flush_user_dcache_range(unsigned long, unsigned long);
32void flush_user_icache_range(unsigned long, unsigned long); 31void flush_user_icache_range(unsigned long, unsigned long);
33 32
@@ -37,6 +36,13 @@ void flush_cache_all_local(void);
37void flush_cache_all(void); 36void flush_cache_all(void);
38void flush_cache_mm(struct mm_struct *mm); 37void flush_cache_mm(struct mm_struct *mm);
39 38
39#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
40void flush_kernel_dcache_page_addr(void *addr);
41static inline void flush_kernel_dcache_page(struct page *page)
42{
43 flush_kernel_dcache_page_addr(page_address(page));
44}
45
40#define flush_kernel_dcache_range(start,size) \ 46#define flush_kernel_dcache_range(start,size) \
41 flush_kernel_dcache_range_asm((start), (start)+(size)); 47 flush_kernel_dcache_range_asm((start), (start)+(size));
42/* vmap range flushes and invalidates. Architecturally, we don't need 48/* vmap range flushes and invalidates. Architecturally, we don't need
@@ -50,6 +56,16 @@ static inline void flush_kernel_vmap_range(void *vaddr, int size)
50} 56}
51static inline void invalidate_kernel_vmap_range(void *vaddr, int size) 57static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
52{ 58{
59 unsigned long start = (unsigned long)vaddr;
60 void *cursor = vaddr;
61
62 for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) {
63 struct page *page = vmalloc_to_page(cursor);
64
65 if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
66 flush_kernel_dcache_page(page);
67 }
68 flush_kernel_dcache_range_asm(start, start + size);
53} 69}
54 70
55#define flush_cache_vmap(start, end) flush_cache_all() 71#define flush_cache_vmap(start, end) flush_cache_all()
@@ -90,19 +106,17 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned
90void flush_cache_range(struct vm_area_struct *vma, 106void flush_cache_range(struct vm_area_struct *vma,
91 unsigned long start, unsigned long end); 107 unsigned long start, unsigned long end);
92 108
109/* defined in pacache.S exported in cache.c used by flush_anon_page */
110void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
111
93#define ARCH_HAS_FLUSH_ANON_PAGE 112#define ARCH_HAS_FLUSH_ANON_PAGE
94static inline void 113static inline void
95flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) 114flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
96{ 115{
97 if (PageAnon(page)) 116 if (PageAnon(page)) {
98 flush_user_dcache_page(vmaddr); 117 flush_tlb_page(vma, vmaddr);
99} 118 flush_dcache_page_asm(page_to_phys(page), vmaddr);
100 119 }
101#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
102void flush_kernel_dcache_page_addr(void *addr);
103static inline void flush_kernel_dcache_page(struct page *page)
104{
105 flush_kernel_dcache_page_addr(page_address(page));
106} 120}
107 121
108#ifdef CONFIG_DEBUG_RODATA 122#ifdef CONFIG_DEBUG_RODATA
@@ -126,20 +140,20 @@ static inline void *kmap(struct page *page)
126 140
127#define kunmap(page) kunmap_parisc(page_address(page)) 141#define kunmap(page) kunmap_parisc(page_address(page))
128 142
129static inline void *kmap_atomic(struct page *page, enum km_type idx) 143static inline void *__kmap_atomic(struct page *page)
130{ 144{
131 pagefault_disable(); 145 pagefault_disable();
132 return page_address(page); 146 return page_address(page);
133} 147}
134 148
135static inline void kunmap_atomic_notypecheck(void *addr, enum km_type idx) 149static inline void __kunmap_atomic(void *addr)
136{ 150{
137 kunmap_parisc(addr); 151 kunmap_parisc(addr);
138 pagefault_enable(); 152 pagefault_enable();
139} 153}
140 154
141#define kmap_atomic_prot(page, idx, prot) kmap_atomic(page, idx) 155#define kmap_atomic_prot(page, prot) kmap_atomic(page)
142#define kmap_atomic_pfn(pfn, idx) kmap_atomic(pfn_to_page(pfn), (idx)) 156#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
143#define kmap_atomic_to_page(ptr) virt_to_page(ptr) 157#define kmap_atomic_to_page(ptr) virt_to_page(ptr)
144#endif 158#endif
145 159