 arch/parisc/include/asm/cacheflush.h |  5 ++++-
 arch/parisc/kernel/cache.c           | 13 ++++++++++++-
 2 files changed, 16 insertions(+), 2 deletions(-)
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index d18328b3f938..da601dd34c05 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -3,6 +3,7 @@
 
 #include <linux/mm.h>
 #include <linux/uaccess.h>
+#include <asm/tlbflush.h>
 
 /* The usual comment is "Caches aren't brain-dead on the <architecture>".
  * Unfortunately, that doesn't apply to PA-RISC. */
@@ -112,8 +113,10 @@ void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
 static inline void
 flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
 {
-	if (PageAnon(page))
+	if (PageAnon(page)) {
+		flush_tlb_page(vma, vmaddr);
 		flush_dcache_page_asm(page_to_phys(page), vmaddr);
+	}
 }
 
 #ifdef CONFIG_DEBUG_RODATA
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 3f11331c2775..83335f3da5fc 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -304,10 +304,20 @@ void flush_dcache_page(struct page *page)
 		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
 		addr = mpnt->vm_start + offset;
 
+		/* The TLB is the engine of coherence on parisc: The
+		 * CPU is entitled to speculate any page with a TLB
+		 * mapping, so here we kill the mapping then flush the
+		 * page along a special flush only alias mapping.
+		 * This guarantees that the page is no-longer in the
+		 * cache for any process and nor may it be
+		 * speculatively read in (until the user or kernel
+		 * specifically accesses it, of course) */
+
+		flush_tlb_page(mpnt, addr);
 		if (old_addr == 0 || (old_addr & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) {
 			__flush_cache_page(mpnt, addr, page_to_phys(page));
 			if (old_addr)
-				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n", old_addr, addr, mpnt->vm_file ? mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
+				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n", old_addr, addr, mpnt->vm_file ? (char *)mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
 			old_addr = addr;
 		}
 	}
@@ -499,6 +509,7 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
 {
 	BUG_ON(!vma->vm_mm->context);
 
+	flush_tlb_page(vma, vmaddr);
 	__flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn)));
 
 }
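
The hunks above all apply the ordering described in the new comment: purge the TLB entry for the user address first, then flush the page through the flush-only alias mapping, so the CPU can no longer speculatively refill the cache lines while the flush is in progress. The fragment below is only an illustrative restatement of that ordering, not code from this patch; the helper name purge_then_flush_user_page() is hypothetical, while flush_tlb_page(), flush_dcache_page_asm() and page_to_phys() are the interfaces the patch itself uses (compare flush_anon_page() in cacheflush.h).

#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/* Hypothetical helper, for illustration only: it mirrors the
 * purge-then-flush sequence this patch adds to flush_anon_page(). */
static inline void
purge_then_flush_user_page(struct vm_area_struct *vma, struct page *page,
			   unsigned long vmaddr)
{
	/* Step 1: kill the user TLB entry.  With no translation in
	 * place, the CPU may no longer speculatively pull the page's
	 * lines back into the cache while we flush. */
	flush_tlb_page(vma, vmaddr);

	/* Step 2: flush the data cache through the flush-only alias
	 * mapping, addressed via the page's physical address rather
	 * than the (now purged) user mapping. */
	flush_dcache_page_asm(page_to_phys(page), vmaddr);
}

flush_cache_page() and flush_dcache_page() in cache.c follow the same sequence, using __flush_cache_page() instead of flush_dcache_page_asm().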