path: root/arch/parisc/kernel/cache.c
author		James Bottomley <James.Bottomley@HansenPartnership.com>	2010-12-22 11:22:11 -0500
committer	James Bottomley <James.Bottomley@suse.de>	2011-01-15 09:44:40 -0500
commit		f311847c2fcebd81912e2f0caf8a461dec28db41 (patch)
tree		802ef2aa01bd0d662e60412366a40c827fd3e875	/arch/parisc/kernel/cache.c
parent		38567333a6dabd0f2b4150e9fb6dd8e3ba2985e5 (diff)
parisc: flush pages through tmpalias space
The kernel has an 8M tmpalias space (originally designed for copying and clearing pages, but now used only for clearing). The idea is to place zeros into the cache above a physical page rather than into the physical page itself, and then flush the cache, because often the zeros end up being replaced quickly anyway.

We can also use the tmpalias space for flushing a page. The difference here is that we have to do tmpalias processing in the non-access data and instruction traps. The principle is the same: as long as we know the physical address and have a virtual address congruent to the real one, the flush will be effective.

In order to use the tmpalias space, the icache miss path has to be enhanced to check for the alias region, so that the fic instruction is effective.

Signed-off-by: James Bottomley <James.Bottomley@suse.de>
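As a minimal sketch of the congruence idea described above (illustrative only: the alias window base, the 4 MB SHMLBA modulus, and the helper name below are assumptions for the sketch, not constants taken from this patch), a flush address inside the tmpalias window could be derived like this:

#include <stdint.h>

/* Assumed values for the sketch: a fixed kernel alias window base and
 * a 4 MB cache-congruence modulus (SHMLBA on parisc); neither value is
 * taken from the patch itself. */
#define SKETCH_SHMLBA		0x00400000UL
#define SKETCH_TMPALIAS_BASE	0xf4000000UL

/* Pick the address inside the alias window that is congruent
 * (mod SHMLBA) to the user virtual address: it indexes the same cache
 * lines as the user mapping of the physical page, so a flush through
 * it is as effective as a flush of the real mapping. */
static inline uintptr_t sketch_tmpalias_addr(uintptr_t user_vaddr)
{
	return SKETCH_TMPALIAS_BASE | (user_vaddr & (SKETCH_SHMLBA - 1));
}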
Diffstat (limited to 'arch/parisc/kernel/cache.c')
-rw-r--r--	arch/parisc/kernel/cache.c	109
1 file changed, 17 insertions(+), 92 deletions(-)
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index d054f3da3ff5..3f11331c2775 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -27,12 +27,17 @@
 #include <asm/pgalloc.h>
 #include <asm/processor.h>
 #include <asm/sections.h>
+#include <asm/shmparam.h>
 
 int split_tlb __read_mostly;
 int dcache_stride __read_mostly;
 int icache_stride __read_mostly;
 EXPORT_SYMBOL(dcache_stride);
 
+void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
+EXPORT_SYMBOL(flush_dcache_page_asm);
+void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
+
 
 /* On some machines (e.g. ones with the Merced bus), there can be
  * only a single PxTLB broadcast at a time; this must be guaranteed
@@ -259,81 +264,13 @@ void disable_sr_hashing(void)
 		panic("SpaceID hashing is still on!\n");
 }
 
-/* Simple function to work out if we have an existing address translation
- * for a user space vma. */
-static inline int translation_exists(struct vm_area_struct *vma,
-			unsigned long addr, unsigned long pfn)
-{
-	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
-	pmd_t *pmd;
-	pte_t pte;
-
-	if(pgd_none(*pgd))
-		return 0;
-
-	pmd = pmd_offset(pgd, addr);
-	if(pmd_none(*pmd) || pmd_bad(*pmd))
-		return 0;
-
-	/* We cannot take the pte lock here: flush_cache_page is usually
-	 * called with pte lock already held.  Whereas flush_dcache_page
-	 * takes flush_dcache_mmap_lock, which is lower in the hierarchy:
-	 * the vma itself is secure, but the pte might come or go racily.
-	 */
-	pte = *pte_offset_map(pmd, addr);
-	/* But pte_unmap() does nothing on this architecture */
-
-	/* Filter out coincidental file entries and swap entries */
-	if (!(pte_val(pte) & (_PAGE_FLUSH|_PAGE_PRESENT)))
-		return 0;
-
-	return pte_pfn(pte) == pfn;
-}
-
-/* Private function to flush a page from the cache of a non-current
- * process.  cr25 contains the Page Directory of the current user
- * process; we're going to hijack both it and the user space %sr3 to
- * temporarily make the non-current process current.  We have to do
- * this because cache flushing may cause a non-access tlb miss which
- * the handlers have to fill in from the pgd of the non-current
- * process. */
 static inline void
-flush_user_cache_page_non_current(struct vm_area_struct *vma,
-				  unsigned long vmaddr)
+__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
+		   unsigned long physaddr)
 {
-	/* save the current process space and pgd */
-	unsigned long space = mfsp(3), pgd = mfctl(25);
-
-	/* we don't mind taking interrupts since they may not
-	 * do anything with user space, but we can't
-	 * be preempted here */
-	preempt_disable();
-
-	/* make us current */
-	mtctl(__pa(vma->vm_mm->pgd), 25);
-	mtsp(vma->vm_mm->context, 3);
-
-	flush_user_dcache_page(vmaddr);
-	if(vma->vm_flags & VM_EXEC)
-		flush_user_icache_page(vmaddr);
-
-	/* put the old current process back */
-	mtsp(space, 3);
-	mtctl(pgd, 25);
-	preempt_enable();
-}
-
-
-static inline void
-__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
-{
-	if (likely(vma->vm_mm->context == mfsp(3))) {
-		flush_user_dcache_page(vmaddr);
-		if (vma->vm_flags & VM_EXEC)
-			flush_user_icache_page(vmaddr);
-	} else {
-		flush_user_cache_page_non_current(vma, vmaddr);
-	}
+	flush_dcache_page_asm(physaddr, vmaddr);
+	if (vma->vm_flags & VM_EXEC)
+		flush_icache_page_asm(physaddr, vmaddr);
 }
 
 void flush_dcache_page(struct page *page)
@@ -342,10 +279,8 @@ void flush_dcache_page(struct page *page)
 	struct vm_area_struct *mpnt;
 	struct prio_tree_iter iter;
 	unsigned long offset;
-	unsigned long addr;
+	unsigned long addr, old_addr = 0;
 	pgoff_t pgoff;
-	unsigned long pfn = page_to_pfn(page);
-
 
 	if (mapping && !mapping_mapped(mapping)) {
 		set_bit(PG_dcache_dirty, &page->flags);
@@ -369,20 +304,11 @@ void flush_dcache_page(struct page *page)
 		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
 		addr = mpnt->vm_start + offset;
 
-		/* Flush instructions produce non access tlb misses.
-		 * On PA, we nullify these instructions rather than
-		 * taking a page fault if the pte doesn't exist.
-		 * This is just for speed. If the page translation
-		 * isn't there, there's no point exciting the
-		 * nadtlb handler into a nullification frenzy.
-		 *
-		 * Make sure we really have this page: the private
-		 * mappings may cover this area but have COW'd this
-		 * particular page.
-		 */
-		if (translation_exists(mpnt, addr, pfn)) {
-			__flush_cache_page(mpnt, addr);
-			break;
+		if (old_addr == 0 || (old_addr & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) {
+			__flush_cache_page(mpnt, addr, page_to_phys(page));
+			if (old_addr)
+				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n", old_addr, addr, mpnt->vm_file ? mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
+			old_addr = addr;
 		}
 	}
 	flush_dcache_mmap_unlock(mapping);
@@ -573,7 +499,6 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
 {
 	BUG_ON(!vma->vm_mm->context);
 
-	if (likely(translation_exists(vma, vmaddr, pfn)))
-		__flush_cache_page(vma, vmaddr);
+	__flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn)));
 
 }
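For reference, the rewritten flush_dcache_page() loop above only issues a flush for the first mapping it sees in each cache congruence class. A standalone sketch of the equivalence test it relies on (the 4 MB SHMLBA value is an assumption here, as is the helper name):

#include <stdbool.h>
#include <stdint.h>

#define SKETCH_SHMLBA	0x00400000UL	/* assumed congruence modulus */

/* Two virtual mappings of the same physical page are equivalent
 * aliases (they land on the same cache lines, so flushing one view
 * flushes both) exactly when their addresses agree modulo SHMLBA. */
static bool sketch_aliases_equivalent(uintptr_t a, uintptr_t b)
{
	return (a & (SKETCH_SHMLBA - 1)) == (b & (SKETCH_SHMLBA - 1));
}

This mirrors the (old_addr & (SHMLBA - 1)) != (addr & (SHMLBA - 1)) test in the hunk above: an inequivalent alias is still flushed, and is additionally reported via the INEQUIVALENT ALIASES printk, because it breaks the one-flush-per-congruence-class assumption.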