Diffstat (limited to 'arch/parisc/kernel/cache.c')
-rw-r--r--  arch/parisc/kernel/cache.c | 120
1 file changed, 28 insertions, 92 deletions
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index d054f3da3ff5..83335f3da5fc 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -27,12 +27,17 @@
 #include <asm/pgalloc.h>
 #include <asm/processor.h>
 #include <asm/sections.h>
+#include <asm/shmparam.h>
 
 int split_tlb __read_mostly;
 int dcache_stride __read_mostly;
 int icache_stride __read_mostly;
 EXPORT_SYMBOL(dcache_stride);
 
+void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
+EXPORT_SYMBOL(flush_dcache_page_asm);
+void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
+
 
 /* On some machines (e.g. ones with the Merced bus), there can be
  * only a single PxTLB broadcast at a time; this must be guaranteed
@@ -259,81 +264,13 @@ void disable_sr_hashing(void)
         panic("SpaceID hashing is still on!\n");
 }
 
-/* Simple function to work out if we have an existing address translation
- * for a user space vma. */
-static inline int translation_exists(struct vm_area_struct *vma,
-        unsigned long addr, unsigned long pfn)
-{
-        pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
-        pmd_t *pmd;
-        pte_t pte;
-
-        if(pgd_none(*pgd))
-                return 0;
-
-        pmd = pmd_offset(pgd, addr);
-        if(pmd_none(*pmd) || pmd_bad(*pmd))
-                return 0;
-
-        /* We cannot take the pte lock here: flush_cache_page is usually
-         * called with pte lock already held. Whereas flush_dcache_page
-         * takes flush_dcache_mmap_lock, which is lower in the hierarchy:
-         * the vma itself is secure, but the pte might come or go racily.
-         */
-        pte = *pte_offset_map(pmd, addr);
-        /* But pte_unmap() does nothing on this architecture */
-
-        /* Filter out coincidental file entries and swap entries */
-        if (!(pte_val(pte) & (_PAGE_FLUSH|_PAGE_PRESENT)))
-                return 0;
-
-        return pte_pfn(pte) == pfn;
-}
-
-/* Private function to flush a page from the cache of a non-current
- * process. cr25 contains the Page Directory of the current user
- * process; we're going to hijack both it and the user space %sr3 to
- * temporarily make the non-current process current. We have to do
- * this because cache flushing may cause a non-access tlb miss which
- * the handlers have to fill in from the pgd of the non-current
- * process. */
 static inline void
-flush_user_cache_page_non_current(struct vm_area_struct *vma,
-        unsigned long vmaddr)
+__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
+                   unsigned long physaddr)
 {
-        /* save the current process space and pgd */
-        unsigned long space = mfsp(3), pgd = mfctl(25);
-
-        /* we don't mind taking interrupts since they may not
-         * do anything with user space, but we can't
-         * be preempted here */
-        preempt_disable();
-
-        /* make us current */
-        mtctl(__pa(vma->vm_mm->pgd), 25);
-        mtsp(vma->vm_mm->context, 3);
-
-        flush_user_dcache_page(vmaddr);
-        if(vma->vm_flags & VM_EXEC)
-                flush_user_icache_page(vmaddr);
-
-        /* put the old current process back */
-        mtsp(space, 3);
-        mtctl(pgd, 25);
-        preempt_enable();
-}
-
-
-static inline void
-__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
-{
-        if (likely(vma->vm_mm->context == mfsp(3))) {
-                flush_user_dcache_page(vmaddr);
-                if (vma->vm_flags & VM_EXEC)
-                        flush_user_icache_page(vmaddr);
-        } else {
-                flush_user_cache_page_non_current(vma, vmaddr);
-        }
+        flush_dcache_page_asm(physaddr, vmaddr);
+        if (vma->vm_flags & VM_EXEC)
+                flush_icache_page_asm(physaddr, vmaddr);
 }
 
 void flush_dcache_page(struct page *page)
@@ -342,10 +279,8 @@ void flush_dcache_page(struct page *page)
         struct vm_area_struct *mpnt;
         struct prio_tree_iter iter;
         unsigned long offset;
-        unsigned long addr;
+        unsigned long addr, old_addr = 0;
         pgoff_t pgoff;
-        unsigned long pfn = page_to_pfn(page);
-
 
         if (mapping && !mapping_mapped(mapping)) {
                 set_bit(PG_dcache_dirty, &page->flags);
@@ -369,20 +304,21 @@ void flush_dcache_page(struct page *page)
                 offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
                 addr = mpnt->vm_start + offset;
 
-                /* Flush instructions produce non access tlb misses.
-                 * On PA, we nullify these instructions rather than
-                 * taking a page fault if the pte doesn't exist.
-                 * This is just for speed. If the page translation
-                 * isn't there, there's no point exciting the
-                 * nadtlb handler into a nullification frenzy.
-                 *
-                 * Make sure we really have this page: the private
-                 * mappings may cover this area but have COW'd this
-                 * particular page.
-                 */
-                if (translation_exists(mpnt, addr, pfn)) {
-                        __flush_cache_page(mpnt, addr);
-                        break;
+                /* The TLB is the engine of coherence on parisc: The
+                 * CPU is entitled to speculate any page with a TLB
+                 * mapping, so here we kill the mapping then flush the
+                 * page along a special flush only alias mapping.
+                 * This guarantees that the page is no-longer in the
+                 * cache for any process and nor may it be
+                 * speculatively read in (until the user or kernel
+                 * specifically accesses it, of course) */
+
+                flush_tlb_page(mpnt, addr);
+                if (old_addr == 0 || (old_addr & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) {
+                        __flush_cache_page(mpnt, addr, page_to_phys(page));
+                        if (old_addr)
+                                printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n", old_addr, addr, mpnt->vm_file ? (char *)mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
+                        old_addr = addr;
                 }
         }
         flush_dcache_mmap_unlock(mapping);
@@ -573,7 +509,7 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
 {
         BUG_ON(!vma->vm_mm->context);
 
-        if (likely(translation_exists(vma, vmaddr, pfn)))
-                __flush_cache_page(vma, vmaddr);
+        flush_tlb_page(vma, vmaddr);
+        __flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn)));
 
 }
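
Net effect of the patch, as a condensed C sketch assembled from the '+' lines above. This is not a drop-in file: includes, the flush_dcache_page() changes and the assembly bodies of the new flush_dcache_page_asm()/flush_icache_page_asm() helpers are omitted, and the flush_cache_page() prototype is completed from its use of pfn. Coherence is now driven through the TLB: a caller first purges the translation, then flushes the page through the flush-only tmpalias mapping.

/* Declarations of the new assembly helpers (implemented in pacache.S). */
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);

/* Flush one user page through the flush-only alias mapping. */
static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
                   unsigned long physaddr)
{
        flush_dcache_page_asm(physaddr, vmaddr);
        if (vma->vm_flags & VM_EXEC)    /* I-cache only matters for executable mappings */
                flush_icache_page_asm(physaddr, vmaddr);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
                      unsigned long pfn)
{
        BUG_ON(!vma->vm_mm->context);

        /* Kill the translation first so the CPU can no longer speculate
         * the page in, then flush it via the alias mapping. */
        flush_tlb_page(vma, vmaddr);
        __flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn)));
}

In flush_dcache_page(), the new old_addr/SHMLBA test flushes only one user alias per cache colour: mappings whose addresses agree modulo SHMLBA hit the same cache lines, so the first flush covers them all, while an alias at a different colour is flushed as well and reported with the INEQUIVALENT ALIASES message.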