aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJohn David Anglin <dave.anglin@bell.net>2013-02-03 18:02:49 -0500
committerHelge Deller <deller@gmx.de>2013-02-20 16:49:49 -0500
commitcca8e9026041544c0103b3037d8f03c1d2f4ae02 (patch)
tree4b23cc0d2e54414d2ef91509fb66c895651797e4
parent6d2439d9558e259822fb487ec274cc9e362e6a81 (diff)
parisc: fixes and cleanups in page cache flushing (4/4)
CONFIG_PARISC_TMPALIAS enables clear_user_highpage and copy_user_highpage. These are essentially alternative implementations of clear_user_page and copy_user_page. They don't have anything to do with x86 high pages, but they build on that infrastructure to save a few instructions. Read the comment in clear_user_highpage, as it is very important to the implementation. Because of the flush and purge requirements described there, there isn't any gain in using the TMPALIAS/highpage approach.

Signed-off-by: John David Anglin <dave.anglin@bell.net>
Signed-off-by: Helge Deller <deller@gmx.de>
-rw-r--r--arch/parisc/kernel/cache.c64
1 file changed, 64 insertions, 0 deletions
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index ec63de95cbd9..1c61b8245650 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -596,3 +596,67 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
596 __flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn))); 596 __flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn)));
597 597
598} 598}
599
600#ifdef CONFIG_PARISC_TMPALIAS
601
/*
 * Zero a user page through the kernel TMPALIAS mapping instead of the
 * normal clear_user_page path.  Only compiled under CONFIG_PARISC_TMPALIAS.
 */
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *vto;
	unsigned long flags;

	/* Clear using TMPALIAS region.  The page doesn't need to
	   be flushed but the kernel mapping needs to be purged.  */

	/* Old two-argument kmap_atomic API: maps the page and disables
	   pagefaults; the matching unmap is done by hand below. */
	vto = kmap_atomic(page, KM_USER0);

	/* The PA-RISC 2.0 Architecture book states on page F-6:
	   "Before a write-capable translation is enabled, *all*
	   non-equivalently-aliased translations must be removed
	   from the page table and purged from the TLB. (Note
	   that the caches are not required to be flushed at this
	   time.) Before any non-equivalent aliased translation
	   is re-enabled, the virtual address range for the writeable
	   page (the entire page) must be flushed from the cache,
	   and the write-capable translation removed from the page
	   table and purged from the TLB." */

	/* Per the rule above: drop the kernel alias's cache lines and
	   its TLB entry before writing the page through the TMPALIAS
	   mapping. */
	purge_kernel_dcache_page_asm((unsigned long)vto);
	purge_tlb_start(flags);
	pdtlb_kernel(vto);
	purge_tlb_end(flags);
	/* NOTE(review): preemption is disabled around the asm clear,
	   presumably because the TMPALIAS mapping is per-CPU — confirm. */
	preempt_disable();
	clear_user_page_asm(vto, vaddr);
	preempt_enable();

	/* Manual counterpart of kmap_atomic's pagefault_disable();
	   stands in for kunmap_atomic(addr, KM_USER0). */
	pagefault_enable();		/* kunmap_atomic(addr, KM_USER0); */
}
633
/*
 * Copy a user page `from' -> `to' through the kernel TMPALIAS mappings.
 * Only compiled under CONFIG_PARISC_TMPALIAS.
 */
void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;
	unsigned long flags;

	/* Copy using TMPALIAS region.  This has the advantage
	   that the `from' page doesn't need to be flushed.  However,
	   the `to' page must be flushed in copy_user_page_asm since
	   it can be used to bring in executable code. */

	/* Old two-argument kmap_atomic API: each call disables
	   pagefaults once; both are re-enabled by hand below. */
	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);

	/* Purge the destination's kernel-alias cache lines and both
	   kernel TLB entries before accessing the pages through the
	   TMPALIAS mappings (see the architecture note in
	   clear_user_highpage's sibling code for why). */
	purge_kernel_dcache_page_asm((unsigned long)vto);
	purge_tlb_start(flags);
	pdtlb_kernel(vto);
	pdtlb_kernel(vfrom);
	purge_tlb_end(flags);
	/* NOTE(review): preemption is disabled around the asm copy,
	   presumably because the TMPALIAS mappings are per-CPU — confirm. */
	preempt_disable();
	copy_user_page_asm(vto, vfrom, vaddr);
	/* Flush the destination so any executable code in it becomes
	   visible to instruction fetch. */
	flush_dcache_page_asm(__pa(vto), vaddr);
	preempt_enable();

	/* Manual counterparts of the two kmap_atomic pagefault_disable()
	   calls; unmap in reverse order of mapping. */
	pagefault_enable();		/* kunmap_atomic(addr, KM_USER1); */
	pagefault_enable();		/* kunmap_atomic(addr, KM_USER0); */
}
661
662#endif /* CONFIG_PARISC_TMPALIAS */