aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/mm/hugetlbpage.c
diff options
context:
space:
mode:
authorDavid Gibson <david@gibson.dropbear.id.au>2005-12-08 22:20:52 -0500
committerPaul Mackerras <paulus@samba.org>2005-12-09 00:30:48 -0500
commitcbf52afdc0eb88492cf7808cc4b4f58a46f1b1ad (patch)
tree89910c9d9a054f0bf64670757a3715e19fcc62b2 /arch/powerpc/mm/hugetlbpage.c
parentdabcafd3f363bacd6b89f537af27dc79128e4806 (diff)
[PATCH] powerpc: Add missing icache flushes for hugepages
On most powerpc CPUs, the dcache and icache are not coherent so between writing and executing a page, the caches must be flushed. Userspace programs assume pages given to them by the kernel are icache clean, so we must do this flush between the kernel clearing a page and it being mapped into userspace for execute. We were not doing this for hugepages, this patch corrects the situation. We use the same lazy mechanism as we use for normal pages, delaying the flush until userspace actually attempts to execute from the page in question. Tested on G5. Signed-off-by: David Gibson <david@gibson.dropbear.id.au> Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/mm/hugetlbpage.c')
-rw-r--r--arch/powerpc/mm/hugetlbpage.c35
1 file changed, 34 insertions, 1 deletion
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 8bce515dc320..97512b89e7b0 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -639,8 +639,36 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
639 return -ENOMEM; 639 return -ENOMEM;
640} 640}
641 641
642/*
643 * Called by asm hashtable.S for doing lazy icache flush
644 */
645static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags,
646 pte_t pte, int trap)
647{
648 struct page *page;
649 int i;
650
651 if (!pfn_valid(pte_pfn(pte)))
652 return rflags;
653
654 page = pte_page(pte);
655
656 /* page is dirty */
657 if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
658 if (trap == 0x400) {
659 for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++)
660 __flush_dcache_icache(page_address(page+i));
661 set_bit(PG_arch_1, &page->flags);
662 } else {
663 rflags |= HPTE_R_N;
664 }
665 }
666 return rflags;
667}
668
642int hash_huge_page(struct mm_struct *mm, unsigned long access, 669int hash_huge_page(struct mm_struct *mm, unsigned long access,
643 unsigned long ea, unsigned long vsid, int local) 670 unsigned long ea, unsigned long vsid, int local,
671 unsigned long trap)
644{ 672{
645 pte_t *ptep; 673 pte_t *ptep;
646 unsigned long old_pte, new_pte; 674 unsigned long old_pte, new_pte;
@@ -691,6 +719,11 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
691 rflags = 0x2 | (!(new_pte & _PAGE_RW)); 719 rflags = 0x2 | (!(new_pte & _PAGE_RW));
692 /* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */ 720 /* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
693 rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N); 721 rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
722 if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
723 /* No CPU has hugepages but lacks no execute, so we
724 * don't need to worry about that case */
725 rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte),
726 trap);
694 727
695 /* Check if pte already has an hpte (case 2) */ 728 /* Check if pte already has an hpte (case 2) */
696 if (unlikely(old_pte & _PAGE_HASHPTE)) { 729 if (unlikely(old_pte & _PAGE_HASHPTE)) {