about summary refs log tree commit diff stats
path: root/arch/powerpc/mm/hash_utils_64.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/powerpc/mm/hash_utils_64.c')
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c | 20
1 file changed, 16 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 71092c2f65cd..5610ffb14211 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -51,6 +51,7 @@
51#include <asm/cputable.h> 51#include <asm/cputable.h>
52#include <asm/abs_addr.h> 52#include <asm/abs_addr.h>
53#include <asm/sections.h> 53#include <asm/sections.h>
54#include <asm/spu.h>
54 55
55#ifdef DEBUG 56#ifdef DEBUG
56#define DBG(fmt...) udbg_printf(fmt) 57#define DBG(fmt...) udbg_printf(fmt)
@@ -601,8 +602,13 @@ static void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
601{ 602{
602 if (mm->context.user_psize == MMU_PAGE_4K) 603 if (mm->context.user_psize == MMU_PAGE_4K)
603 return; 604 return;
605#ifdef CONFIG_PPC_MM_SLICES
606 slice_set_user_psize(mm, MMU_PAGE_4K);
607#else /* CONFIG_PPC_MM_SLICES */
604 mm->context.user_psize = MMU_PAGE_4K; 608 mm->context.user_psize = MMU_PAGE_4K;
605 mm->context.sllp = SLB_VSID_USER | mmu_psize_defs[MMU_PAGE_4K].sllp; 609 mm->context.sllp = SLB_VSID_USER | mmu_psize_defs[MMU_PAGE_4K].sllp;
610#endif /* CONFIG_PPC_MM_SLICES */
611
606#ifdef CONFIG_SPE_BASE 612#ifdef CONFIG_SPE_BASE
607 spu_flush_all_slbs(mm); 613 spu_flush_all_slbs(mm);
608#endif 614#endif
@@ -670,11 +676,14 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
670 if (user_region && cpus_equal(mm->cpu_vm_mask, tmp)) 676 if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
671 local = 1; 677 local = 1;
672 678
679#ifdef CONFIG_HUGETLB_PAGE
673 /* Handle hugepage regions */ 680 /* Handle hugepage regions */
674 if (unlikely(in_hugepage_area(mm->context, ea))) { 681 if (HPAGE_SHIFT &&
682 unlikely(get_slice_psize(mm, ea) == mmu_huge_psize)) {
675 DBG_LOW(" -> huge page !\n"); 683 DBG_LOW(" -> huge page !\n");
676 return hash_huge_page(mm, access, ea, vsid, local, trap); 684 return hash_huge_page(mm, access, ea, vsid, local, trap);
677 } 685 }
686#endif /* CONFIG_HUGETLB_PAGE */
678 687
679 /* Get PTE and page size from page tables */ 688 /* Get PTE and page size from page tables */
680 ptep = find_linux_pte(pgdir, ea); 689 ptep = find_linux_pte(pgdir, ea);
@@ -770,10 +779,13 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
770 unsigned long flags; 779 unsigned long flags;
771 int local = 0; 780 int local = 0;
772 781
773 /* We don't want huge pages prefaulted for now 782 BUG_ON(REGION_ID(ea) != USER_REGION_ID);
774 */ 783
775 if (unlikely(in_hugepage_area(mm->context, ea))) 784#ifdef CONFIG_PPC_MM_SLICES
785 /* We only prefault standard pages for now */
786 if (unlikely(get_slice_psize(mm, ea) != mm->context.user_psize));
776 return; 787 return;
788#endif
777 789
778 DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx," 790 DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx,"
779 " trap=%lx\n", mm, mm->pgd, ea, access, trap); 791 " trap=%lx\n", mm, mm->pgd, ea, access, trap);