Diffstat (limited to 'arch/powerpc/mm/hash_utils_64.c')

 arch/powerpc/mm/hash_utils_64.c | 127 ++++++++++++++++++++++++++++++++------
 1 file changed, 111 insertions(+), 16 deletions(-)
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 3c7fe2c65b5a..49618461defb 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -100,6 +100,11 @@ unsigned int HPAGE_SHIFT;
 #ifdef CONFIG_PPC_64K_PAGES
 int mmu_ci_restrictions;
 #endif
+#ifdef CONFIG_DEBUG_PAGEALLOC
+static u8 *linear_map_hash_slots;
+static unsigned long linear_map_hash_count;
+static spinlock_t linear_map_hash_lock;
+#endif /* CONFIG_DEBUG_PAGEALLOC */
 
 /* There are definitions of page sizes arrays to be used when none
  * is provided by the firmware.
@@ -152,11 +157,10 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 
 	for (vaddr = vstart, paddr = pstart; vaddr < vend;
 	     vaddr += step, paddr += step) {
-		unsigned long vpn, hash, hpteg;
+		unsigned long hash, hpteg;
 		unsigned long vsid = get_kernel_vsid(vaddr);
 		unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);
 
-		vpn = va >> shift;
 		tmp_mode = mode;
 
 		/* Make non-kernel text non-executable */
@@ -174,6 +178,10 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 
 		if (ret < 0)
 			break;
+#ifdef CONFIG_DEBUG_PAGEALLOC
+		if ((paddr >> PAGE_SHIFT) < linear_map_hash_count)
+			linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
+#endif /* CONFIG_DEBUG_PAGEALLOC */
 	}
 	return ret < 0 ? ret : 0;
 }
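
For reference, each byte of linear_map_hash_slots packs two things: bit 0x80 marks that a bolted HPTE has been recorded for that physical page, and the low seven bits hold the slot value returned by ppc_md.hpte_insert() (group index plus secondary-hash bit, consumed again by kernel_unmap_linear_page() at the bottom of this patch). A minimal sketch of the encoding; the macro names here are illustrative, not from the patch:

	#define LMH_VALID	0x80	/* a bolted HPTE is recorded here */
	#define LMH_HIDX	0x7f	/* slot value from hpte_insert()  */

	static inline u8 lmh_pack(int slot)	/* store side: ret | 0x80 */
	{
		return (u8)slot | LMH_VALID;
	}

	static inline int lmh_unpack(u8 b)	/* load side: b & 0x7f */
	{
		return b & LMH_HIDX;
	}
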
@@ -281,6 +289,7 @@ static void __init htab_init_page_sizes(void)
 	memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
 	       sizeof(mmu_psize_defaults_gp));
  found:
+#ifndef CONFIG_DEBUG_PAGEALLOC
 	/*
 	 * Pick a size for the linear mapping. Currently, we only support
 	 * 16M, 1M and 4K which is the default
@@ -289,6 +298,7 @@ static void __init htab_init_page_sizes(void)
 		mmu_linear_psize = MMU_PAGE_16M;
 	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
 		mmu_linear_psize = MMU_PAGE_1M;
+#endif /* CONFIG_DEBUG_PAGEALLOC */
 
 #ifdef CONFIG_PPC_64K_PAGES
 	/*
@@ -303,12 +313,14 @@ static void __init htab_init_page_sizes(void)
 	if (mmu_psize_defs[MMU_PAGE_64K].shift) {
 		mmu_virtual_psize = MMU_PAGE_64K;
 		mmu_vmalloc_psize = MMU_PAGE_64K;
+		if (mmu_linear_psize == MMU_PAGE_4K)
+			mmu_linear_psize = MMU_PAGE_64K;
 		if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
 			mmu_io_psize = MMU_PAGE_64K;
 		else
 			mmu_ci_restrictions = 1;
 	}
-#endif
+#endif /* CONFIG_PPC_64K_PAGES */
 
 	printk(KERN_DEBUG "Page orders: linear mapping = %d, "
 	       "virtual = %d, io = %d\n",
@@ -476,6 +488,13 @@ void __init htab_initialize(void)
 
 	mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	linear_map_hash_count = lmb_end_of_DRAM() >> PAGE_SHIFT;
+	linear_map_hash_slots = __va(lmb_alloc_base(linear_map_hash_count,
+						    1, lmb.rmo_size));
+	memset(linear_map_hash_slots, 0, linear_map_hash_count);
+#endif /* CONFIG_DEBUG_PAGEALLOC */
+
 	/* On U3 based machines, we need to reserve the DART area and
 	 * _NOT_ map it to avoid cache paradoxes as it's remapped non
 	 * cacheable later on
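
Sizing note: the table needs one u8 per page of DRAM, and it is allocated below lmb.rmo_size, presumably so it lands in the real-mode-accessible region that is mapped from very early boot. Back-of-envelope arithmetic (example numbers, not from the patch):

	unsigned long dram = 2UL << 30;		/* say, 2GB of DRAM      */
	unsigned long count = dram >> 12;	/* 4K pages: 524288 slots */
	/* => a 512KB u8 array tracks every page of the linear mapping */
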
@@ -573,6 +592,27 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
 	return pp;
 }
 
+/*
+ * Demote a segment to using 4k pages.
+ * For now this makes the whole process use 4k pages.
+ */
+void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
+{
+#ifdef CONFIG_PPC_64K_PAGES
+	if (mm->context.user_psize == MMU_PAGE_4K)
+		return;
+	mm->context.user_psize = MMU_PAGE_4K;
+	mm->context.sllp = SLB_VSID_USER | mmu_psize_defs[MMU_PAGE_4K].sllp;
+	get_paca()->context = mm->context;
+	slb_flush_and_rebolt();
+#ifdef CONFIG_SPU_BASE
+	spu_flush_all_slbs(mm);
+#endif
+#endif
+}
+
+EXPORT_SYMBOL_GPL(demote_segment_4k);
+
 /* Result code is:
  *  0 - handled
  *  1 - normal page fault
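
The helper above centralizes what the hash-fault paths below used to open-code: update the mm's segment page size and SLLP encoding, copy the context into the PACA, rebolt the SLB so the change takes effect immediately, and flush any SPU SLBs caching the old segment size. Both call sites changed below follow the same shape; condensed from the hash_page() hunk that follows, as a reading aid rather than new code:

	/* PTE carries a 4K-only PFN (typically set via remap_4k_pfn()) */
	if (pte_val(*ptep) & _PAGE_4K_PFN) {
		demote_segment_4k(mm, ea);	/* whole process drops to 4K */
		psize = MMU_PAGE_4K;		/* hash this access as 4K too */
	}
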
@@ -665,15 +705,19 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 #ifndef CONFIG_PPC_64K_PAGES
 	rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
 #else
+	/* If _PAGE_4K_PFN is set, make sure this is a 4k segment */
+	if (pte_val(*ptep) & _PAGE_4K_PFN) {
+		demote_segment_4k(mm, ea);
+		psize = MMU_PAGE_4K;
+	}
+
 	if (mmu_ci_restrictions) {
 		/* If this PTE is non-cacheable, switch to 4k */
 		if (psize == MMU_PAGE_64K &&
 		    (pte_val(*ptep) & _PAGE_NO_CACHE)) {
 			if (user_region) {
+				demote_segment_4k(mm, ea);
 				psize = MMU_PAGE_4K;
-				mm->context.user_psize = MMU_PAGE_4K;
-				mm->context.sllp = SLB_VSID_USER |
-					mmu_psize_defs[MMU_PAGE_4K].sllp;
 			} else if (ea < VMALLOC_END) {
 				/*
 				 * some driver did a non-cacheable mapping
@@ -756,16 +800,8 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 	if (mmu_ci_restrictions) {
 		/* If this PTE is non-cacheable, switch to 4k */
 		if (mm->context.user_psize == MMU_PAGE_64K &&
-		    (pte_val(*ptep) & _PAGE_NO_CACHE)) {
-			mm->context.user_psize = MMU_PAGE_4K;
-			mm->context.sllp = SLB_VSID_USER |
-				mmu_psize_defs[MMU_PAGE_4K].sllp;
-			get_paca()->context = mm->context;
-			slb_flush_and_rebolt();
-#ifdef CONFIG_SPU_BASE
-			spu_flush_all_slbs(mm);
-#endif
-		}
+		    (pte_val(*ptep) & _PAGE_NO_CACHE))
+			demote_segment_4k(mm, ea);
 	}
 	if (mm->context.user_psize == MMU_PAGE_64K)
 		__hash_page_64K(ea, access, vsid, ptep, trap, local);
@@ -825,3 +861,62 @@ void low_hash_fault(struct pt_regs *regs, unsigned long address)
 	}
 	bad_page_fault(regs, address, SIGBUS);
 }
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
+{
+	unsigned long hash, hpteg, vsid = get_kernel_vsid(vaddr);
+	unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);
+	unsigned long mode = _PAGE_ACCESSED | _PAGE_DIRTY |
+		_PAGE_COHERENT | PP_RWXX | HPTE_R_N;
+	int ret;
+
+	hash = hpt_hash(va, PAGE_SHIFT);
+	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
+
+	ret = ppc_md.hpte_insert(hpteg, va, __pa(vaddr),
+				 mode, HPTE_V_BOLTED, mmu_linear_psize);
+	BUG_ON(ret < 0);
+	spin_lock(&linear_map_hash_lock);
+	BUG_ON(linear_map_hash_slots[lmi] & 0x80);
+	linear_map_hash_slots[lmi] = ret | 0x80;
+	spin_unlock(&linear_map_hash_lock);
+}
+
+static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
+{
+	unsigned long hash, hidx, slot, vsid = get_kernel_vsid(vaddr);
+	unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);
+
+	hash = hpt_hash(va, PAGE_SHIFT);
+	spin_lock(&linear_map_hash_lock);
+	BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
+	hidx = linear_map_hash_slots[lmi] & 0x7f;
+	linear_map_hash_slots[lmi] = 0;
+	spin_unlock(&linear_map_hash_lock);
+	if (hidx & _PTEIDX_SECONDARY)
+		hash = ~hash;
+	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+	slot += hidx & _PTEIDX_GROUP_IX;
+	ppc_md.hpte_invalidate(slot, va, mmu_linear_psize, 0);
+}
+
+void kernel_map_pages(struct page *page, int numpages, int enable)
+{
+	unsigned long flags, vaddr, lmi;
+	int i;
+
+	local_irq_save(flags);
+	for (i = 0; i < numpages; i++, page++) {
+		vaddr = (unsigned long)page_address(page);
+		lmi = __pa(vaddr) >> PAGE_SHIFT;
+		if (lmi >= linear_map_hash_count)
+			continue;
+		if (enable)
+			kernel_map_linear_page(vaddr, lmi);
+		else
+			kernel_unmap_linear_page(vaddr, lmi);
+	}
+	local_irq_restore(flags);
+}
+#endif /* CONFIG_DEBUG_PAGEALLOC */
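
For context on how this entry point is exercised: under CONFIG_DEBUG_PAGEALLOC the page allocator unmaps pages from the kernel linear mapping when they are freed and maps them back on allocation, so a stale pointer dereference faults immediately instead of silently reading freed memory. Pages whose linear-map index falls beyond linear_map_hash_count are skipped, and interrupts stay off across the loop so the HPTE bookkeeping cannot race a hash fault. A hedged sketch of the calling convention (the wrapper names below are illustrative; the real callers live in mm/page_alloc.c):

	void debug_pagealloc_free_sketch(struct page *page, int order)
	{
		kernel_map_pages(page, 1 << order, 0);	/* tear down HPTEs on free */
	}

	void debug_pagealloc_alloc_sketch(struct page *page, int order)
	{
		kernel_map_pages(page, 1 << order, 1);	/* re-bolt HPTEs on alloc */
	}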