commit 16c2d476232523260c495eafbf9cdc1be984b7df
tree   fb6614b9752b51864e121317478088978823792c /arch/powerpc/mm
parent d0f13e3c20b6fb73ccb467bdca97fa7cf5a574cd
author    Benjamin Herrenschmidt <benh@kernel.crashing.org>  2007-05-08 02:27:28 -0400
committer Paul Mackerras <paulus@samba.org>  2007-05-09 02:35:00 -0400
[POWERPC] Add ability to 4K kernel to hash in 64K pages

This adds the ability for a kernel compiled with a 4K page size to have
special slices containing 64K pages, and to hash the right type of hash
PTEs for them.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/hash_low_64.S   |  5
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c | 39
-rw-r--r--  arch/powerpc/mm/tlb_64.c        | 12
3 files changed, 40 insertions(+), 16 deletions(-)
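
Before the hunks themselves, a condensed illustration of what the patch enables may help: on a 4K-base kernel the hash fault path now takes the page size from the address slice rather than from a single per-mm value, and for a non-4K (e.g. SPE) mapping it aligns the effective address to the large-page boundary before walking the page tables. The sketch below is a minimal user-space illustration only, not kernel code; slice_psize() and psize_shift[] are hypothetical stand-ins for the kernel's get_slice_psize() and mmu_psize_defs[], and only the masking arithmetic mirrors the patch.

#include <stdio.h>

enum { MMU_PAGE_4K = 0, MMU_PAGE_64K = 1 };

/* Per-psize page shift, standing in for mmu_psize_defs[psize].shift */
static const unsigned int psize_shift[] = { 12, 16 };

/* Hypothetical stand-in for get_slice_psize(mm, ea): pretend everything
 * above 4GB lives in a 64K slice, everything below in a 4K slice. */
static int slice_psize(unsigned long long ea)
{
	return (ea >= 0x100000000ULL) ? MMU_PAGE_64K : MMU_PAGE_4K;
}

int main(void)
{
	unsigned long long ea = 0x10000b123ULL;	/* faulting effective address */
	int psize = slice_psize(ea);

	/* On a 4K-base kernel a non-4K psize means a special (e.g. SPE)
	 * mapping; align the EA to the large-page boundary before the page
	 * table walk, mirroring the patch's
	 *   ea &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
	 */
	if (psize != MMU_PAGE_4K)
		ea &= ~((1ULL << psize_shift[psize]) - 1);

	printf("psize=%s, aligned ea=0x%llx\n",
	       psize == MMU_PAGE_64K ? "64K" : "4K", ea);
	return 0;
}
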
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index e64ce3eec36e..4762ff7c14df 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -615,6 +615,9 @@ htab_pte_insert_failure:
 	li	r3,-1
 	b	htab_bail
 
+#endif /* CONFIG_PPC_64K_PAGES */
+
+#ifdef CONFIG_PPC_HAS_HASH_64K
 
 /*****************************************************************************
  *                                                                           *
@@ -870,7 +873,7 @@ ht64_pte_insert_failure:
 	b	ht64_bail
 
 
-#endif /* CONFIG_PPC_64K_PAGES */
+#endif /* CONFIG_PPC_HAS_HASH_64K */
 
 
 /*****************************************************************************
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 5610ffb14211..028ba4ed03d2 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -420,7 +420,7 @@ static void __init htab_finish_init(void)
 	extern unsigned int *htab_call_hpte_remove;
 	extern unsigned int *htab_call_hpte_updatepp;
 
-#ifdef CONFIG_PPC_64K_PAGES
+#ifdef CONFIG_PPC_HAS_HASH_64K
 	extern unsigned int *ht64_call_hpte_insert1;
 	extern unsigned int *ht64_call_hpte_insert2;
 	extern unsigned int *ht64_call_hpte_remove;
@@ -648,7 +648,11 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 			return 1;
 		}
 		vsid = get_vsid(mm->context.id, ea);
+#ifdef CONFIG_PPC_MM_SLICES
+		psize = get_slice_psize(mm, ea);
+#else
 		psize = mm->context.user_psize;
+#endif
 		break;
 	case VMALLOC_REGION_ID:
 		mm = &init_mm;
@@ -678,13 +682,21 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 
 #ifdef CONFIG_HUGETLB_PAGE
 	/* Handle hugepage regions */
-	if (HPAGE_SHIFT &&
-	    unlikely(get_slice_psize(mm, ea) == mmu_huge_psize)) {
+	if (HPAGE_SHIFT && psize == mmu_huge_psize) {
 		DBG_LOW(" -> huge page !\n");
 		return hash_huge_page(mm, access, ea, vsid, local, trap);
 	}
 #endif /* CONFIG_HUGETLB_PAGE */
 
+#ifndef CONFIG_PPC_64K_PAGES
+	/* If we use 4K pages and our psize is not 4K, then we are hitting
+	 * a special driver mapping, we need to align the address before
+	 * we fetch the PTE
+	 */
+	if (psize != MMU_PAGE_4K)
+		ea &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
+#endif /* CONFIG_PPC_64K_PAGES */
+
 	/* Get PTE and page size from page tables */
 	ptep = find_linux_pte(pgdir, ea);
 	if (ptep == NULL || !pte_present(*ptep)) {
@@ -707,9 +719,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	}
 
 	/* Do actual hashing */
-#ifndef CONFIG_PPC_64K_PAGES
-	rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
-#else
+#ifdef CONFIG_PPC_64K_PAGES
 	/* If _PAGE_4K_PFN is set, make sure this is a 4k segment */
 	if (pte_val(*ptep) & _PAGE_4K_PFN) {
 		demote_segment_4k(mm, ea);
@@ -751,12 +761,14 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 			mmu_psize_defs[mmu_vmalloc_psize].sllp;
 		slb_flush_and_rebolt();
 	}
+#endif /* CONFIG_PPC_64K_PAGES */
 
+#ifdef CONFIG_PPC_HAS_HASH_64K
 	if (psize == MMU_PAGE_64K)
 		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local);
 	else
+#endif /* CONFIG_PPC_HAS_HASH_64K */
 		rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
-#endif /* CONFIG_PPC_64K_PAGES */
 
 #ifndef CONFIG_PPC_64K_PAGES
 	DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
@@ -812,19 +824,22 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 	/* Get VSID */
 	vsid = get_vsid(mm->context.id, ea);
 
-	/* Hash it in */
+	/* Hash doesn't like irqs */
 	local_irq_save(flags);
+
+	/* Is that local to this CPU ? */
 	mask = cpumask_of_cpu(smp_processor_id());
 	if (cpus_equal(mm->cpu_vm_mask, mask))
 		local = 1;
-#ifndef CONFIG_PPC_64K_PAGES
-	__hash_page_4K(ea, access, vsid, ptep, trap, local);
-#else
+
+	/* Hash it in */
+#ifdef CONFIG_PPC_HAS_HASH_64K
 	if (mm->context.user_psize == MMU_PAGE_64K)
 		__hash_page_64K(ea, access, vsid, ptep, trap, local);
 	else
-		__hash_page_4K(ea, access, vsid, ptep, trap, local);
 #endif /* CONFIG_PPC_64K_PAGES */
+	__hash_page_4K(ea, access, vsid, ptep, trap, local);
+
 	local_irq_restore(flags);
 }
 
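The hash_utils_64.c hunks above repeatedly use the same fall-through construction: an #ifdef CONFIG_PPC_HAS_HASH_64K guards an "if (64K) ... else" so that, when the option is absent, the preprocessor drops the conditional and the 4K call becomes unconditional. The following stand-alone sketch shows that shape in isolation; it is an illustration only, and hash_4k()/hash_64k() are toy stand-ins for __hash_page_4K()/__hash_page_64K(), not the kernel functions.

#include <stdio.h>

#define CONFIG_PPC_HAS_HASH_64K 1	/* comment out to see the 4K-only shape */

enum { MMU_PAGE_4K, MMU_PAGE_64K };

static void hash_4k(void)  { puts("__hash_page_4K path"); }
static void hash_64k(void) { puts("__hash_page_64K path"); }

static void do_hash(int psize)
{
#ifdef CONFIG_PPC_HAS_HASH_64K
	if (psize == MMU_PAGE_64K)
		hash_64k();
	else
#endif /* CONFIG_PPC_HAS_HASH_64K */
		hash_4k();
}

int main(void)
{
	do_hash(MMU_PAGE_64K);	/* 64K path only if CONFIG_PPC_HAS_HASH_64K */
	do_hash(MMU_PAGE_4K);	/* always falls through to the 4K path */
	return 0;
}
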
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index fd8d08c325eb..2bfc4d7e1aa2 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -143,16 +143,22 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	 */
 	addr &= PAGE_MASK;
 
-	/* Get page size (maybe move back to caller) */
+	/* Get page size (maybe move back to caller).
+	 *
+	 * NOTE: when using special 64K mappings in 4K environment like
+	 * for SPEs, we obtain the page size from the slice, which thus
+	 * must still exist (and thus the VMA not reused) at the time
+	 * of this call
+	 */
 	if (huge) {
 #ifdef CONFIG_HUGETLB_PAGE
 		psize = mmu_huge_psize;
 #else
 		BUG();
-		psize = pte_pagesize_index(pte); /* shutup gcc */
+		psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
 #endif
 	} else
-		psize = pte_pagesize_index(pte);
+		psize = pte_pagesize_index(mm, addr, pte);
 
 	/* Build full vaddr */
 	if (!is_kernel_addr(addr)) {