Diffstat (limited to 'arch/powerpc/mm/hugetlbpage.c')
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c | 119
1 file changed, 94 insertions(+), 25 deletions(-)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 71efb38d599b..a02266dad215 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -24,18 +24,17 @@
 #include <asm/cputable.h>
 #include <asm/spu.h>
 
+#define HPAGE_SHIFT_64K	16
+#define HPAGE_SHIFT_16M	24
+
 #define NUM_LOW_AREAS	(0x100000000UL >> SID_SHIFT)
 #define NUM_HIGH_AREAS	(PGTABLE_RANGE >> HTLB_AREA_SHIFT)
 
-#ifdef CONFIG_PPC_64K_PAGES
-#define HUGEPTE_INDEX_SIZE	(PMD_SHIFT-HPAGE_SHIFT)
-#else
-#define HUGEPTE_INDEX_SIZE	(PUD_SHIFT-HPAGE_SHIFT)
-#endif
-#define PTRS_PER_HUGEPTE	(1 << HUGEPTE_INDEX_SIZE)
-#define HUGEPTE_TABLE_SIZE	(sizeof(pte_t) << HUGEPTE_INDEX_SIZE)
+unsigned int hugepte_shift;
+#define PTRS_PER_HUGEPTE	(1 << hugepte_shift)
+#define HUGEPTE_TABLE_SIZE	(sizeof(pte_t) << hugepte_shift)
 
-#define HUGEPD_SHIFT		(HPAGE_SHIFT + HUGEPTE_INDEX_SIZE)
+#define HUGEPD_SHIFT		(HPAGE_SHIFT + hugepte_shift)
 #define HUGEPD_SIZE		(1UL << HUGEPD_SHIFT)
 #define HUGEPD_MASK		(~(HUGEPD_SIZE-1))
 
@@ -82,11 +81,35 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 	return 0;
 }
 
+/* Base page size affects how we walk hugetlb page tables */
+#ifdef CONFIG_PPC_64K_PAGES
+#define hpmd_offset(pud, addr)		pmd_offset(pud, addr)
+#define hpmd_alloc(mm, pud, addr)	pmd_alloc(mm, pud, addr)
+#else
+static inline
+pmd_t *hpmd_offset(pud_t *pud, unsigned long addr)
+{
+	if (HPAGE_SHIFT == HPAGE_SHIFT_64K)
+		return pmd_offset(pud, addr);
+	else
+		return (pmd_t *) pud;
+}
+static inline
+pmd_t *hpmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr)
+{
+	if (HPAGE_SHIFT == HPAGE_SHIFT_64K)
+		return pmd_alloc(mm, pud, addr);
+	else
+		return (pmd_t *) pud;
+}
+#endif
+
 /* Modelled after find_linux_pte() */
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
 	pgd_t *pg;
 	pud_t *pu;
+	pmd_t *pm;
 
 	BUG_ON(get_slice_psize(mm, addr) != mmu_huge_psize);
 
@@ -96,14 +119,9 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 	if (!pgd_none(*pg)) {
 		pu = pud_offset(pg, addr);
 		if (!pud_none(*pu)) {
-#ifdef CONFIG_PPC_64K_PAGES
-			pmd_t *pm;
-			pm = pmd_offset(pu, addr);
+			pm = hpmd_offset(pu, addr);
 			if (!pmd_none(*pm))
 				return hugepte_offset((hugepd_t *)pm, addr);
-#else
-			return hugepte_offset((hugepd_t *)pu, addr);
-#endif
 		}
 	}
 
@@ -114,6 +132,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 {
 	pgd_t *pg;
 	pud_t *pu;
+	pmd_t *pm;
 	hugepd_t *hpdp = NULL;
 
 	BUG_ON(get_slice_psize(mm, addr) != mmu_huge_psize);
@@ -124,14 +143,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 	pu = pud_alloc(mm, pg, addr);
 
 	if (pu) {
-#ifdef CONFIG_PPC_64K_PAGES
-		pmd_t *pm;
-		pm = pmd_alloc(mm, pu, addr);
+		pm = hpmd_alloc(mm, pu, addr);
 		if (pm)
 			hpdp = (hugepd_t *)pm;
-#else
-		hpdp = (hugepd_t *)pu;
-#endif
 	}
 
 	if (! hpdp)
@@ -158,7 +172,6 @@ static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp)
 						 PGF_CACHENUM_MASK));
 }
 
-#ifdef CONFIG_PPC_64K_PAGES
 static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
 				   unsigned long addr, unsigned long end,
 				   unsigned long floor, unsigned long ceiling)
@@ -191,7 +204,6 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
 	pud_clear(pud);
 	pmd_free_tlb(tlb, pmd);
 }
-#endif
 
 static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
 				   unsigned long addr, unsigned long end,
@@ -210,9 +222,15 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
 			continue;
 		hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling);
 #else
-		if (pud_none(*pud))
-			continue;
-		free_hugepte_range(tlb, (hugepd_t *)pud);
+		if (HPAGE_SHIFT == HPAGE_SHIFT_64K) {
+			if (pud_none_or_clear_bad(pud))
+				continue;
+			hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling);
+		} else {
+			if (pud_none(*pud))
+				continue;
+			free_hugepte_range(tlb, (hugepd_t *)pud);
+		}
 #endif
 	} while (pud++, addr = next, addr != end);
 
@@ -526,6 +544,57 @@ repeat:
 	return err;
 }
 
+void set_huge_psize(int psize)
+{
+	/* Check that it is a page size supported by the hardware and
+	 * that it fits within pagetable limits. */
+	if (mmu_psize_defs[psize].shift && mmu_psize_defs[psize].shift < SID_SHIFT &&
+		(mmu_psize_defs[psize].shift > MIN_HUGEPTE_SHIFT ||
+			mmu_psize_defs[psize].shift == HPAGE_SHIFT_64K)) {
+		HPAGE_SHIFT = mmu_psize_defs[psize].shift;
+		mmu_huge_psize = psize;
+#ifdef CONFIG_PPC_64K_PAGES
+		hugepte_shift = (PMD_SHIFT-HPAGE_SHIFT);
+#else
+		if (HPAGE_SHIFT == HPAGE_SHIFT_64K)
+			hugepte_shift = (PMD_SHIFT-HPAGE_SHIFT);
+		else
+			hugepte_shift = (PUD_SHIFT-HPAGE_SHIFT);
+#endif
+
+	} else
+		HPAGE_SHIFT = 0;
+}
+
+static int __init hugepage_setup_sz(char *str)
+{
+	unsigned long long size;
+	int mmu_psize = -1;
+	int shift;
+
+	size = memparse(str, &str);
+
+	shift = __ffs(size);
+	switch (shift) {
+#ifndef CONFIG_PPC_64K_PAGES
+	case HPAGE_SHIFT_64K:
+		mmu_psize = MMU_PAGE_64K;
+		break;
+#endif
+	case HPAGE_SHIFT_16M:
+		mmu_psize = MMU_PAGE_16M;
+		break;
+	}
+
+	if (mmu_psize >=0 && mmu_psize_defs[mmu_psize].shift)
+		set_huge_psize(mmu_psize);
+	else
+		printk(KERN_WARNING "Invalid huge page size specified(%llu)\n", size);
+
+	return 1;
+}
+__setup("hugepagesz=", hugepage_setup_sz);
+
 static void zero_ctor(struct kmem_cache *cache, void *addr)
 {
 	memset(addr, 0, kmem_cache_size(cache));
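
For reference, a quick standalone sketch (not part of the patch) of the size-to-shift mapping that hugepage_setup_sz() performs: memparse() turns the "hugepagesz=" string into a byte count, and __ffs() of that power-of-two value gives the page shift, so "64K" maps to shift 16 (MMU_PAGE_64K, accepted only on 4K-base-page kernels) and "16M" to shift 24 (MMU_PAGE_16M). The userspace approximation below substitutes strtoull() and __builtin_ctzll() for the kernel helpers; parse_size() and the sample inputs are purely illustrative.

/* Userspace sketch of the "hugepagesz=" parsing above; parse_size() is an
 * illustrative stand-in for the kernel's memparse(). */
#include <stdio.h>
#include <stdlib.h>

static unsigned long long parse_size(const char *s)
{
	char *end;
	unsigned long long v = strtoull(s, &end, 0);

	switch (*end) {		/* crude stand-in for memparse() suffixes */
	case 'k': case 'K': v <<= 10; break;
	case 'm': case 'M': v <<= 20; break;
	case 'g': case 'G': v <<= 30; break;
	}
	return v;
}

int main(void)
{
	const char *args[] = { "64K", "16M", "2M" };
	int i;

	for (i = 0; i < 3; i++) {
		unsigned long long size = parse_size(args[i]);
		/* index of lowest set bit, equivalent to __ffs() for these values */
		int shift = __builtin_ctzll(size);

		printf("hugepagesz=%s -> 0x%llx, shift %d: %s\n",
		       args[i], size, shift,
		       shift == 16 ? "MMU_PAGE_64K" :
		       shift == 24 ? "MMU_PAGE_16M" : "rejected");
	}
	return 0;
}

Running the sketch prints shift 16 for "64K", shift 24 for "16M", and rejects "2M" (shift 21), mirroring the switch statement in hugepage_setup_sz().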