Diffstat (limited to 'arch/powerpc/mm/hugetlbpage.c')
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c  793
1 file changed, 291 insertions, 502 deletions
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 90df6ffe3a43..9bb249c3046e 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -7,29 +7,18 @@ | |||
7 | * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com> | 7 | * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com> |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/fs.h> | ||
12 | #include <linux/mm.h> | 10 | #include <linux/mm.h> |
13 | #include <linux/hugetlb.h> | 11 | #include <linux/io.h> |
14 | #include <linux/pagemap.h> | ||
15 | #include <linux/slab.h> | 12 | #include <linux/slab.h> |
16 | #include <linux/err.h> | 13 | #include <linux/hugetlb.h> |
17 | #include <linux/sysctl.h> | 14 | #include <asm/pgtable.h> |
18 | #include <asm/mman.h> | ||
19 | #include <asm/pgalloc.h> | 15 | #include <asm/pgalloc.h> |
20 | #include <asm/tlb.h> | 16 | #include <asm/tlb.h> |
21 | #include <asm/tlbflush.h> | ||
22 | #include <asm/mmu_context.h> | ||
23 | #include <asm/machdep.h> | ||
24 | #include <asm/cputable.h> | ||
25 | #include <asm/spu.h> | ||
26 | 17 | ||
27 | #define PAGE_SHIFT_64K 16 | 18 | #define PAGE_SHIFT_64K 16 |
28 | #define PAGE_SHIFT_16M 24 | 19 | #define PAGE_SHIFT_16M 24 |
29 | #define PAGE_SHIFT_16G 34 | 20 | #define PAGE_SHIFT_16G 34 |
30 | 21 | ||
31 | #define NUM_LOW_AREAS (0x100000000UL >> SID_SHIFT) | ||
32 | #define NUM_HIGH_AREAS (PGTABLE_RANGE >> HTLB_AREA_SHIFT) | ||
33 | #define MAX_NUMBER_GPAGES 1024 | 22 | #define MAX_NUMBER_GPAGES 1024 |
34 | 23 | ||
35 | /* Tracks the 16G pages after the device tree is scanned and before the | 24 | /* Tracks the 16G pages after the device tree is scanned and before the |
@@ -37,53 +26,17 @@ | |||
37 | static unsigned long gpage_freearray[MAX_NUMBER_GPAGES]; | 26 | static unsigned long gpage_freearray[MAX_NUMBER_GPAGES]; |
38 | static unsigned nr_gpages; | 27 | static unsigned nr_gpages; |
39 | 28 | ||
40 | /* Array of valid huge page sizes - non-zero value(hugepte_shift) is | ||
41 | * stored for the huge page sizes that are valid. | ||
42 | */ | ||
43 | unsigned int mmu_huge_psizes[MMU_PAGE_COUNT] = { }; /* initialize all to 0 */ | ||
44 | |||
45 | #define hugepte_shift mmu_huge_psizes | ||
46 | #define PTRS_PER_HUGEPTE(psize) (1 << hugepte_shift[psize]) | ||
47 | #define HUGEPTE_TABLE_SIZE(psize) (sizeof(pte_t) << hugepte_shift[psize]) | ||
48 | |||
49 | #define HUGEPD_SHIFT(psize) (mmu_psize_to_shift(psize) \ | ||
50 | + hugepte_shift[psize]) | ||
51 | #define HUGEPD_SIZE(psize) (1UL << HUGEPD_SHIFT(psize)) | ||
52 | #define HUGEPD_MASK(psize) (~(HUGEPD_SIZE(psize)-1)) | ||
53 | |||
54 | /* Subtract one from array size because we don't need a cache for 4K since | ||
55 | * it is not a huge page size */ | ||
56 | #define HUGE_PGTABLE_INDEX(psize) (HUGEPTE_CACHE_NUM + psize - 1) | ||
57 | #define HUGEPTE_CACHE_NAME(psize) (huge_pgtable_cache_name[psize]) | ||
58 | |||
59 | static const char *huge_pgtable_cache_name[MMU_PAGE_COUNT] = { | ||
60 | [MMU_PAGE_64K] = "hugepte_cache_64K", | ||
61 | [MMU_PAGE_1M] = "hugepte_cache_1M", | ||
62 | [MMU_PAGE_16M] = "hugepte_cache_16M", | ||
63 | [MMU_PAGE_16G] = "hugepte_cache_16G", | ||
64 | }; | ||
65 | |||
66 | /* Flag to mark huge PD pointers. This means pmd_bad() and pud_bad() | 29 | /* Flag to mark huge PD pointers. This means pmd_bad() and pud_bad() |
67 | * will choke on pointers to hugepte tables, which is handy for | 30 | * will choke on pointers to hugepte tables, which is handy for |
68 | * catching screwups early. */ | 31 | * catching screwups early. */ |
69 | #define HUGEPD_OK 0x1 | ||
70 | |||
71 | typedef struct { unsigned long pd; } hugepd_t; | ||
72 | |||
73 | #define hugepd_none(hpd) ((hpd).pd == 0) | ||
74 | 32 | ||
75 | static inline int shift_to_mmu_psize(unsigned int shift) | 33 | static inline int shift_to_mmu_psize(unsigned int shift) |
76 | { | 34 | { |
77 | switch (shift) { | 35 | int psize; |
78 | #ifndef CONFIG_PPC_64K_PAGES | 36 | |
79 | case PAGE_SHIFT_64K: | 37 | for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) |
80 | return MMU_PAGE_64K; | 38 | if (mmu_psize_defs[psize].shift == shift) |
81 | #endif | 39 | return psize; |
82 | case PAGE_SHIFT_16M: | ||
83 | return MMU_PAGE_16M; | ||
84 | case PAGE_SHIFT_16G: | ||
85 | return MMU_PAGE_16G; | ||
86 | } | ||
87 | return -1; | 40 | return -1; |
88 | } | 41 | } |
89 | 42 | ||
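The rewritten shift_to_mmu_psize() is now table-driven: instead of hard-coding the three classic huge page shifts, it scans mmu_psize_defs[] for a matching entry, so any page size the MMU definitions advertise becomes usable. A standalone sketch of the same lookup against an assumed table (all demo_* names are made up for illustration):

#include <stdio.h>

/* Illustrative stand-in for mmu_psize_defs[]; only 'shift' matters here. */
struct demo_psize_def { unsigned shift; };

#define DEMO_PAGE_COUNT 4

static const struct demo_psize_def demo_defs[DEMO_PAGE_COUNT] = {
	{ 12 },	/* 4K */
	{ 16 },	/* 64K */
	{ 24 },	/* 16M */
	{ 34 },	/* 16G */
};

static int demo_shift_to_psize(unsigned shift)
{
	int psize;

	for (psize = 0; psize < DEMO_PAGE_COUNT; ++psize)
		if (demo_defs[psize].shift == shift)
			return psize;
	return -1;	/* unknown page size, as in the patch */
}

int main(void)
{
	printf("shift 24 -> psize %d\n", demo_shift_to_psize(24));	/* 2 */
	printf("shift 20 -> psize %d\n", demo_shift_to_psize(20));	/* -1 */
	return 0;
}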
@@ -94,71 +47,126 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize) | |||
94 | BUG(); | 47 | BUG(); |
95 | } | 48 | } |
96 | 49 | ||
50 | #define hugepd_none(hpd) ((hpd).pd == 0) | ||
51 | |||
97 | static inline pte_t *hugepd_page(hugepd_t hpd) | 52 | static inline pte_t *hugepd_page(hugepd_t hpd) |
98 | { | 53 | { |
99 | BUG_ON(!(hpd.pd & HUGEPD_OK)); | 54 | BUG_ON(!hugepd_ok(hpd)); |
100 | return (pte_t *)(hpd.pd & ~HUGEPD_OK); | 55 | return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | 0xc000000000000000); |
56 | } | ||
57 | |||
58 | static inline unsigned int hugepd_shift(hugepd_t hpd) | ||
59 | { | ||
60 | return hpd.pd & HUGEPD_SHIFT_MASK; | ||
101 | } | 61 | } |
102 | 62 | ||
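The two helpers above decode the new hugepd encoding: the low bits of the word (HUGEPD_SHIFT_MASK) carry the page shift, and the remaining bits are the address of a sufficiently aligned hugepte table, with the 0xc000000000000000 kernel linear-mapping base folded back in on decode. A minimal userspace sketch of the same bit-packing idea, under assumed alignment and mask values (the demo_* names and DEMO_SHIFT_MASK value are illustrative, not the kernel's):

#include <assert.h>
#include <stdio.h>

#define DEMO_SHIFT_MASK 0x3fUL	/* assumed; the kernel uses HUGEPD_SHIFT_MASK */

static unsigned long demo_encode(void *table, unsigned pshift)
{
	/* Table alignment must leave the low bits free for the shift. */
	assert(((unsigned long)table & DEMO_SHIFT_MASK) == 0);
	assert(pshift <= DEMO_SHIFT_MASK);
	return (unsigned long)table | pshift;
}

static unsigned demo_shift(unsigned long pd)
{
	return pd & DEMO_SHIFT_MASK;		/* mirrors hugepd_shift() */
}

static void *demo_table(unsigned long pd)
{
	return (void *)(pd & ~DEMO_SHIFT_MASK);	/* mirrors hugepd_page() */
}

int main(void)
{
	static char table[1024] __attribute__((aligned(64)));
	unsigned long pd = demo_encode(table, 24);	/* 16M => shift 24 */

	printf("shift=%u table=%p\n", demo_shift(pd), demo_table(pd));
	return 0;
}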
103 | static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr, | 63 | static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr, unsigned pdshift) |
104 | struct hstate *hstate) | ||
105 | { | 64 | { |
106 | unsigned int shift = huge_page_shift(hstate); | 65 | unsigned long idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(*hpdp); |
107 | int psize = shift_to_mmu_psize(shift); | ||
108 | unsigned long idx = ((addr >> shift) & (PTRS_PER_HUGEPTE(psize)-1)); | ||
109 | pte_t *dir = hugepd_page(*hpdp); | 66 | pte_t *dir = hugepd_page(*hpdp); |
110 | 67 | ||
111 | return dir + idx; | 68 | return dir + idx; |
112 | } | 69 | } |
113 | 70 | ||
71 | pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift) | ||
72 | { | ||
73 | pgd_t *pg; | ||
74 | pud_t *pu; | ||
75 | pmd_t *pm; | ||
76 | hugepd_t *hpdp = NULL; | ||
77 | unsigned pdshift = PGDIR_SHIFT; | ||
78 | |||
79 | if (shift) | ||
80 | *shift = 0; | ||
81 | |||
82 | pg = pgdir + pgd_index(ea); | ||
83 | if (is_hugepd(pg)) { | ||
84 | hpdp = (hugepd_t *)pg; | ||
85 | } else if (!pgd_none(*pg)) { | ||
86 | pdshift = PUD_SHIFT; | ||
87 | pu = pud_offset(pg, ea); | ||
88 | if (is_hugepd(pu)) | ||
89 | hpdp = (hugepd_t *)pu; | ||
90 | else if (!pud_none(*pu)) { | ||
91 | pdshift = PMD_SHIFT; | ||
92 | pm = pmd_offset(pu, ea); | ||
93 | if (is_hugepd(pm)) | ||
94 | hpdp = (hugepd_t *)pm; | ||
95 | else if (!pmd_none(*pm)) { | ||
96 | return pte_offset_map(pm, ea); | ||
97 | } | ||
98 | } | ||
99 | } | ||
100 | |||
101 | if (!hpdp) | ||
102 | return NULL; | ||
103 | |||
104 | if (shift) | ||
105 | *shift = hugepd_shift(*hpdp); | ||
106 | return hugepte_offset(hpdp, ea, pdshift); | ||
107 | } | ||
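find_linux_pte_or_hugepte() bottoms out at whatever level holds a real PTE and reports the mapping's page shift through *shift (zero for a normal PTE). Callers branch on that shift, as follow_huge_addr() does later in this patch; a sketch of the calling pattern (is_huge_mapping() is a hypothetical helper, not kernel API):

/* Hypothetical caller, mirroring the pattern in follow_huge_addr() below. */
static int is_huge_mapping(struct mm_struct *mm, unsigned long ea,
			   unsigned long *size)
{
	unsigned shift;
	pte_t *ptep = find_linux_pte_or_hugepte(mm->pgd, ea, &shift);

	if (!ptep || pte_none(*ptep))
		return 0;
	if (shift)
		*size = 1UL << shift;	/* huge page size in bytes */
	return shift != 0;
}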
108 | |||
109 | pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) | ||
110 | { | ||
111 | return find_linux_pte_or_hugepte(mm->pgd, addr, NULL); | ||
112 | } | ||
113 | |||
114 | static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, | 114 | static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, |
115 | unsigned long address, unsigned int psize) | 115 | unsigned long address, unsigned pdshift, unsigned pshift) |
116 | { | 116 | { |
117 | pte_t *new = kmem_cache_zalloc(pgtable_cache[HUGE_PGTABLE_INDEX(psize)], | 117 | pte_t *new = kmem_cache_zalloc(PGT_CACHE(pdshift - pshift), |
118 | GFP_KERNEL|__GFP_REPEAT); | 118 | GFP_KERNEL|__GFP_REPEAT); |
119 | |||
120 | BUG_ON(pshift > HUGEPD_SHIFT_MASK); | ||
121 | BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK); | ||
119 | 122 | ||
120 | if (! new) | 123 | if (! new) |
121 | return -ENOMEM; | 124 | return -ENOMEM; |
122 | 125 | ||
123 | spin_lock(&mm->page_table_lock); | 126 | spin_lock(&mm->page_table_lock); |
124 | if (!hugepd_none(*hpdp)) | 127 | if (!hugepd_none(*hpdp)) |
125 | kmem_cache_free(pgtable_cache[HUGE_PGTABLE_INDEX(psize)], new); | 128 | kmem_cache_free(PGT_CACHE(pdshift - pshift), new); |
126 | else | 129 | else |
127 | hpdp->pd = (unsigned long)new | HUGEPD_OK; | 130 | hpdp->pd = ((unsigned long)new & ~0x8000000000000000) | pshift; |
128 | spin_unlock(&mm->page_table_lock); | 131 | spin_unlock(&mm->page_table_lock); |
129 | return 0; | 132 | return 0; |
130 | } | 133 | } |
131 | 134 | ||
132 | 135 | pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz) | |
133 | static pud_t *hpud_offset(pgd_t *pgd, unsigned long addr, struct hstate *hstate) | ||
134 | { | ||
135 | if (huge_page_shift(hstate) < PUD_SHIFT) | ||
136 | return pud_offset(pgd, addr); | ||
137 | else | ||
138 | return (pud_t *) pgd; | ||
139 | } | ||
140 | static pud_t *hpud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long addr, | ||
141 | struct hstate *hstate) | ||
142 | { | ||
143 | if (huge_page_shift(hstate) < PUD_SHIFT) | ||
144 | return pud_alloc(mm, pgd, addr); | ||
145 | else | ||
146 | return (pud_t *) pgd; | ||
147 | } | ||
148 | static pmd_t *hpmd_offset(pud_t *pud, unsigned long addr, struct hstate *hstate) | ||
149 | { | 136 | { |
150 | if (huge_page_shift(hstate) < PMD_SHIFT) | 137 | pgd_t *pg; |
151 | return pmd_offset(pud, addr); | 138 | pud_t *pu; |
152 | else | 139 | pmd_t *pm; |
153 | return (pmd_t *) pud; | 140 | hugepd_t *hpdp = NULL; |
154 | } | 141 | unsigned pshift = __ffs(sz); |
155 | static pmd_t *hpmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr, | 142 | unsigned pdshift = PGDIR_SHIFT; |
156 | struct hstate *hstate) | 143 | |
157 | { | 144 | addr &= ~(sz-1); |
158 | if (huge_page_shift(hstate) < PMD_SHIFT) | 145 | |
159 | return pmd_alloc(mm, pud, addr); | 146 | pg = pgd_offset(mm, addr); |
160 | else | 147 | if (pshift >= PUD_SHIFT) { |
161 | return (pmd_t *) pud; | 148 | hpdp = (hugepd_t *)pg; |
149 | } else { | ||
150 | pdshift = PUD_SHIFT; | ||
151 | pu = pud_alloc(mm, pg, addr); | ||
152 | if (pshift >= PMD_SHIFT) { | ||
153 | hpdp = (hugepd_t *)pu; | ||
154 | } else { | ||
155 | pdshift = PMD_SHIFT; | ||
156 | pm = pmd_alloc(mm, pu, addr); | ||
157 | hpdp = (hugepd_t *)pm; | ||
158 | } | ||
159 | } | ||
160 | |||
161 | if (!hpdp) | ||
162 | return NULL; | ||
163 | |||
164 | BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp)); | ||
165 | |||
166 | if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift)) | ||
167 | return NULL; | ||
168 | |||
169 | return hugepte_offset(hpdp, addr, pdshift); | ||
162 | } | 170 | } |
163 | 171 | ||
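huge_pte_alloc() is now the one place that decides which pagetable level a given huge page size lives at: a page shift at or above PUD_SHIFT puts the hugepd in a PGD entry, one at or above PMD_SHIFT in a PUD entry, and anything smaller in a PMD entry; the hugepte table hanging off it then holds 2^(pdshift - pshift) PTEs. A standalone sketch of that decision with illustrative shift values (the real PMD/PUD/PGDIR shifts depend on the base page size):

#include <stdio.h>

/* Assumed values for a 4K base page; illustration only. */
#define DEMO_PMD_SHIFT   21
#define DEMO_PUD_SHIFT   30
#define DEMO_PGDIR_SHIFT 39

static unsigned demo_pdshift(unsigned pshift)
{
	if (pshift >= DEMO_PUD_SHIFT)
		return DEMO_PGDIR_SHIFT;	/* hugepd in a PGD entry */
	if (pshift >= DEMO_PMD_SHIFT)
		return DEMO_PUD_SHIFT;		/* hugepd in a PUD entry */
	return DEMO_PMD_SHIFT;			/* hugepd in a PMD entry */
}

int main(void)
{
	unsigned shifts[] = { 16, 24, 34 };	/* 64K, 16M, 16G */
	int i;

	for (i = 0; i < 3; i++) {
		unsigned pdshift = demo_pdshift(shifts[i]);

		printf("page shift %2u -> pdshift %2u, %lu hugeptes per table\n",
		       shifts[i], pdshift, 1UL << (pdshift - shifts[i]));
	}
	return 0;
}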
164 | /* Build list of addresses of gigantic pages. This function is used in early | 172 | /* Build list of addresses of gigantic pages. This function is used in early |
@@ -192,94 +200,38 @@ int alloc_bootmem_huge_page(struct hstate *hstate) | |||
192 | return 1; | 200 | return 1; |
193 | } | 201 | } |
194 | 202 | ||
195 | |||
196 | /* Modelled after find_linux_pte() */ | ||
197 | pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) | ||
198 | { | ||
199 | pgd_t *pg; | ||
200 | pud_t *pu; | ||
201 | pmd_t *pm; | ||
202 | |||
203 | unsigned int psize; | ||
204 | unsigned int shift; | ||
205 | unsigned long sz; | ||
206 | struct hstate *hstate; | ||
207 | psize = get_slice_psize(mm, addr); | ||
208 | shift = mmu_psize_to_shift(psize); | ||
209 | sz = ((1UL) << shift); | ||
210 | hstate = size_to_hstate(sz); | ||
211 | |||
212 | addr &= hstate->mask; | ||
213 | |||
214 | pg = pgd_offset(mm, addr); | ||
215 | if (!pgd_none(*pg)) { | ||
216 | pu = hpud_offset(pg, addr, hstate); | ||
217 | if (!pud_none(*pu)) { | ||
218 | pm = hpmd_offset(pu, addr, hstate); | ||
219 | if (!pmd_none(*pm)) | ||
220 | return hugepte_offset((hugepd_t *)pm, addr, | ||
221 | hstate); | ||
222 | } | ||
223 | } | ||
224 | |||
225 | return NULL; | ||
226 | } | ||
227 | |||
228 | pte_t *huge_pte_alloc(struct mm_struct *mm, | ||
229 | unsigned long addr, unsigned long sz) | ||
230 | { | ||
231 | pgd_t *pg; | ||
232 | pud_t *pu; | ||
233 | pmd_t *pm; | ||
234 | hugepd_t *hpdp = NULL; | ||
235 | struct hstate *hstate; | ||
236 | unsigned int psize; | ||
237 | hstate = size_to_hstate(sz); | ||
238 | |||
239 | psize = get_slice_psize(mm, addr); | ||
240 | BUG_ON(!mmu_huge_psizes[psize]); | ||
241 | |||
242 | addr &= hstate->mask; | ||
243 | |||
244 | pg = pgd_offset(mm, addr); | ||
245 | pu = hpud_alloc(mm, pg, addr, hstate); | ||
246 | |||
247 | if (pu) { | ||
248 | pm = hpmd_alloc(mm, pu, addr, hstate); | ||
249 | if (pm) | ||
250 | hpdp = (hugepd_t *)pm; | ||
251 | } | ||
252 | |||
253 | if (! hpdp) | ||
254 | return NULL; | ||
255 | |||
256 | if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, psize)) | ||
257 | return NULL; | ||
258 | |||
259 | return hugepte_offset(hpdp, addr, hstate); | ||
260 | } | ||
261 | |||
262 | int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) | 203 | int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) |
263 | { | 204 | { |
264 | return 0; | 205 | return 0; |
265 | } | 206 | } |
266 | 207 | ||
267 | static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp, | 208 | static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift, |
268 | unsigned int psize) | 209 | unsigned long start, unsigned long end, |
210 | unsigned long floor, unsigned long ceiling) | ||
269 | { | 211 | { |
270 | pte_t *hugepte = hugepd_page(*hpdp); | 212 | pte_t *hugepte = hugepd_page(*hpdp); |
213 | unsigned shift = hugepd_shift(*hpdp); | ||
214 | unsigned long pdmask = ~((1UL << pdshift) - 1); | ||
215 | |||
216 | start &= pdmask; | ||
217 | if (start < floor) | ||
218 | return; | ||
219 | if (ceiling) { | ||
220 | ceiling &= pdmask; | ||
221 | if (! ceiling) | ||
222 | return; | ||
223 | } | ||
224 | if (end - 1 > ceiling - 1) | ||
225 | return; | ||
271 | 226 | ||
272 | hpdp->pd = 0; | 227 | hpdp->pd = 0; |
273 | tlb->need_flush = 1; | 228 | tlb->need_flush = 1; |
274 | pgtable_free_tlb(tlb, pgtable_free_cache(hugepte, | 229 | pgtable_free_tlb(tlb, hugepte, pdshift - shift); |
275 | HUGEPTE_CACHE_NUM+psize-1, | ||
276 | PGF_CACHENUM_MASK)); | ||
277 | } | 230 | } |
278 | 231 | ||
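The floor/ceiling tests at the top of free_hugepd_range() are the same guards the generic free_pmd_range() uses: the hugepte table is only torn down when the whole span covered by its pagetable entry lies inside [floor, ceiling), i.e. when no neighbouring mapping can still be using it. A standalone sketch of just that guard, with an assumed pdshift of 30 (a 1G span per entry):

#include <stdbool.h>
#include <stdio.h>

static bool demo_can_free(unsigned long start, unsigned long end,
			  unsigned long floor, unsigned long ceiling,
			  unsigned pdshift)
{
	unsigned long pdmask = ~((1UL << pdshift) - 1);

	start &= pdmask;
	if (start < floor)
		return false;		/* entry also maps below floor */
	if (ceiling) {
		ceiling &= pdmask;
		if (!ceiling)
			return false;
	}
	if (end - 1 > ceiling - 1)
		return false;		/* entry extends past ceiling */
	return true;
}

int main(void)
{
	unsigned long G = 1UL << 30;

	/* Unmapping all of [1G, 2G): the table can go. */
	printf("%d\n", demo_can_free(G, 2 * G, G, 2 * G, 30));		/* 1 */
	/* Unmapping [1G, 1.5G) with a live neighbour above: keep it. */
	printf("%d\n", demo_can_free(G, G + G / 2, G, G + G / 2, 30));	/* 0 */
	return 0;
}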
279 | static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, | 232 | static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, |
280 | unsigned long addr, unsigned long end, | 233 | unsigned long addr, unsigned long end, |
281 | unsigned long floor, unsigned long ceiling, | 234 | unsigned long floor, unsigned long ceiling) |
282 | unsigned int psize) | ||
283 | { | 235 | { |
284 | pmd_t *pmd; | 236 | pmd_t *pmd; |
285 | unsigned long next; | 237 | unsigned long next; |
@@ -291,7 +243,8 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, | |||
291 | next = pmd_addr_end(addr, end); | 243 | next = pmd_addr_end(addr, end); |
292 | if (pmd_none(*pmd)) | 244 | if (pmd_none(*pmd)) |
293 | continue; | 245 | continue; |
294 | free_hugepte_range(tlb, (hugepd_t *)pmd, psize); | 246 | free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT, |
247 | addr, next, floor, ceiling); | ||
295 | } while (pmd++, addr = next, addr != end); | 248 | } while (pmd++, addr = next, addr != end); |
296 | 249 | ||
297 | start &= PUD_MASK; | 250 | start &= PUD_MASK; |
@@ -317,23 +270,19 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, | |||
317 | pud_t *pud; | 270 | pud_t *pud; |
318 | unsigned long next; | 271 | unsigned long next; |
319 | unsigned long start; | 272 | unsigned long start; |
320 | unsigned int shift; | ||
321 | unsigned int psize = get_slice_psize(tlb->mm, addr); | ||
322 | shift = mmu_psize_to_shift(psize); | ||
323 | 273 | ||
324 | start = addr; | 274 | start = addr; |
325 | pud = pud_offset(pgd, addr); | 275 | pud = pud_offset(pgd, addr); |
326 | do { | 276 | do { |
327 | next = pud_addr_end(addr, end); | 277 | next = pud_addr_end(addr, end); |
328 | if (shift < PMD_SHIFT) { | 278 | if (!is_hugepd(pud)) { |
329 | if (pud_none_or_clear_bad(pud)) | 279 | if (pud_none_or_clear_bad(pud)) |
330 | continue; | 280 | continue; |
331 | hugetlb_free_pmd_range(tlb, pud, addr, next, floor, | 281 | hugetlb_free_pmd_range(tlb, pud, addr, next, floor, |
332 | ceiling, psize); | 282 | ceiling); |
333 | } else { | 283 | } else { |
334 | if (pud_none(*pud)) | 284 | free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT, |
335 | continue; | 285 | addr, next, floor, ceiling); |
336 | free_hugepte_range(tlb, (hugepd_t *)pud, psize); | ||
337 | } | 286 | } |
338 | } while (pud++, addr = next, addr != end); | 287 | } while (pud++, addr = next, addr != end); |
339 | 288 | ||
@@ -364,121 +313,56 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb, | |||
364 | { | 313 | { |
365 | pgd_t *pgd; | 314 | pgd_t *pgd; |
366 | unsigned long next; | 315 | unsigned long next; |
367 | unsigned long start; | ||
368 | 316 | ||
369 | /* | 317 | /* |
370 | * Comments below take from the normal free_pgd_range(). They | 318 | * Because there are a number of different possible pagetable |
371 | * apply here too. The tests against HUGEPD_MASK below are | 319 | * layouts for hugepage ranges, we limit knowledge of how |
372 | * essential, because we *don't* test for this at the bottom | 320 | * things should be laid out to the allocation path |
373 | * level. Without them we'll attempt to free a hugepte table | 321 | * (huge_pte_alloc(), above). Everything else works out the |
374 | * when we unmap just part of it, even if there are other | 322 | * structure as it goes from information in the hugepd |
375 | * active mappings using it. | 323 | * pointers. That means that we can't here use the |
376 | * | 324 | * optimization used in the normal page free_pgd_range(), of |
377 | * The next few lines have given us lots of grief... | 325 | * checking whether we're actually covering a large enough |
378 | * | 326 | * range to have to do anything at the top level of the walk |
379 | * Why are we testing HUGEPD* at this top level? Because | 327 | * instead of at the bottom. |
380 | * often there will be no work to do at all, and we'd prefer | ||
381 | * not to go all the way down to the bottom just to discover | ||
382 | * that. | ||
383 | * | 328 | * |
384 | * Why all these "- 1"s? Because 0 represents both the bottom | 329 | * To make sense of this, you should probably go read the big |
385 | * of the address space and the top of it (using -1 for the | 330 | * block comment at the top of the normal free_pgd_range(), |
386 | * top wouldn't help much: the masks would do the wrong thing). | 331 | * too. |
387 | * The rule is that addr 0 and floor 0 refer to the bottom of | ||
388 | * the address space, but end 0 and ceiling 0 refer to the top | ||
389 | * Comparisons need to use "end - 1" and "ceiling - 1" (though | ||
390 | * that end 0 case should be mythical). | ||
391 | * | ||
392 | * Wherever addr is brought up or ceiling brought down, we | ||
393 | * must be careful to reject "the opposite 0" before it | ||
394 | * confuses the subsequent tests. But what about where end is | ||
395 | * brought down by HUGEPD_SIZE below? no, end can't go down to | ||
396 | * 0 there. | ||
397 | * | ||
398 | * Whereas we round start (addr) and ceiling down, by different | ||
399 | * masks at different levels, in order to test whether a table | ||
400 | * now has no other vmas using it, so can be freed, we don't | ||
401 | * bother to round floor or end up - the tests don't need that. | ||
402 | */ | 332 | */ |
403 | unsigned int psize = get_slice_psize(tlb->mm, addr); | ||
404 | |||
405 | addr &= HUGEPD_MASK(psize); | ||
406 | if (addr < floor) { | ||
407 | addr += HUGEPD_SIZE(psize); | ||
408 | if (!addr) | ||
409 | return; | ||
410 | } | ||
411 | if (ceiling) { | ||
412 | ceiling &= HUGEPD_MASK(psize); | ||
413 | if (!ceiling) | ||
414 | return; | ||
415 | } | ||
416 | if (end - 1 > ceiling - 1) | ||
417 | end -= HUGEPD_SIZE(psize); | ||
418 | if (addr > end - 1) | ||
419 | return; | ||
420 | 333 | ||
421 | start = addr; | ||
422 | pgd = pgd_offset(tlb->mm, addr); | 334 | pgd = pgd_offset(tlb->mm, addr); |
423 | do { | 335 | do { |
424 | psize = get_slice_psize(tlb->mm, addr); | ||
425 | BUG_ON(!mmu_huge_psizes[psize]); | ||
426 | next = pgd_addr_end(addr, end); | 336 | next = pgd_addr_end(addr, end); |
427 | if (mmu_psize_to_shift(psize) < PUD_SHIFT) { | 337 | if (!is_hugepd(pgd)) { |
428 | if (pgd_none_or_clear_bad(pgd)) | 338 | if (pgd_none_or_clear_bad(pgd)) |
429 | continue; | 339 | continue; |
430 | hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling); | 340 | hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling); |
431 | } else { | 341 | } else { |
432 | if (pgd_none(*pgd)) | 342 | free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT, |
433 | continue; | 343 | addr, next, floor, ceiling); |
434 | free_hugepte_range(tlb, (hugepd_t *)pgd, psize); | ||
435 | } | 344 | } |
436 | } while (pgd++, addr = next, addr != end); | 345 | } while (pgd++, addr = next, addr != end); |
437 | } | 346 | } |
438 | 347 | ||
439 | void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, | ||
440 | pte_t *ptep, pte_t pte) | ||
441 | { | ||
442 | if (pte_present(*ptep)) { | ||
443 | /* We open-code pte_clear because we need to pass the right | ||
444 | * argument to hpte_need_flush (huge / !huge). Might not be | ||
445 | * necessary anymore if we make hpte_need_flush() get the | ||
446 | * page size from the slices | ||
447 | */ | ||
448 | unsigned int psize = get_slice_psize(mm, addr); | ||
449 | unsigned int shift = mmu_psize_to_shift(psize); | ||
450 | unsigned long sz = ((1UL) << shift); | ||
451 | struct hstate *hstate = size_to_hstate(sz); | ||
452 | pte_update(mm, addr & hstate->mask, ptep, ~0UL, 1); | ||
453 | } | ||
454 | *ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS); | ||
455 | } | ||
456 | |||
457 | pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, | ||
458 | pte_t *ptep) | ||
459 | { | ||
460 | unsigned long old = pte_update(mm, addr, ptep, ~0UL, 1); | ||
461 | return __pte(old); | ||
462 | } | ||
463 | |||
464 | struct page * | 348 | struct page * |
465 | follow_huge_addr(struct mm_struct *mm, unsigned long address, int write) | 349 | follow_huge_addr(struct mm_struct *mm, unsigned long address, int write) |
466 | { | 350 | { |
467 | pte_t *ptep; | 351 | pte_t *ptep; |
468 | struct page *page; | 352 | struct page *page; |
469 | unsigned int mmu_psize = get_slice_psize(mm, address); | 353 | unsigned shift; |
354 | unsigned long mask; | ||
355 | |||
356 | ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift); | ||
470 | 357 | ||
471 | /* Verify it is a huge page else bail. */ | 358 | /* Verify it is a huge page else bail. */ |
472 | if (!mmu_huge_psizes[mmu_psize]) | 359 | if (!ptep || !shift) |
473 | return ERR_PTR(-EINVAL); | 360 | return ERR_PTR(-EINVAL); |
474 | 361 | ||
475 | ptep = huge_pte_offset(mm, address); | 362 | mask = (1UL << shift) - 1; |
476 | page = pte_page(*ptep); | 363 | page = pte_page(*ptep); |
477 | if (page) { | 364 | if (page) |
478 | unsigned int shift = mmu_psize_to_shift(mmu_psize); | 365 | page += (address & mask) / PAGE_SIZE; |
479 | unsigned long sz = ((1UL) << shift); | ||
480 | page += (address % sz) / PAGE_SIZE; | ||
481 | } | ||
482 | 366 | ||
483 | return page; | 367 | return page; |
484 | } | 368 | } |
@@ -501,6 +385,82 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address, | |||
501 | return NULL; | 385 | return NULL; |
502 | } | 386 | } |
503 | 387 | ||
388 | static noinline int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, | ||
389 | unsigned long end, int write, struct page **pages, int *nr) | ||
390 | { | ||
391 | unsigned long mask; | ||
392 | unsigned long pte_end; | ||
393 | struct page *head, *page; | ||
394 | pte_t pte; | ||
395 | int refs; | ||
396 | |||
397 | pte_end = (addr + sz) & ~(sz-1); | ||
398 | if (pte_end < end) | ||
399 | end = pte_end; | ||
400 | |||
401 | pte = *ptep; | ||
402 | mask = _PAGE_PRESENT | _PAGE_USER; | ||
403 | if (write) | ||
404 | mask |= _PAGE_RW; | ||
405 | |||
406 | if ((pte_val(pte) & mask) != mask) | ||
407 | return 0; | ||
408 | |||
409 | /* hugepages are never "special" */ | ||
410 | VM_BUG_ON(!pfn_valid(pte_pfn(pte))); | ||
411 | |||
412 | refs = 0; | ||
413 | head = pte_page(pte); | ||
414 | |||
415 | page = head + ((addr & (sz-1)) >> PAGE_SHIFT); | ||
416 | do { | ||
417 | VM_BUG_ON(compound_head(page) != head); | ||
418 | pages[*nr] = page; | ||
419 | (*nr)++; | ||
420 | page++; | ||
421 | refs++; | ||
422 | } while (addr += PAGE_SIZE, addr != end); | ||
423 | |||
424 | if (!page_cache_add_speculative(head, refs)) { | ||
425 | *nr -= refs; | ||
426 | return 0; | ||
427 | } | ||
428 | |||
429 | if (unlikely(pte_val(pte) != pte_val(*ptep))) { | ||
430 | /* Could be optimized better */ | ||
431 | while (*nr) { | ||
432 | put_page(page); | ||
433 | (*nr)--; | ||
434 | } | ||
435 | } | ||
436 | |||
437 | return 1; | ||
438 | } | ||
439 | |||
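gup_hugepte() follows the classic lockless get_user_pages_fast() recipe: snapshot the PTE, check permissions against the snapshot, take speculative references with page_cache_add_speculative(), then re-read the PTE and back the references out if it changed underneath. A userspace analogy of that snapshot/work/re-check ordering (all names illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic unsigned long pte_word;	/* stand-in for *ptep */

static bool speculative_grab(void)
{
	unsigned long snap = atomic_load(&pte_word);	/* 1: snapshot */

	if (!(snap & 1))	/* "present" bit, checked on the snapshot */
		return false;

	/* 2: take references based on the snapshot ... */

	if (atomic_load(&pte_word) != snap) {		/* 3: re-check */
		/* ... and undo them: the entry changed while we worked */
		return false;
	}
	return true;
}

int main(void)
{
	atomic_store(&pte_word, 0x1000UL | 1);
	printf("grabbed: %d\n", speculative_grab());	/* 1 */
	return 0;
}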
440 | static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end, | ||
441 | unsigned long sz) | ||
442 | { | ||
443 | unsigned long __boundary = (addr + sz) & ~(sz-1); | ||
444 | return (__boundary - 1 < end - 1) ? __boundary : end; | ||
445 | } | ||
446 | |||
447 | int gup_hugepd(hugepd_t *hugepd, unsigned pdshift, | ||
448 | unsigned long addr, unsigned long end, | ||
449 | int write, struct page **pages, int *nr) | ||
450 | { | ||
451 | pte_t *ptep; | ||
452 | unsigned long sz = 1UL << hugepd_shift(*hugepd); | ||
453 | unsigned long next; | ||
454 | |||
455 | ptep = hugepte_offset(hugepd, addr, pdshift); | ||
456 | do { | ||
457 | next = hugepte_addr_end(addr, end, sz); | ||
458 | if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr)) | ||
459 | return 0; | ||
460 | } while (ptep++, addr = next, addr != end); | ||
461 | |||
462 | return 1; | ||
463 | } | ||
504 | 464 | ||
505 | unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, | 465 | unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, |
506 | unsigned long len, unsigned long pgoff, | 466 | unsigned long len, unsigned long pgoff, |
@@ -509,8 +469,6 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, | |||
509 | struct hstate *hstate = hstate_file(file); | 469 | struct hstate *hstate = hstate_file(file); |
510 | int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate)); | 470 | int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate)); |
511 | 471 | ||
512 | if (!mmu_huge_psizes[mmu_psize]) | ||
513 | return -EINVAL; | ||
514 | return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0); | 472 | return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0); |
515 | } | 473 | } |
516 | 474 | ||
@@ -521,229 +479,46 @@ unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) | |||
521 | return 1UL << mmu_psize_to_shift(psize); | 479 | return 1UL << mmu_psize_to_shift(psize); |
522 | } | 480 | } |
523 | 481 | ||
524 | /* | 482 | static int __init add_huge_page_size(unsigned long long size) |
525 | * Called by asm hashtable.S for doing lazy icache flush | ||
526 | */ | ||
527 | static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags, | ||
528 | pte_t pte, int trap, unsigned long sz) | ||
529 | { | 483 | { |
530 | struct page *page; | 484 | int shift = __ffs(size); |
531 | int i; | 485 | int mmu_psize; |
532 | |||
533 | if (!pfn_valid(pte_pfn(pte))) | ||
534 | return rflags; | ||
535 | |||
536 | page = pte_page(pte); | ||
537 | |||
538 | /* page is dirty */ | ||
539 | if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) { | ||
540 | if (trap == 0x400) { | ||
541 | for (i = 0; i < (sz / PAGE_SIZE); i++) | ||
542 | __flush_dcache_icache(page_address(page+i)); | ||
543 | set_bit(PG_arch_1, &page->flags); | ||
544 | } else { | ||
545 | rflags |= HPTE_R_N; | ||
546 | } | ||
547 | } | ||
548 | return rflags; | ||
549 | } | ||
550 | 486 | ||
551 | int hash_huge_page(struct mm_struct *mm, unsigned long access, | 487 | /* Check that it is a page size supported by the hardware and |
552 | unsigned long ea, unsigned long vsid, int local, | 488 | * that it fits within pagetable and slice limits. */ |
553 | unsigned long trap) | 489 | if (!is_power_of_2(size) |
554 | { | 490 | || (shift > SLICE_HIGH_SHIFT) || (shift <= PAGE_SHIFT)) |
555 | pte_t *ptep; | 491 | return -EINVAL; |
556 | unsigned long old_pte, new_pte; | ||
557 | unsigned long va, rflags, pa, sz; | ||
558 | long slot; | ||
559 | int err = 1; | ||
560 | int ssize = user_segment_size(ea); | ||
561 | unsigned int mmu_psize; | ||
562 | int shift; | ||
563 | mmu_psize = get_slice_psize(mm, ea); | ||
564 | |||
565 | if (!mmu_huge_psizes[mmu_psize]) | ||
566 | goto out; | ||
567 | ptep = huge_pte_offset(mm, ea); | ||
568 | |||
569 | /* Search the Linux page table for a match with va */ | ||
570 | va = hpt_va(ea, vsid, ssize); | ||
571 | 492 | ||
572 | /* | 493 | if ((mmu_psize = shift_to_mmu_psize(shift)) < 0) |
573 | * If no pte found or not present, send the problem up to | 494 | return -EINVAL; |
574 | * do_page_fault | ||
575 | */ | ||
576 | if (unlikely(!ptep || pte_none(*ptep))) | ||
577 | goto out; | ||
578 | 495 | ||
579 | /* | 496 | #ifdef CONFIG_SPU_FS_64K_LS |
580 | * Check the user's access rights to the page. If access should be | 497 | /* Disable support for 64K huge pages when 64K SPU local store |
581 | * prevented then send the problem up to do_page_fault. | 498 | * support is enabled as the current implementation conflicts. |
582 | */ | ||
583 | if (unlikely(access & ~pte_val(*ptep))) | ||
584 | goto out; | ||
585 | /* | ||
586 | * At this point, we have a pte (old_pte) which can be used to build | ||
587 | * or update an HPTE. There are 2 cases: | ||
588 | * | ||
589 | * 1. There is a valid (present) pte with no associated HPTE (this is | ||
590 | * the most common case) | ||
591 | * 2. There is a valid (present) pte with an associated HPTE. The | ||
592 | * current values of the pp bits in the HPTE prevent access | ||
593 | * because we are doing software DIRTY bit management and the | ||
594 | * page is currently not DIRTY. | ||
595 | */ | 499 | */ |
500 | if (shift == PAGE_SHIFT_64K) | ||
501 | return -EINVAL; | ||
502 | #endif /* CONFIG_SPU_FS_64K_LS */ | ||
596 | 503 | ||
504 | BUG_ON(mmu_psize_defs[mmu_psize].shift != shift); | ||
597 | 505 | ||
598 | do { | 506 | /* Return if huge page size has already been setup */ |
599 | old_pte = pte_val(*ptep); | 507 | if (size_to_hstate(size)) |
600 | if (old_pte & _PAGE_BUSY) | 508 | return 0; |
601 | goto out; | ||
602 | new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED; | ||
603 | } while(old_pte != __cmpxchg_u64((unsigned long *)ptep, | ||
604 | old_pte, new_pte)); | ||
605 | |||
606 | rflags = 0x2 | (!(new_pte & _PAGE_RW)); | ||
607 | /* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */ | ||
608 | rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N); | ||
609 | shift = mmu_psize_to_shift(mmu_psize); | ||
610 | sz = ((1UL) << shift); | ||
611 | if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) | ||
612 | /* No CPU has hugepages but lacks no execute, so we | ||
613 | * don't need to worry about that case */ | ||
614 | rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte), | ||
615 | trap, sz); | ||
616 | |||
617 | /* Check if pte already has an hpte (case 2) */ | ||
618 | if (unlikely(old_pte & _PAGE_HASHPTE)) { | ||
619 | /* There MIGHT be an HPTE for this pte */ | ||
620 | unsigned long hash, slot; | ||
621 | |||
622 | hash = hpt_hash(va, shift, ssize); | ||
623 | if (old_pte & _PAGE_F_SECOND) | ||
624 | hash = ~hash; | ||
625 | slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; | ||
626 | slot += (old_pte & _PAGE_F_GIX) >> 12; | ||
627 | |||
628 | if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_psize, | ||
629 | ssize, local) == -1) | ||
630 | old_pte &= ~_PAGE_HPTEFLAGS; | ||
631 | } | ||
632 | |||
633 | if (likely(!(old_pte & _PAGE_HASHPTE))) { | ||
634 | unsigned long hash = hpt_hash(va, shift, ssize); | ||
635 | unsigned long hpte_group; | ||
636 | |||
637 | pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT; | ||
638 | |||
639 | repeat: | ||
640 | hpte_group = ((hash & htab_hash_mask) * | ||
641 | HPTES_PER_GROUP) & ~0x7UL; | ||
642 | |||
643 | /* clear HPTE slot information in new PTE */ | ||
644 | #ifdef CONFIG_PPC_64K_PAGES | ||
645 | new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HPTE_SUB0; | ||
646 | #else | ||
647 | new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE; | ||
648 | #endif | ||
649 | /* Add in WIMG bits */ | ||
650 | rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE | | ||
651 | _PAGE_COHERENT | _PAGE_GUARDED)); | ||
652 | |||
653 | /* Insert into the hash table, primary slot */ | ||
654 | slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0, | ||
655 | mmu_psize, ssize); | ||
656 | |||
657 | /* Primary is full, try the secondary */ | ||
658 | if (unlikely(slot == -1)) { | ||
659 | hpte_group = ((~hash & htab_hash_mask) * | ||
660 | HPTES_PER_GROUP) & ~0x7UL; | ||
661 | slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, | ||
662 | HPTE_V_SECONDARY, | ||
663 | mmu_psize, ssize); | ||
664 | if (slot == -1) { | ||
665 | if (mftb() & 0x1) | ||
666 | hpte_group = ((hash & htab_hash_mask) * | ||
667 | HPTES_PER_GROUP)&~0x7UL; | ||
668 | |||
669 | ppc_md.hpte_remove(hpte_group); | ||
670 | goto repeat; | ||
671 | } | ||
672 | } | ||
673 | |||
674 | if (unlikely(slot == -2)) | ||
675 | panic("hash_huge_page: pte_insert failed\n"); | ||
676 | |||
677 | new_pte |= (slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX); | ||
678 | } | ||
679 | |||
680 | /* | ||
681 | * No need to use ldarx/stdcx here | ||
682 | */ | ||
683 | *ptep = __pte(new_pte & ~_PAGE_BUSY); | ||
684 | |||
685 | err = 0; | ||
686 | 509 | ||
687 | out: | 510 | hugetlb_add_hstate(shift - PAGE_SHIFT); |
688 | return err; | ||
689 | } | ||
690 | 511 | ||
691 | static void __init set_huge_psize(int psize) | 512 | return 0; |
692 | { | ||
693 | /* Check that it is a page size supported by the hardware and | ||
694 | * that it fits within pagetable limits. */ | ||
695 | if (mmu_psize_defs[psize].shift && | ||
696 | mmu_psize_defs[psize].shift < SID_SHIFT_1T && | ||
697 | (mmu_psize_defs[psize].shift > MIN_HUGEPTE_SHIFT || | ||
698 | mmu_psize_defs[psize].shift == PAGE_SHIFT_64K || | ||
699 | mmu_psize_defs[psize].shift == PAGE_SHIFT_16G)) { | ||
700 | /* Return if huge page size has already been setup or is the | ||
701 | * same as the base page size. */ | ||
702 | if (mmu_huge_psizes[psize] || | ||
703 | mmu_psize_defs[psize].shift == PAGE_SHIFT) | ||
704 | return; | ||
705 | if (WARN_ON(HUGEPTE_CACHE_NAME(psize) == NULL)) | ||
706 | return; | ||
707 | hugetlb_add_hstate(mmu_psize_defs[psize].shift - PAGE_SHIFT); | ||
708 | |||
709 | switch (mmu_psize_defs[psize].shift) { | ||
710 | case PAGE_SHIFT_64K: | ||
711 | /* We only allow 64k hpages with 4k base page, | ||
712 | * which was checked above, and always put them | ||
713 | * at the PMD */ | ||
714 | hugepte_shift[psize] = PMD_SHIFT; | ||
715 | break; | ||
716 | case PAGE_SHIFT_16M: | ||
717 | /* 16M pages can be at two different levels | ||
718 | * of pagestables based on base page size */ | ||
719 | if (PAGE_SHIFT == PAGE_SHIFT_64K) | ||
720 | hugepte_shift[psize] = PMD_SHIFT; | ||
721 | else /* 4k base page */ | ||
722 | hugepte_shift[psize] = PUD_SHIFT; | ||
723 | break; | ||
724 | case PAGE_SHIFT_16G: | ||
725 | /* 16G pages are always at PGD level */ | ||
726 | hugepte_shift[psize] = PGDIR_SHIFT; | ||
727 | break; | ||
728 | } | ||
729 | hugepte_shift[psize] -= mmu_psize_defs[psize].shift; | ||
730 | } else | ||
731 | hugepte_shift[psize] = 0; | ||
732 | } | 513 | } |
733 | 514 | ||
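add_huge_page_size() gates registration on three checks: the size must be a power of two, its shift must exceed the base page shift, and it must fit under the slice limit; only then is an hstate added. hugepage_setup_sz() below feeds it from the hugepagesz= boot parameter (e.g. hugepagesz=16M hugepages=8). A standalone sketch of that validation, with assumed PAGE_SHIFT and SLICE_HIGH_SHIFT values:

#include <stdbool.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT       12	/* assumed 4K base page */
#define DEMO_SLICE_HIGH_SHIFT 40	/* assumed slice limit */

static bool demo_size_ok(unsigned long long size)
{
	int shift = __builtin_ffsll(size) - 1;	/* lowest set bit, like __ffs() */

	if (size == 0 || (size & (size - 1)))	/* must be a power of two */
		return false;
	return shift > DEMO_PAGE_SHIFT && shift <= DEMO_SLICE_HIGH_SHIFT;
}

int main(void)
{
	printf("16M: %d\n", demo_size_ok(16ULL << 20));	/* 1 */
	printf("15M: %d\n", demo_size_ok(15ULL << 20));	/* 0: not a power of two */
	printf("4K:  %d\n", demo_size_ok(4ULL << 10));	/* 0: base page size */
	return 0;
}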
734 | static int __init hugepage_setup_sz(char *str) | 515 | static int __init hugepage_setup_sz(char *str) |
735 | { | 516 | { |
736 | unsigned long long size; | 517 | unsigned long long size; |
737 | int mmu_psize; | ||
738 | int shift; | ||
739 | 518 | ||
740 | size = memparse(str, &str); | 519 | size = memparse(str, &str); |
741 | 520 | ||
742 | shift = __ffs(size); | 521 | if (add_huge_page_size(size) != 0) |
743 | mmu_psize = shift_to_mmu_psize(shift); | ||
744 | if (mmu_psize >= 0 && mmu_psize_defs[mmu_psize].shift) | ||
745 | set_huge_psize(mmu_psize); | ||
746 | else | ||
747 | printk(KERN_WARNING "Invalid huge page size specified(%llu)\n", size); | 522 | printk(KERN_WARNING "Invalid huge page size specified(%llu)\n", size); |
748 | 523 | ||
749 | return 1; | 524 | return 1; |
@@ -752,41 +527,55 @@ __setup("hugepagesz=", hugepage_setup_sz); | |||
752 | 527 | ||
753 | static int __init hugetlbpage_init(void) | 528 | static int __init hugetlbpage_init(void) |
754 | { | 529 | { |
755 | unsigned int psize; | 530 | int psize; |
756 | 531 | ||
757 | if (!cpu_has_feature(CPU_FTR_16M_PAGE)) | 532 | if (!cpu_has_feature(CPU_FTR_16M_PAGE)) |
758 | return -ENODEV; | 533 | return -ENODEV; |
759 | 534 | ||
760 | /* Add supported huge page sizes. Need to change HUGE_MAX_HSTATE | 535 | for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { |
761 | * and adjust PTE_NONCACHE_NUM if the number of supported huge page | 536 | unsigned shift; |
762 | * sizes changes. | 537 | unsigned pdshift; |
763 | */ | ||
764 | set_huge_psize(MMU_PAGE_16M); | ||
765 | set_huge_psize(MMU_PAGE_16G); | ||
766 | 538 | ||
767 | /* Temporarily disable support for 64K huge pages when 64K SPU local | 539 | if (!mmu_psize_defs[psize].shift) |
768 | * store support is enabled as the current implementation conflicts. | 540 | continue; |
769 | */ | ||
770 | #ifndef CONFIG_SPU_FS_64K_LS | ||
771 | set_huge_psize(MMU_PAGE_64K); | ||
772 | #endif | ||
773 | 541 | ||
774 | for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { | 542 | shift = mmu_psize_to_shift(psize); |
775 | if (mmu_huge_psizes[psize]) { | 543 | |
776 | pgtable_cache[HUGE_PGTABLE_INDEX(psize)] = | 544 | if (add_huge_page_size(1ULL << shift) < 0) |
777 | kmem_cache_create( | 545 | continue; |
778 | HUGEPTE_CACHE_NAME(psize), | 546 | |
779 | HUGEPTE_TABLE_SIZE(psize), | 547 | if (shift < PMD_SHIFT) |
780 | HUGEPTE_TABLE_SIZE(psize), | 548 | pdshift = PMD_SHIFT; |
781 | 0, | 549 | else if (shift < PUD_SHIFT) |
782 | NULL); | 550 | pdshift = PUD_SHIFT; |
783 | if (!pgtable_cache[HUGE_PGTABLE_INDEX(psize)]) | 551 | else |
784 | panic("hugetlbpage_init(): could not create %s"\ | 552 | pdshift = PGDIR_SHIFT; |
785 | "\n", HUGEPTE_CACHE_NAME(psize)); | 553 | |
786 | } | 554 | pgtable_cache_add(pdshift - shift, NULL); |
555 | if (!PGT_CACHE(pdshift - shift)) | ||
556 | panic("hugetlbpage_init(): could not create " | ||
557 | "pgtable cache for %d bit pagesize\n", shift); | ||
787 | } | 558 | } |
788 | 559 | ||
560 | /* Set default large page size. Currently, we pick 16M or 1M | ||
561 | * depending on what is available | ||
562 | */ | ||
563 | if (mmu_psize_defs[MMU_PAGE_16M].shift) | ||
564 | HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift; | ||
565 | else if (mmu_psize_defs[MMU_PAGE_1M].shift) | ||
566 | HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift; | ||
567 | |||
789 | return 0; | 568 | return 0; |
790 | } | 569 | } |
791 | 570 | ||
792 | module_init(hugetlbpage_init); | 571 | module_init(hugetlbpage_init); |
572 | |||
573 | void flush_dcache_icache_hugepage(struct page *page) | ||
574 | { | ||
575 | int i; | ||
576 | |||
577 | BUG_ON(!PageCompound(page)); | ||
578 | |||
579 | for (i = 0; i < (1UL << compound_order(page)); i++) | ||
580 | __flush_dcache_icache(page_address(page+i)); | ||
581 | } | ||
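flush_dcache_icache_hugepage() visits every base page of the compound page, so its loop runs 2^compound_order(page) times, i.e. one iteration per base page. A quick illustration of those loop counts, assuming a 4K base page:

#include <stdio.h>

int main(void)
{
	unsigned page_shift = 12;			/* assumed 4K base page */
	unsigned huge_shifts[] = { 16, 24, 34 };	/* 64K, 16M, 16G */
	int i;

	for (i = 0; i < 3; i++) {
		unsigned order = huge_shifts[i] - page_shift;

		printf("huge shift %2u -> order %2u -> %lu flush iterations\n",
		       huge_shifts[i], order, 1UL << order);
	}
	return 0;
}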