Diffstat (limited to 'arch/powerpc/mm/slice.c')
 -rw-r--r--  arch/powerpc/mm/slice.c | 633
 1 file changed, 633 insertions, 0 deletions
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
new file mode 100644
index 000000000000..f833dba2a028
--- /dev/null
+++ b/arch/powerpc/mm/slice.c
@@ -0,0 +1,633 @@
/*
 * address space "slices" (meta-segments) support
 *
 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Based on hugetlb implementation
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/spu.h>

static spinlock_t slice_convert_lock = SPIN_LOCK_UNLOCKED;


#ifdef DEBUG
int _slice_debug = 1;

static void slice_print_mask(const char *label, struct slice_mask mask)
{
        char *p, buf[16 + 3 + 16 + 1];
        int i;

        if (!_slice_debug)
                return;
        p = buf;
        for (i = 0; i < SLICE_NUM_LOW; i++)
                *(p++) = (mask.low_slices & (1 << i)) ? '1' : '0';
        *(p++) = ' ';
        *(p++) = '-';
        *(p++) = ' ';
        for (i = 0; i < SLICE_NUM_HIGH; i++)
                *(p++) = (mask.high_slices & (1 << i)) ? '1' : '0';
        *(p++) = 0;

        printk(KERN_DEBUG "%s:%s\n", label, buf);
}

#define slice_dbg(fmt...) do { if (_slice_debug) pr_debug(fmt); } while(0)

#else

static void slice_print_mask(const char *label, struct slice_mask mask) {}
#define slice_dbg(fmt...)

#endif

static struct slice_mask slice_range_to_mask(unsigned long start,
                                             unsigned long len)
{
        unsigned long end = start + len - 1;
        struct slice_mask ret = { 0, 0 };

        if (start < SLICE_LOW_TOP) {
                unsigned long mend = min(end, SLICE_LOW_TOP);
                unsigned long mstart = min(start, SLICE_LOW_TOP);

                ret.low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
                        - (1u << GET_LOW_SLICE_INDEX(mstart));
        }

        if ((start + len) > SLICE_LOW_TOP)
                ret.high_slices = (1u << (GET_HIGH_SLICE_INDEX(end) + 1))
                        - (1u << GET_HIGH_SLICE_INDEX(start));

        return ret;
}

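/*
 * Worked example for slice_range_to_mask(), an illustrative sketch that
 * assumes the usual constants this code was written against
 * (SLICE_LOW_SHIFT == 28, i.e. 256MB low slices, SLICE_LOW_TOP == 4GB):
 *
 *      slice_range_to_mask(0x10000000, 0x20000000);
 *      // end = 0x2fffffff, GET_LOW_SLICE_INDEX(mstart) == 1,
 *      // GET_LOW_SLICE_INDEX(mend) == 2, so:
 *      // low_slices  = (1u << 3) - (1u << 1) = 0x6  (slices 1 and 2)
 *      // high_slices = 0, since the range ends below SLICE_LOW_TOP
 */
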
static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
                              unsigned long len)
{
        struct vm_area_struct *vma;

        if ((mm->task_size - len) < addr)
                return 0;
        vma = find_vma(mm, addr);
        return (!vma || (addr + len) <= vma->vm_start);
}

static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
{
        return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
                                   1ul << SLICE_LOW_SHIFT);
}

static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
{
        unsigned long start = slice << SLICE_HIGH_SHIFT;
        unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);

        /* Hack, so that each address is controlled by exactly one
         * of the high or low area bitmaps, the first high area starts
         * at 4GB, not 0 */
        if (start == 0)
                start = SLICE_LOW_TOP;

        return !slice_area_is_free(mm, start, end - start);
}

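/*
 * Illustrative note (assuming SLICE_HIGH_SHIFT == 40 and
 * SLICE_LOW_TOP == 0x100000000ul, as in the headers this file pairs
 * with): because of the hack above, high slice 0 covers [4GB, 1TB)
 * rather than [0, 1TB), so every address is owned by exactly one bit
 * across the two bitmaps:
 *
 *      slice_high_has_vma(mm, 0);      // checks 0x100000000 .. 1TB
 *      slice_high_has_vma(mm, 1);      // checks 1TB .. 2TB
 */
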
static struct slice_mask slice_mask_for_free(struct mm_struct *mm)
{
        struct slice_mask ret = { 0, 0 };
        unsigned long i;

        for (i = 0; i < SLICE_NUM_LOW; i++)
                if (!slice_low_has_vma(mm, i))
                        ret.low_slices |= 1u << i;

        if (mm->task_size <= SLICE_LOW_TOP)
                return ret;

        for (i = 0; i < SLICE_NUM_HIGH; i++)
                if (!slice_high_has_vma(mm, i))
                        ret.high_slices |= 1u << i;

        return ret;
}

static struct slice_mask slice_mask_for_size(struct mm_struct *mm, int psize)
{
        struct slice_mask ret = { 0, 0 };
        unsigned long i;
        u64 psizes;

        psizes = mm->context.low_slices_psize;
        for (i = 0; i < SLICE_NUM_LOW; i++)
                if (((psizes >> (i * 4)) & 0xf) == psize)
                        ret.low_slices |= 1u << i;

        psizes = mm->context.high_slices_psize;
        for (i = 0; i < SLICE_NUM_HIGH; i++)
                if (((psizes >> (i * 4)) & 0xf) == psize)
                        ret.high_slices |= 1u << i;

        return ret;
}

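/*
 * Sketch of the per-slice page-size encoding this relies on (an
 * illustration, not part of the original commit text): each of the 16
 * slices gets a 4-bit MMU page-size index packed into a u64, so nibble
 * i of low_slices_psize describes low slice i. Reading slice 2, for
 * example:
 *
 *      u64 psizes = mm->context.low_slices_psize;
 *      int psize2 = (psizes >> (2 * 4)) & 0xf; // psize of low slice 2
 */
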
static int slice_check_fit(struct slice_mask mask, struct slice_mask available)
{
        return (mask.low_slices & available.low_slices) == mask.low_slices &&
                (mask.high_slices & available.high_slices) == mask.high_slices;
}

static void slice_flush_segments(void *parm)
{
        struct mm_struct *mm = parm;
        unsigned long flags;

        if (mm != current->active_mm)
                return;

        /* update the paca copy of the context struct */
        get_paca()->context = current->active_mm->context;

        local_irq_save(flags);
        slb_flush_and_rebolt();
        local_irq_restore(flags);
}

static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize)
{
        /* Write the new slice psize bits */
        u64 lpsizes, hpsizes;
        unsigned long i, flags;

        slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
        slice_print_mask(" mask", mask);

        /* We need to use a spinlock here to protect against
         * concurrent 64k -> 4k demotion ...
         */
        spin_lock_irqsave(&slice_convert_lock, flags);

        lpsizes = mm->context.low_slices_psize;
        for (i = 0; i < SLICE_NUM_LOW; i++)
                if (mask.low_slices & (1u << i))
                        lpsizes = (lpsizes & ~(0xful << (i * 4))) |
                                (((unsigned long)psize) << (i * 4));

        hpsizes = mm->context.high_slices_psize;
        for (i = 0; i < SLICE_NUM_HIGH; i++)
                if (mask.high_slices & (1u << i))
                        hpsizes = (hpsizes & ~(0xful << (i * 4))) |
                                (((unsigned long)psize) << (i * 4));

        mm->context.low_slices_psize = lpsizes;
        mm->context.high_slices_psize = hpsizes;

        slice_dbg(" lsps=%lx, hsps=%lx\n",
                  mm->context.low_slices_psize,
                  mm->context.high_slices_psize);

        spin_unlock_irqrestore(&slice_convert_lock, flags);
        mb();

        /* XXX this is sub-optimal but will do for now */
        on_each_cpu(slice_flush_segments, mm, 0, 1);
#ifdef CONFIG_SPU_BASE
        spu_flush_all_slbs(mm);
#endif
}

static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
                                              unsigned long len,
                                              struct slice_mask available,
                                              int psize, int use_cache)
{
        struct vm_area_struct *vma;
        unsigned long start_addr, addr;
        struct slice_mask mask;
        int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);

        if (use_cache) {
                if (len <= mm->cached_hole_size) {
                        start_addr = addr = TASK_UNMAPPED_BASE;
                        mm->cached_hole_size = 0;
                } else
                        start_addr = addr = mm->free_area_cache;
        } else
                start_addr = addr = TASK_UNMAPPED_BASE;

full_search:
        for (;;) {
                addr = _ALIGN_UP(addr, 1ul << pshift);
                if ((TASK_SIZE - len) < addr)
                        break;
                vma = find_vma(mm, addr);
                BUG_ON(vma && (addr >= vma->vm_end));

                mask = slice_range_to_mask(addr, len);
                if (!slice_check_fit(mask, available)) {
                        if (addr < SLICE_LOW_TOP)
                                addr = _ALIGN_UP(addr + 1, 1ul << SLICE_LOW_SHIFT);
                        else
                                addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
                        continue;
                }
                if (!vma || addr + len <= vma->vm_start) {
                        /*
                         * Remember the place where we stopped the search:
                         */
                        if (use_cache)
                                mm->free_area_cache = addr + len;
                        return addr;
                }
                if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;
                addr = vma->vm_end;
        }

        /* Make sure we didn't miss any holes */
        if (use_cache && start_addr != TASK_UNMAPPED_BASE) {
                start_addr = addr = TASK_UNMAPPED_BASE;
                mm->cached_hole_size = 0;
                goto full_search;
        }
        return -ENOMEM;
}

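/*
 * Example of the slice-boundary skip above (illustrative, again with
 * 256MB low slices): if a candidate address at 0x08010000 fails
 * slice_check_fit(), the search does not creep forward page by page;
 * it jumps straight to the next slice boundary:
 *
 *      addr = _ALIGN_UP(0x08010000 + 1, 1ul << 28);    // -> 0x10000000
 */
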
static unsigned long slice_find_area_topdown(struct mm_struct *mm,
                                             unsigned long len,
                                             struct slice_mask available,
                                             int psize, int use_cache)
{
        struct vm_area_struct *vma;
        unsigned long addr;
        struct slice_mask mask;
        int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);

        /* check if free_area_cache is useful for us */
        if (use_cache) {
                if (len <= mm->cached_hole_size) {
                        mm->cached_hole_size = 0;
                        mm->free_area_cache = mm->mmap_base;
                }

                /* either no address requested or can't fit in requested
                 * address hole
                 */
                addr = mm->free_area_cache;

                /* make sure it can fit in the remaining address space */
                if (addr > len) {
                        addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
                        mask = slice_range_to_mask(addr, len);
                        if (slice_check_fit(mask, available) &&
                            slice_area_is_free(mm, addr, len))
                                /* remember the address as a hint for
                                 * next time
                                 */
                                return (mm->free_area_cache = addr);
                }
        }

        addr = mm->mmap_base;
        while (addr > len) {
                /* Go down by chunk size */
                addr = _ALIGN_DOWN(addr - len, 1ul << pshift);

                /* Check for hit with different page size */
                mask = slice_range_to_mask(addr, len);
                if (!slice_check_fit(mask, available)) {
                        if (addr < SLICE_LOW_TOP)
                                addr = _ALIGN_DOWN(addr, 1ul << SLICE_LOW_SHIFT);
                        else if (addr < (1ul << SLICE_HIGH_SHIFT))
                                addr = SLICE_LOW_TOP;
                        else
                                addr = _ALIGN_DOWN(addr, 1ul << SLICE_HIGH_SHIFT);
                        continue;
                }

                /*
                 * Lookup failure means no vma is above this address,
                 * else if new region fits below vma->vm_start,
                 * return with success:
                 */
                vma = find_vma(mm, addr);
                if (!vma || (addr + len) <= vma->vm_start) {
                        /* remember the address as a hint for next time */
                        if (use_cache)
                                mm->free_area_cache = addr;
                        return addr;
                }

                /* remember the largest hole we saw so far */
                if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;

                /* try just below the current vma->vm_start */
                addr = vma->vm_start;
        }

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        addr = slice_find_area_bottomup(mm, len, available, psize, 0);

        /*
         * Restore the topdown base:
         */
        if (use_cache) {
                mm->free_area_cache = mm->mmap_base;
                mm->cached_hole_size = ~0UL;
        }

        return addr;
}

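/*
 * Example of the top-down rounding above (illustrative, assuming
 * SLICE_LOW_TOP == 4GB and SLICE_HIGH_SHIFT == 40): a candidate that
 * fails the fit check is snapped down to the previous slice boundary,
 * and one that lands between 4GB and the first 1TB boundary is snapped
 * to SLICE_LOW_TOP itself:
 *
 *      addr = 0x180000000ul;   // 6GB: above SLICE_LOW_TOP, below 1TB
 *      // -> addr = SLICE_LOW_TOP (0x100000000ul), retry from 4GB down
 */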

static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
                                     struct slice_mask mask, int psize,
                                     int topdown, int use_cache)
{
        if (topdown)
                return slice_find_area_topdown(mm, len, mask, psize, use_cache);
        else
                return slice_find_area_bottomup(mm, len, mask, psize, use_cache);
}

unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
                                      unsigned long flags, unsigned int psize,
                                      int topdown, int use_cache)
{
        struct slice_mask mask;
        struct slice_mask good_mask;
        struct slice_mask potential_mask = {0,0} /* silence stupid warning */;
        int pmask_set = 0;
        int fixed = (flags & MAP_FIXED);
        int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
        struct mm_struct *mm = current->mm;

        /* Sanity checks */
        BUG_ON(mm->task_size == 0);

        slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
        slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d, use_cache=%d\n",
                  addr, len, flags, topdown, use_cache);

        if (len > mm->task_size)
                return -ENOMEM;
        if (fixed && (addr & ((1ul << pshift) - 1)))
                return -EINVAL;
        if (fixed && addr > (mm->task_size - len))
                return -EINVAL;

        /* If hint, make sure it matches our alignment restrictions */
        if (!fixed && addr) {
                addr = _ALIGN_UP(addr, 1ul << pshift);
                slice_dbg(" aligned addr=%lx\n", addr);
        }

        /* First make up a "good" mask of slices that already have the
         * right page size
         */
        good_mask = slice_mask_for_size(mm, psize);
        slice_print_mask(" good_mask", good_mask);

        /* First check hint if it's valid or if we have MAP_FIXED */
        if ((addr != 0 || fixed) && (mm->task_size - len) >= addr) {

                /* Don't bother with hint if it overlaps a VMA */
                if (!fixed && !slice_area_is_free(mm, addr, len))
                        goto search;

                /* Build a mask for the requested range */
                mask = slice_range_to_mask(addr, len);
                slice_print_mask(" mask", mask);

                /* Check if we fit in the good mask. If we do, we just return,
                 * nothing else to do
                 */
                if (slice_check_fit(mask, good_mask)) {
                        slice_dbg(" fits good !\n");
                        return addr;
                }

                /* We don't fit in the good mask, check what other slices are
                 * empty and thus can be converted
                 */
                potential_mask = slice_mask_for_free(mm);
                potential_mask.low_slices |= good_mask.low_slices;
                potential_mask.high_slices |= good_mask.high_slices;
                pmask_set = 1;
                slice_print_mask(" potential", potential_mask);
                if (slice_check_fit(mask, potential_mask)) {
                        slice_dbg(" fits potential !\n");
                        goto convert;
                }
        }

        /* If we have MAP_FIXED and failed the above step, then error out */
        if (fixed)
                return -EBUSY;

 search:
        slice_dbg(" search...\n");

        /* Now let's see if we can find something in the existing slices
         * for that size
         */
        addr = slice_find_area(mm, len, good_mask, psize, topdown, use_cache);
        if (addr != -ENOMEM) {
                /* Found within the good mask, we don't have to setup,
                 * we thus return directly
                 */
                slice_dbg(" found area at 0x%lx\n", addr);
                return addr;
        }

        /* Won't fit, check what can be converted */
        if (!pmask_set) {
                potential_mask = slice_mask_for_free(mm);
                potential_mask.low_slices |= good_mask.low_slices;
                potential_mask.high_slices |= good_mask.high_slices;
                pmask_set = 1;
                slice_print_mask(" potential", potential_mask);
        }

        /* Retry the search over the potential mask, i.e. slices that
         * already have the right size plus free ones we can convert
         */
        addr = slice_find_area(mm, len, potential_mask, psize, topdown,
                               use_cache);
        if (addr == -ENOMEM)
                return -ENOMEM;

        mask = slice_range_to_mask(addr, len);
        slice_dbg(" found potential area at 0x%lx\n", addr);
        slice_print_mask(" mask", mask);

 convert:
        slice_convert(mm, mask, psize);
        return addr;

}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);

unsigned long arch_get_unmapped_area(struct file *filp,
                                     unsigned long addr,
                                     unsigned long len,
                                     unsigned long pgoff,
                                     unsigned long flags)
{
        return slice_get_unmapped_area(addr, len, flags,
                                       current->mm->context.user_psize,
                                       0, 1);
}

unsigned long arch_get_unmapped_area_topdown(struct file *filp,
                                             const unsigned long addr0,
                                             const unsigned long len,
                                             const unsigned long pgoff,
                                             const unsigned long flags)
{
        return slice_get_unmapped_area(addr0, len, flags,
                                       current->mm->context.user_psize,
                                       1, 1);
}

unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
        u64 psizes;
        int index;

        if (addr < SLICE_LOW_TOP) {
                psizes = mm->context.low_slices_psize;
                index = GET_LOW_SLICE_INDEX(addr);
        } else {
                psizes = mm->context.high_slices_psize;
                index = GET_HIGH_SLICE_INDEX(addr);
        }

        return (psizes >> (index * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);

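/*
 * Usage sketch (illustrative values): with 256MB low slices, an address
 * like 0x12345678 sits in low slice 1 (0x12345678 >> 28 == 1), so the
 * lookup reduces to nibble 1 of low_slices_psize:
 *
 *      unsigned int psize = get_slice_psize(mm, 0x12345678);
 *      // == (mm->context.low_slices_psize >> 4) & 0xf
 */
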
/*
 * This is called by hash_page when it needs to do a lazy conversion of
 * an address space from real 64K pages to combo 4K pages (typically
 * when hitting a non cacheable mapping on a processor or hypervisor
 * that won't allow them for 64K pages).
 *
 * This is also called in init_new_context() to change back the user
 * psize from whatever the parent context had it set to.
 *
 * This function will only change the content of the {low,high}_slices_psize
 * masks, it will not flush SLBs as this shall be handled lazily by the
 * caller.
 */
void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
{
        unsigned long flags, lpsizes, hpsizes;
        unsigned int old_psize;
        int i;

        slice_dbg("slice_set_user_psize(mm=%p, psize=%d)\n", mm, psize);

        spin_lock_irqsave(&slice_convert_lock, flags);

        old_psize = mm->context.user_psize;
        slice_dbg(" old_psize=%d\n", old_psize);
        if (old_psize == psize)
                goto bail;

        mm->context.user_psize = psize;
        wmb();

        lpsizes = mm->context.low_slices_psize;
        for (i = 0; i < SLICE_NUM_LOW; i++)
                if (((lpsizes >> (i * 4)) & 0xf) == old_psize)
                        lpsizes = (lpsizes & ~(0xful << (i * 4))) |
                                (((unsigned long)psize) << (i * 4));

        hpsizes = mm->context.high_slices_psize;
        for (i = 0; i < SLICE_NUM_HIGH; i++)
                if (((hpsizes >> (i * 4)) & 0xf) == old_psize)
                        hpsizes = (hpsizes & ~(0xful << (i * 4))) |
                                (((unsigned long)psize) << (i * 4));

        mm->context.low_slices_psize = lpsizes;
        mm->context.high_slices_psize = hpsizes;

        slice_dbg(" lsps=%lx, hsps=%lx\n",
                  mm->context.low_slices_psize,
                  mm->context.high_slices_psize);

 bail:
        spin_unlock_irqrestore(&slice_convert_lock, flags);
}

/*
 * is_hugepage_only_range() is used by generic code to verify whether
 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
 *
 * Until the generic code provides a more generic hook and/or starts
 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
 * here knows how to deal with), we hijack it to keep standard mappings
 * away from us.
 *
 * Because of that generic code limitation, a MAP_FIXED mapping cannot
 * "convert" back a slice with no VMAs to the standard page size, only
 * get_unmapped_area() can. It would be possible to fix it here but I
 * prefer working on fixing the generic code instead.
 *
 * WARNING: This will not work if hugetlbfs isn't enabled since the
 * generic code will redefine that function as 0 in that case. This is
 * ok for now as we only use slices with hugetlbfs enabled. This should
 * be fixed as the generic code gets fixed.
 */
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
                           unsigned long len)
{
        struct slice_mask mask, available;

        mask = slice_range_to_mask(addr, len);
        available = slice_mask_for_size(mm, mm->context.user_psize);

#if 0 /* too verbose */
        slice_dbg("is_hugepage_only_range(mm=%p, addr=%lx, len=%lx)\n",
                  mm, addr, len);
        slice_print_mask(" mask", mask);
        slice_print_mask(" available", available);
#endif
        return !slice_check_fit(mask, available);
}
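
/*
 * Usage sketch (illustrative, not from the original commit): for an mm
 * whose user psize is 4K but which has had one low slice converted to a
 * larger page size, a normal mmap probing that slice no longer fits the
 * user-psize mask and is rejected, roughly as the generic code does:
 *
 *      if (is_hugepage_only_range(mm, addr, len))
 *              return -EINVAL;
 */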