aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/mm/slice.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/powerpc/mm/slice.c')
-rw-r--r--arch/powerpc/mm/slice.c223
1 file changed, 93 insertions(+), 130 deletions(-)
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index cf9dada734b6..3e99c149271a 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -237,134 +237,112 @@ static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psiz
237#endif 237#endif
238} 238}
239 239
240/*
241 * Compute which slice addr is part of;
242 * set *boundary_addr to the start or end boundary of that slice
243 * (depending on 'end' parameter);
244 * return boolean indicating if the slice is marked as available in the
245 * 'available' slice_mark.
246 */
247static bool slice_scan_available(unsigned long addr,
248 struct slice_mask available,
249 int end,
250 unsigned long *boundary_addr)
251{
252 unsigned long slice;
253 if (addr < SLICE_LOW_TOP) {
254 slice = GET_LOW_SLICE_INDEX(addr);
255 *boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
256 return !!(available.low_slices & (1u << slice));
257 } else {
258 slice = GET_HIGH_SLICE_INDEX(addr);
259 *boundary_addr = (slice + end) ?
260 ((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
261 return !!(available.high_slices & (1u << slice));
262 }
263}
264
240static unsigned long slice_find_area_bottomup(struct mm_struct *mm, 265static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
241 unsigned long len, 266 unsigned long len,
242 struct slice_mask available, 267 struct slice_mask available,
243 int psize, int use_cache) 268 int psize)
244{ 269{
245 struct vm_area_struct *vma;
246 unsigned long start_addr, addr;
247 struct slice_mask mask;
248 int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT); 270 int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
249 271 unsigned long addr, found, next_end;
250 if (use_cache) { 272 struct vm_unmapped_area_info info;
251 if (len <= mm->cached_hole_size) { 273
252 start_addr = addr = TASK_UNMAPPED_BASE; 274 info.flags = 0;
253 mm->cached_hole_size = 0; 275 info.length = len;
254 } else 276 info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
255 start_addr = addr = mm->free_area_cache; 277 info.align_offset = 0;
256 } else 278
257 start_addr = addr = TASK_UNMAPPED_BASE; 279 addr = TASK_UNMAPPED_BASE;
258 280 while (addr < TASK_SIZE) {
259full_search: 281 info.low_limit = addr;
260 for (;;) { 282 if (!slice_scan_available(addr, available, 1, &addr))
261 addr = _ALIGN_UP(addr, 1ul << pshift);
262 if ((TASK_SIZE - len) < addr)
263 break;
264 vma = find_vma(mm, addr);
265 BUG_ON(vma && (addr >= vma->vm_end));
266
267 mask = slice_range_to_mask(addr, len);
268 if (!slice_check_fit(mask, available)) {
269 if (addr < SLICE_LOW_TOP)
270 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_LOW_SHIFT);
271 else
272 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
273 continue; 283 continue;
284
285 next_slice:
286 /*
287 * At this point [info.low_limit; addr) covers
288 * available slices only and ends at a slice boundary.
289 * Check if we need to reduce the range, or if we can
290 * extend it to cover the next available slice.
291 */
292 if (addr >= TASK_SIZE)
293 addr = TASK_SIZE;
294 else if (slice_scan_available(addr, available, 1, &next_end)) {
295 addr = next_end;
296 goto next_slice;
274 } 297 }
275 if (!vma || addr + len <= vma->vm_start) { 298 info.high_limit = addr;
276 /*
277 * Remember the place where we stopped the search:
278 */
279 if (use_cache)
280 mm->free_area_cache = addr + len;
281 return addr;
282 }
283 if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start)
284 mm->cached_hole_size = vma->vm_start - addr;
285 addr = vma->vm_end;
286 }
287 299
288 /* Make sure we didn't miss any holes */ 300 found = vm_unmapped_area(&info);
289 if (use_cache && start_addr != TASK_UNMAPPED_BASE) { 301 if (!(found & ~PAGE_MASK))
290 start_addr = addr = TASK_UNMAPPED_BASE; 302 return found;
291 mm->cached_hole_size = 0;
292 goto full_search;
293 } 303 }
304
294 return -ENOMEM; 305 return -ENOMEM;
295} 306}
296 307
297static unsigned long slice_find_area_topdown(struct mm_struct *mm, 308static unsigned long slice_find_area_topdown(struct mm_struct *mm,
298 unsigned long len, 309 unsigned long len,
299 struct slice_mask available, 310 struct slice_mask available,
300 int psize, int use_cache) 311 int psize)
301{ 312{
302 struct vm_area_struct *vma;
303 unsigned long addr;
304 struct slice_mask mask;
305 int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT); 313 int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
314 unsigned long addr, found, prev;
315 struct vm_unmapped_area_info info;
306 316
307 /* check if free_area_cache is useful for us */ 317 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
308 if (use_cache) { 318 info.length = len;
309 if (len <= mm->cached_hole_size) { 319 info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
310 mm->cached_hole_size = 0; 320 info.align_offset = 0;
311 mm->free_area_cache = mm->mmap_base;
312 }
313
314 /* either no address requested or can't fit in requested
315 * address hole
316 */
317 addr = mm->free_area_cache;
318
319 /* make sure it can fit in the remaining address space */
320 if (addr > len) {
321 addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
322 mask = slice_range_to_mask(addr, len);
323 if (slice_check_fit(mask, available) &&
324 slice_area_is_free(mm, addr, len))
325 /* remember the address as a hint for
326 * next time
327 */
328 return (mm->free_area_cache = addr);
329 }
330 }
331 321
332 addr = mm->mmap_base; 322 addr = mm->mmap_base;
333 while (addr > len) { 323 while (addr > PAGE_SIZE) {
334 /* Go down by chunk size */ 324 info.high_limit = addr;
335 addr = _ALIGN_DOWN(addr - len, 1ul << pshift); 325 if (!slice_scan_available(addr - 1, available, 0, &addr))
336
337 /* Check for hit with different page size */
338 mask = slice_range_to_mask(addr, len);
339 if (!slice_check_fit(mask, available)) {
340 if (addr < SLICE_LOW_TOP)
341 addr = _ALIGN_DOWN(addr, 1ul << SLICE_LOW_SHIFT);
342 else if (addr < (1ul << SLICE_HIGH_SHIFT))
343 addr = SLICE_LOW_TOP;
344 else
345 addr = _ALIGN_DOWN(addr, 1ul << SLICE_HIGH_SHIFT);
346 continue; 326 continue;
347 }
348 327
328 prev_slice:
349 /* 329 /*
350 * Lookup failure means no vma is above this address, 330 * At this point [addr; info.high_limit) covers
351 * else if new region fits below vma->vm_start, 331 * available slices only and starts at a slice boundary.
352 * return with success: 332 * Check if we need to reduce the range, or if we can
333 * extend it to cover the previous available slice.
353 */ 334 */
354 vma = find_vma(mm, addr); 335 if (addr < PAGE_SIZE)
355 if (!vma || (addr + len) <= vma->vm_start) { 336 addr = PAGE_SIZE;
356 /* remember the address as a hint for next time */ 337 else if (slice_scan_available(addr - 1, available, 0, &prev)) {
357 if (use_cache) 338 addr = prev;
358 mm->free_area_cache = addr; 339 goto prev_slice;
359 return addr;
360 } 340 }
341 info.low_limit = addr;
361 342
362 /* remember the largest hole we saw so far */ 343 found = vm_unmapped_area(&info);
363 if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start) 344 if (!(found & ~PAGE_MASK))
364 mm->cached_hole_size = vma->vm_start - addr; 345 return found;
365
366 /* try just below the current vma->vm_start */
367 addr = vma->vm_start;
368 } 346 }
369 347
370 /* 348 /*
@@ -373,28 +351,18 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
373 * can happen with large stack limits and large mmap() 351 * can happen with large stack limits and large mmap()
374 * allocations. 352 * allocations.
375 */ 353 */
376 addr = slice_find_area_bottomup(mm, len, available, psize, 0); 354 return slice_find_area_bottomup(mm, len, available, psize);
377
378 /*
379 * Restore the topdown base:
380 */
381 if (use_cache) {
382 mm->free_area_cache = mm->mmap_base;
383 mm->cached_hole_size = ~0UL;
384 }
385
386 return addr;
387} 355}
388 356
389 357
390static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len, 358static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
391 struct slice_mask mask, int psize, 359 struct slice_mask mask, int psize,
392 int topdown, int use_cache) 360 int topdown)
393{ 361{
394 if (topdown) 362 if (topdown)
395 return slice_find_area_topdown(mm, len, mask, psize, use_cache); 363 return slice_find_area_topdown(mm, len, mask, psize);
396 else 364 else
397 return slice_find_area_bottomup(mm, len, mask, psize, use_cache); 365 return slice_find_area_bottomup(mm, len, mask, psize);
398} 366}
399 367
400#define or_mask(dst, src) do { \ 368#define or_mask(dst, src) do { \
@@ -415,7 +383,7 @@ static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
415 383
416unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, 384unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
417 unsigned long flags, unsigned int psize, 385 unsigned long flags, unsigned int psize,
418 int topdown, int use_cache) 386 int topdown)
419{ 387{
420 struct slice_mask mask = {0, 0}; 388 struct slice_mask mask = {0, 0};
421 struct slice_mask good_mask; 389 struct slice_mask good_mask;
@@ -430,8 +398,8 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
430 BUG_ON(mm->task_size == 0); 398 BUG_ON(mm->task_size == 0);
431 399
432 slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize); 400 slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
433 slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d, use_cache=%d\n", 401 slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
434 addr, len, flags, topdown, use_cache); 402 addr, len, flags, topdown);
435 403
436 if (len > mm->task_size) 404 if (len > mm->task_size)
437 return -ENOMEM; 405 return -ENOMEM;
@@ -503,8 +471,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
503 /* Now let's see if we can find something in the existing 471 /* Now let's see if we can find something in the existing
504 * slices for that size 472 * slices for that size
505 */ 473 */
506 newaddr = slice_find_area(mm, len, good_mask, psize, topdown, 474 newaddr = slice_find_area(mm, len, good_mask, psize, topdown);
507 use_cache);
508 if (newaddr != -ENOMEM) { 475 if (newaddr != -ENOMEM) {
509 /* Found within the good mask, we don't have to setup, 476 /* Found within the good mask, we don't have to setup,
510 * we thus return directly 477 * we thus return directly
@@ -536,8 +503,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
536 * anywhere in the good area. 503 * anywhere in the good area.
537 */ 504 */
538 if (addr) { 505 if (addr) {
539 addr = slice_find_area(mm, len, good_mask, psize, topdown, 506 addr = slice_find_area(mm, len, good_mask, psize, topdown);
540 use_cache);
541 if (addr != -ENOMEM) { 507 if (addr != -ENOMEM) {
542 slice_dbg(" found area at 0x%lx\n", addr); 508 slice_dbg(" found area at 0x%lx\n", addr);
543 return addr; 509 return addr;
@@ -547,15 +513,14 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
547 /* Now let's see if we can find something in the existing slices 513 /* Now let's see if we can find something in the existing slices
548 * for that size plus free slices 514 * for that size plus free slices
549 */ 515 */
550 addr = slice_find_area(mm, len, potential_mask, psize, topdown, 516 addr = slice_find_area(mm, len, potential_mask, psize, topdown);
551 use_cache);
552 517
553#ifdef CONFIG_PPC_64K_PAGES 518#ifdef CONFIG_PPC_64K_PAGES
554 if (addr == -ENOMEM && psize == MMU_PAGE_64K) { 519 if (addr == -ENOMEM && psize == MMU_PAGE_64K) {
555 /* retry the search with 4k-page slices included */ 520 /* retry the search with 4k-page slices included */
556 or_mask(potential_mask, compat_mask); 521 or_mask(potential_mask, compat_mask);
557 addr = slice_find_area(mm, len, potential_mask, psize, 522 addr = slice_find_area(mm, len, potential_mask, psize,
558 topdown, use_cache); 523 topdown);
559 } 524 }
560#endif 525#endif
561 526
@@ -586,8 +551,7 @@ unsigned long arch_get_unmapped_area(struct file *filp,
586 unsigned long flags) 551 unsigned long flags)
587{ 552{
588 return slice_get_unmapped_area(addr, len, flags, 553 return slice_get_unmapped_area(addr, len, flags,
589 current->mm->context.user_psize, 554 current->mm->context.user_psize, 0);
590 0, 1);
591} 555}
592 556
593unsigned long arch_get_unmapped_area_topdown(struct file *filp, 557unsigned long arch_get_unmapped_area_topdown(struct file *filp,
@@ -597,8 +561,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
597 const unsigned long flags) 561 const unsigned long flags)
598{ 562{
599 return slice_get_unmapped_area(addr0, len, flags, 563 return slice_get_unmapped_area(addr0, len, flags,
600 current->mm->context.user_psize, 564 current->mm->context.user_psize, 1);
601 1, 1);
602} 565}
603 566
604unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr) 567unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)