author     Michel Lespinasse <walken@google.com>           2012-12-11 19:02:02 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-12-11 20:22:25 -0500
commit     cdc17344959e30ba8d0b98448832514024282e14 (patch)
tree       bbf87178fefae76c606a704b7efe32fafbc9de1a /arch/x86/mm/hugetlbpage.c
parent     0865935598bb112a02f40017e8aaa6bce8577f23 (diff)
mm: use vm_unmapped_area() in hugetlbfs on i386 architecture
Update the i386 hugetlb_get_unmapped_area function to use
vm_unmapped_area() instead of implementing a brute-force search.
[akpm@linux-foundation.org: fix build]
Signed-off-by: Michel Lespinasse <walken@google.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
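
For intuition, the search being replaced can be modeled outside the kernel. The sketch below is illustrative userspace C, not kernel code: it computes what both the old open-coded loop and the new vm_unmapped_area() call compute for the bottom-up case, namely the lowest suitably aligned address in [low, high) at which a hole of the requested length fits between existing mappings. The caching details of the old code (free_area_cache, cached_hole_size) are deliberately omitted, and the region layout in main() is made up.

	#include <stdio.h>

	struct region { unsigned long start, end; };	/* sorted, non-overlapping */

	static unsigned long align_up(unsigned long addr, unsigned long align)
	{
		return (addr + align - 1) & ~(align - 1);
	}

	static unsigned long find_hole_bottomup(const struct region *vmas, int n,
						unsigned long low, unsigned long high,
						unsigned long len, unsigned long align)
	{
		unsigned long addr = align_up(low, align);

		for (int i = 0; i < n; i++) {
			if (addr + len <= vmas[i].start)	/* hole before this vma */
				return addr;
			if (vmas[i].end > addr)			/* skip past this vma */
				addr = align_up(vmas[i].end, align);
		}
		return (addr + len <= high) ? addr : -1UL;	/* -1UL models -ENOMEM */
	}

	int main(void)
	{
		struct region vmas[] = {
			{ 0x08000000, 0x08100000 },
			/* the 2 MiB hole before this vma starts unaligned,
			 * so a 2 MiB-aligned request cannot use it: */
			{ 0x08300000, 0x08500000 },
		};
		unsigned long addr = find_hole_bottomup(vmas, 2, 0x08000000,
							0x40000000, 0x200000, 0x200000);
		printf("hole found at %#lx\n", addr);	/* prints 0x8600000 */
		return 0;
	}

vm_unmapped_area() arrives at the same answer without the linear walk: this patch series augments the vma red-black tree with per-subtree maximum-gap information, so a large-enough hole can be located in O(log n) rather than by scanning every vma.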
Diffstat (limited to 'arch/x86/mm/hugetlbpage.c')
-rw-r--r--  arch/x86/mm/hugetlbpage.c | 130
1 file changed, 25 insertions(+), 105 deletions(-)
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 937bff5cdaa7..ae1aa71d0115 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -274,42 +274,15 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
 		unsigned long pgoff, unsigned long flags)
 {
 	struct hstate *h = hstate_file(file);
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
-	unsigned long start_addr;
-
-	if (len > mm->cached_hole_size) {
-		start_addr = mm->free_area_cache;
-	} else {
-		start_addr = TASK_UNMAPPED_BASE;
-		mm->cached_hole_size = 0;
-	}
-
-full_search:
-	addr = ALIGN(start_addr, huge_page_size(h));
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point:  (!vma || addr < vma->vm_end). */
-		if (TASK_SIZE - len < addr) {
-			/*
-			 * Start a new search - just in case we missed
-			 * some holes.
-			 */
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				start_addr = TASK_UNMAPPED_BASE;
-				mm->cached_hole_size = 0;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (!vma || addr + len <= vma->vm_start) {
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-		addr = ALIGN(vma->vm_end, huge_page_size(h));
-	}
+	struct vm_unmapped_area_info info;
+
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = TASK_UNMAPPED_BASE;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+	info.align_offset = 0;
+	return vm_unmapped_area(&info);
 }
 
 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
@@ -317,83 +290,30 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
 		unsigned long pgoff, unsigned long flags)
 {
 	struct hstate *h = hstate_file(file);
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
-	unsigned long base = mm->mmap_base;
-	unsigned long addr = addr0;
-	unsigned long largest_hole = mm->cached_hole_size;
-	unsigned long start_addr;
-
-	/* don't allow allocations above current base */
-	if (mm->free_area_cache > base)
-		mm->free_area_cache = base;
-
-	if (len <= largest_hole) {
-		largest_hole = 0;
-		mm->free_area_cache = base;
-	}
-try_again:
-	start_addr = mm->free_area_cache;
-
-	/* make sure it can fit in the remaining address space */
-	if (mm->free_area_cache < len)
-		goto fail;
-
-	/* either no address requested or can't fit in requested address hole */
-	addr = (mm->free_area_cache - len) & huge_page_mask(h);
-	do {
-		/*
-		 * Lookup failure means no vma is above this address,
-		 * i.e. return with success:
-		 */
-		vma = find_vma(mm, addr);
-		if (!vma)
-			return addr;
+	struct vm_unmapped_area_info info;
+	unsigned long addr;
 
-		if (addr + len <= vma->vm_start) {
-			/* remember the address as a hint for next time */
-			mm->cached_hole_size = largest_hole;
-			return (mm->free_area_cache = addr);
-		} else if (mm->free_area_cache == vma->vm_end) {
-			/* pull free_area_cache down to the first hole */
-			mm->free_area_cache = vma->vm_start;
-			mm->cached_hole_size = largest_hole;
-		}
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = current->mm->mmap_base;
+	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+	info.align_offset = 0;
+	addr = vm_unmapped_area(&info);
 
-		/* remember the largest hole we saw so far */
-		if (addr + largest_hole < vma->vm_start)
-			largest_hole = vma->vm_start - addr;
-
-		/* try just below the current vma->vm_start */
-		addr = (vma->vm_start - len) & huge_page_mask(h);
-	} while (len <= vma->vm_start);
-
-fail:
-	/*
-	 * if hint left us with no space for the requested
-	 * mapping then try again:
-	 */
-	if (start_addr != base) {
-		mm->free_area_cache = base;
-		largest_hole = 0;
-		goto try_again;
-	}
 	/*
 	 * A failed mmap() very likely causes application failure,
 	 * so fall back to the bottom-up function here. This scenario
 	 * can happen with large stack limits and large mmap()
 	 * allocations.
 	 */
-	mm->free_area_cache = TASK_UNMAPPED_BASE;
-	mm->cached_hole_size = ~0UL;
-	addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
-			len, pgoff, flags);
-
-	/*
-	 * Restore the topdown base:
-	 */
-	mm->free_area_cache = base;
-	mm->cached_hole_size = ~0UL;
+	if (addr & ~PAGE_MASK) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.flags = 0;
+		info.low_limit = TASK_UNMAPPED_BASE;
+		info.high_limit = TASK_SIZE;
+		addr = vm_unmapped_area(&info);
+	}
 
 	return addr;
 }
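
A note on the alignment fields, since they carry the huge-page constraint that the deleted ALIGN()/huge_page_mask() arithmetic used to enforce: PAGE_MASK & ~huge_page_mask(h) selects exactly the address bits between the base-page and huge-page boundaries, and vm_unmapped_area() only returns addresses where those bits equal align_offset (here 0). The standalone program below demonstrates the mask algebra with assumed constants (4 KiB base pages, 2 MiB huge pages, as on i386 with PAE); it is a sketch of the arithmetic, not kernel code.

	#include <stdio.h>

	int main(void)
	{
		/* Assumed constants: 4 KiB base pages, 2 MiB huge pages (i386 PAE). */
		unsigned long page_mask = ~(0x1000UL - 1);	/* models PAGE_MASK */
		unsigned long huge_mask = ~(0x200000UL - 1);	/* models huge_page_mask(h) */

		/* info.align_mask = PAGE_MASK & ~huge_page_mask(h): the bits that
		 * must be zero in a huge-page-aligned address. */
		unsigned long align_mask = page_mask & ~huge_mask;
		printf("align_mask = %#lx\n", align_mask);	/* prints 0x1ff000 */

		/* Rounding a candidate up to the next address with
		 * (addr & align_mask) == 0, i.e. the next 2 MiB boundary: */
		unsigned long candidate = 0x08234000UL;
		unsigned long aligned = (candidate + ~huge_mask) & huge_mask;
		printf("%#lx -> %#lx\n", candidate, aligned);	/* 0x8234000 -> 0x8400000 */
		return 0;
	}

The fallback test if (addr & ~PAGE_MASK) in the new top-down path uses the same algebra from the other direction: a successful allocation is at least page aligned, so any set bits in ~PAGE_MASK (the low page-offset bits) can only come from a negative errno value, which the VM_BUG_ON then pins down to -ENOMEM before retrying bottom-up.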