author		Michel Lespinasse <walken@google.com>	2012-12-11 19:02:17 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-11 20:22:26 -0500
commit		dd5295965b77e5da6705953ef8a7fc528ea328a1 (patch)
tree		3c03f9811af136639572a44443c582ae5fd2ec25 /arch/tile
parent		a046be3d3ce33c747340b0716fda31f8f63a48b9 (diff)
mm: use vm_unmapped_area() in hugetlbfs on tile architecture
Update the tile hugetlb_get_unmapped_area function to make use of
vm_unmapped_area() instead of implementing a brute force search.

[akpm@linux-foundation.org: fix build]
Signed-off-by: Michel Lespinasse <walken@google.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
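
For context, the calling convention introduced by the vm_unmapped_area() series looks roughly like the sketch below. This is an illustrative summary assuming the 3.8-era mm API (struct vm_unmapped_area_info, vm_unmapped_area(), VM_UNMAPPED_AREA_TOPDOWN), not code copied verbatim from this patch:

	/*
	 * Rough sketch of the vm_unmapped_area() calling convention
	 * (assumes struct vm_unmapped_area_info as added by this series).
	 */
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;
	unsigned long addr;

	info.flags = 0;                       /* 0 = bottom-up; VM_UNMAPPED_AREA_TOPDOWN = top-down */
	info.length = len;                    /* size of the requested mapping */
	info.low_limit = TASK_UNMAPPED_BASE;  /* lowest address the search may return */
	info.high_limit = TASK_SIZE;          /* search never goes past this address */
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);  /* constrain result to huge page alignment */
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);       /* aligned address, or -ENOMEM cast to unsigned long */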
Diffstat (limited to 'arch/tile')
-rw-r--r--	arch/tile/mm/hugetlbpage.c	139
1 file changed, 25 insertions(+), 114 deletions(-)
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index 812e2d037972..650ccff8378c 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -231,42 +231,15 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
 		unsigned long pgoff, unsigned long flags)
 {
 	struct hstate *h = hstate_file(file);
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
-	unsigned long start_addr;
-
-	if (len > mm->cached_hole_size) {
-		start_addr = mm->free_area_cache;
-	} else {
-		start_addr = TASK_UNMAPPED_BASE;
-		mm->cached_hole_size = 0;
-	}
-
-full_search:
-	addr = ALIGN(start_addr, huge_page_size(h));
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point: (!vma || addr < vma->vm_end). */
-		if (TASK_SIZE - len < addr) {
-			/*
-			 * Start a new search - just in case we missed
-			 * some holes.
-			 */
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				start_addr = TASK_UNMAPPED_BASE;
-				mm->cached_hole_size = 0;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (!vma || addr + len <= vma->vm_start) {
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-		addr = ALIGN(vma->vm_end, huge_page_size(h));
-	}
+	struct vm_unmapped_area_info info;
+
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = TASK_UNMAPPED_BASE;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+	info.align_offset = 0;
+	return vm_unmapped_area(&info);
 }
 
 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
@@ -274,92 +247,30 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
 		unsigned long pgoff, unsigned long flags)
 {
 	struct hstate *h = hstate_file(file);
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma, *prev_vma;
-	unsigned long base = mm->mmap_base, addr = addr0;
-	unsigned long largest_hole = mm->cached_hole_size;
-	int first_time = 1;
-
-	/* don't allow allocations above current base */
-	if (mm->free_area_cache > base)
-		mm->free_area_cache = base;
-
-	if (len <= largest_hole) {
-		largest_hole = 0;
-		mm->free_area_cache = base;
-	}
-try_again:
-	/* make sure it can fit in the remaining address space */
-	if (mm->free_area_cache < len)
-		goto fail;
-
-	/* either no address requested or can't fit in requested address hole */
-	addr = (mm->free_area_cache - len) & huge_page_mask(h);
-	do {
-		/*
-		 * Lookup failure means no vma is above this address,
-		 * i.e. return with success:
-		 */
-		vma = find_vma_prev(mm, addr, &prev_vma);
-		if (!vma) {
-			return addr;
-			break;
-		}
-
-		/*
-		 * new region fits between prev_vma->vm_end and
-		 * vma->vm_start, use it:
-		 */
-		if (addr + len <= vma->vm_start &&
-		    (!prev_vma || (addr >= prev_vma->vm_end))) {
-			/* remember the address as a hint for next time */
-			mm->cached_hole_size = largest_hole;
-			mm->free_area_cache = addr;
-			return addr;
-		} else {
-			/* pull free_area_cache down to the first hole */
-			if (mm->free_area_cache == vma->vm_end) {
-				mm->free_area_cache = vma->vm_start;
-				mm->cached_hole_size = largest_hole;
-			}
-		}
+	struct vm_unmapped_area_info info;
+	unsigned long addr;
 
-		/* remember the largest hole we saw so far */
-		if (addr + largest_hole < vma->vm_start)
-			largest_hole = vma->vm_start - addr;
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = current->mm->mmap_base;
+	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+	info.align_offset = 0;
+	addr = vm_unmapped_area(&info);
 
-		/* try just below the current vma->vm_start */
-		addr = (vma->vm_start - len) & huge_page_mask(h);
-
-	} while (len <= vma->vm_start);
-
-fail:
-	/*
-	 * if hint left us with no space for the requested
-	 * mapping then try again:
-	 */
-	if (first_time) {
-		mm->free_area_cache = base;
-		largest_hole = 0;
-		first_time = 0;
-		goto try_again;
-	}
 	/*
 	 * A failed mmap() very likely causes application failure,
 	 * so fall back to the bottom-up function here. This scenario
 	 * can happen with large stack limits and large mmap()
 	 * allocations.
 	 */
-	mm->free_area_cache = TASK_UNMAPPED_BASE;
-	mm->cached_hole_size = ~0UL;
-	addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
-			len, pgoff, flags);
-
-	/*
-	 * Restore the topdown base:
-	 */
-	mm->free_area_cache = base;
-	mm->cached_hole_size = ~0UL;
+	if (addr & ~PAGE_MASK) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.flags = 0;
+		info.low_limit = TASK_UNMAPPED_BASE;
+		info.high_limit = TASK_SIZE;
+		addr = vm_unmapped_area(&info);
+	}
 
 	return addr;
 }
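
To make the top-down fallback easier to follow, here is the new fallback block from the hunk above with explanatory comments added. vm_unmapped_area() reports failure by returning an errno value cast to unsigned long, and errno values are never page aligned, which is what the (addr & ~PAGE_MASK) test relies on:

	addr = vm_unmapped_area(&info);
	if (addr & ~PAGE_MASK) {
		/* Not page aligned, so this is an error code, not an address. */
		VM_BUG_ON(addr != -ENOMEM);
		/* Retry bottom-up over the full range before giving up. */
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}
	return addr;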