Diffstat (limited to 'mm')
-rw-r--r--	mm/hugetlb.c	46
1 file changed, 27 insertions, 19 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e2c80631d36a..f43b3dca12b5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -353,35 +353,43 @@ void return_unused_surplus_pages(unsigned long unused_resv_pages)
 	}
 }
 
-static struct page *alloc_huge_page(struct vm_area_struct *vma,
-				    unsigned long addr)
+
+static struct page *alloc_huge_page_shared(struct vm_area_struct *vma,
+					    unsigned long addr)
 {
-	struct page *page = NULL;
-	int use_reserved_page = vma->vm_flags & VM_MAYSHARE;
+	struct page *page;
 
 	spin_lock(&hugetlb_lock);
-	if (!use_reserved_page && (free_huge_pages <= resv_huge_pages))
-		goto fail;
-
 	page = dequeue_huge_page(vma, addr);
-	if (!page)
-		goto fail;
-
 	spin_unlock(&hugetlb_lock);
-	set_page_refcounted(page);
 	return page;
+}
 
-fail:
-	spin_unlock(&hugetlb_lock);
+static struct page *alloc_huge_page_private(struct vm_area_struct *vma,
+					    unsigned long addr)
+{
+	struct page *page = NULL;
 
-	/*
-	 * Private mappings do not use reserved huge pages so the allocation
-	 * may have failed due to an undersized hugetlb pool.  Try to grab a
-	 * surplus huge page from the buddy allocator.
-	 */
-	if (!use_reserved_page)
+	spin_lock(&hugetlb_lock);
+	if (free_huge_pages > resv_huge_pages)
+		page = dequeue_huge_page(vma, addr);
+	spin_unlock(&hugetlb_lock);
+	if (!page)
 		page = alloc_buddy_huge_page(vma, addr);
+	return page;
+}
 
+static struct page *alloc_huge_page(struct vm_area_struct *vma,
+				    unsigned long addr)
+{
+	struct page *page;
+
+	if (vma->vm_flags & VM_MAYSHARE)
+		page = alloc_huge_page_shared(vma, addr);
+	else
+		page = alloc_huge_page_private(vma, addr);
+	if (page)
+		set_page_refcounted(page);
 	return page;
 }
 
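The patch above splits alloc_huge_page() by mapping type: VM_MAYSHARE mappings go through alloc_huge_page_shared(), which only dequeues from the hugetlb pool (shared mappings are backed by reservations), while private mappings go through alloc_huge_page_private(), which dequeues only if pages remain beyond the reserved count and otherwise falls back to alloc_buddy_huge_page(). As a minimal userspace sketch of the two paths being separated, the program below maps the same hugetlbfs file once with MAP_SHARED and once with MAP_PRIVATE; it is illustrative only and not part of the patch, and the mount point /mnt/hugetlbfs and the 2 MB huge page size are assumptions about the test system.

/*
 * Illustrative sketch: fault in one huge page through each of the two
 * allocation paths this patch separates.  Assumes hugetlbfs is mounted
 * at /mnt/hugetlbfs and the huge page size is 2 MB.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define HPAGE_SIZE	(2UL * 1024 * 1024)	/* assumed huge page size */

int main(void)
{
	int fd = open("/mnt/hugetlbfs/test", O_CREAT | O_RDWR, 0600);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* MAP_SHARED sets VM_MAYSHARE: the shared (reserved-pool) path. */
	char *shared = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE,
			    MAP_SHARED, fd, 0);

	/* MAP_PRIVATE: the private path, which may use surplus pages. */
	char *private = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE,
			     MAP_PRIVATE, fd, 0);

	if (shared == MAP_FAILED || private == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	shared[0] = 1;	/* fault a huge page via the shared path */
	private[0] = 1;	/* fault a huge page via the private path */

	munmap(shared, HPAGE_SIZE);
	munmap(private, HPAGE_SIZE);
	close(fd);
	return 0;
}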