about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c | 26
1 files changed, 22 insertions, 4 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 20e04c64468d..dcacc811e70e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -71,7 +71,25 @@ static void enqueue_huge_page(struct page *page)
 	free_huge_pages_node[nid]++;
 }
 
-static struct page *dequeue_huge_page(struct vm_area_struct *vma,
+static struct page *dequeue_huge_page(void)
+{
+	int nid;
+	struct page *page = NULL;
+
+	for (nid = 0; nid < MAX_NUMNODES; ++nid) {
+		if (!list_empty(&hugepage_freelists[nid])) {
+			page = list_entry(hugepage_freelists[nid].next,
+					  struct page, lru);
+			list_del(&page->lru);
+			free_huge_pages--;
+			free_huge_pages_node[nid]--;
+			break;
+		}
+	}
+	return page;
+}
+
+static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma,
 						unsigned long address)
 {
 	int nid;
@@ -410,7 +428,7 @@ static struct page *alloc_huge_page_shared(struct vm_area_struct *vma,
 	struct page *page;
 
 	spin_lock(&hugetlb_lock);
-	page = dequeue_huge_page(vma, addr);
+	page = dequeue_huge_page_vma(vma, addr);
 	spin_unlock(&hugetlb_lock);
 	return page ? page : ERR_PTR(-VM_FAULT_OOM);
 }
@@ -425,7 +443,7 @@ static struct page *alloc_huge_page_private(struct vm_area_struct *vma,
 
 	spin_lock(&hugetlb_lock);
 	if (free_huge_pages > resv_huge_pages)
-		page = dequeue_huge_page(vma, addr);
+		page = dequeue_huge_page_vma(vma, addr);
 	spin_unlock(&hugetlb_lock);
 	if (!page) {
 		page = alloc_buddy_huge_page(vma, addr);
@@ -578,7 +596,7 @@ static unsigned long set_max_huge_pages(unsigned long count)
 	min_count = max(count, min_count);
 	try_to_free_low(min_count);
 	while (min_count < persistent_huge_pages) {
-		struct page *page = dequeue_huge_page(NULL, 0);
+		struct page *page = dequeue_huge_page();
 		if (!page)
 			break;
 		update_and_free_page(page);