author		Nishanth Aravamudan <nacc@us.ibm.com>	2008-03-04 17:29:42 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-03-04 19:35:18 -0500
commit		348e1e04b5229a481891699ce86da009b793f29e (patch)
tree		481caab1f0178e64ace723fc9bd7e36627525e39 /mm/hugetlb.c
parent		ac09b3a15154af5f081fed509c6c3662e79de785 (diff)
hugetlb: fix pool shrinking while in restricted cpuset
Adam Litke noticed that currently we grow the hugepage pool independent of any
cpuset the running process may be in, but when shrinking the pool, the cpuset
is checked.  This leads to inconsistency when shrinking the pool in a
restricted cpuset -- an administrator may have been able to grow the pool on a
node restricted by a containing cpuset, but they cannot shrink it there.

There are two options: either prevent growing of the pool outside of the
cpuset or allow shrinking outside of the cpuset.  From previous discussions on
linux-mm, /proc/sys/vm/nr_hugepages is an administrative interface that should
not be restricted by cpusets.  So allow shrinking the pool by removing pages
from nodes outside of current's cpuset.

Signed-off-by: Nishanth Aravamudan <nacc@us.ibm.com>
Acked-by: Adam Litke <agl@us.ibm.com>
Cc: William Irwin <wli@holomorphy.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: Paul Jackson <pj@sgi.com>
Cc: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
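[Not part of the patch: a minimal userspace sketch of the administrative
interface the message refers to.  It grows and then shrinks the pool through
/proc/sys/vm/nr_hugepages and reports the value the kernel settled on; run it
as root inside a restricted cpuset to observe the behavior fixed here, where
the shrink could stop short if surplus pages sat on nodes outside the cpuset.
The helper names below are ours, not from the kernel tree.]

/* Exercise /proc/sys/vm/nr_hugepages: grow by two pages, shrink back. */
#include <stdio.h>
#include <stdlib.h>

#define NR_HUGEPAGES "/proc/sys/vm/nr_hugepages"

/* Read the current pool size, or -1 on error. */
static long read_pool_size(void)
{
	FILE *f = fopen(NR_HUGEPAGES, "r");
	long val = -1;

	if (f) {
		if (fscanf(f, "%ld", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}

/* Request a new pool size; 0 on success, nonzero on error. */
static int write_pool_size(long count)
{
	FILE *f = fopen(NR_HUGEPAGES, "w");

	if (!f)
		return -1;
	fprintf(f, "%ld\n", count);
	return fclose(f);
}

int main(void)
{
	long orig = read_pool_size();

	if (orig < 0) {
		perror(NR_HUGEPAGES);
		return EXIT_FAILURE;
	}
	printf("pool size:            %ld\n", orig);

	if (write_pool_size(orig + 2) != 0)
		perror("grow");
	printf("after grow request:   %ld\n", read_pool_size());

	/* Before this patch, this shrink could silently fall short in a
	 * restricted cpuset; afterwards it frees pages on any node. */
	if (write_pool_size(orig) != 0)
		perror("shrink");
	printf("after shrink request: %ld\n", read_pool_size());
	return 0;
}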
Diffstat (limited to 'mm/hugetlb.c')
 mm/hugetlb.c | 26 ++++++++++++++++++++++----
 1 file changed, 22 insertions(+), 4 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 20e04c64468d..dcacc811e70e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -71,7 +71,25 @@ static void enqueue_huge_page(struct page *page)
 	free_huge_pages_node[nid]++;
 }
 
-static struct page *dequeue_huge_page(struct vm_area_struct *vma,
+static struct page *dequeue_huge_page(void)
+{
+	int nid;
+	struct page *page = NULL;
+
+	for (nid = 0; nid < MAX_NUMNODES; ++nid) {
+		if (!list_empty(&hugepage_freelists[nid])) {
+			page = list_entry(hugepage_freelists[nid].next,
+					  struct page, lru);
+			list_del(&page->lru);
+			free_huge_pages--;
+			free_huge_pages_node[nid]--;
+			break;
+		}
+	}
+	return page;
+}
+
+static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma,
 				unsigned long address)
 {
 	int nid;
@@ -410,7 +428,7 @@ static struct page *alloc_huge_page_shared(struct vm_area_struct *vma,
 	struct page *page;
 
 	spin_lock(&hugetlb_lock);
-	page = dequeue_huge_page(vma, addr);
+	page = dequeue_huge_page_vma(vma, addr);
 	spin_unlock(&hugetlb_lock);
 	return page ? page : ERR_PTR(-VM_FAULT_OOM);
 }
@@ -425,7 +443,7 @@ static struct page *alloc_huge_page_private(struct vm_area_struct *vma,
 
 	spin_lock(&hugetlb_lock);
 	if (free_huge_pages > resv_huge_pages)
-		page = dequeue_huge_page(vma, addr);
+		page = dequeue_huge_page_vma(vma, addr);
 	spin_unlock(&hugetlb_lock);
 	if (!page) {
 		page = alloc_buddy_huge_page(vma, addr);
@@ -578,7 +596,7 @@ static unsigned long set_max_huge_pages(unsigned long count)
 	min_count = max(count, min_count);
 	try_to_free_low(min_count);
 	while (min_count < persistent_huge_pages) {
-		struct page *page = dequeue_huge_page(NULL, 0);
+		struct page *page = dequeue_huge_page();
 		if (!page)
 			break;
 		update_and_free_page(page);
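[For contrast with the cpuset-independent dequeue_huge_page() added above: a
condensed sketch of what the renamed dequeue_huge_page_vma() continues to do,
paraphrased from 2.6.25-era mm/hugetlb.c.  It is simplified -- reservation
accounting is elided and the details should be treated as approximate rather
than verbatim -- but the point is the cpuset_zone_allowed_softwall() check,
which the shrink path must not apply.]

/* Condensed paraphrase, not part of this diff: the per-fault path walks a
 * mempolicy-built zonelist and skips zones the task's cpuset forbids. */
static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma,
				unsigned long address)
{
	struct mempolicy *mpol;
	struct zonelist *zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask, &mpol);
	struct zone **z;
	struct page *page = NULL;

	for (z = zonelist->zones; *z; z++) {
		int nid = zone_to_nid(*z);

		/* The check that made shrinking cpuset-restricted: */
		if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) &&
		    !list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			break;
		}
	}
	mpol_free(mpol);	/* drop the reference huge_zonelist() took */
	return page;
}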