author    Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>  2012-07-31 19:42:07 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-07-31 21:42:40 -0400
commit    0edaecfab218d747d30de4575e911907371e2cd2 (patch)
tree      352ef3e6d2dd13b90efddfd275e4472caeb4a697 /mm/hugetlb.c
parent    189ebff2894a9d0f4e250dd1e154d282ef0a6779 (diff)
hugetlb: add a list for tracking in-use HugeTLB pages
hugepage_activelist will be used to track currently in-use HugeTLB pages. We need to be able to find the in-use HugeTLB pages to support HugeTLB cgroup removal: on cgroup removal we update each such page's HugeTLB cgroup to point to the parent cgroup.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Hillf Danton <dhillf@gmail.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--  mm/hugetlb.c  |  12
1 file changed, 7 insertions(+), 5 deletions(-)
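For readers skimming the patch, the short userspace sketch below mirrors the list bookkeeping it introduces: a page's page->lru node is initialised exactly once when the page is prepared, then moved between the per-node free list and the new hugepage_activelist with list_move(), and only unlinked with list_del() when the page is actually destroyed. The list helpers and the cut-down struct page / struct hstate are simplified stand-ins for illustration, not the kernel's <linux/list.h> implementation.

/*
 * Minimal, self-contained sketch (plain C, not kernel code) of the
 * free-list/active-list transitions this patch sets up.
 */
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list->prev = list;	/* empty, self-referencing node */
}

static void list_add(struct list_head *entry, struct list_head *head)
{
	entry->next = head->next;
	entry->prev = head;
	head->next->prev = entry;
	head->next = entry;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	entry->next = entry->prev = entry;
}

/* Unlink the entry from whatever list it is on and add it to 'head'. */
static void list_move(struct list_head *entry, struct list_head *head)
{
	list_del(entry);
	list_add(entry, head);
}

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

/* Simplified stand-ins for struct page and struct hstate. */
struct page   { struct list_head lru; };
struct hstate {
	struct list_head hugepage_freelists[1];	/* pretend one NUMA node */
	struct list_head hugepage_activelist;	/* the list this patch adds */
};

int main(void)
{
	struct hstate h;
	struct page page;

	/* hugetlb_add_hstate(): both lists start empty. */
	INIT_LIST_HEAD(&h.hugepage_freelists[0]);
	INIT_LIST_HEAD(&h.hugepage_activelist);

	/* prep_new_huge_page(): lru is initialised once... */
	INIT_LIST_HEAD(&page.lru);
	/* ...so enqueue_huge_page() can use list_move() unconditionally. */
	list_move(&page.lru, &h.hugepage_freelists[0]);

	/* dequeue_huge_page_node(): free list -> active list. */
	list_move(&page.lru, &h.hugepage_activelist);
	printf("in use,  active list empty: %d\n", list_empty(&h.hugepage_activelist));

	/* free_huge_page(), non-surplus path: active list -> free list. */
	list_move(&page.lru, &h.hugepage_freelists[0]);
	printf("freed,   active list empty: %d\n", list_empty(&h.hugepage_activelist));
	return 0;
}

Keeping the lru node permanently linked to either the free list or the active list is what lets enqueue_huge_page() and dequeue_huge_page_node() switch from list_add()/list_del() to list_move(), and it is why the extra INIT_LIST_HEAD()/list_del() calls in the free and surplus paths can be dropped in the diff that follows.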
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e54b695336f9..b5b6e156ca76 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -510,7 +510,7 @@ void copy_huge_page(struct page *dst, struct page *src)
 static void enqueue_huge_page(struct hstate *h, struct page *page)
 {
 	int nid = page_to_nid(page);
-	list_add(&page->lru, &h->hugepage_freelists[nid]);
+	list_move(&page->lru, &h->hugepage_freelists[nid]);
 	h->free_huge_pages++;
 	h->free_huge_pages_node[nid]++;
 }
@@ -522,7 +522,7 @@ static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
 	if (list_empty(&h->hugepage_freelists[nid]))
 		return NULL;
 	page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
-	list_del(&page->lru);
+	list_move(&page->lru, &h->hugepage_activelist);
 	set_page_refcounted(page);
 	h->free_huge_pages--;
 	h->free_huge_pages_node[nid]--;
@@ -626,10 +626,11 @@ static void free_huge_page(struct page *page)
 	page->mapping = NULL;
 	BUG_ON(page_count(page));
 	BUG_ON(page_mapcount(page));
-	INIT_LIST_HEAD(&page->lru);
 
 	spin_lock(&hugetlb_lock);
 	if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
+		/* remove the page from active list */
+		list_del(&page->lru);
 		update_and_free_page(h, page);
 		h->surplus_huge_pages--;
 		h->surplus_huge_pages_node[nid]--;
@@ -642,6 +643,7 @@ static void free_huge_page(struct page *page)
 
 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
 {
+	INIT_LIST_HEAD(&page->lru);
 	set_compound_page_dtor(page, free_huge_page);
 	spin_lock(&hugetlb_lock);
 	h->nr_huge_pages++;
@@ -890,6 +892,7 @@ static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
 
 	spin_lock(&hugetlb_lock);
 	if (page) {
+		INIT_LIST_HEAD(&page->lru);
 		r_nid = page_to_nid(page);
 		set_compound_page_dtor(page, free_huge_page);
 		/*
@@ -994,7 +997,6 @@ retry:
 	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
 		if ((--needed) < 0)
 			break;
-		list_del(&page->lru);
 		/*
 		 * This page is now managed by the hugetlb allocator and has
 		 * no users -- drop the buddy allocator's reference.
@@ -1009,7 +1011,6 @@ free:
 	/* Free unnecessary surplus pages to the buddy allocator */
 	if (!list_empty(&surplus_list)) {
 		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
-			list_del(&page->lru);
 			put_page(page);
 		}
 	}
@@ -1909,6 +1910,7 @@ void __init hugetlb_add_hstate(unsigned order)
 	h->free_huge_pages = 0;
 	for (i = 0; i < MAX_NUMNODES; ++i)
 		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
+	INIT_LIST_HEAD(&h->hugepage_activelist);
 	h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]);
 	h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]);
 	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",