author     Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>    2012-07-31 19:42:15 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>        2012-07-31 21:42:41 -0400
commit     9dd540e23111d8884773ab942a736f3aba4040d4 (patch)
tree       84c7dcfa97c43d1f09ad88cdc836ff1b99da76e4
parent     2bc64a2046975410505bb119bba32705892b9255 (diff)
hugetlb/cgroup: add the cgroup pointer to page lru
Add the hugetlb cgroup pointer to the 3rd page's lru.next. This limits
hugetlb cgroup tracking to hugepages built from 3 or more normal pages,
which seems an acceptable limitation.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: David Rientjes <rientjes@google.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hillf Danton <dhillf@gmail.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 include/linux/hugetlb_cgroup.h | 37 +++++++++++++++++++++++++++++++++++++
 mm/hugetlb.c                   |  4 ++++
 2 files changed, 41 insertions(+), 0 deletions(-)
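For context on the "3 or more normal pages" restriction: the head page's lru already links the hugepage on the hstate free/active lists, and in kernels of this vintage page[1].lru carries the compound destructor and order, so the first free slot is page[2].lru.next. Only compound pages of order >= HUGETLB_CGROUP_MIN_ORDER (2) have such a third base page. The following standalone sketch (plain userspace C, not kernel code; all mock_* names are invented for illustration) models the scheme:

/*
 * Simplified model of stashing the hugetlb cgroup pointer in the third
 * base page of a compound page.  Only the lru.next slot matters here.
 */
#include <assert.h>
#include <stddef.h>

struct mock_page {
        struct { void *next; void *prev; } lru; /* stand-in for struct page::lru */
};

struct mock_hugetlb_cgroup { int css_id; };

#define MOCK_HUGETLB_CGROUP_MIN_ORDER 2 /* smallest order with >= 3 base pages */

/* Mirrors hugetlb_cgroup_from_page(): read the pointer back from page[2] */
static struct mock_hugetlb_cgroup *
mock_cgroup_from_page(struct mock_page *page, unsigned int order)
{
        if (order < MOCK_HUGETLB_CGROUP_MIN_ORDER)
                return NULL;            /* too small to carry the pointer */
        return (struct mock_hugetlb_cgroup *)page[2].lru.next;
}

/* Mirrors set_hugetlb_cgroup(): stash the pointer in page[2].lru.next */
static int
mock_set_cgroup(struct mock_page *page, unsigned int order,
                struct mock_hugetlb_cgroup *h_cg)
{
        if (order < MOCK_HUGETLB_CGROUP_MIN_ORDER)
                return -1;
        page[2].lru.next = h_cg;
        return 0;
}

int main(void)
{
        struct mock_page huge[4] = { { { NULL, NULL } } }; /* order-2: 4 base pages */
        struct mock_page tiny[2] = { { { NULL, NULL } } }; /* order-1: 2 base pages */
        struct mock_hugetlb_cgroup cg = { 42 };

        /* page[0].lru links the hugepage on the hstate lists and page[1].lru
         * holds the compound dtor/order, so page[2] is the first free slot. */
        assert(mock_set_cgroup(huge, 2, &cg) == 0);
        assert(mock_cgroup_from_page(huge, 2) == &cg);

        /* Order-0/1 hugepages have no third base page and cannot be tracked. */
        assert(mock_set_cgroup(tiny, 1, &cg) == -1);
        assert(mock_cgroup_from_page(tiny, 1) == NULL);
        return 0;
}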
diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index f19889e56b47..e5451a3b4ebc 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -18,8 +18,34 @@
 #include <linux/res_counter.h>
 
 struct hugetlb_cgroup;
+/*
+ * Minimum page order trackable by hugetlb cgroup.
+ * At least 3 pages are necessary for all the tracking information.
+ */
+#define HUGETLB_CGROUP_MIN_ORDER        2
 
 #ifdef CONFIG_CGROUP_HUGETLB
+
+static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
+{
+        VM_BUG_ON(!PageHuge(page));
+
+        if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
+                return NULL;
+        return (struct hugetlb_cgroup *)page[2].lru.next;
+}
+
+static inline
+int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
+{
+        VM_BUG_ON(!PageHuge(page));
+
+        if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
+                return -1;
+        page[2].lru.next = (void *)h_cg;
+        return 0;
+}
+
 static inline bool hugetlb_cgroup_disabled(void)
 {
         if (hugetlb_subsys.disabled)
@@ -28,6 +54,17 @@ static inline bool hugetlb_cgroup_disabled(void)
 }
 
 #else
+static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
+{
+        return NULL;
+}
+
+static inline
+int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
+{
+        return 0;
+}
+
 static inline bool hugetlb_cgroup_disabled(void)
 {
         return true;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index d5971597736b..efe29b53daff 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -28,6 +28,7 @@
 
 #include <linux/io.h>
 #include <linux/hugetlb.h>
+#include <linux/hugetlb_cgroup.h>
 #include <linux/node.h>
 #include "internal.h"
 
@@ -591,6 +592,7 @@ static void update_and_free_page(struct hstate *h, struct page *page)
                                 1 << PG_active | 1 << PG_reserved |
                                 1 << PG_private | 1 << PG_writeback);
         }
+        VM_BUG_ON(hugetlb_cgroup_from_page(page));
         set_compound_page_dtor(page, NULL);
         set_page_refcounted(page);
         arch_release_hugepage(page);
@@ -643,6 +645,7 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
         INIT_LIST_HEAD(&page->lru);
         set_compound_page_dtor(page, free_huge_page);
         spin_lock(&hugetlb_lock);
+        set_hugetlb_cgroup(page, NULL);
         h->nr_huge_pages++;
         h->nr_huge_pages_node[nid]++;
         spin_unlock(&hugetlb_lock);
@@ -892,6 +895,7 @@ static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
         INIT_LIST_HEAD(&page->lru);
         r_nid = page_to_nid(page);
         set_compound_page_dtor(page, free_huge_page);
+        set_hugetlb_cgroup(page, NULL);
         /*
          * We incremented the global counters already
          */
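This patch only adds the storage helpers and makes the allocation paths start each hugepage with a NULL cgroup pointer (with the free path asserting it is NULL again); the actual charge/uncharge bookkeeping arrives in later patches of the series. Purely as a hypothetical illustration (the function name below is invented and not part of this patch), a later commit-charge step could record the owning cgroup under hugetlb_lock using the new helper:

/* Hypothetical sketch, not from this patch: record the owning cgroup on a
 * freshly allocated hugepage so the free path can later look it up with
 * hugetlb_cgroup_from_page(). */
static void example_commit_charge(struct hugetlb_cgroup *h_cg, struct page *page)
{
        if (hugetlb_cgroup_disabled() || !h_cg)
                return;

        spin_lock(&hugetlb_lock);
        set_hugetlb_cgroup(page, h_cg);  /* lands in page[2].lru.next */
        spin_unlock(&hugetlb_lock);
}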