author    Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>  2012-07-31 19:42:18 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-07-31 21:42:41 -0400
commit    6d76dcf40405144a448040a350fd214ddc243d5e (patch)
tree      025d7201f63bcba6e8b19b5e21ffb4371ebb69cf /mm/hugetlb_cgroup.c
parent    9dd540e23111d8884773ab942a736f3aba4040d4 (diff)
hugetlb/cgroup: add charge/uncharge routines for hugetlb cgroup
Add the charge and uncharge routines for hugetlb cgroup. We do cgroup
charging in the page allocation path and uncharge in the compound page
destructor. Assigning a page's hugetlb cgroup is protected by hugetlb_lock.

[liwp@linux.vnet.ibm.com: add huge_page_order check to avoid incorrect uncharge]
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: David Rientjes <rientjes@google.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hillf Danton <dhillf@gmail.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Wanpeng Li <liwp.linux@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
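For orientation, the intended call sequence is: reserve against the task's cgroup with hugetlb_cgroup_charge_cgroup() before allocating, bind the page to the cgroup with hugetlb_cgroup_commit_charge() once a page is in hand, and roll the reservation back with hugetlb_cgroup_uncharge_cgroup() if allocation fails. The sketch below illustrates that flow under stated assumptions: only the hugetlb_cgroup_* routines come from this patch, while the alloc_huge_page_sketch() wrapper, the raw page allocation, and the hstate_index() helper are illustrative stand-ins for the real allocation path.

/*
 * Minimal sketch of a caller, assuming an allocation path similar to
 * alloc_huge_page(). The wrapper, the alloc_pages_node() call, and
 * hstate_index() are assumptions for illustration, not part of this hunk.
 */
static struct page *alloc_huge_page_sketch(struct hstate *h, int nid)
{
	struct hugetlb_cgroup *h_cg;
	struct page *page;
	int idx = hstate_index(h);		/* index into hstates[] */
	unsigned long nr_pages = pages_per_huge_page(h);

	/* Step 1: charge the task's cgroup before touching the allocator. */
	if (hugetlb_cgroup_charge_cgroup(idx, nr_pages, &h_cg))
		return ERR_PTR(-ENOSPC);

	page = alloc_pages_node(nid, GFP_HIGHUSER | __GFP_COMP,
				huge_page_order(h));
	if (!page) {
		/* Step 2a: allocation failed, give the charge back. */
		hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg);
		return ERR_PTR(-ENOMEM);
	}

	/* Step 2b: record the owning cgroup on the page (takes hugetlb_lock). */
	hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, page);
	return page;
}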
Diffstat (limited to 'mm/hugetlb_cgroup.c')
 mm/hugetlb_cgroup.c | 80 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 80 insertions(+), 0 deletions(-)
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index 0d1a66e9039b..63e04cfa437d 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -111,6 +111,86 @@ static int hugetlb_cgroup_pre_destroy(struct cgroup *cgroup)
 	return -EBUSY;
 }
 
+int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
+				 struct hugetlb_cgroup **ptr)
+{
+	int ret = 0;
+	struct res_counter *fail_res;
+	struct hugetlb_cgroup *h_cg = NULL;
+	unsigned long csize = nr_pages * PAGE_SIZE;
+
+	if (hugetlb_cgroup_disabled())
+		goto done;
+	/*
+	 * We don't charge any cgroup if the compound page has fewer
+	 * than 3 pages.
+	 */
+	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
+		goto done;
+again:
+	rcu_read_lock();
+	h_cg = hugetlb_cgroup_from_task(current);
+	if (!css_tryget(&h_cg->css)) {
+		rcu_read_unlock();
+		goto again;
+	}
+	rcu_read_unlock();
+
+	ret = res_counter_charge(&h_cg->hugepage[idx], csize, &fail_res);
+	css_put(&h_cg->css);
+done:
+	*ptr = h_cg;
+	return ret;
+}
+
+void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
+				  struct hugetlb_cgroup *h_cg,
+				  struct page *page)
+{
+	if (hugetlb_cgroup_disabled() || !h_cg)
+		return;
+
+	spin_lock(&hugetlb_lock);
+	set_hugetlb_cgroup(page, h_cg);
+	spin_unlock(&hugetlb_lock);
+	return;
+}
+
+/*
+ * Should be called with hugetlb_lock held
+ */
+void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
+				  struct page *page)
+{
+	struct hugetlb_cgroup *h_cg;
+	unsigned long csize = nr_pages * PAGE_SIZE;
+
+	if (hugetlb_cgroup_disabled())
+		return;
+	VM_BUG_ON(!spin_is_locked(&hugetlb_lock));
+	h_cg = hugetlb_cgroup_from_page(page);
+	if (unlikely(!h_cg))
+		return;
+	set_hugetlb_cgroup(page, NULL);
+	res_counter_uncharge(&h_cg->hugepage[idx], csize);
+	return;
+}
+
+void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
+				    struct hugetlb_cgroup *h_cg)
+{
+	unsigned long csize = nr_pages * PAGE_SIZE;
+
+	if (hugetlb_cgroup_disabled() || !h_cg)
+		return;
+
+	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
+		return;
+
+	res_counter_uncharge(&h_cg->hugepage[idx], csize);
+	return;
+}
+
 struct cgroup_subsys hugetlb_subsys = {
 	.name = "hugetlb",
 	.create = hugetlb_cgroup_create,
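On the free side, the page-level uncharge must run with hugetlb_lock held, since that lock protects the page-to-cgroup assignment (hence the VM_BUG_ON in hugetlb_cgroup_uncharge_page() above). A sketch of the destructor side follows, assuming it sits in a free_huge_page()-style path as the commit message describes; the wrapper and the elided free-list handling are assumptions, not part of this hunk.

/*
 * Sketch of the compound page destructor side. Only
 * hugetlb_cgroup_uncharge_page() comes from this patch; the
 * surrounding free_huge_page()-style function is assumed.
 */
static void free_huge_page_sketch(struct page *page)
{
	struct hstate *h = page_hstate(page);

	spin_lock(&hugetlb_lock);
	/* Clears the page's cgroup pointer and uncharges the res_counter. */
	hugetlb_cgroup_uncharge_page(hstate_index(h),
				     pages_per_huge_page(h), page);
	/* ... return the page to the free lists or the buddy allocator ... */
	spin_unlock(&hugetlb_lock);
}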