about summary refs log tree commit diff stats
path: root/mm/memcontrol.c
diff options
context:
space:
mode:
authorDaisuke Nishimura <nishimura@mxp.nes.nec.co.jp>2010-05-26 17:42:38 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2010-05-27 12:12:43 -0400
commit90254a65833b67502d14736410b3857a15535c67 (patch)
treee03ec501b4a585c0d112469f36100ec41a715345 /mm/memcontrol.c
parent3c11ecf448eff8f12922c498b8274ce98587eb74 (diff)
memcg: clean up move charge
This patch cleans up move charge code by:

- define functions to handle pte for each type, and make is_target_pte_for_mc() cleaner.
- instead of checking the MOVE_CHARGE_TYPE_ANON bit, define a function that checks the bit.

Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp> Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Balbir Singh <balbir@in.ibm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--mm/memcontrol.c96
1 file changed, 59 insertions, 37 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 53eb30ebdb4..e5277e8a42a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -266,6 +266,12 @@ static struct move_charge_struct {
266 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq), 266 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
267}; 267};
268 268
269static bool move_anon(void)
270{
271 return test_bit(MOVE_CHARGE_TYPE_ANON,
272 &mc.to->move_charge_at_immigrate);
273}
274
269/* 275/*
270 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft 276 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
271 * limit reclaim to prevent infinite loops, if they ever occur. 277 * limit reclaim to prevent infinite loops, if they ever occur.
@@ -4162,50 +4168,66 @@ enum mc_target_type {
4162 MC_TARGET_SWAP, 4168 MC_TARGET_SWAP,
4163}; 4169};
4164 4170
4165static int is_target_pte_for_mc(struct vm_area_struct *vma, 4171static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
4166 unsigned long addr, pte_t ptent, union mc_target *target) 4172 unsigned long addr, pte_t ptent)
4167{ 4173{
4168 struct page *page = NULL; 4174 struct page *page = vm_normal_page(vma, addr, ptent);
4169 struct page_cgroup *pc;
4170 int ret = 0;
4171 swp_entry_t ent = { .val = 0 };
4172 int usage_count = 0;
4173 bool move_anon = test_bit(MOVE_CHARGE_TYPE_ANON,
4174 &mc.to->move_charge_at_immigrate);
4175 4175
4176 if (!pte_present(ptent)) { 4176 if (!page || !page_mapped(page))
4177 /* TODO: handle swap of shmes/tmpfs */ 4177 return NULL;
4178 if (pte_none(ptent) || pte_file(ptent)) 4178 if (PageAnon(page)) {
4179 return 0; 4179 /* we don't move shared anon */
4180 else if (is_swap_pte(ptent)) { 4180 if (!move_anon() || page_mapcount(page) > 2)
4181 ent = pte_to_swp_entry(ptent); 4181 return NULL;
4182 if (!move_anon || non_swap_entry(ent)) 4182 } else
4183 return 0;
4184 usage_count = mem_cgroup_count_swap_user(ent, &page);
4185 }
4186 } else {
4187 page = vm_normal_page(vma, addr, ptent);
4188 if (!page || !page_mapped(page))
4189 return 0;
4190 /* 4183 /*
4191 * TODO: We don't move charges of file(including shmem/tmpfs) 4184 * TODO: We don't move charges of file(including shmem/tmpfs)
4192 * pages for now. 4185 * pages for now.
4193 */ 4186 */
4194 if (!move_anon || !PageAnon(page)) 4187 return NULL;
4195 return 0; 4188 if (!get_page_unless_zero(page))
4196 if (!get_page_unless_zero(page)) 4189 return NULL;
4197 return 0; 4190
4198 usage_count = page_mapcount(page); 4191 return page;
4199 } 4192}
4200 if (usage_count > 1) { 4193
4201 /* 4194static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4202 * TODO: We don't move charges of shared(used by multiple 4195 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4203 * processes) pages for now. 4196{
4204 */ 4197 int usage_count;
4198 struct page *page = NULL;
4199 swp_entry_t ent = pte_to_swp_entry(ptent);
4200
4201 if (!move_anon() || non_swap_entry(ent))
4202 return NULL;
4203 usage_count = mem_cgroup_count_swap_user(ent, &page);
4204 if (usage_count > 1) { /* we don't move shared anon */
4205 if (page) 4205 if (page)
4206 put_page(page); 4206 put_page(page);
4207 return 0; 4207 return NULL;
4208 } 4208 }
4209 if (do_swap_account)
4210 entry->val = ent.val;
4211
4212 return page;
4213}
4214
4215static int is_target_pte_for_mc(struct vm_area_struct *vma,
4216 unsigned long addr, pte_t ptent, union mc_target *target)
4217{
4218 struct page *page = NULL;
4219 struct page_cgroup *pc;
4220 int ret = 0;
4221 swp_entry_t ent = { .val = 0 };
4222
4223 if (pte_present(ptent))
4224 page = mc_handle_present_pte(vma, addr, ptent);
4225 else if (is_swap_pte(ptent))
4226 page = mc_handle_swap_pte(vma, addr, ptent, &ent);
4227 /* TODO: handle swap of shmes/tmpfs */
4228
4229 if (!page && !ent.val)
4230 return 0;
4209 if (page) { 4231 if (page) {
4210 pc = lookup_page_cgroup(page); 4232 pc = lookup_page_cgroup(page);
4211 /* 4233 /*
@@ -4221,8 +4243,8 @@ static int is_target_pte_for_mc(struct vm_area_struct *vma,
4221 if (!ret || !target) 4243 if (!ret || !target)
4222 put_page(page); 4244 put_page(page);
4223 } 4245 }
4224 /* throught */ 4246 /* There is a swap entry and a page doesn't exist or isn't charged */
4225 if (ent.val && do_swap_account && !ret && 4247 if (ent.val && !ret &&
4226 css_id(&mc.from->css) == lookup_swap_cgroup(ent)) { 4248 css_id(&mc.from->css) == lookup_swap_cgroup(ent)) {
4227 ret = MC_TARGET_SWAP; 4249 ret = MC_TARGET_SWAP;
4228 if (target) 4250 if (target)