about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
authorDaisuke Nishimura <nishimura@mxp.nes.nec.co.jp>2010-03-10 18:22:18 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2010-03-12 18:52:36 -0500
commit483c30b514bd3037fa3f19fa42327c94c10f51c8 (patch)
treeaaf96db52bf4bdb0c83f209bc9c6a1237867718f /mm
parent024914477e15ef8b17f271ec47f1bb8a589f0806 (diff)
memcg: improve performance in moving swap charge
Try to reduce overheads in moving swap charge by: - Adding a new function (__mem_cgroup_put), which takes "count" as an arg and decrements mem->refcnt by "count". - Removing res_counter_uncharge, css_put, and mem_cgroup_put from the path of moving swap account, and consolidating all of them into mem_cgroup_clear_mc. We cannot do that about mc.to->refcnt. These changes reduce the overhead from 1.35sec to 0.9sec to move charges of 1G anonymous memory (including 500MB swap) in my test environment. Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp> Cc: Balbir Singh <balbir@linux.vnet.ibm.com> Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Li Zefan <lizf@cn.fujitsu.com> Cc: Paul Menage <menage@google.com> Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/memcontrol.c | 75
1 file changed, 60 insertions(+), 15 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e883198baf81..b00ec74a4c18 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -255,6 +255,7 @@ static struct move_charge_struct {
 	struct mem_cgroup *to;
 	unsigned long precharge;
 	unsigned long moved_charge;
+	unsigned long moved_swap;
 	struct task_struct *moving_task;	/* a task moving charges */
 	wait_queue_head_t waitq;		/* a waitq for other context */
 } mc = {
@@ -2277,6 +2278,7 @@ void mem_cgroup_uncharge_swap(swp_entry_t ent)
  * @entry: swap entry to be moved
  * @from:  mem_cgroup which the entry is moved from
  * @to:  mem_cgroup which the entry is moved to
+ * @need_fixup: whether we should fixup res_counters and refcounts.
  *
  * It succeeds only when the swap_cgroup's record for this entry is the same
  * as the mem_cgroup's id of @from.
@@ -2287,7 +2289,7 @@ void mem_cgroup_uncharge_swap(swp_entry_t ent)
  * both res and memsw, and called css_get().
  */
 static int mem_cgroup_move_swap_account(swp_entry_t entry,
-		struct mem_cgroup *from, struct mem_cgroup *to)
+		struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
 {
 	unsigned short old_id, new_id;
 
@@ -2295,27 +2297,36 @@ static int mem_cgroup_move_swap_account(swp_entry_t entry,
 	new_id = css_id(&to->css);
 
 	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
-		if (!mem_cgroup_is_root(from))
-			res_counter_uncharge(&from->memsw, PAGE_SIZE);
 		mem_cgroup_swap_statistics(from, false);
-		mem_cgroup_put(from);
+		mem_cgroup_swap_statistics(to, true);
 		/*
-		 * we charged both to->res and to->memsw, so we should uncharge
-		 * to->res.
+		 * This function is only called from task migration context now.
+		 * It postpones res_counter and refcount handling till the end
+		 * of task migration(mem_cgroup_clear_mc()) for performance
+		 * improvement. But we cannot postpone mem_cgroup_get(to)
+		 * because if the process that has been moved to @to does
+		 * swap-in, the refcount of @to might be decreased to 0.
 		 */
-		if (!mem_cgroup_is_root(to))
-			res_counter_uncharge(&to->res, PAGE_SIZE);
-		mem_cgroup_swap_statistics(to, true);
 		mem_cgroup_get(to);
-		css_put(&to->css);
-
+		if (need_fixup) {
+			if (!mem_cgroup_is_root(from))
+				res_counter_uncharge(&from->memsw, PAGE_SIZE);
+			mem_cgroup_put(from);
+			/*
+			 * we charged both to->res and to->memsw, so we should
+			 * uncharge to->res.
+			 */
+			if (!mem_cgroup_is_root(to))
+				res_counter_uncharge(&to->res, PAGE_SIZE);
+			css_put(&to->css);
+		}
 		return 0;
 	}
 	return -EINVAL;
 }
 #else
 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
-		struct mem_cgroup *from, struct mem_cgroup *to)
+		struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
 {
 	return -EINVAL;
 }
@@ -3398,9 +3409,9 @@ static void mem_cgroup_get(struct mem_cgroup *mem)
 	atomic_inc(&mem->refcnt);
 }
 
-static void mem_cgroup_put(struct mem_cgroup *mem)
+static void __mem_cgroup_put(struct mem_cgroup *mem, int count)
 {
-	if (atomic_dec_and_test(&mem->refcnt)) {
+	if (atomic_sub_and_test(count, &mem->refcnt)) {
 		struct mem_cgroup *parent = parent_mem_cgroup(mem);
 		__mem_cgroup_free(mem);
 		if (parent)
@@ -3408,6 +3419,11 @@ static void mem_cgroup_put(struct mem_cgroup *mem)
 	}
 }
 
+static void mem_cgroup_put(struct mem_cgroup *mem)
+{
+	__mem_cgroup_put(mem, 1);
+}
+
 /*
  * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled.
  */
@@ -3789,6 +3805,29 @@ static void mem_cgroup_clear_mc(void)
 		__mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
 		mc.moved_charge = 0;
 	}
+	/* we must fixup refcnts and charges */
+	if (mc.moved_swap) {
+		WARN_ON_ONCE(mc.moved_swap > INT_MAX);
+		/* uncharge swap account from the old cgroup */
+		if (!mem_cgroup_is_root(mc.from))
+			res_counter_uncharge(&mc.from->memsw,
+						PAGE_SIZE * mc.moved_swap);
+		__mem_cgroup_put(mc.from, mc.moved_swap);
+
+		if (!mem_cgroup_is_root(mc.to)) {
+			/*
+			 * we charged both to->res and to->memsw, so we should
+			 * uncharge to->res.
+			 */
+			res_counter_uncharge(&mc.to->res,
+						PAGE_SIZE * mc.moved_swap);
+			VM_BUG_ON(test_bit(CSS_ROOT, &mc.to->css.flags));
+			__css_put(&mc.to->css, mc.moved_swap);
+		}
+		/* we've already done mem_cgroup_get(mc.to) */
+
+		mc.moved_swap = 0;
+	}
 	mc.from = NULL;
 	mc.to = NULL;
 	mc.moving_task = NULL;
@@ -3818,11 +3857,13 @@ static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
 		VM_BUG_ON(mc.to);
 		VM_BUG_ON(mc.precharge);
 		VM_BUG_ON(mc.moved_charge);
+		VM_BUG_ON(mc.moved_swap);
 		VM_BUG_ON(mc.moving_task);
 		mc.from = from;
 		mc.to = mem;
 		mc.precharge = 0;
 		mc.moved_charge = 0;
+		mc.moved_swap = 0;
 		mc.moving_task = current;
 
 		ret = mem_cgroup_precharge_mc(mm);
@@ -3883,8 +3924,12 @@ put:			/* is_target_pte_for_mc() gets the page */
 			break;
 		case MC_TARGET_SWAP:
 			ent = target.ent;
-			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to))
+			if (!mem_cgroup_move_swap_account(ent,
+						mc.from, mc.to, false)) {
 				mc.precharge--;
+				/* we fixup refcnts and charges later. */
+				mc.moved_swap++;
+			}
 			break;
 		default:
 			break;