author | Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp> | 2010-03-10 18:22:17 -0500
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-03-12 18:52:36 -0500
commit | 024914477e15ef8b17f271ec47f1bb8a589f0806 (patch)
tree | 9a6a8b4224c94fcdd1b8c3127b301ee3537f8cc2 /mm/page_cgroup.c
parent | 8033b97c9b5ef063e3f4bf2efe1cd0a22093aaff (diff)
memcg: move charges of anonymous swap
This patch is another core part of the move-charge-at-task-migration
feature. It enables moving the charges of anonymous swap.
To move the charge of a swap entry, we need to exchange swap_cgroup's record.
In the current implementation, swap_cgroup's record is protected by:
- page lock: if the entry is on swap cache.
- swap_lock: if the entry is not on swap cache.
This works well for usual swap-in/out activity, but it makes the
charge-moving code check many conditions before it can exchange
swap_cgroup's record safely.
So I changed the modification of swap_cgroup's record (swap_cgroup_record())
to use xchg, and defined a new function to cmpxchg swap_cgroup's record.
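To make the idea concrete, here is a minimal userspace sketch of that lock-free pattern, using C11 atomics in place of the kernel's xchg()/cmpxchg() and a hypothetical flat record[] array standing in for swap_cgroup's per-page maps (names and sizes are illustrative, not the kernel API): a recorder installs an id unconditionally, while a mover succeeds only if the record still holds the id it expects.

    #include <stdatomic.h>
    #include <stdio.h>

    /* Toy stand-in for the swap_cgroup maps: one mem_cgroup id per swap slot. */
    #define NR_SLOTS 1024
    static _Atomic unsigned short record[NR_SLOTS];

    /* Like the reworked swap_cgroup_record(): install @id unconditionally
     * and return whatever was there before (xchg-style). */
    static unsigned short record_id(unsigned long slot, unsigned short id)
    {
        return atomic_exchange(&record[slot], id);
    }

    /* Like swap_cgroup_cmpxchg(): install @new only if the record still holds
     * @old; return @old on success, 0 on failure (no mem_cgroup uses id 0). */
    static unsigned short move_id(unsigned long slot,
                                  unsigned short old, unsigned short new)
    {
        unsigned short expected = old;

        if (atomic_compare_exchange_strong(&record[slot], &expected, new))
            return old;
        return 0;
    }

    int main(void)
    {
        record_id(42, 3);                    /* swap-out charged to cgroup 3 */
        printf("%u\n", move_id(42, 3, 5));   /* 3: charge moved to cgroup 5 */
        printf("%u\n", move_id(42, 3, 7));   /* 0: record is 5 now, move fails */
        return 0;
    }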
This patch also enables moving the charge of swap caches that are not
pte_present but not yet uncharged (these can exist on the swap-out path),
by getting the target pages via find_get_page() as do_mincore() does.
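As a rough illustration of why the swap-cache lookup matters: after swap-out the pte no longer points at the page, but the page may still sit in the swap cache with its charge attached, so it can be found by its swap slot and pinned before the charge is moved. The sketch below models that with a hypothetical swap_cache[] array and a toy struct page refcount; the real code uses find_get_page() on the swap cache, which takes the page reference internally.

    #include <stdio.h>

    /* Toy page and a tiny "swap cache" indexed by swap slot (both hypothetical;
     * they only mimic the shape of the real lookup). */
    struct page { int refcount; unsigned short memcg_id; };

    #define NR_SLOTS 1024
    static struct page *swap_cache[NR_SLOTS];

    /* Mimic find_get_page() on the swap cache: return the still-cached page
     * with an extra reference, or NULL if it is already gone. */
    static struct page *get_swap_cache_target(unsigned long slot)
    {
        struct page *page = swap_cache[slot];

        if (page)
            page->refcount++;   /* pin it while the charge is being moved */
        return page;
    }

    int main(void)
    {
        struct page p = { .refcount = 1, .memcg_id = 3 };

        swap_cache[42] = &p;    /* swapped out but still in the swap cache */
        struct page *target = get_swap_cache_target(42);
        if (target)
            printf("found page charged to %u, refcount %d\n",
                   target->memcg_id, target->refcount);
        return 0;
    }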
[kosaki.motohiro@jp.fujitsu.com: fix ia64 build]
[akpm@linux-foundation.org: fix typos]
Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Paul Menage <menage@google.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_cgroup.c')
-rw-r--r-- | mm/page_cgroup.c | 34
1 file changed, 32 insertions, 2 deletions
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 3d535d594826..3dd88539a0e6 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -335,6 +335,37 @@ not_enough_page:
 }
 
 /**
+ * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry.
+ * @ent: swap entry to be cmpxchged
+ * @old: old id
+ * @new: new id
+ *
+ * Returns old id at success, 0 at failure.
+ * (There is no mem_cgroup using 0 as its id)
+ */
+unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
+			unsigned short old, unsigned short new)
+{
+	int type = swp_type(ent);
+	unsigned long offset = swp_offset(ent);
+	unsigned long idx = offset / SC_PER_PAGE;
+	unsigned long pos = offset & SC_POS_MASK;
+	struct swap_cgroup_ctrl *ctrl;
+	struct page *mappage;
+	struct swap_cgroup *sc;
+
+	ctrl = &swap_cgroup_ctrl[type];
+
+	mappage = ctrl->map[idx];
+	sc = page_address(mappage);
+	sc += pos;
+	if (cmpxchg(&sc->id, old, new) == old)
+		return old;
+	else
+		return 0;
+}
+
+/**
  * swap_cgroup_record - record mem_cgroup for this swp_entry.
  * @ent: swap entry to be recorded into
  * @mem: mem_cgroup to be recorded
@@ -358,8 +389,7 @@ unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
 	mappage = ctrl->map[idx];
 	sc = page_address(mappage);
 	sc += pos;
-	old = sc->id;
-	sc->id = id;
+	old = xchg(&sc->id, id);
 
 	return old;
 }