author    Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>  2010-03-10 18:22:17 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2010-03-12 18:52:36 -0500
commit    024914477e15ef8b17f271ec47f1bb8a589f0806 (patch)
tree      9a6a8b4224c94fcdd1b8c3127b301ee3537f8cc2 /include/linux/swap.h
parent    8033b97c9b5ef063e3f4bf2efe1cd0a22093aaff (diff)
memcg: move charges of anonymous swap
This patch is another core part of the move-charge-at-task-migration feature. It enables moving the charges of anonymous swap.

To move the charge of a swap entry, we need to exchange swap_cgroup's record. In the current implementation, swap_cgroup's record is protected by:

  - page lock: if the entry is on swap cache.
  - swap_lock: if the entry is not on swap cache.

This works well for the usual swap-in/out activity, but it forces the swap-charge-moving code to check many conditions before it can exchange swap_cgroup's record safely. So this patch changes the modification of swap_cgroup's record (swap_cgroup_record()) to use xchg, and defines a new function to cmpxchg swap_cgroup's record.

This patch also enables moving the charge of swap caches that are not pte_present but not yet uncharged, which can exist on the swap-out path, by getting the target pages via find_get_page() as do_mincore() does.

[kosaki.motohiro@jp.fujitsu.com: fix ia64 build]
[akpm@linux-foundation.org: fix typos]
Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Paul Menage <menage@google.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
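A minimal user-space sketch of the xchg/cmpxchg pattern the message describes, written with C11 atomics. The names (swap_record, record_store, record_cmpxchg) and the flat array standing in for the per-device swap_cgroup map are illustrative assumptions, not the kernel's actual data structures or function signatures; the point is only to show why a cmpxchg helper lets the move-charge path exchange the owner id safely against racing updates.

/*
 * Toy model: one owner id (a memcg id in the kernel) per swap entry.
 * record_store() mirrors an unconditional xchg-style update;
 * record_cmpxchg() mirrors the conditional update used when moving a charge.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_ENTRIES 128

static _Atomic unsigned short swap_record[NR_ENTRIES];

/* unconditional update; returns the previous owner id */
static unsigned short record_store(unsigned int ent, unsigned short id)
{
	return atomic_exchange(&swap_record[ent], id);
}

/*
 * Conditional update: the record is rewritten only if it still names the
 * expected old owner, so a racing swap-in/free cannot be silently overwritten.
 */
static bool record_cmpxchg(unsigned int ent, unsigned short old, unsigned short new)
{
	return atomic_compare_exchange_strong(&swap_record[ent], &old, new);
}

int main(void)
{
	unsigned int ent = 42;

	record_store(ent, 1);			/* entry charged to cgroup 1 */
	if (record_cmpxchg(ent, 1, 2))		/* move charge: 1 -> 2 */
		printf("charge moved to cgroup 2\n");
	if (!record_cmpxchg(ent, 1, 3))		/* stale owner: move rejected */
		printf("second move rejected, owner is %u\n",
		       (unsigned)atomic_load(&swap_record[ent]));
	return 0;
}

The second call fails because the record no longer names owner 1, which is exactly the guarantee the lock-based scheme needed many condition checks to provide.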
Diffstat (limited to 'include/linux/swap.h')
-rw-r--r--  include/linux/swap.h  9
1 file changed, 9 insertions(+), 0 deletions(-)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index a2602a8207a6..1f59d9340c4d 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -355,6 +355,7 @@ static inline void disable_swap_token(void)
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
 extern void
 mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout);
+extern int mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep);
 #else
 static inline void
 mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
@@ -485,6 +486,14 @@ mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
 {
 }
 
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+static inline int
+mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep)
+{
+	return 0;
+}
+#endif
+
 #endif /* CONFIG_SWAP */
 #endif /* __KERNEL__*/
 #endif /* _LINUX_SWAP_H */
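The second hunk follows the usual config-stub idiom: when swap is configured out, a static inline stub returning 0 stands in for the real mem_cgroup_count_swap_user(), so callers compile in either configuration without their own #ifdefs. Below is a standalone model of that idiom; HAVE_SWAP and count_swap_user() are illustrative names, not the kernel's symbols.

#include <stdio.h>

/* #define HAVE_SWAP 1 */	/* flip to model CONFIG_SWAP=y; would then
				 * need a real definition elsewhere to link */

#ifdef HAVE_SWAP
int count_swap_user(unsigned long ent);		/* real implementation elsewhere */
#else
static inline int count_swap_user(unsigned long ent)
{
	(void)ent;
	return 0;	/* no swap support: the entry can have no users */
}
#endif

int main(void)
{
	/* a caller can test the count unconditionally in either configuration */
	if (count_swap_user(42) == 0)
		printf("nothing to account for this entry\n");
	return 0;
}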