aboutsummaryrefslogtreecommitdiffstats
path: root/include
diff options
context:
space:
mode:
authorKAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>2009-01-07 21:07:48 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2009-01-08 11:31:04 -0500
commit7a81b88cb53e335ff7d019e6398c95792c817d93 (patch)
tree6ebca4d509a541ac707e10f9369916549e90c0ad /include
parent0b82ac37b889ec881b645860da3775118effb3ca (diff)
memcg: introduce charge-commit-cancel style of functions
There is a small race in do_swap_page(). When the page swapped-in is charged, the mapcount can be greater than 0. But, at the same time, some process (which shares it) may call unmap and make the mapcount go 1->0, and the page is uncharged.

          CPUA                          CPUB
       mapcount == 1.
   (1) charge if mapcount==0
                                 zap_pte_range()
                                 (2) mapcount 1 => 0.
                                 (3) uncharge(). (success)
   (4) set page's rmap()
       mapcount 0=>1

Then, this swap page's account is leaked.

For fixing this, I added a new interface:
  - charge
    account to res_counter by PAGE_SIZE and try to free pages if necessary.
  - commit
    register page_cgroup and add to LRU if necessary.
  - cancel
    uncharge PAGE_SIZE because of do_swap_page failure.

       CPUA
   (1) charge (always)
   (2) set page's rmap (mapcount > 0)
   (3) commit charge was necessary or not after set_pte().

This protocol uses PCG_USED bit on page_cgroup for avoiding over accounting. Usual mem_cgroup_charge_common() does charge -> commit at a time.

And this patch also adds the following functions to clarify all charges:
  - mem_cgroup_newpage_charge()
    .... replacement for mem_cgroup_charge(), called against newly allocated anon pages.
  - mem_cgroup_charge_migrate_fixup()
    called only from remove_migration_ptes(); we'll have to rewrite this later (this patch just keeps old behavior). This function will be removed by an additional patch to make migration clearer.

Good for clarifying "what we do".

Then, we have the 4 following charge points:
  - newpage
  - swap-in
  - add-to-cache
  - migration

[akpm@linux-foundation.org: add missing inline directives to stubs]

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <balbir@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include')
-rw-r--r--include/linux/memcontrol.h36
1 file changed, 34 insertions, 2 deletions
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 1fbe14d39521..c592f315cd02 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -27,8 +27,17 @@ struct mm_struct;
27 27
28#ifdef CONFIG_CGROUP_MEM_RES_CTLR 28#ifdef CONFIG_CGROUP_MEM_RES_CTLR
29 29
30extern int mem_cgroup_charge(struct page *page, struct mm_struct *mm, 30extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
31 gfp_t gfp_mask); 31 gfp_t gfp_mask);
32extern int mem_cgroup_charge_migrate_fixup(struct page *page,
33 struct mm_struct *mm, gfp_t gfp_mask);
34/* for swap handling */
35extern int mem_cgroup_try_charge(struct mm_struct *mm,
36 gfp_t gfp_mask, struct mem_cgroup **ptr);
37extern void mem_cgroup_commit_charge_swapin(struct page *page,
38 struct mem_cgroup *ptr);
39extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr);
40
32extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, 41extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
33 gfp_t gfp_mask); 42 gfp_t gfp_mask);
34extern void mem_cgroup_move_lists(struct page *page, enum lru_list lru); 43extern void mem_cgroup_move_lists(struct page *page, enum lru_list lru);
@@ -71,7 +80,9 @@ extern long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
71 80
72 81
73#else /* CONFIG_CGROUP_MEM_RES_CTLR */ 82#else /* CONFIG_CGROUP_MEM_RES_CTLR */
74static inline int mem_cgroup_charge(struct page *page, 83struct mem_cgroup;
84
85static inline int mem_cgroup_newpage_charge(struct page *page,
75 struct mm_struct *mm, gfp_t gfp_mask) 86 struct mm_struct *mm, gfp_t gfp_mask)
76{ 87{
77 return 0; 88 return 0;
@@ -83,6 +94,27 @@ static inline int mem_cgroup_cache_charge(struct page *page,
83 return 0; 94 return 0;
84} 95}
85 96
97static inline int mem_cgroup_charge_migrate_fixup(struct page *page,
98 struct mm_struct *mm, gfp_t gfp_mask)
99{
100 return 0;
101}
102
103static inline int mem_cgroup_try_charge(struct mm_struct *mm,
104 gfp_t gfp_mask, struct mem_cgroup **ptr)
105{
106 return 0;
107}
108
109static inline void mem_cgroup_commit_charge_swapin(struct page *page,
110 struct mem_cgroup *ptr)
111{
112}
113
114static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr)
115{
116}
117
86static inline void mem_cgroup_uncharge_page(struct page *page) 118static inline void mem_cgroup_uncharge_page(struct page *page)
87{ 119{
88} 120}