diff options
author | KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> | 2009-01-07 21:07:50 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-01-08 11:31:04 -0500 |
commit | 01b1ae63c2270cbacfd43fea94578c17950eb548 (patch) | |
tree | ab0275f32e8548c4413014d43cab1f52f03c9c5c /include/linux/memcontrol.h | |
parent | bced0520fe462bb94021dcabd32e99630c171be2 (diff) |
memcg: simple migration handling
Now, management of "charge" under page migration is done in the following
manner. (Assume page contents are migrated from oldpage to newpage.)
before
- "newpage" is charged before migration.
at success.
- "oldpage" is uncharged somewhere (unmap, radix-tree-replace)
at failure
- "newpage" is uncharged.
- "oldpage" is charged if necessary (*1)
But (*1) is not reliable... because an allocation under GFP_ATOMIC may fail.
This patch changes the behavior as follows, using charge/commit/cancel ops.
before
- charge PAGE_SIZE (no target page)
success
- commit charge against "newpage".
failure
- commit charge against "oldpage".
(PCG_USED bit works effectively to avoid double-counting)
- if "oldpage" is obsolete, cancel charge of PAGE_SIZE.
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <balbir@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/memcontrol.h')
-rw-r--r-- | include/linux/memcontrol.h | 19 |
1 files changed, 7 insertions, 12 deletions
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index c592f315cd0..b095f5f6ecf 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h | |||
@@ -29,8 +29,6 @@ struct mm_struct; | |||
29 | 29 | ||
30 | extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm, | 30 | extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm, |
31 | gfp_t gfp_mask); | 31 | gfp_t gfp_mask); |
32 | extern int mem_cgroup_charge_migrate_fixup(struct page *page, | ||
33 | struct mm_struct *mm, gfp_t gfp_mask); | ||
34 | /* for swap handling */ | 32 | /* for swap handling */ |
35 | extern int mem_cgroup_try_charge(struct mm_struct *mm, | 33 | extern int mem_cgroup_try_charge(struct mm_struct *mm, |
36 | gfp_t gfp_mask, struct mem_cgroup **ptr); | 34 | gfp_t gfp_mask, struct mem_cgroup **ptr); |
@@ -60,8 +58,9 @@ extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); | |||
60 | ((cgroup) == mem_cgroup_from_task((mm)->owner)) | 58 | ((cgroup) == mem_cgroup_from_task((mm)->owner)) |
61 | 59 | ||
62 | extern int | 60 | extern int |
63 | mem_cgroup_prepare_migration(struct page *page, struct page *newpage); | 61 | mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr); |
64 | extern void mem_cgroup_end_migration(struct page *page); | 62 | extern void mem_cgroup_end_migration(struct mem_cgroup *mem, |
63 | struct page *oldpage, struct page *newpage); | ||
65 | 64 | ||
66 | /* | 65 | /* |
67 | * For memory reclaim. | 66 | * For memory reclaim. |
@@ -94,12 +93,6 @@ static inline int mem_cgroup_cache_charge(struct page *page, | |||
94 | return 0; | 93 | return 0; |
95 | } | 94 | } |
96 | 95 | ||
97 | static inline int mem_cgroup_charge_migrate_fixup(struct page *page, | ||
98 | struct mm_struct *mm, gfp_t gfp_mask) | ||
99 | { | ||
100 | return 0; | ||
101 | } | ||
102 | |||
103 | static inline int mem_cgroup_try_charge(struct mm_struct *mm, | 96 | static inline int mem_cgroup_try_charge(struct mm_struct *mm, |
104 | gfp_t gfp_mask, struct mem_cgroup **ptr) | 97 | gfp_t gfp_mask, struct mem_cgroup **ptr) |
105 | { | 98 | { |
@@ -144,12 +137,14 @@ static inline int task_in_mem_cgroup(struct task_struct *task, | |||
144 | } | 137 | } |
145 | 138 | ||
146 | static inline int | 139 | static inline int |
147 | mem_cgroup_prepare_migration(struct page *page, struct page *newpage) | 140 | mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr) |
148 | { | 141 | { |
149 | return 0; | 142 | return 0; |
150 | } | 143 | } |
151 | 144 | ||
152 | static inline void mem_cgroup_end_migration(struct page *page) | 145 | static inline void mem_cgroup_end_migration(struct mem_cgroup *mem, |
146 | struct page *oldpage, | ||
147 | struct page *newpage) | ||
153 | { | 148 | { |
154 | } | 149 | } |
155 | 150 | ||