author	Johannes Weiner <jweiner@redhat.com>	2012-01-12 20:18:32 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-01-12 23:13:06 -0500
commit	72835c86ca15d0126354b73d5f29ce9194931c9b (patch)
tree	369f31168b405e4d597b3cd067a1fd0ac2025dbc
parent	ec0fffd84b162e0563a28a81aa049f946b31a8e2 (diff)
mm: unify remaining mem_cont, mem, etc. variable names to memcg
Signed-off-by: Johannes Weiner <jweiner@redhat.com>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
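The change is a mechanical rename: every remaining mem, mem_cont, cnt, and ptr that denotes a struct mem_cgroup becomes memcg, and double-pointer out-parameters become memcgp. As a rough sketch of the calling convention this leaves behind — modeled on the unuse_pte() hunk in mm/swapfile.c below, but with a hypothetical caller and failure check, not actual kernel code:

/*
 * Illustrative sketch only: post-rename convention for the swap-in
 * charge API. install_page_sketch() is a made-up placeholder for
 * whatever can fail between try and commit.
 */
static int swapin_charge_sketch(struct mm_struct *mm, struct page *page)
{
	struct mem_cgroup *memcg;	/* call-site handle, formerly "ptr" */

	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &memcg))
		return -ENOMEM;		/* nothing charged, nothing to undo */

	if (!install_page_sketch(page)) {	/* hypothetical failure path */
		mem_cgroup_cancel_charge_swapin(memcg);	/* drop the charge */
		return 0;
	}

	mem_cgroup_commit_charge_swapin(page, memcg);	/* charge sticks */
	return 1;
}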
-rw-r--r--	include/linux/memcontrol.h	16
-rw-r--r--	include/linux/oom.h	2
-rw-r--r--	include/linux/rmap.h	4
-rw-r--r--	mm/memcontrol.c	52
-rw-r--r--	mm/oom_kill.c	38
-rw-r--r--	mm/rmap.c	20
-rw-r--r--	mm/swapfile.c	9
-rw-r--r--	mm/vmscan.c	12
8 files changed, 78 insertions(+), 75 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index cee3761666f0..b80de520670b 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -54,10 +54,10 @@ extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
 				gfp_t gfp_mask);
 /* for swap handling */
 extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
-		struct page *page, gfp_t mask, struct mem_cgroup **ptr);
+		struct page *page, gfp_t mask, struct mem_cgroup **memcgp);
 extern void mem_cgroup_commit_charge_swapin(struct page *page,
-					struct mem_cgroup *ptr);
-extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr);
+					struct mem_cgroup *memcg);
+extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg);
 
 extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 					gfp_t gfp_mask);
@@ -101,7 +101,7 @@ extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);
 
 extern int
 mem_cgroup_prepare_migration(struct page *page,
-	struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask);
+	struct page *newpage, struct mem_cgroup **memcgp, gfp_t gfp_mask);
 extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
 	struct page *oldpage, struct page *newpage, bool migration_ok);
 
@@ -186,17 +186,17 @@ static inline int mem_cgroup_cache_charge(struct page *page,
 }
 
 static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
-		struct page *page, gfp_t gfp_mask, struct mem_cgroup **ptr)
+		struct page *page, gfp_t gfp_mask, struct mem_cgroup **memcgp)
 {
 	return 0;
 }
 
 static inline void mem_cgroup_commit_charge_swapin(struct page *page,
-					struct mem_cgroup *ptr)
+					struct mem_cgroup *memcg)
 {
 }
 
-static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr)
+static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
 {
 }
 
@@ -275,7 +275,7 @@ static inline struct cgroup_subsys_state
 
 static inline int
 mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
-	struct mem_cgroup **ptr, gfp_t gfp_mask)
+	struct mem_cgroup **memcgp, gfp_t gfp_mask)
 {
 	return 0;
 }
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 6f9d04a85336..552fba9c7d5a 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -43,7 +43,7 @@ enum oom_constraint {
 extern void compare_swap_oom_score_adj(int old_val, int new_val);
 extern int test_set_oom_score_adj(int new_val);
 
-extern unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
+extern unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
 			const nodemask_t *nodemask, unsigned long totalpages);
 extern int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
 extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 1afb9954bbf1..1cdd62a2788a 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -158,7 +158,7 @@ static inline void page_dup_rmap(struct page *page)
  * Called from mm/vmscan.c to handle paging out
  */
 int page_referenced(struct page *, int is_locked,
-			struct mem_cgroup *cnt, unsigned long *vm_flags);
+			struct mem_cgroup *memcg, unsigned long *vm_flags);
 int page_referenced_one(struct page *, struct vm_area_struct *,
 	unsigned long address, unsigned int *mapcount, unsigned long *vm_flags);
 
@@ -236,7 +236,7 @@ int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
 #define anon_vma_link(vma)	do {} while (0)
 
 static inline int page_referenced(struct page *page, int is_locked,
-				  struct mem_cgroup *cnt,
+				  struct mem_cgroup *memcg,
 				  unsigned long *vm_flags)
 {
 	*vm_flags = 0;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index abb66a2cba65..aeb23933a052 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2844,12 +2844,12 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
  */
 int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
 				 struct page *page,
-				 gfp_t mask, struct mem_cgroup **ptr)
+				 gfp_t mask, struct mem_cgroup **memcgp)
 {
 	struct mem_cgroup *memcg;
 	int ret;
 
-	*ptr = NULL;
+	*memcgp = NULL;
 
 	if (mem_cgroup_disabled())
 		return 0;
@@ -2867,27 +2867,27 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
 	memcg = try_get_mem_cgroup_from_page(page);
 	if (!memcg)
 		goto charge_cur_mm;
-	*ptr = memcg;
-	ret = __mem_cgroup_try_charge(NULL, mask, 1, ptr, true);
+	*memcgp = memcg;
+	ret = __mem_cgroup_try_charge(NULL, mask, 1, memcgp, true);
 	css_put(&memcg->css);
 	return ret;
 charge_cur_mm:
 	if (unlikely(!mm))
 		mm = &init_mm;
-	return __mem_cgroup_try_charge(mm, mask, 1, ptr, true);
+	return __mem_cgroup_try_charge(mm, mask, 1, memcgp, true);
 }
 
 static void
-__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
+__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
 					enum charge_type ctype)
 {
 	if (mem_cgroup_disabled())
 		return;
-	if (!ptr)
+	if (!memcg)
 		return;
-	cgroup_exclude_rmdir(&ptr->css);
+	cgroup_exclude_rmdir(&memcg->css);
 
-	__mem_cgroup_commit_charge_lrucare(page, ptr, ctype);
+	__mem_cgroup_commit_charge_lrucare(page, memcg, ctype);
 	/*
 	 * Now swap is on-memory. This means this page may be
 	 * counted both as mem and swap....double count.
@@ -2897,21 +2897,22 @@ __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
 	 */
 	if (do_swap_account && PageSwapCache(page)) {
 		swp_entry_t ent = {.val = page_private(page)};
+		struct mem_cgroup *swap_memcg;
 		unsigned short id;
-		struct mem_cgroup *memcg;
 
 		id = swap_cgroup_record(ent, 0);
 		rcu_read_lock();
-		memcg = mem_cgroup_lookup(id);
-		if (memcg) {
+		swap_memcg = mem_cgroup_lookup(id);
+		if (swap_memcg) {
 			/*
 			 * This recorded memcg can be obsolete one. So, avoid
 			 * calling css_tryget
 			 */
-			if (!mem_cgroup_is_root(memcg))
-				res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
-			mem_cgroup_swap_statistics(memcg, false);
-			mem_cgroup_put(memcg);
+			if (!mem_cgroup_is_root(swap_memcg))
+				res_counter_uncharge(&swap_memcg->memsw,
+						     PAGE_SIZE);
+			mem_cgroup_swap_statistics(swap_memcg, false);
+			mem_cgroup_put(swap_memcg);
 		}
 		rcu_read_unlock();
 	}
@@ -2920,13 +2921,14 @@ __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
 	 * So, rmdir()->pre_destroy() can be called while we do this charge.
 	 * In that case, we need to call pre_destroy() again. check it here.
 	 */
-	cgroup_release_and_wakeup_rmdir(&ptr->css);
+	cgroup_release_and_wakeup_rmdir(&memcg->css);
 }
 
-void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
+void mem_cgroup_commit_charge_swapin(struct page *page,
+				     struct mem_cgroup *memcg)
 {
-	__mem_cgroup_commit_charge_swapin(page, ptr,
+	__mem_cgroup_commit_charge_swapin(page, memcg,
 					MEM_CGROUP_CHARGE_TYPE_MAPPED);
 }
 
 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
@@ -3255,14 +3257,14 @@ static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
  * page belongs to.
  */
 int mem_cgroup_prepare_migration(struct page *page,
-	struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask)
+	struct page *newpage, struct mem_cgroup **memcgp, gfp_t gfp_mask)
 {
 	struct mem_cgroup *memcg = NULL;
 	struct page_cgroup *pc;
 	enum charge_type ctype;
 	int ret = 0;
 
-	*ptr = NULL;
+	*memcgp = NULL;
 
 	VM_BUG_ON(PageTransHuge(page));
 	if (mem_cgroup_disabled())
@@ -3313,10 +3315,10 @@ int mem_cgroup_prepare_migration(struct page *page,
 	if (!memcg)
 		return 0;
 
-	*ptr = memcg;
-	ret = __mem_cgroup_try_charge(NULL, gfp_mask, 1, ptr, false);
+	*memcgp = memcg;
+	ret = __mem_cgroup_try_charge(NULL, gfp_mask, 1, memcgp, false);
 	css_put(&memcg->css);/* drop extra refcnt */
-	if (ret || *ptr == NULL) {
+	if (ret || *memcgp == NULL) {
 		if (PageAnon(page)) {
 			lock_page_cgroup(pc);
 			ClearPageCgroupMigration(pc);
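One hunk in __mem_cgroup_commit_charge_swapin() above is more than a find-and-replace: the parameter takes over the memcg name, so the block-local variable that previously used it is renamed to swap_memcg to avoid shadowing. A toy illustration — the function is hypothetical, while do_swap_account, mem_cgroup_lookup() and mem_cgroup_put() are the symbols from the hunk:

/* Toy shadowing example, not kernel code: with the parameter named
 * "memcg", an inner local needs a distinct name ("swap_memcg"), or it
 * would hide the parameter for the rest of the block.
 */
static void commit_sketch(struct mem_cgroup *memcg, unsigned short id)
{
	if (do_swap_account) {
		struct mem_cgroup *swap_memcg;	/* "memcg" here would shadow */

		rcu_read_lock();
		swap_memcg = mem_cgroup_lookup(id);
		if (swap_memcg)
			mem_cgroup_put(swap_memcg);	/* drop swap record's ref */
		rcu_read_unlock();
	}
}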
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 05c0f27d4ed1..2958fd8e7c9a 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -152,7 +152,7 @@ struct task_struct *find_lock_task_mm(struct task_struct *p)
 
 /* return true if the task is not adequate as candidate victim task. */
 static bool oom_unkillable_task(struct task_struct *p,
-		const struct mem_cgroup *mem, const nodemask_t *nodemask)
+		const struct mem_cgroup *memcg, const nodemask_t *nodemask)
 {
 	if (is_global_init(p))
 		return true;
@@ -160,7 +160,7 @@ static bool oom_unkillable_task(struct task_struct *p,
 		return true;
 
 	/* When mem_cgroup_out_of_memory() and p is not member of the group */
-	if (mem && !task_in_mem_cgroup(p, mem))
+	if (memcg && !task_in_mem_cgroup(p, memcg))
 		return true;
 
 	/* p may not have freeable memory in nodemask */
@@ -179,12 +179,12 @@ static bool oom_unkillable_task(struct task_struct *p,
  * predictable as possible.  The goal is to return the highest value for the
  * task consuming the most memory to avoid subsequent oom failures.
  */
-unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
+unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
 			 const nodemask_t *nodemask, unsigned long totalpages)
 {
 	long points;
 
-	if (oom_unkillable_task(p, mem, nodemask))
+	if (oom_unkillable_task(p, memcg, nodemask))
 		return 0;
 
 	p = find_lock_task_mm(p);
@@ -308,7 +308,7 @@ static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
  * (not docbooked, we don't want this one cluttering up the manual)
  */
 static struct task_struct *select_bad_process(unsigned int *ppoints,
-		unsigned long totalpages, struct mem_cgroup *mem,
+		unsigned long totalpages, struct mem_cgroup *memcg,
 		const nodemask_t *nodemask)
 {
 	struct task_struct *g, *p;
@@ -320,7 +320,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
 
 		if (p->exit_state)
 			continue;
-		if (oom_unkillable_task(p, mem, nodemask))
+		if (oom_unkillable_task(p, memcg, nodemask))
 			continue;
 
 		/*
@@ -364,7 +364,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
 			}
 		}
 
-		points = oom_badness(p, mem, nodemask, totalpages);
+		points = oom_badness(p, memcg, nodemask, totalpages);
 		if (points > *ppoints) {
 			chosen = p;
 			*ppoints = points;
@@ -387,14 +387,14 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
  *
  * Call with tasklist_lock read-locked.
  */
-static void dump_tasks(const struct mem_cgroup *mem, const nodemask_t *nodemask)
+static void dump_tasks(const struct mem_cgroup *memcg, const nodemask_t *nodemask)
 {
 	struct task_struct *p;
 	struct task_struct *task;
 
 	pr_info("[ pid ]   uid  tgid total_vm      rss cpu oom_adj oom_score_adj name\n");
 	for_each_process(p) {
-		if (oom_unkillable_task(p, mem, nodemask))
+		if (oom_unkillable_task(p, memcg, nodemask))
 			continue;
 
 		task = find_lock_task_mm(p);
@@ -417,7 +417,7 @@ static void dump_tasks(const struct mem_cgroup *mem, const nodemask_t *nodemask)
 }
 
 static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
-			struct mem_cgroup *mem, const nodemask_t *nodemask)
+			struct mem_cgroup *memcg, const nodemask_t *nodemask)
 {
 	task_lock(current);
 	pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
@@ -427,10 +427,10 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
 	cpuset_print_task_mems_allowed(current);
 	task_unlock(current);
 	dump_stack();
-	mem_cgroup_print_oom_info(mem, p);
+	mem_cgroup_print_oom_info(memcg, p);
 	show_mem(SHOW_MEM_FILTER_NODES);
 	if (sysctl_oom_dump_tasks)
-		dump_tasks(mem, nodemask);
+		dump_tasks(memcg, nodemask);
 }
 
 #define K(x) ((x) << (PAGE_SHIFT-10))
@@ -484,7 +484,7 @@ static int oom_kill_task(struct task_struct *p)
 
 static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 			    unsigned int points, unsigned long totalpages,
-			    struct mem_cgroup *mem, nodemask_t *nodemask,
+			    struct mem_cgroup *memcg, nodemask_t *nodemask,
 			    const char *message)
 {
 	struct task_struct *victim = p;
@@ -493,7 +493,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 	unsigned int victim_points = 0;
 
 	if (printk_ratelimit())
-		dump_header(p, gfp_mask, order, mem, nodemask);
+		dump_header(p, gfp_mask, order, memcg, nodemask);
 
 	/*
 	 * If the task is already exiting, don't alarm the sysadmin or kill
@@ -524,7 +524,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 			/*
 			 * oom_badness() returns 0 if the thread is unkillable
 			 */
-			child_points = oom_badness(child, mem, nodemask,
+			child_points = oom_badness(child, memcg, nodemask,
 								totalpages);
 			if (child_points > victim_points) {
 				victim = child;
@@ -561,7 +561,7 @@ static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
 }
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
-void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
+void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask)
 {
 	unsigned long limit;
 	unsigned int points = 0;
@@ -578,14 +578,14 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
 	}
 
 	check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0, NULL);
-	limit = mem_cgroup_get_limit(mem) >> PAGE_SHIFT;
+	limit = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT;
 	read_lock(&tasklist_lock);
retry:
-	p = select_bad_process(&points, limit, mem, NULL);
+	p = select_bad_process(&points, limit, memcg, NULL);
 	if (!p || PTR_ERR(p) == -1UL)
 		goto out;
 
-	if (oom_kill_process(p, gfp_mask, 0, points, limit, mem, NULL,
+	if (oom_kill_process(p, gfp_mask, 0, points, limit, memcg, NULL,
 				"Memory cgroup out of memory"))
 		goto retry;
 out:
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -773,7 +773,7 @@ out:
 }
 
 static int page_referenced_anon(struct page *page,
-				struct mem_cgroup *mem_cont,
+				struct mem_cgroup *memcg,
 				unsigned long *vm_flags)
 {
 	unsigned int mapcount;
@@ -796,7 +796,7 @@ static int page_referenced_anon(struct page *page,
 		 * counting on behalf of references from different
 		 * cgroups
 		 */
-		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
+		if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
 			continue;
 		referenced += page_referenced_one(page, vma, address,
 						  &mapcount, vm_flags);
@@ -811,7 +811,7 @@ static int page_referenced_anon(struct page *page,
 /**
  * page_referenced_file - referenced check for object-based rmap
  * @page: the page we're checking references on.
- * @mem_cont: target memory controller
+ * @memcg: target memory control group
  * @vm_flags: collect encountered vma->vm_flags who actually referenced the page
  *
  * For an object-based mapped page, find all the places it is mapped and
@@ -822,7 +822,7 @@ static int page_referenced_anon(struct page *page,
  * This function is only called from page_referenced for object-based pages.
  */
 static int page_referenced_file(struct page *page,
-				struct mem_cgroup *mem_cont,
+				struct mem_cgroup *memcg,
 				unsigned long *vm_flags)
 {
 	unsigned int mapcount;
@@ -864,7 +864,7 @@ static int page_referenced_file(struct page *page,
 		 * counting on behalf of references from different
 		 * cgroups
 		 */
-		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
+		if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
 			continue;
 		referenced += page_referenced_one(page, vma, address,
 						  &mapcount, vm_flags);
@@ -880,7 +880,7 @@ static int page_referenced_file(struct page *page,
  * page_referenced - test if the page was referenced
  * @page: the page to test
  * @is_locked: caller holds lock on the page
- * @mem_cont: target memory controller
+ * @memcg: target memory cgroup
  * @vm_flags: collect encountered vma->vm_flags who actually referenced the page
  *
  * Quick test_and_clear_referenced for all mappings to a page,
@@ -888,7 +888,7 @@ static int page_referenced_file(struct page *page,
  */
 int page_referenced(struct page *page,
 		    int is_locked,
-		    struct mem_cgroup *mem_cont,
+		    struct mem_cgroup *memcg,
 		    unsigned long *vm_flags)
 {
 	int referenced = 0;
@@ -904,13 +904,13 @@ int page_referenced(struct page *page,
 		}
 	}
 	if (unlikely(PageKsm(page)))
-		referenced += page_referenced_ksm(page, mem_cont,
+		referenced += page_referenced_ksm(page, memcg,
 							vm_flags);
 	else if (PageAnon(page))
-		referenced += page_referenced_anon(page, mem_cont,
+		referenced += page_referenced_anon(page, memcg,
 							vm_flags);
 	else if (page->mapping)
-		referenced += page_referenced_file(page, mem_cont,
+		referenced += page_referenced_file(page, memcg,
 							vm_flags);
 	if (we_locked)
 		unlock_page(page);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 9520592d4231..d999f090dfda 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -847,12 +847,13 @@ unsigned int count_swap_pages(int type, int free)
 static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long addr, swp_entry_t entry, struct page *page)
 {
-	struct mem_cgroup *ptr;
+	struct mem_cgroup *memcg;
 	spinlock_t *ptl;
 	pte_t *pte;
 	int ret = 1;
 
-	if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, GFP_KERNEL, &ptr)) {
+	if (mem_cgroup_try_charge_swapin(vma->vm_mm, page,
+					 GFP_KERNEL, &memcg)) {
 		ret = -ENOMEM;
 		goto out_nolock;
 	}
@@ -860,7 +861,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
 		if (ret > 0)
-			mem_cgroup_cancel_charge_swapin(ptr);
+			mem_cgroup_cancel_charge_swapin(memcg);
 		ret = 0;
 		goto out;
 	}
@@ -871,7 +872,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 	set_pte_at(vma->vm_mm, addr, pte,
 		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
 	page_add_anon_rmap(page, vma, addr);
-	mem_cgroup_commit_charge_swapin(page, ptr);
+	mem_cgroup_commit_charge_swapin(page, memcg);
 	swap_free(entry);
 	/*
 	 * Move the page to the active list so it is not
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 813aae820a27..e16ca8384ef7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2376,7 +2376,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
 
-unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
+unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
 						gfp_t gfp_mask, bool noswap,
 						struct zone *zone,
 						unsigned long *nr_scanned)
@@ -2388,10 +2388,10 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
 		.may_unmap = 1,
 		.may_swap = !noswap,
 		.order = 0,
-		.target_mem_cgroup = mem,
+		.target_mem_cgroup = memcg,
 	};
 	struct mem_cgroup_zone mz = {
-		.mem_cgroup = mem,
+		.mem_cgroup = memcg,
 		.zone = zone,
 	};
 
@@ -2417,7 +2417,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
 	return sc.nr_reclaimed;
 }
 
-unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
+unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
 					   gfp_t gfp_mask,
 					   bool noswap)
 {
@@ -2430,7 +2430,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 		.may_swap = !noswap,
 		.nr_to_reclaim = SWAP_CLUSTER_MAX,
 		.order = 0,
-		.target_mem_cgroup = mem_cont,
+		.target_mem_cgroup = memcg,
 		.nodemask = NULL, /* we don't care the placement */
 		.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
 				(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
@@ -2444,7 +2444,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 	 * take care of from where we get pages. So the node where we start the
 	 * scan does not need to be the current node.
 	 */
-	nid = mem_cgroup_select_victim_node(mem_cont);
+	nid = mem_cgroup_select_victim_node(memcg);
 
 	zonelist = NODE_DATA(nid)->node_zonelists;
 