author	KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>	2012-03-21 19:34:22 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-21 20:55:01 -0400
commit	b24028572fb69e9dd6de8c359eba2b2c66baa889 (patch)
tree	563594eba40e5fd0b61c36c09705f128a8dfbe40 /mm/memcontrol.c
parent	ca464d69b19120a826aa2534de2511a6f542edf5 (diff)
memcg: remove PCG_CACHE page_cgroup flag
We record 'the page is cache' with the PCG_CACHE bit in page_cgroup. Here, "CACHE" covers file cache and shmem; its complement, RSS, means anonymous user pages (and SwapCache) and does not include shmem.

Considering the callers: at charge/uncharge time the caller already knows what the page is, so there is no need to record it with one bit per page.

This patch removes the PCG_CACHE bit and makes callers of mem_cgroup_charge_statistics() specify what the page is.

About page migration: the mapping of the used page is not touched during migration (see page_remove_rmap), so we can rely on it and push the correct charge type down to __mem_cgroup_uncharge_common() from end_migration() for the unused page. The force flag was misleading: it was abused for skipping the needless page_mapped() / PageCgroupMigration() check, as we know the unused page is no longer mapped and cleared the migration flag just a few lines up. But doing the checks is no biggie, and it's not worth adding another flag just to skip them.

[akpm@linux-foundation.org: checkpatch fixes]
[hughd@google.com: fix PageAnon uncharging]

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ying Han <yinghan@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
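As a rough illustration of the contract this change moves to, here is a minimal standalone C sketch; the struct, names, and plain counters are made-up stand-ins for the kernel's percpu memcg stats, not real kernel API:

	#include <stdbool.h>

	/* Stand-in for the two memcg counters the patch toggles between. */
	struct sketch_memcg {
		long rss;	/* mapped anon pages and anon SwapCache */
		long cache;	/* file cache, including shmem/tmpfs */
	};

	/*
	 * Mirrors the reworked mem_cgroup_charge_statistics() contract:
	 * the caller says whether the page is anon instead of the function
	 * re-reading a per-page PCG_CACHE bit; negative nr_pages uncharges.
	 */
	static void sketch_charge_statistics(struct sketch_memcg *memcg,
					     bool anon, int nr_pages)
	{
		if (anon)
			memcg->rss += nr_pages;
		else
			memcg->cache += nr_pages;
	}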
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--	mm/memcontrol.c	57
1 file changed, 32 insertions, 25 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 69af5d5801fc..88113ee32ac8 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -690,15 +690,19 @@ static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
 }
 
 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
-					 bool file, int nr_pages)
+					 bool anon, int nr_pages)
 {
 	preempt_disable();
 
-	if (file)
-		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
+	/*
+	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
+	 * counted as CACHE even if it's on ANON LRU.
+	 */
+	if (anon)
+		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
 			       nr_pages);
 	else
-		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
+		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
 			       nr_pages);
 
 	/* pagein of a big page is an event. So, ignore page size */
@@ -2442,6 +2446,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 {
 	struct zone *uninitialized_var(zone);
 	bool was_on_lru = false;
+	bool anon;
 
 	lock_page_cgroup(pc);
 	if (unlikely(PageCgroupUsed(pc))) {
@@ -2477,19 +2482,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 	 * See mem_cgroup_add_lru_list(), etc.
 	 */
 	smp_wmb();
-	switch (ctype) {
-	case MEM_CGROUP_CHARGE_TYPE_CACHE:
-	case MEM_CGROUP_CHARGE_TYPE_SHMEM:
-		SetPageCgroupCache(pc);
-		SetPageCgroupUsed(pc);
-		break;
-	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
-		ClearPageCgroupCache(pc);
-		SetPageCgroupUsed(pc);
-		break;
-	default:
-		break;
-	}
+	SetPageCgroupUsed(pc);
 
 	if (lrucare) {
 		if (was_on_lru) {
@@ -2500,7 +2493,12 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 		spin_unlock_irq(&zone->lru_lock);
 	}
 
-	mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), nr_pages);
+	if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
+		anon = true;
+	else
+		anon = false;
+
+	mem_cgroup_charge_statistics(memcg, anon, nr_pages);
 	unlock_page_cgroup(pc);
 
 	/*
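In the commit path above, the page's type is no longer read back from a per-page bit; it follows directly from the charge type, and only MEM_CGROUP_CHARGE_TYPE_MAPPED charges are anon. A minimal sketch of that mapping, with a stand-in enum (names only echo the kernel's):

	#include <stdbool.h>

	enum sketch_charge_type {
		SKETCH_CHARGE_TYPE_MAPPED,	/* anon */
		SKETCH_CHARGE_TYPE_CACHE,	/* file cache */
		SKETCH_CHARGE_TYPE_SHMEM,	/* shmem/tmpfs */
	};

	/* Only MAPPED charges count toward RSS; everything else is CACHE. */
	static bool charge_type_is_anon(enum sketch_charge_type ctype)
	{
		return ctype == SKETCH_CHARGE_TYPE_MAPPED;
	}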
@@ -2565,6 +2563,7 @@ static int mem_cgroup_move_account(struct page *page,
 {
 	unsigned long flags;
 	int ret;
+	bool anon = PageAnon(page);
 
 	VM_BUG_ON(from == to);
 	VM_BUG_ON(PageLRU(page));
@@ -2593,14 +2592,14 @@ static int mem_cgroup_move_account(struct page *page,
 		__this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
 		preempt_enable();
 	}
-	mem_cgroup_charge_statistics(from, PageCgroupCache(pc), -nr_pages);
+	mem_cgroup_charge_statistics(from, anon, -nr_pages);
 	if (uncharge)
 		/* This is not "cancel", but cancel_charge does all we need. */
 		__mem_cgroup_cancel_charge(from, nr_pages);
 
 	/* caller should have done css_get */
 	pc->mem_cgroup = to;
-	mem_cgroup_charge_statistics(to, PageCgroupCache(pc), nr_pages);
+	mem_cgroup_charge_statistics(to, anon, nr_pages);
 	/*
 	 * We charges against "to" which may not have any tasks. Then, "to"
 	 * can be under rmdir(). But in current implementation, caller of
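Note how mem_cgroup_move_account() snapshots anon once, via PageAnon(page), and uses it for both legs of the move, so the 'from' and 'to' statistics can never disagree. A standalone sketch of that shape (stand-in types, plain counters):

	#include <stdbool.h>

	struct sketch_memcg { long rss, cache; };

	static void sketch_stats(struct sketch_memcg *m, bool anon, int n)
	{
		if (anon)
			m->rss += n;
		else
			m->cache += n;
	}

	/* One snapshotted anon value drives both the uncharge of 'from'
	 * and the charge of 'to'. */
	static void sketch_move_account(struct sketch_memcg *from,
					struct sketch_memcg *to,
					bool anon, int nr_pages)
	{
		sketch_stats(from, anon, -nr_pages);
		sketch_stats(to, anon, nr_pages);
	}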
@@ -2921,6 +2920,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 	struct mem_cgroup *memcg = NULL;
 	unsigned int nr_pages = 1;
 	struct page_cgroup *pc;
+	bool anon;
 
 	if (mem_cgroup_disabled())
 		return NULL;
@@ -2946,8 +2946,12 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 	if (!PageCgroupUsed(pc))
 		goto unlock_out;
 
+	anon = PageAnon(page);
+
 	switch (ctype) {
 	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
+		anon = true;
+		/* fallthrough */
 	case MEM_CGROUP_CHARGE_TYPE_DROP:
 		/* See mem_cgroup_prepare_migration() */
 		if (page_mapped(page) || PageCgroupMigration(pc))
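The uncharge path above defaults anon to PageAnon(page), then lets the MAPPED case override it to true before falling through to DROP's checks: a MAPPED uncharge is accounted as anon regardless of what PageAnon() reports at that point (cf. the "fix PageAnon uncharging" note in the changelog). A self-contained sketch of the default-then-override pattern, with stand-in names:

	#include <stdbool.h>

	enum sketch_charge_type { SKETCH_MAPPED, SKETCH_DROP, SKETCH_OTHER };

	static bool sketch_uncharge_is_anon(enum sketch_charge_type ctype,
					    bool page_anon)
	{
		bool anon = page_anon;	/* default from the page itself */

		switch (ctype) {
		case SKETCH_MAPPED:
			anon = true;	/* MAPPED uncharges are anon by definition */
			/* fallthrough */
		case SKETCH_DROP:
			/* the shared page_mapped()/migration checks sit here */
			break;
		default:
			break;
		}
		return anon;
	}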
@@ -2964,7 +2968,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 		break;
 	}
 
-	mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -nr_pages);
+	mem_cgroup_charge_statistics(memcg, anon, -nr_pages);
 
 	ClearPageCgroupUsed(pc);
 	/*
@@ -3271,6 +3275,7 @@ void mem_cgroup_end_migration(struct mem_cgroup *memcg,
 {
 	struct page *used, *unused;
 	struct page_cgroup *pc;
+	bool anon;
 
 	if (!memcg)
 		return;
@@ -3292,8 +3297,10 @@ void mem_cgroup_end_migration(struct mem_cgroup *memcg,
 	lock_page_cgroup(pc);
 	ClearPageCgroupMigration(pc);
 	unlock_page_cgroup(pc);
-
-	__mem_cgroup_uncharge_common(unused, MEM_CGROUP_CHARGE_TYPE_FORCE);
+	anon = PageAnon(used);
+	__mem_cgroup_uncharge_common(unused,
+		anon ? MEM_CGROUP_CHARGE_TYPE_MAPPED
+		     : MEM_CGROUP_CHARGE_TYPE_CACHE);
 
 	/*
 	 * If a page is a file cache, radix-tree replacement is very atomic
@@ -3303,7 +3310,7 @@ void mem_cgroup_end_migration(struct mem_cgroup *memcg,
 	 * and USED bit check in mem_cgroup_uncharge_page() will do enough
 	 * check. (see prepare_charge() also)
 	 */
-	if (PageAnon(used))
+	if (anon)
 		mem_cgroup_uncharge_page(used);
 	/*
 	 * At migration, we may charge account against cgroup which has no
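To recap the migration change: the used page's mapping is untouched during migration (see page_remove_rmap in the changelog), so its PageAnon() can select a real charge type for the unused page, replacing the old MEM_CGROUP_CHARGE_TYPE_FORCE. A one-liner sketch with stand-in names:

	#include <stdbool.h>

	enum sketch_charge_type { SKETCH_MAPPED, SKETCH_CACHE };

	/* Pick the unused page's charge type from the used page's type. */
	static enum sketch_charge_type sketch_unused_ctype(bool used_is_anon)
	{
		return used_is_anon ? SKETCH_MAPPED : SKETCH_CACHE;
	}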
@@ -3333,7 +3340,7 @@ void mem_cgroup_replace_page_cache(struct page *oldpage,
 	/* fix accounting on old pages */
 	lock_page_cgroup(pc);
 	memcg = pc->mem_cgroup;
-	mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -1);
+	mem_cgroup_charge_statistics(memcg, false, -1);
 	ClearPageCgroupUsed(pc);
 	unlock_page_cgroup(pc);
 