author	Johannes Weiner <hannes@cmpxchg.org>	2017-05-03 17:55:13 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-05-03 18:52:11 -0400
commit	71cd31135d4cf030a057ed7079a75a40c0a4a796 (patch)
tree	c95c8da1e70c21e412d9ab7328fc414ecfb80451 /mm/memcontrol.c
parent	df0e53d0619e83b465e363c088bf4eeb2848273b (diff)
mm: memcontrol: re-use node VM page state enum
The current duplication is a high-maintenance mess, and it's painful to
add new items or query memcg state from the rest of the VM.

This increases the size of the stat array marginally, but we should aim
to track all these stats on a per-cgroup level anyway.

Link: http://lkml.kernel.org/r/20170404220148.28338-3-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
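[Context note] The other half of this patch lives in include/linux/memcontrol.h, outside this file-limited diff: the remaining cgroup-only counters are stacked on top of the generic node stat enum, which is what lets NR_SHMEM, NR_FILE_MAPPED, NR_FILE_DIRTY and NR_WRITEBACK index the per-memcg counter array directly in the hunks below. A sketch reconstructed from the identifiers used in this diff; treat the exact member order as an approximation:

	/* Sketch: cgroup-specific page state, defined on top of the
	 * universal node page state enum, so node_stat_item values and
	 * MEMCG_* values share one index space.
	 */
	enum memcg_stat_item {
		MEMCG_CACHE = NR_VM_NODE_STAT_ITEMS,
		MEMCG_RSS,
		MEMCG_RSS_HUGE,
		MEMCG_SWAP,
		MEMCG_SOCK,
		MEMCG_KERNEL_STACK_KB,
		MEMCG_NR_STAT,
	};

Because the two enums share one index space, a per-memcg counter array sized MEMCG_NR_STAT can be indexed by either kind of item.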
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--	mm/memcontrol.c	135
1 file changed, 68 insertions, 67 deletions
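[Pattern note] The mechanical change throughout the patch: loops that used a dense index 0..MEM_CGROUP_STAT_NSTATS-1 as both array position and stat id now walk the sparse memcg1_stats[] table, since the MEMCG_* and NR_* values are no longer contiguous. A simplified before/after sketch of the memcg_stat_show() hunks (the swap-accounting check is omitted here):

	/* Before: the loop index i doubled as the stat id. */
	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++)
		seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i],
			   mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);

	/* After: an explicit table maps display position to stat id. */
	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++)
		seq_printf(m, "%s %lu\n", memcg1_stat_names[i],
			   mem_cgroup_read_stat(memcg, memcg1_stats[i]) * PAGE_SIZE);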
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6b42887e5f14..6fe4c7fafbfc 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -100,18 +100,7 @@ static bool do_memsw_account(void)
 	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
 }
 
-static const char * const mem_cgroup_stat_names[] = {
-	"cache",
-	"rss",
-	"rss_huge",
-	"shmem",
-	"mapped_file",
-	"dirty",
-	"writeback",
-	"swap",
-};
-
 static const char * const mem_cgroup_lru_names[] = {
 	"inactive_anon",
 	"active_anon",
 	"inactive_file",
@@ -583,20 +572,16 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
 	 * counted as CACHE even if it's on ANON LRU.
 	 */
 	if (PageAnon(page))
-		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
-			       nr_pages);
+		__this_cpu_add(memcg->stat->count[MEMCG_RSS], nr_pages);
 	else {
-		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
-			       nr_pages);
+		__this_cpu_add(memcg->stat->count[MEMCG_CACHE], nr_pages);
 		if (PageSwapBacked(page))
-			__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SHMEM],
-				       nr_pages);
+			__this_cpu_add(memcg->stat->count[NR_SHMEM], nr_pages);
 	}
 
 	if (compound) {
 		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
-		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
-			       nr_pages);
+		__this_cpu_add(memcg->stat->count[MEMCG_RSS_HUGE], nr_pages);
 	}
 
 	/* pagein of a big page is an event. So, ignore page size */
@@ -1125,6 +1110,28 @@ static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
 	return false;
 }
 
+unsigned int memcg1_stats[] = {
+	MEMCG_CACHE,
+	MEMCG_RSS,
+	MEMCG_RSS_HUGE,
+	NR_SHMEM,
+	NR_FILE_MAPPED,
+	NR_FILE_DIRTY,
+	NR_WRITEBACK,
+	MEMCG_SWAP,
+};
+
+static const char *const memcg1_stat_names[] = {
+	"cache",
+	"rss",
+	"rss_huge",
+	"shmem",
+	"mapped_file",
+	"dirty",
+	"writeback",
+	"swap",
+};
+
 #define K(x) ((x) << (PAGE_SHIFT-10))
 /**
  * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
@@ -1169,11 +1176,11 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 		pr_cont_cgroup_path(iter->css.cgroup);
 		pr_cont(":");
 
-		for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
-			if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
+		for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
+			if (memcg1_stats[i] == MEMCG_SWAP && !do_swap_account)
 				continue;
-			pr_cont(" %s:%luKB", mem_cgroup_stat_names[i],
-				K(mem_cgroup_read_stat(iter, i)));
+			pr_cont(" %s:%luKB", memcg1_stat_names[i],
+				K(mem_cgroup_read_stat(iter, memcg1_stats[i])));
 		}
 
 		for (i = 0; i < NR_LRU_LISTS; i++)
@@ -2362,7 +2369,7 @@ void mem_cgroup_split_huge_fixup(struct page *head)
 	for (i = 1; i < HPAGE_PMD_NR; i++)
 		head[i].mem_cgroup = head->mem_cgroup;
 
-	__this_cpu_sub(head->mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
+	__this_cpu_sub(head->mem_cgroup->stat->count[MEMCG_RSS_HUGE],
 		       HPAGE_PMD_NR);
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -2372,7 +2379,7 @@ static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
 				    bool charge)
 {
 	int val = (charge) ? 1 : -1;
-	this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
+	this_cpu_add(memcg->stat->count[MEMCG_SWAP], val);
 }
 
 /**
@@ -2731,13 +2738,10 @@ static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
 		struct mem_cgroup *iter;
 
 		for_each_mem_cgroup_tree(iter, memcg) {
-			val += mem_cgroup_read_stat(iter,
-					MEM_CGROUP_STAT_CACHE);
-			val += mem_cgroup_read_stat(iter,
-					MEM_CGROUP_STAT_RSS);
+			val += mem_cgroup_read_stat(iter, MEMCG_CACHE);
+			val += mem_cgroup_read_stat(iter, MEMCG_RSS);
 			if (swap)
-				val += mem_cgroup_read_stat(iter,
-						MEM_CGROUP_STAT_SWAP);
+				val += mem_cgroup_read_stat(iter, MEMCG_SWAP);
 		}
 	} else {
 		if (!swap)
@@ -3134,15 +3138,15 @@ static int memcg_stat_show(struct seq_file *m, void *v)
 	struct mem_cgroup *mi;
 	unsigned int i;
 
-	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_stat_names) !=
-		     MEM_CGROUP_STAT_NSTATS);
+	BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
 	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
 
-	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
-		if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
+	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
+		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
 			continue;
-		seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i],
-			   mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
+		seq_printf(m, "%s %lu\n", memcg1_stat_names[i],
+			   mem_cgroup_read_stat(memcg, memcg1_stats[i]) *
+			   PAGE_SIZE);
 	}
 
 	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
@@ -3165,14 +3169,15 @@ static int memcg_stat_show(struct seq_file *m, void *v)
 		seq_printf(m, "hierarchical_memsw_limit %llu\n",
 			   (u64)memsw * PAGE_SIZE);
 
-	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
+	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
 		unsigned long long val = 0;
 
-		if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
+		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
 			continue;
 		for_each_mem_cgroup_tree(mi, memcg)
-			val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
-		seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val);
+			val += mem_cgroup_read_stat(mi, memcg1_stats[i]) *
+			PAGE_SIZE;
+		seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], val);
 	}
 
 	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) {
@@ -3645,10 +3650,10 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
 	struct mem_cgroup *parent;
 
-	*pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY);
+	*pdirty = mem_cgroup_read_stat(memcg, NR_FILE_DIRTY);
 
 	/* this should eventually include NR_UNSTABLE_NFS */
-	*pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
+	*pwriteback = mem_cgroup_read_stat(memcg, NR_WRITEBACK);
 	*pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
 					      (1 << LRU_ACTIVE_FILE));
 	*pheadroom = PAGE_COUNTER_MAX;
@@ -4504,10 +4509,8 @@ static int mem_cgroup_move_account(struct page *page,
 	spin_lock_irqsave(&from->move_lock, flags);
 
 	if (!anon && page_mapped(page)) {
-		__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
-			       nr_pages);
-		__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
-			       nr_pages);
+		__this_cpu_sub(from->stat->count[NR_FILE_MAPPED], nr_pages);
+		__this_cpu_add(to->stat->count[NR_FILE_MAPPED], nr_pages);
 	}
 
 	/*
@@ -4519,18 +4522,16 @@ static int mem_cgroup_move_account(struct page *page,
 		struct address_space *mapping = page_mapping(page);
 
 		if (mapping_cap_account_dirty(mapping)) {
-			__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_DIRTY],
+			__this_cpu_sub(from->stat->count[NR_FILE_DIRTY],
 				       nr_pages);
-			__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_DIRTY],
+			__this_cpu_add(to->stat->count[NR_FILE_DIRTY],
 				       nr_pages);
 		}
 	}
 
 	if (PageWriteback(page)) {
-		__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
-			       nr_pages);
-		__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
-			       nr_pages);
+		__this_cpu_sub(from->stat->count[NR_WRITEBACK], nr_pages);
+		__this_cpu_add(to->stat->count[NR_WRITEBACK], nr_pages);
 	}
 
 	/*
@@ -5190,9 +5191,9 @@ static int memory_stat_show(struct seq_file *m, void *v)
 	tree_events(memcg, events);
 
 	seq_printf(m, "anon %llu\n",
-		   (u64)stat[MEM_CGROUP_STAT_RSS] * PAGE_SIZE);
+		   (u64)stat[MEMCG_RSS] * PAGE_SIZE);
 	seq_printf(m, "file %llu\n",
-		   (u64)stat[MEM_CGROUP_STAT_CACHE] * PAGE_SIZE);
+		   (u64)stat[MEMCG_CACHE] * PAGE_SIZE);
 	seq_printf(m, "kernel_stack %llu\n",
 		   (u64)stat[MEMCG_KERNEL_STACK_KB] * 1024);
 	seq_printf(m, "slab %llu\n",
@@ -5202,13 +5203,13 @@ static int memory_stat_show(struct seq_file *m, void *v)
 		   (u64)stat[MEMCG_SOCK] * PAGE_SIZE);
 
 	seq_printf(m, "shmem %llu\n",
-		   (u64)stat[MEM_CGROUP_STAT_SHMEM] * PAGE_SIZE);
+		   (u64)stat[NR_SHMEM] * PAGE_SIZE);
 	seq_printf(m, "file_mapped %llu\n",
-		   (u64)stat[MEM_CGROUP_STAT_FILE_MAPPED] * PAGE_SIZE);
+		   (u64)stat[NR_FILE_MAPPED] * PAGE_SIZE);
 	seq_printf(m, "file_dirty %llu\n",
-		   (u64)stat[MEM_CGROUP_STAT_DIRTY] * PAGE_SIZE);
+		   (u64)stat[NR_FILE_DIRTY] * PAGE_SIZE);
 	seq_printf(m, "file_writeback %llu\n",
-		   (u64)stat[MEM_CGROUP_STAT_WRITEBACK] * PAGE_SIZE);
+		   (u64)stat[NR_WRITEBACK] * PAGE_SIZE);
 
 	for (i = 0; i < NR_LRU_LISTS; i++) {
 		struct mem_cgroup *mi;
@@ -5231,11 +5232,11 @@ static int memory_stat_show(struct seq_file *m, void *v)
 	seq_printf(m, "pgmajfault %lu\n", events[PGMAJFAULT]);
 
 	seq_printf(m, "workingset_refault %lu\n",
-		   stat[MEMCG_WORKINGSET_REFAULT]);
+		   stat[WORKINGSET_REFAULT]);
 	seq_printf(m, "workingset_activate %lu\n",
-		   stat[MEMCG_WORKINGSET_ACTIVATE]);
+		   stat[WORKINGSET_ACTIVATE]);
 	seq_printf(m, "workingset_nodereclaim %lu\n",
-		   stat[MEMCG_WORKINGSET_NODERECLAIM]);
+		   stat[WORKINGSET_NODERECLAIM]);
 
 	return 0;
 }
@@ -5492,10 +5493,10 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
 	}
 
 	local_irq_save(flags);
-	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
-	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
-	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
-	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_SHMEM], nr_shmem);
+	__this_cpu_sub(memcg->stat->count[MEMCG_RSS], nr_anon);
+	__this_cpu_sub(memcg->stat->count[MEMCG_CACHE], nr_file);
+	__this_cpu_sub(memcg->stat->count[MEMCG_RSS_HUGE], nr_huge);
+	__this_cpu_sub(memcg->stat->count[NR_SHMEM], nr_shmem);
 	__this_cpu_add(memcg->stat->events[PGPGOUT], pgpgout);
 	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
 	memcg_check_events(memcg, dummy_page);
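[Payoff note] The benefit named in the commit message, querying memcg state from the rest of the VM, follows from the shared index space: a generic node_stat_item can now address the per-memcg counters with no MEM_CGROUP_STAT_* translation step. A minimal sketch with a hypothetical helper name (illustration only, not part of this patch):

	/* Hypothetical: account nr pages of a generic node stat against a
	 * memcg. Node items (0 .. NR_VM_NODE_STAT_ITEMS-1) and MEMCG_* items
	 * both fall inside count[] after this series, so one index works
	 * for either kind of counter.
	 */
	static inline void memcg_page_state_add(struct mem_cgroup *memcg,
						enum node_stat_item idx, int nr)
	{
		this_cpu_add(memcg->stat->count[idx], nr);
	}

For example, memcg_page_state_add(memcg, NR_FILE_DIRTY, nr_pages) would mirror what the mem_cgroup_move_account() hunk above does by hand.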