| author | Johannes Weiner <hannes@cmpxchg.org> | 2017-05-03 17:55:10 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-05-03 18:52:11 -0400 |
| commit | df0e53d0619e83b465e363c088bf4eeb2848273b | |
| tree | 5e5d55be503881f94f90f2fbdc039556668b4991 /mm/memcontrol.c | |
| parent | 31176c781508e4e35b1cc4ae2f0a5abd1f4ea689 | |
mm: memcontrol: re-use global VM event enum
The current duplication is a high-maintenance mess, and it's painful to
add new items.
This increases the size of the event array, but we'll eventually want
most of the VM events tracked on a per-cgroup basis anyway.
Link: http://lkml.kernel.org/r/20170404220148.28338-2-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
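
The patch below switches the per-memcg event counters from a private MEM_CGROUP_EVENTS_* index to the global VM event enum. As a rough, self-contained illustration of that indexing scheme (plain userspace C with trimmed, stand-in enum definitions; the real ones live in include/linux/vm_event_item.h and include/linux/memcontrol.h, which are not part of this mm/memcontrol.c-only view):

```c
/*
 * Standalone sketch of the indexing scheme this patch adopts; the enum
 * contents and struct layout are illustrative stand-ins, not kernel code.
 */
#include <stdio.h>

/* Stand-in for the global enum vm_event_item (heavily trimmed). */
enum vm_event_item { PGPGIN, PGPGOUT, PGFAULT, PGMAJFAULT, NR_VM_EVENT_ITEMS };

/*
 * Cgroup-only events start where the global enum ends, so a single
 * events[] array can hold both without a duplicate index namespace.
 */
enum memcg_event_item {
	MEMCG_OOM = NR_VM_EVENT_ITEMS,	/* illustrative cgroup-only item */
	MEMCG_NR_EVENTS,
};

struct memcg_stat {
	unsigned long events[MEMCG_NR_EVENTS];
};

int main(void)
{
	struct memcg_stat mstat = { { 0 } };

	/* Charging a page bumps the counter at the *global* enum index. */
	mstat.events[PGPGIN]++;
	mstat.events[PGFAULT]++;

	/* Cgroup-only events share the same array, past NR_VM_EVENT_ITEMS. */
	mstat.events[MEMCG_OOM]++;

	printf("pgpgin %lu pgfault %lu memcg_oom %lu\n",
	       mstat.events[PGPGIN], mstat.events[PGFAULT],
	       mstat.events[MEMCG_OOM]);
	return 0;
}
```

Because the cgroup-only items start at NR_VM_EVENT_ITEMS, the shared array can grow with the global enum without a second name/index table to keep in sync.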
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r-- mm/memcontrol.c | 53

1 file changed, 28 insertions(+), 25 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 1ffa3ad201ea..6b42887e5f14 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -111,13 +111,6 @@ static const char * const mem_cgroup_stat_names[] = {
 	"swap",
 };
 
-static const char * const mem_cgroup_events_names[] = {
-	"pgpgin",
-	"pgpgout",
-	"pgfault",
-	"pgmajfault",
-};
-
 static const char * const mem_cgroup_lru_names[] = {
 	"inactive_anon",
 	"active_anon",
@@ -571,13 +564,13 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
  */
 
 static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
-					    enum mem_cgroup_events_index idx)
+					    enum memcg_event_item event)
 {
 	unsigned long val = 0;
 	int cpu;
 
 	for_each_possible_cpu(cpu)
-		val += per_cpu(memcg->stat->events[idx], cpu);
+		val += per_cpu(memcg->stat->events[event], cpu);
 	return val;
 }
 
@@ -608,9 +601,9 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
 
 	/* pagein of a big page is an event. So, ignore page size */
 	if (nr_pages > 0)
-		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
+		__this_cpu_inc(memcg->stat->events[PGPGIN]);
 	else {
-		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
+		__this_cpu_inc(memcg->stat->events[PGPGOUT]);
 		nr_pages = -nr_pages; /* for event */
 	}
 
@@ -3119,6 +3112,21 @@ static int memcg_numa_stat_show(struct seq_file *m, void *v)
 }
 #endif /* CONFIG_NUMA */
 
+/* Universal VM events cgroup1 shows, original sort order */
+unsigned int memcg1_events[] = {
+	PGPGIN,
+	PGPGOUT,
+	PGFAULT,
+	PGMAJFAULT,
+};
+
+static const char *const memcg1_event_names[] = {
+	"pgpgin",
+	"pgpgout",
+	"pgfault",
+	"pgmajfault",
+};
+
 static int memcg_stat_show(struct seq_file *m, void *v)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
@@ -3128,8 +3136,6 @@ static int memcg_stat_show(struct seq_file *m, void *v)
 
 	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_stat_names) !=
 		     MEM_CGROUP_STAT_NSTATS);
-	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_events_names) !=
-		     MEM_CGROUP_EVENTS_NSTATS);
 	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
 
 	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
@@ -3139,9 +3145,9 @@ static int memcg_stat_show(struct seq_file *m, void *v)
 			   mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
 	}
 
-	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
-		seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
-			   mem_cgroup_read_events(memcg, i));
+	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
+		seq_printf(m, "%s %lu\n", memcg1_event_names[i],
+			   mem_cgroup_read_events(memcg, memcg1_events[i]));
 
 	for (i = 0; i < NR_LRU_LISTS; i++)
 		seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
@@ -3169,13 +3175,12 @@ static int memcg_stat_show(struct seq_file *m, void *v)
 		seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val);
 	}
 
-	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
+	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) {
 		unsigned long long val = 0;
 
 		for_each_mem_cgroup_tree(mi, memcg)
-			val += mem_cgroup_read_events(mi, i);
-		seq_printf(m, "total_%s %llu\n",
-			   mem_cgroup_events_names[i], val);
+			val += mem_cgroup_read_events(mi, memcg1_events[i]);
+		seq_printf(m, "total_%s %llu\n", memcg1_event_names[i], val);
 	}
 
 	for (i = 0; i < NR_LRU_LISTS; i++) {
@@ -5222,10 +5227,8 @@ static int memory_stat_show(struct seq_file *m, void *v)
 
 	/* Accumulated memory events */
 
-	seq_printf(m, "pgfault %lu\n",
-		   events[MEM_CGROUP_EVENTS_PGFAULT]);
-	seq_printf(m, "pgmajfault %lu\n",
-		   events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
+	seq_printf(m, "pgfault %lu\n", events[PGFAULT]);
+	seq_printf(m, "pgmajfault %lu\n", events[PGMAJFAULT]);
 
 	seq_printf(m, "workingset_refault %lu\n",
 		   stat[MEMCG_WORKINGSET_REFAULT]);
@@ -5493,7 +5496,7 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
 	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
 	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
 	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_SHMEM], nr_shmem);
-	__this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
+	__this_cpu_add(memcg->stat->events[PGPGOUT], pgpgout);
 	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
 	memcg_check_events(memcg, dummy_page);
 	local_irq_restore(flags);
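
The memcg1_events[]/memcg1_event_names[] pair added above keeps cgroup1's original output order while indexing into the now-shared event counters. A minimal standalone sketch of that "selection array" pattern (illustrative data and a fake read helper, not the kernel's per-CPU summation):

```c
#include <stdio.h>
#include <stddef.h>

/* Trimmed stand-in for the global VM event enum. */
enum vm_event_item { PGPGIN, PGPGOUT, PGFAULT, PGMAJFAULT, NR_VM_EVENT_ITEMS };

/* Subset of events cgroup1 reports, in its historical order. */
static const unsigned int memcg1_events[] = { PGPGIN, PGPGOUT, PGFAULT, PGMAJFAULT };
static const char *const memcg1_event_names[] = { "pgpgin", "pgpgout", "pgfault", "pgmajfault" };

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Fake counter read; the kernel sums per-CPU counters instead. */
static unsigned long read_event(const unsigned long *events, unsigned int item)
{
	return events[item];
}

int main(void)
{
	unsigned long events[NR_VM_EVENT_ITEMS] = { 3, 1, 7, 2 };	/* made-up counts */

	/* Same shape as the memcg_stat_show() loop: names and enum values stay paired. */
	for (size_t i = 0; i < ARRAY_SIZE(memcg1_events); i++)
		printf("%s %lu\n", memcg1_event_names[i],
		       read_event(events, memcg1_events[i]));
	return 0;
}
```

Looping over ARRAY_SIZE(memcg1_events) rather than the enum's item count means cgroup1 only reports the events it always has, even though the shared events[] array now covers the full global enum.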
