aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorVladimir Davydov <vdavydov@virtuozzo.com>2016-03-17 17:17:32 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-03-17 18:09:34 -0400
commit72b54e7314a2e7a68567c92bbb32fe2598a3c783 (patch)
tree3ab699bd18fc1c3b27224620b21a3800ed567402 /mm
parentfcff7d7eebe6d31e2ce20d994555c86a90197034 (diff)
mm: memcontrol: make tree_{stat,events} fetch all stats
Currently, the tree_{stat,events} helpers can only fetch one stat index at a time, so when there are a lot of stats to be reported one has to call them over and over again (see memory_stat_show). This is neither efficient, nor does it read well. Instead, let's make these helpers take a snapshot of all available counters in one pass. Signed-off-by: Vladimir Davydov <vdavydov@virtuozzo.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Michal Hocko <mhocko@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/memcontrol.c67
1 file changed, 39 insertions, 28 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 5c9d45e4c739..430266071c36 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2717,39 +2717,48 @@ static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
2717 return retval; 2717 return retval;
2718} 2718}
2719 2719
2720static unsigned long tree_stat(struct mem_cgroup *memcg, 2720static void tree_stat(struct mem_cgroup *memcg, unsigned long *stat)
2721 enum mem_cgroup_stat_index idx)
2722{ 2721{
2723 struct mem_cgroup *iter; 2722 struct mem_cgroup *iter;
2724 unsigned long val = 0; 2723 int i;
2725 2724
2726 for_each_mem_cgroup_tree(iter, memcg) 2725 memset(stat, 0, sizeof(*stat) * MEMCG_NR_STAT);
2727 val += mem_cgroup_read_stat(iter, idx);
2728 2726
2729 return val; 2727 for_each_mem_cgroup_tree(iter, memcg) {
2728 for (i = 0; i < MEMCG_NR_STAT; i++)
2729 stat[i] += mem_cgroup_read_stat(iter, i);
2730 }
2730} 2731}
2731 2732
2732static unsigned long tree_events(struct mem_cgroup *memcg, 2733static void tree_events(struct mem_cgroup *memcg, unsigned long *events)
2733 enum mem_cgroup_events_index idx)
2734{ 2734{
2735 struct mem_cgroup *iter; 2735 struct mem_cgroup *iter;
2736 unsigned long val = 0; 2736 int i;
2737 2737
2738 for_each_mem_cgroup_tree(iter, memcg) 2738 memset(events, 0, sizeof(*events) * MEMCG_NR_EVENTS);
2739 val += mem_cgroup_read_events(iter, idx);
2740 2739
2741 return val; 2740 for_each_mem_cgroup_tree(iter, memcg) {
2741 for (i = 0; i < MEMCG_NR_EVENTS; i++)
2742 events[i] += mem_cgroup_read_events(iter, i);
2743 }
2742} 2744}
2743 2745
2744static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) 2746static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
2745{ 2747{
2746 unsigned long val; 2748 unsigned long val = 0;
2747 2749
2748 if (mem_cgroup_is_root(memcg)) { 2750 if (mem_cgroup_is_root(memcg)) {
2749 val = tree_stat(memcg, MEM_CGROUP_STAT_CACHE); 2751 struct mem_cgroup *iter;
2750 val += tree_stat(memcg, MEM_CGROUP_STAT_RSS); 2752
2751 if (swap) 2753 for_each_mem_cgroup_tree(iter, memcg) {
2752 val += tree_stat(memcg, MEM_CGROUP_STAT_SWAP); 2754 val += mem_cgroup_read_stat(iter,
2755 MEM_CGROUP_STAT_CACHE);
2756 val += mem_cgroup_read_stat(iter,
2757 MEM_CGROUP_STAT_RSS);
2758 if (swap)
2759 val += mem_cgroup_read_stat(iter,
2760 MEM_CGROUP_STAT_SWAP);
2761 }
2753 } else { 2762 } else {
2754 if (!swap) 2763 if (!swap)
2755 val = page_counter_read(&memcg->memory); 2764 val = page_counter_read(&memcg->memory);
@@ -5075,6 +5084,8 @@ static int memory_events_show(struct seq_file *m, void *v)
5075static int memory_stat_show(struct seq_file *m, void *v) 5084static int memory_stat_show(struct seq_file *m, void *v)
5076{ 5085{
5077 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5086 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5087 unsigned long stat[MEMCG_NR_STAT];
5088 unsigned long events[MEMCG_NR_EVENTS];
5078 int i; 5089 int i;
5079 5090
5080 /* 5091 /*
@@ -5088,22 +5099,22 @@ static int memory_stat_show(struct seq_file *m, void *v)
5088 * Current memory state: 5099 * Current memory state:
5089 */ 5100 */
5090 5101
5102 tree_stat(memcg, stat);
5103 tree_events(memcg, events);
5104
5091 seq_printf(m, "anon %llu\n", 5105 seq_printf(m, "anon %llu\n",
5092 (u64)tree_stat(memcg, MEM_CGROUP_STAT_RSS) * PAGE_SIZE); 5106 (u64)stat[MEM_CGROUP_STAT_RSS] * PAGE_SIZE);
5093 seq_printf(m, "file %llu\n", 5107 seq_printf(m, "file %llu\n",
5094 (u64)tree_stat(memcg, MEM_CGROUP_STAT_CACHE) * PAGE_SIZE); 5108 (u64)stat[MEM_CGROUP_STAT_CACHE] * PAGE_SIZE);
5095 seq_printf(m, "sock %llu\n", 5109 seq_printf(m, "sock %llu\n",
5096 (u64)tree_stat(memcg, MEMCG_SOCK) * PAGE_SIZE); 5110 (u64)stat[MEMCG_SOCK] * PAGE_SIZE);
5097 5111
5098 seq_printf(m, "file_mapped %llu\n", 5112 seq_printf(m, "file_mapped %llu\n",
5099 (u64)tree_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED) * 5113 (u64)stat[MEM_CGROUP_STAT_FILE_MAPPED] * PAGE_SIZE);
5100 PAGE_SIZE);
5101 seq_printf(m, "file_dirty %llu\n", 5114 seq_printf(m, "file_dirty %llu\n",
5102 (u64)tree_stat(memcg, MEM_CGROUP_STAT_DIRTY) * 5115 (u64)stat[MEM_CGROUP_STAT_DIRTY] * PAGE_SIZE);
5103 PAGE_SIZE);
5104 seq_printf(m, "file_writeback %llu\n", 5116 seq_printf(m, "file_writeback %llu\n",
5105 (u64)tree_stat(memcg, MEM_CGROUP_STAT_WRITEBACK) * 5117 (u64)stat[MEM_CGROUP_STAT_WRITEBACK] * PAGE_SIZE);
5106 PAGE_SIZE);
5107 5118
5108 for (i = 0; i < NR_LRU_LISTS; i++) { 5119 for (i = 0; i < NR_LRU_LISTS; i++) {
5109 struct mem_cgroup *mi; 5120 struct mem_cgroup *mi;
@@ -5118,9 +5129,9 @@ static int memory_stat_show(struct seq_file *m, void *v)
5118 /* Accumulated memory events */ 5129 /* Accumulated memory events */
5119 5130
5120 seq_printf(m, "pgfault %lu\n", 5131 seq_printf(m, "pgfault %lu\n",
5121 tree_events(memcg, MEM_CGROUP_EVENTS_PGFAULT)); 5132 events[MEM_CGROUP_EVENTS_PGFAULT]);
5122 seq_printf(m, "pgmajfault %lu\n", 5133 seq_printf(m, "pgmajfault %lu\n",
5123 tree_events(memcg, MEM_CGROUP_EVENTS_PGMAJFAULT)); 5134 events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
5124 5135
5125 return 0; 5136 return 0;
5126} 5137}