Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--	mm/memcontrol.c	248
1 file changed, 127 insertions(+), 121 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 2bd7541d7c11..ff73899af61a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -100,24 +100,7 @@ static bool do_memsw_account(void)
 	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
 }
 
-static const char * const mem_cgroup_stat_names[] = {
-	"cache",
-	"rss",
-	"rss_huge",
-	"mapped_file",
-	"dirty",
-	"writeback",
-	"swap",
-};
-
-static const char * const mem_cgroup_events_names[] = {
-	"pgpgin",
-	"pgpgout",
-	"pgfault",
-	"pgmajfault",
-};
-
-static const char * const mem_cgroup_lru_names[] = {
+static const char *const mem_cgroup_lru_names[] = {
 	"inactive_anon",
 	"active_anon",
 	"inactive_file",
@@ -568,32 +551,15 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
  * common workload, threshold and synchronization as vmstat[] should be
  * implemented.
  */
-static unsigned long
-mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx)
-{
-	long val = 0;
-	int cpu;
-
-	/* Per-cpu values can be negative, use a signed accumulator */
-	for_each_possible_cpu(cpu)
-		val += per_cpu(memcg->stat->count[idx], cpu);
-	/*
-	 * Summing races with updates, so val may be negative. Avoid exposing
-	 * transient negative values.
-	 */
-	if (val < 0)
-		val = 0;
-	return val;
-}
 
-static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
-					    enum mem_cgroup_events_index idx)
+static unsigned long memcg_sum_events(struct mem_cgroup *memcg,
+				      enum memcg_event_item event)
 {
 	unsigned long val = 0;
 	int cpu;
 
 	for_each_possible_cpu(cpu)
-		val += per_cpu(memcg->stat->events[idx], cpu);
+		val += per_cpu(memcg->stat->events[event], cpu);
 	return val;
 }
 
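The renamed helper keeps the read-side model intact: one counter slot per event per CPU, folded into a single total on demand. Unlike the deleted mem_cgroup_read_stat() above, no signed accumulator or clamp is needed, because event counters only ever increase. A minimal user-space sketch of the same fold; NCPU, the array layout, and the sample values are invented for illustration:

	#include <stdio.h>

	#define NCPU    4
	#define NEVENTS 2

	/* toy stand-in for the kernel's per-cpu event counters */
	static unsigned long events[NCPU][NEVENTS] = {
		{ 3, 1 }, { 0, 2 }, { 5, 0 }, { 1, 1 },
	};

	/* same shape as memcg_sum_events(): walk every CPU, sum one slot */
	static unsigned long sum_events(int event)
	{
		unsigned long val = 0;

		for (int cpu = 0; cpu < NCPU; cpu++)
			val += events[cpu][event];
		return val;
	}

	int main(void)
	{
		printf("event 0: %lu\n", sum_events(0));	/* 3+0+5+1 = 9 */
		printf("event 1: %lu\n", sum_events(1));	/* 1+2+0+1 = 4 */
		return 0;
	}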
@@ -606,23 +572,23 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
 	 * counted as CACHE even if it's on ANON LRU.
 	 */
 	if (PageAnon(page))
-		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
-			       nr_pages);
-	else
-		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
-			       nr_pages);
+		__this_cpu_add(memcg->stat->count[MEMCG_RSS], nr_pages);
+	else {
+		__this_cpu_add(memcg->stat->count[MEMCG_CACHE], nr_pages);
+		if (PageSwapBacked(page))
+			__this_cpu_add(memcg->stat->count[NR_SHMEM], nr_pages);
+	}
 
 	if (compound) {
 		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
-		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
-			       nr_pages);
+		__this_cpu_add(memcg->stat->count[MEMCG_RSS_HUGE], nr_pages);
 	}
 
 	/* pagein of a big page is an event. So, ignore page size */
 	if (nr_pages > 0)
-		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
+		__this_cpu_inc(memcg->stat->events[PGPGIN]);
 	else {
-		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
+		__this_cpu_inc(memcg->stat->events[PGPGOUT]);
 		nr_pages = -nr_pages; /* for event */
 	}
 
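With this hunk the charge path classifies a page three ways: anonymous pages count as MEMCG_RSS, everything else as MEMCG_CACHE, and swap-backed cache (shmem/tmpfs) is additionally counted under NR_SHMEM. A tiny user-space model of that decision, with invented page-flag fields standing in for PageAnon()/PageSwapBacked():

	#include <stdbool.h>
	#include <stdio.h>

	/* toy page descriptor; the kernel tests PageAnon()/PageSwapBacked() */
	struct page { bool anon; bool swap_backed; };

	/* mirrors the branch above: anon pages are rss, everything else is
	 * cache, and swap-backed cache (shmem/tmpfs) counts as shmem too */
	static const char *classify(struct page p)
	{
		if (p.anon)
			return "rss";
		return p.swap_backed ? "cache + shmem" : "cache";
	}

	int main(void)
	{
		printf("file page:  %s\n", classify((struct page){ false, false }));
		printf("shmem page: %s\n", classify((struct page){ false, true }));
		printf("anon page:  %s\n", classify((struct page){ true, true }));
		return 0;
	}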
@@ -1144,6 +1110,28 @@ static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
 	return false;
 }
 
+unsigned int memcg1_stats[] = {
+	MEMCG_CACHE,
+	MEMCG_RSS,
+	MEMCG_RSS_HUGE,
+	NR_SHMEM,
+	NR_FILE_MAPPED,
+	NR_FILE_DIRTY,
+	NR_WRITEBACK,
+	MEMCG_SWAP,
+};
+
+static const char *const memcg1_stat_names[] = {
+	"cache",
+	"rss",
+	"rss_huge",
+	"shmem",
+	"mapped_file",
+	"dirty",
+	"writeback",
+	"swap",
+};
+
 #define K(x) ((x) << (PAGE_SHIFT-10))
 /**
  * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
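memcg1_stats and memcg1_stat_names are parallel arrays: the first maps cgroup1's historical display order onto the unified stat items, the second supplies the matching labels, and memcg_stat_show() below pins the two together with a BUILD_BUG_ON. A self-contained sketch of the pattern; the enum values and sample counts are invented:

	#include <stdio.h>

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	/* invented stand-ins for the unified stat items */
	enum { MEMCG_CACHE, MEMCG_RSS, NR_SHMEM, MEMCG_NR_STAT };

	static const unsigned int memcg1_stats[] = { MEMCG_CACHE, MEMCG_RSS, NR_SHMEM };
	static const char *const memcg1_stat_names[] = { "cache", "rss", "shmem" };

	/* the lockstep check the kernel expresses with BUILD_BUG_ON() */
	_Static_assert(ARRAY_SIZE(memcg1_stats) == ARRAY_SIZE(memcg1_stat_names),
		       "index and name arrays must stay in sync");

	int main(void)
	{
		unsigned long state[MEMCG_NR_STAT] = { 7, 42, 3 };	/* invented */

		for (unsigned int i = 0; i < ARRAY_SIZE(memcg1_stats); i++)
			printf("%s %lu\n", memcg1_stat_names[i],
			       state[memcg1_stats[i]]);
		return 0;
	}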
@@ -1188,11 +1176,11 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 		pr_cont_cgroup_path(iter->css.cgroup);
 		pr_cont(":");
 
-		for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
-			if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
+		for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
+			if (memcg1_stats[i] == MEMCG_SWAP && !do_swap_account)
 				continue;
-			pr_cont(" %s:%luKB", mem_cgroup_stat_names[i],
-				K(mem_cgroup_read_stat(iter, i)));
+			pr_cont(" %s:%luKB", memcg1_stat_names[i],
+				K(memcg_page_state(iter, memcg1_stats[i])));
 		}
 
 		for (i = 0; i < NR_LRU_LISTS; i++)
@@ -1837,7 +1825,7 @@ static void reclaim_high(struct mem_cgroup *memcg,
 	do {
 		if (page_counter_read(&memcg->memory) <= memcg->high)
 			continue;
-		mem_cgroup_events(memcg, MEMCG_HIGH, 1);
+		mem_cgroup_event(memcg, MEMCG_HIGH);
 		try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
 	} while ((memcg = parent_mem_cgroup(memcg)));
 }
@@ -1928,7 +1916,7 @@ retry:
 	if (!gfpflags_allow_blocking(gfp_mask))
 		goto nomem;
 
-	mem_cgroup_events(mem_over_limit, MEMCG_MAX, 1);
+	mem_cgroup_event(mem_over_limit, MEMCG_MAX);
 
 	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
 						    gfp_mask, may_swap);
@@ -1971,7 +1959,7 @@ retry:
 	if (fatal_signal_pending(current))
 		goto force;
 
-	mem_cgroup_events(mem_over_limit, MEMCG_OOM, 1);
+	mem_cgroup_event(mem_over_limit, MEMCG_OOM);
 
 	mem_cgroup_oom(mem_over_limit, gfp_mask,
 		       get_order(nr_pages * PAGE_SIZE));
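The call sites above drop the trailing ", 1" because the variable-count mem_cgroup_events(memcg, idx, nr) has been replaced by a count-of-one helper. Its definition lives in include/linux/memcontrol.h, outside this diff, so the following user-space analogue only illustrates the idea and borrows the name for clarity:

	#include <stdio.h>

	enum memcg_event_item { MEMCG_LOW, MEMCG_HIGH, MEMCG_MAX, MEMCG_OOM, NR_ITEMS };

	static unsigned long events[NR_ITEMS];

	/* count-of-one helper: every caller of the old three-argument form
	 * passed nr == 1, so the count argument carried no information */
	static void mem_cgroup_event(enum memcg_event_item event)
	{
		events[event]++;
	}

	int main(void)
	{
		mem_cgroup_event(MEMCG_MAX);
		mem_cgroup_event(MEMCG_OOM);
		mem_cgroup_event(MEMCG_OOM);
		printf("max %lu, oom %lu\n", events[MEMCG_MAX], events[MEMCG_OOM]);
		return 0;
	}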
@@ -2381,7 +2369,7 @@ void mem_cgroup_split_huge_fixup(struct page *head)
 	for (i = 1; i < HPAGE_PMD_NR; i++)
 		head[i].mem_cgroup = head->mem_cgroup;
 
-	__this_cpu_sub(head->mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
+	__this_cpu_sub(head->mem_cgroup->stat->count[MEMCG_RSS_HUGE],
 		       HPAGE_PMD_NR);
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -2391,7 +2379,7 @@ static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
 				 bool charge)
 {
 	int val = (charge) ? 1 : -1;
-	this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
+	this_cpu_add(memcg->stat->count[MEMCG_SWAP], val);
 }
 
 /**
@@ -2725,7 +2713,7 @@ static void tree_stat(struct mem_cgroup *memcg, unsigned long *stat)
 
 	for_each_mem_cgroup_tree(iter, memcg) {
 		for (i = 0; i < MEMCG_NR_STAT; i++)
-			stat[i] += mem_cgroup_read_stat(iter, i);
+			stat[i] += memcg_page_state(iter, i);
 	}
 }
 
@@ -2738,7 +2726,7 @@ static void tree_events(struct mem_cgroup *memcg, unsigned long *events)
 
 	for_each_mem_cgroup_tree(iter, memcg) {
 		for (i = 0; i < MEMCG_NR_EVENTS; i++)
-			events[i] += mem_cgroup_read_events(iter, i);
+			events[i] += memcg_sum_events(iter, i);
 	}
 }
 
@@ -2750,13 +2738,10 @@ static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
 		struct mem_cgroup *iter;
 
 		for_each_mem_cgroup_tree(iter, memcg) {
-			val += mem_cgroup_read_stat(iter,
-					MEM_CGROUP_STAT_CACHE);
-			val += mem_cgroup_read_stat(iter,
-					MEM_CGROUP_STAT_RSS);
+			val += memcg_page_state(iter, MEMCG_CACHE);
+			val += memcg_page_state(iter, MEMCG_RSS);
 			if (swap)
-				val += mem_cgroup_read_stat(iter,
-						MEM_CGROUP_STAT_SWAP);
+				val += memcg_page_state(iter, MEMCG_SWAP);
 		}
 	} else {
 		if (!swap)
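For the root cgroup there is no page_counter to read, so usage is reconstructed from the stats: cache plus rss accumulated over every cgroup in the tree, plus swap when the memsw flavour is requested. A worked user-space example of that accumulation, with all numbers invented:

	#include <stdbool.h>
	#include <stdio.h>

	/* per-cgroup page counts, invented for the example */
	struct totals { unsigned long cache, rss, swap; };

	/* root usage as the hunk above computes it: cache + rss (+ swap),
	 * accumulated over every cgroup in the tree */
	static unsigned long usage(const struct totals *tree, int n, bool swap)
	{
		unsigned long val = 0;

		for (int i = 0; i < n; i++) {
			val += tree[i].cache + tree[i].rss;
			if (swap)
				val += tree[i].swap;
		}
		return val;
	}

	int main(void)
	{
		struct totals tree[] = { { 10, 20, 5 }, { 1, 2, 0 } };

		printf("memory: %lu pages\n", usage(tree, 2, false));	/* 33 */
		printf("memsw:  %lu pages\n", usage(tree, 2, true));	/* 38 */
		return 0;
	}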
@@ -3131,6 +3116,21 @@ static int memcg_numa_stat_show(struct seq_file *m, void *v)
 }
 #endif /* CONFIG_NUMA */
 
+/* Universal VM events cgroup1 shows, original sort order */
+unsigned int memcg1_events[] = {
+	PGPGIN,
+	PGPGOUT,
+	PGFAULT,
+	PGMAJFAULT,
+};
+
+static const char *const memcg1_event_names[] = {
+	"pgpgin",
+	"pgpgout",
+	"pgfault",
+	"pgmajfault",
+};
+
 static int memcg_stat_show(struct seq_file *m, void *v)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
@@ -3138,22 +3138,20 @@ static int memcg_stat_show(struct seq_file *m, void *v)
 	struct mem_cgroup *mi;
 	unsigned int i;
 
-	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_stat_names) !=
-		     MEM_CGROUP_STAT_NSTATS);
-	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_events_names) !=
-		     MEM_CGROUP_EVENTS_NSTATS);
+	BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
 	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
 
-	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
-		if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
+	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
+		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
 			continue;
-		seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i],
-			   mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
+		seq_printf(m, "%s %lu\n", memcg1_stat_names[i],
+			   memcg_page_state(memcg, memcg1_stats[i]) *
+			   PAGE_SIZE);
 	}
 
-	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
-		seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
-			   mem_cgroup_read_events(memcg, i));
+	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
+		seq_printf(m, "%s %lu\n", memcg1_event_names[i],
+			   memcg_sum_events(memcg, memcg1_events[i]));
 
 	for (i = 0; i < NR_LRU_LISTS; i++)
 		seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
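Worth noting for readers of the output: the stat loop multiplies page counts by PAGE_SIZE, so the first block of memory.stat is in bytes, while the event loop prints raw counts. On a machine with 4 KiB pages, a cgroup holding two cache pages (one of them shmem) and one rss page might show lines like the following; the values are invented for illustration:

	cache 8192
	rss 4096
	rss_huge 0
	shmem 4096
	...
	pgpgin 3
	pgpgout 2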
@@ -3171,23 +3169,23 @@ static int memcg_stat_show(struct seq_file *m, void *v)
 	seq_printf(m, "hierarchical_memsw_limit %llu\n",
 		   (u64)memsw * PAGE_SIZE);
 
-	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
+	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
 		unsigned long long val = 0;
 
-		if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
+		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
 			continue;
 		for_each_mem_cgroup_tree(mi, memcg)
-			val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
-		seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val);
+			val += memcg_page_state(mi, memcg1_stats[i]) *
+			PAGE_SIZE;
+		seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], val);
 	}
 
-	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
+	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) {
 		unsigned long long val = 0;
 
 		for_each_mem_cgroup_tree(mi, memcg)
-			val += mem_cgroup_read_events(mi, i);
-		seq_printf(m, "total_%s %llu\n",
-			   mem_cgroup_events_names[i], val);
+			val += memcg_sum_events(mi, memcg1_events[i]);
+		seq_printf(m, "total_%s %llu\n", memcg1_event_names[i], val);
 	}
 
 	for (i = 0; i < NR_LRU_LISTS; i++) {
@@ -3652,10 +3650,10 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
 	struct mem_cgroup *parent;
 
-	*pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY);
+	*pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
 
 	/* this should eventually include NR_UNSTABLE_NFS */
-	*pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
+	*pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
 	*pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
 					     (1 << LRU_ACTIVE_FILE));
 	*pheadroom = PAGE_COUNTER_MAX;
@@ -4511,33 +4509,29 @@ static int mem_cgroup_move_account(struct page *page,
 	spin_lock_irqsave(&from->move_lock, flags);
 
 	if (!anon && page_mapped(page)) {
-		__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
-			       nr_pages);
-		__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
-			       nr_pages);
+		__this_cpu_sub(from->stat->count[NR_FILE_MAPPED], nr_pages);
+		__this_cpu_add(to->stat->count[NR_FILE_MAPPED], nr_pages);
 	}
 
 	/*
 	 * move_lock grabbed above and caller set from->moving_account, so
-	 * mem_cgroup_update_page_stat() will serialize updates to PageDirty.
+	 * mod_memcg_page_state will serialize updates to PageDirty.
 	 * So mapping should be stable for dirty pages.
 	 */
 	if (!anon && PageDirty(page)) {
 		struct address_space *mapping = page_mapping(page);
 
 		if (mapping_cap_account_dirty(mapping)) {
-			__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_DIRTY],
-				       nr_pages);
-			__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_DIRTY],
-				       nr_pages);
+			__this_cpu_sub(from->stat->count[NR_FILE_DIRTY],
+				       nr_pages);
+			__this_cpu_add(to->stat->count[NR_FILE_DIRTY],
+				       nr_pages);
 		}
 	}
 
 	if (PageWriteback(page)) {
-		__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
-			       nr_pages);
-		__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
-			       nr_pages);
+		__this_cpu_sub(from->stat->count[NR_WRITEBACK], nr_pages);
+		__this_cpu_add(to->stat->count[NR_WRITEBACK], nr_pages);
 	}
 
 	/*
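The move path transfers each counter pairwise under from->move_lock: the same nr_pages is subtracted from the source and added to the destination, so the hierarchy-wide sum stays invariant even if a reader races with the move. A toy model of that invariant, with counter values invented:

	#include <stdio.h>

	/* toy NR_FILE_MAPPED counters for two cgroups (invented values) */
	static long from_mapped = 8, to_mapped = 3;

	/* the pairwise transfer done above under from->move_lock: the same
	 * delta leaves one counter and enters the other, so the total holds */
	static void move_mapped(long nr_pages)
	{
		from_mapped -= nr_pages;
		to_mapped += nr_pages;
	}

	int main(void)
	{
		move_mapped(2);
		printf("from %ld, to %ld, sum %ld\n",
		       from_mapped, to_mapped, from_mapped + to_mapped); /* 6 5 11 */
		return 0;
	}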
@@ -5154,7 +5148,7 @@ static ssize_t memory_max_write(struct kernfs_open_file *of,
 			continue;
 		}
 
-		mem_cgroup_events(memcg, MEMCG_OOM, 1);
+		mem_cgroup_event(memcg, MEMCG_OOM);
 		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
 			break;
 	}
@@ -5167,10 +5161,10 @@ static int memory_events_show(struct seq_file *m, void *v)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
 
-	seq_printf(m, "low %lu\n", mem_cgroup_read_events(memcg, MEMCG_LOW));
-	seq_printf(m, "high %lu\n", mem_cgroup_read_events(memcg, MEMCG_HIGH));
-	seq_printf(m, "max %lu\n", mem_cgroup_read_events(memcg, MEMCG_MAX));
-	seq_printf(m, "oom %lu\n", mem_cgroup_read_events(memcg, MEMCG_OOM));
+	seq_printf(m, "low %lu\n", memcg_sum_events(memcg, MEMCG_LOW));
+	seq_printf(m, "high %lu\n", memcg_sum_events(memcg, MEMCG_HIGH));
+	seq_printf(m, "max %lu\n", memcg_sum_events(memcg, MEMCG_MAX));
+	seq_printf(m, "oom %lu\n", memcg_sum_events(memcg, MEMCG_OOM));
 
 	return 0;
 }
@@ -5197,9 +5191,9 @@ static int memory_stat_show(struct seq_file *m, void *v)
 	tree_events(memcg, events);
 
 	seq_printf(m, "anon %llu\n",
-		   (u64)stat[MEM_CGROUP_STAT_RSS] * PAGE_SIZE);
+		   (u64)stat[MEMCG_RSS] * PAGE_SIZE);
 	seq_printf(m, "file %llu\n",
-		   (u64)stat[MEM_CGROUP_STAT_CACHE] * PAGE_SIZE);
+		   (u64)stat[MEMCG_CACHE] * PAGE_SIZE);
 	seq_printf(m, "kernel_stack %llu\n",
 		   (u64)stat[MEMCG_KERNEL_STACK_KB] * 1024);
 	seq_printf(m, "slab %llu\n",
@@ -5208,12 +5202,14 @@ static int memory_stat_show(struct seq_file *m, void *v)
 	seq_printf(m, "sock %llu\n",
 		   (u64)stat[MEMCG_SOCK] * PAGE_SIZE);
 
+	seq_printf(m, "shmem %llu\n",
+		   (u64)stat[NR_SHMEM] * PAGE_SIZE);
 	seq_printf(m, "file_mapped %llu\n",
-		   (u64)stat[MEM_CGROUP_STAT_FILE_MAPPED] * PAGE_SIZE);
+		   (u64)stat[NR_FILE_MAPPED] * PAGE_SIZE);
 	seq_printf(m, "file_dirty %llu\n",
-		   (u64)stat[MEM_CGROUP_STAT_DIRTY] * PAGE_SIZE);
+		   (u64)stat[NR_FILE_DIRTY] * PAGE_SIZE);
 	seq_printf(m, "file_writeback %llu\n",
-		   (u64)stat[MEM_CGROUP_STAT_WRITEBACK] * PAGE_SIZE);
+		   (u64)stat[NR_WRITEBACK] * PAGE_SIZE);
 
 	for (i = 0; i < NR_LRU_LISTS; i++) {
 		struct mem_cgroup *mi;
@@ -5232,10 +5228,15 @@ static int memory_stat_show(struct seq_file *m, void *v)
 
 	/* Accumulated memory events */
 
-	seq_printf(m, "pgfault %lu\n",
-		   events[MEM_CGROUP_EVENTS_PGFAULT]);
-	seq_printf(m, "pgmajfault %lu\n",
-		   events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
+	seq_printf(m, "pgfault %lu\n", events[PGFAULT]);
+	seq_printf(m, "pgmajfault %lu\n", events[PGMAJFAULT]);
+
+	seq_printf(m, "workingset_refault %lu\n",
+		   stat[WORKINGSET_REFAULT]);
+	seq_printf(m, "workingset_activate %lu\n",
+		   stat[WORKINGSET_ACTIVATE]);
+	seq_printf(m, "workingset_nodereclaim %lu\n",
+		   stat[WORKINGSET_NODERECLAIM]);
 
 	return 0;
 }
@@ -5476,8 +5477,8 @@ void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
 
 static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
 			   unsigned long nr_anon, unsigned long nr_file,
-			   unsigned long nr_huge, unsigned long nr_kmem,
-			   struct page *dummy_page)
+			   unsigned long nr_kmem, unsigned long nr_huge,
+			   unsigned long nr_shmem, struct page *dummy_page)
 {
 	unsigned long nr_pages = nr_anon + nr_file + nr_kmem;
 	unsigned long flags;
@@ -5492,10 +5493,11 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
 	}
 
 	local_irq_save(flags);
-	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
-	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
-	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
-	__this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
+	__this_cpu_sub(memcg->stat->count[MEMCG_RSS], nr_anon);
+	__this_cpu_sub(memcg->stat->count[MEMCG_CACHE], nr_file);
+	__this_cpu_sub(memcg->stat->count[MEMCG_RSS_HUGE], nr_huge);
+	__this_cpu_sub(memcg->stat->count[NR_SHMEM], nr_shmem);
+	__this_cpu_add(memcg->stat->events[PGPGOUT], pgpgout);
 	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
 	memcg_check_events(memcg, dummy_page);
 	local_irq_restore(flags);
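One caution about the reordered uncharge_batch() signature: all five page counts share the type unsigned long, so the compiler cannot flag a caller that still passes them in the old nr_huge-before-nr_kmem order; every call site must be converted in the same patch, as the two uncharge_list() sites below are. A small demonstration of why such a reorder is silent, with the names invented:

	#include <stdio.h>

	/* same-typed parameters give the compiler nothing to diagnose */
	static void batch(unsigned long nr_kmem, unsigned long nr_huge)
	{
		printf("kmem=%lu huge=%lu\n", nr_kmem, nr_huge);
	}

	int main(void)
	{
		unsigned long nr_kmem = 1, nr_huge = 512;

		batch(nr_kmem, nr_huge);	/* intended order */
		batch(nr_huge, nr_kmem);	/* swapped: compiles, silently wrong */
		return 0;
	}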
@@ -5507,6 +5509,7 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
 static void uncharge_list(struct list_head *page_list)
 {
 	struct mem_cgroup *memcg = NULL;
+	unsigned long nr_shmem = 0;
 	unsigned long nr_anon = 0;
 	unsigned long nr_file = 0;
 	unsigned long nr_huge = 0;
@@ -5539,9 +5542,9 @@ static void uncharge_list(struct list_head *page_list)
 		if (memcg != page->mem_cgroup) {
 			if (memcg) {
 				uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
-					       nr_huge, nr_kmem, page);
-				pgpgout = nr_anon = nr_file =
-					nr_huge = nr_kmem = 0;
+					       nr_kmem, nr_huge, nr_shmem, page);
+				pgpgout = nr_anon = nr_file = nr_kmem = 0;
+				nr_huge = nr_shmem = 0;
 			}
 			memcg = page->mem_cgroup;
 		}
@@ -5555,8 +5558,11 @@ static void uncharge_list(struct list_head *page_list)
 		}
 		if (PageAnon(page))
 			nr_anon += nr_pages;
-		else
+		else {
 			nr_file += nr_pages;
+			if (PageSwapBacked(page))
+				nr_shmem += nr_pages;
+		}
 		pgpgout++;
 	} else {
 		nr_kmem += 1 << compound_order(page);
@@ -5568,7 +5574,7 @@ static void uncharge_list(struct list_head *page_list)
 
 	if (memcg)
 		uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
-			       nr_huge, nr_kmem, page);
+			       nr_kmem, nr_huge, nr_shmem, page);
 }
 
 /**