-rw-r--r--  include/linux/memcontrol.h | 100
-rw-r--r--  mm/memcontrol.c            | 135
-rw-r--r--  mm/page-writeback.c        |  10
-rw-r--r--  mm/rmap.c                  |   4
-rw-r--r--  mm/vmscan.c                |   5
-rw-r--r--  mm/workingset.c            |   7
6 files changed, 123 insertions(+), 138 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 0bb5f055bd26..0fa1f5de6841 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -35,40 +35,45 @@ struct page;
 struct mm_struct;
 struct kmem_cache;
 
-/*
- * The corresponding mem_cgroup_stat_names is defined in mm/memcontrol.c,
- * These two lists should keep in accord with each other.
- */
-enum mem_cgroup_stat_index {
-	/*
-	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
-	 */
-	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
-	MEM_CGROUP_STAT_RSS,		/* # of pages charged as anon rss */
-	MEM_CGROUP_STAT_RSS_HUGE,	/* # of pages charged as anon huge */
-	MEM_CGROUP_STAT_SHMEM,		/* # of pages charged as shmem */
-	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
-	MEM_CGROUP_STAT_DIRTY,		/* # of dirty pages in page cache */
-	MEM_CGROUP_STAT_WRITEBACK,	/* # of pages under writeback */
-	MEM_CGROUP_STAT_SWAP,		/* # of pages, swapped out */
-	MEM_CGROUP_STAT_NSTATS,
-	/* default hierarchy stats */
-	MEMCG_KERNEL_STACK_KB = MEM_CGROUP_STAT_NSTATS,
+/* Cgroup-specific page state, on top of universal node page state */
+enum memcg_stat_item {
+	MEMCG_CACHE = NR_VM_NODE_STAT_ITEMS,
+	MEMCG_RSS,
+	MEMCG_RSS_HUGE,
+	MEMCG_SWAP,
+	MEMCG_SOCK,
+	/* XXX: why are these zone and not node counters? */
+	MEMCG_KERNEL_STACK_KB,
 	MEMCG_SLAB_RECLAIMABLE,
 	MEMCG_SLAB_UNRECLAIMABLE,
-	MEMCG_SOCK,
-	MEMCG_WORKINGSET_REFAULT,
-	MEMCG_WORKINGSET_ACTIVATE,
-	MEMCG_WORKINGSET_NODERECLAIM,
 	MEMCG_NR_STAT,
 };
 
+/* Cgroup-specific events, on top of universal VM events */
+enum memcg_event_item {
+	MEMCG_LOW = NR_VM_EVENT_ITEMS,
+	MEMCG_HIGH,
+	MEMCG_MAX,
+	MEMCG_OOM,
+	MEMCG_NR_EVENTS,
+};
+
 struct mem_cgroup_reclaim_cookie {
 	pg_data_t *pgdat;
 	int priority;
 	unsigned int generation;
 };
 
+#ifdef CONFIG_MEMCG
+
+#define MEM_CGROUP_ID_SHIFT	16
+#define MEM_CGROUP_ID_MAX	USHRT_MAX
+
+struct mem_cgroup_id {
+	int id;
+	atomic_t ref;
+};
+
 /*
  * Per memcg event counter is incremented at every pagein/pageout. With THP,
  * it will be incremated by the number of pages. This counter is used for
@@ -82,25 +87,6 @@ enum mem_cgroup_events_target {
 	MEM_CGROUP_NTARGETS,
 };
 
-#ifdef CONFIG_MEMCG
-
-#define MEM_CGROUP_ID_SHIFT	16
-#define MEM_CGROUP_ID_MAX	USHRT_MAX
-
-struct mem_cgroup_id {
-	int id;
-	atomic_t ref;
-};
-
-/* Cgroup-specific events, on top of universal VM events */
-enum memcg_event_item {
-	MEMCG_LOW = NR_VM_EVENT_ITEMS,
-	MEMCG_HIGH,
-	MEMCG_MAX,
-	MEMCG_OOM,
-	MEMCG_NR_EVENTS,
-};
-
 struct mem_cgroup_stat_cpu {
 	long count[MEMCG_NR_STAT];
 	unsigned long events[MEMCG_NR_EVENTS];
@@ -487,7 +473,7 @@ void lock_page_memcg(struct page *page);
 void unlock_page_memcg(struct page *page);
 
 static inline unsigned long mem_cgroup_read_stat(struct mem_cgroup *memcg,
-					     enum mem_cgroup_stat_index idx)
+					     enum memcg_stat_item idx)
 {
 	long val = 0;
 	int cpu;
@@ -502,20 +488,20 @@ static inline unsigned long mem_cgroup_read_stat(struct mem_cgroup *memcg,
 }
 
 static inline void mem_cgroup_update_stat(struct mem_cgroup *memcg,
-					  enum mem_cgroup_stat_index idx, int val)
+					  enum memcg_stat_item idx, int val)
 {
 	if (!mem_cgroup_disabled())
 		this_cpu_add(memcg->stat->count[idx], val);
 }
 
 static inline void mem_cgroup_inc_stat(struct mem_cgroup *memcg,
-				       enum mem_cgroup_stat_index idx)
+				       enum memcg_stat_item idx)
 {
 	mem_cgroup_update_stat(memcg, idx, 1);
 }
 
 static inline void mem_cgroup_dec_stat(struct mem_cgroup *memcg,
-				       enum mem_cgroup_stat_index idx)
+				       enum memcg_stat_item idx)
 {
 	mem_cgroup_update_stat(memcg, idx, -1);
 }
@@ -538,20 +524,20 @@ static inline void mem_cgroup_dec_stat(struct mem_cgroup *memcg,
  * Kernel pages are an exception to this, since they'll never move.
  */
 static inline void mem_cgroup_update_page_stat(struct page *page,
-				 enum mem_cgroup_stat_index idx, int val)
+				 enum memcg_stat_item idx, int val)
 {
 	if (page->mem_cgroup)
 		mem_cgroup_update_stat(page->mem_cgroup, idx, val);
 }
 
 static inline void mem_cgroup_inc_page_stat(struct page *page,
-					    enum mem_cgroup_stat_index idx)
+					    enum memcg_stat_item idx)
 {
 	mem_cgroup_update_page_stat(page, idx, 1);
 }
 
 static inline void mem_cgroup_dec_page_stat(struct page *page,
-					    enum mem_cgroup_stat_index idx)
+					    enum memcg_stat_item idx)
 {
 	mem_cgroup_update_page_stat(page, idx, -1);
 }
@@ -760,33 +746,33 @@ static inline unsigned long mem_cgroup_read_stat(struct mem_cgroup *memcg,
 }
 
 static inline void mem_cgroup_update_stat(struct mem_cgroup *memcg,
-					  enum mem_cgroup_stat_index idx, int val)
+					  enum memcg_stat_item idx, int val)
 {
 }
 
 static inline void mem_cgroup_inc_stat(struct mem_cgroup *memcg,
-				       enum mem_cgroup_stat_index idx)
+				       enum memcg_stat_item idx)
 {
 }
 
 static inline void mem_cgroup_dec_stat(struct mem_cgroup *memcg,
-				       enum mem_cgroup_stat_index idx)
+				       enum memcg_stat_item idx)
 {
 }
 
 static inline void mem_cgroup_update_page_stat(struct page *page,
-					       enum mem_cgroup_stat_index idx,
+					       enum memcg_stat_item idx,
 					       int nr)
 {
 }
 
 static inline void mem_cgroup_inc_page_stat(struct page *page,
-					    enum mem_cgroup_stat_index idx)
+					    enum memcg_stat_item idx)
 {
 }
 
 static inline void mem_cgroup_dec_page_stat(struct page *page,
-					    enum mem_cgroup_stat_index idx)
+					    enum memcg_stat_item idx)
 {
 }
 
@@ -906,7 +892,7 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
  * @val: number of pages (positive or negative)
  */
 static inline void memcg_kmem_update_page_stat(struct page *page,
-				 enum mem_cgroup_stat_index idx, int val)
+				 enum memcg_stat_item idx, int val)
 {
 	if (memcg_kmem_enabled() && page->mem_cgroup)
 		this_cpu_add(page->mem_cgroup->stat->count[idx], val);
@@ -935,7 +921,7 @@ static inline void memcg_put_cache_ids(void)
 }
 
 static inline void memcg_kmem_update_page_stat(struct page *page,
-				 enum mem_cgroup_stat_index idx, int val)
+				 enum memcg_stat_item idx, int val)
 {
 }
 #endif /* CONFIG_MEMCG && !CONFIG_SLOB */
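
Note on the header change above: memcg-only counters now start at NR_VM_NODE_STAT_ITEMS, so node-level items and cgroup-only items share one index space and one per-cpu count[] array. A minimal standalone sketch of that overlay (the enums below are shortened stand-ins, not the real kernel definitions):

/* Illustration only: shortened stand-ins for the real vmstat and memcg enums. */
#include <stdio.h>

enum node_stat_item {
	NR_FILE_MAPPED,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_SHMEM,
	NR_VM_NODE_STAT_ITEMS,			/* end of node-level items */
};

enum memcg_stat_item {
	MEMCG_CACHE = NR_VM_NODE_STAT_ITEMS,	/* cgroup-only items continue the range */
	MEMCG_RSS,
	MEMCG_SWAP,
	MEMCG_NR_STAT,
};

int main(void)
{
	long count[MEMCG_NR_STAT] = { 0 };	/* one array serves both index ranges */

	count[NR_FILE_DIRTY]++;			/* node-level item */
	count[MEMCG_RSS]++;			/* memcg-only item */
	printf("NR_FILE_DIRTY=%d MEMCG_CACHE=%d (the ranges never collide)\n",
	       NR_FILE_DIRTY, MEMCG_CACHE);
	return 0;
}

Because the ranges are disjoint, helpers such as mem_cgroup_update_stat() can be passed either a node stat item (e.g. NR_FILE_DIRTY) or a memcg-only item (e.g. MEMCG_RSS) without any translation, which is exactly what the callers below start doing.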
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6b42887e5f14..6fe4c7fafbfc 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -100,18 +100,7 @@ static bool do_memsw_account(void)
 	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
 }
 
-static const char * const mem_cgroup_stat_names[] = {
-	"cache",
-	"rss",
-	"rss_huge",
-	"shmem",
-	"mapped_file",
-	"dirty",
-	"writeback",
-	"swap",
-};
-
-static const char * const mem_cgroup_lru_names[] = {
+static const char *const mem_cgroup_lru_names[] = {
 	"inactive_anon",
 	"active_anon",
 	"inactive_file",
@@ -583,20 +572,16 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
 	 * counted as CACHE even if it's on ANON LRU.
 	 */
 	if (PageAnon(page))
-		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
-			       nr_pages);
+		__this_cpu_add(memcg->stat->count[MEMCG_RSS], nr_pages);
 	else {
-		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
-			       nr_pages);
+		__this_cpu_add(memcg->stat->count[MEMCG_CACHE], nr_pages);
 		if (PageSwapBacked(page))
-			__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SHMEM],
-				       nr_pages);
+			__this_cpu_add(memcg->stat->count[NR_SHMEM], nr_pages);
 	}
 
 	if (compound) {
 		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
-		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
-			       nr_pages);
+		__this_cpu_add(memcg->stat->count[MEMCG_RSS_HUGE], nr_pages);
 	}
 
 	/* pagein of a big page is an event. So, ignore page size */
@@ -1125,6 +1110,28 @@ static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
 	return false;
 }
 
+unsigned int memcg1_stats[] = {
+	MEMCG_CACHE,
+	MEMCG_RSS,
+	MEMCG_RSS_HUGE,
+	NR_SHMEM,
+	NR_FILE_MAPPED,
+	NR_FILE_DIRTY,
+	NR_WRITEBACK,
+	MEMCG_SWAP,
+};
+
+static const char *const memcg1_stat_names[] = {
+	"cache",
+	"rss",
+	"rss_huge",
+	"shmem",
+	"mapped_file",
+	"dirty",
+	"writeback",
+	"swap",
+};
+
 #define K(x) ((x) << (PAGE_SHIFT-10))
 /**
  * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
@@ -1169,11 +1176,11 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 		pr_cont_cgroup_path(iter->css.cgroup);
 		pr_cont(":");
 
-		for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
-			if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
+		for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
+			if (memcg1_stats[i] == MEMCG_SWAP && !do_swap_account)
 				continue;
-			pr_cont(" %s:%luKB", mem_cgroup_stat_names[i],
-				K(mem_cgroup_read_stat(iter, i)));
+			pr_cont(" %s:%luKB", memcg1_stat_names[i],
+				K(mem_cgroup_read_stat(iter, memcg1_stats[i])));
 		}
 
 		for (i = 0; i < NR_LRU_LISTS; i++)
@@ -2362,7 +2369,7 @@ void mem_cgroup_split_huge_fixup(struct page *head)
 	for (i = 1; i < HPAGE_PMD_NR; i++)
 		head[i].mem_cgroup = head->mem_cgroup;
 
-	__this_cpu_sub(head->mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
+	__this_cpu_sub(head->mem_cgroup->stat->count[MEMCG_RSS_HUGE],
 		       HPAGE_PMD_NR);
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -2372,7 +2379,7 @@ static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
 			       bool charge)
 {
 	int val = (charge) ? 1 : -1;
-	this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
+	this_cpu_add(memcg->stat->count[MEMCG_SWAP], val);
 }
 
 /**
@@ -2731,13 +2738,10 @@ static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
 		struct mem_cgroup *iter;
 
 		for_each_mem_cgroup_tree(iter, memcg) {
-			val += mem_cgroup_read_stat(iter,
-					MEM_CGROUP_STAT_CACHE);
-			val += mem_cgroup_read_stat(iter,
-					MEM_CGROUP_STAT_RSS);
+			val += mem_cgroup_read_stat(iter, MEMCG_CACHE);
+			val += mem_cgroup_read_stat(iter, MEMCG_RSS);
 			if (swap)
-				val += mem_cgroup_read_stat(iter,
-					MEM_CGROUP_STAT_SWAP);
+				val += mem_cgroup_read_stat(iter, MEMCG_SWAP);
 		}
 	} else {
 		if (!swap)
@@ -3134,15 +3138,15 @@ static int memcg_stat_show(struct seq_file *m, void *v)
 	struct mem_cgroup *mi;
 	unsigned int i;
 
-	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_stat_names) !=
-		     MEM_CGROUP_STAT_NSTATS);
+	BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
 	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
 
-	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
-		if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
+	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
+		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
 			continue;
-		seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i],
-			   mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
+		seq_printf(m, "%s %lu\n", memcg1_stat_names[i],
+			   mem_cgroup_read_stat(memcg, memcg1_stats[i]) *
+			   PAGE_SIZE);
 	}
 
 	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
@@ -3165,14 +3169,15 @@ static int memcg_stat_show(struct seq_file *m, void *v)
 	seq_printf(m, "hierarchical_memsw_limit %llu\n",
 		   (u64)memsw * PAGE_SIZE);
 
-	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
+	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
 		unsigned long long val = 0;
 
-		if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
+		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
 			continue;
 		for_each_mem_cgroup_tree(mi, memcg)
-			val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
-		seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val);
+			val += mem_cgroup_read_stat(mi, memcg1_stats[i]) *
+			PAGE_SIZE;
+		seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], val);
 	}
 
 	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) {
@@ -3645,10 +3650,10 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
 	struct mem_cgroup *parent;
 
-	*pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY);
+	*pdirty = mem_cgroup_read_stat(memcg, NR_FILE_DIRTY);
 
 	/* this should eventually include NR_UNSTABLE_NFS */
-	*pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
+	*pwriteback = mem_cgroup_read_stat(memcg, NR_WRITEBACK);
 	*pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
 					     (1 << LRU_ACTIVE_FILE));
 	*pheadroom = PAGE_COUNTER_MAX;
@@ -4504,10 +4509,8 @@ static int mem_cgroup_move_account(struct page *page,
 	spin_lock_irqsave(&from->move_lock, flags);
 
 	if (!anon && page_mapped(page)) {
-		__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
-			       nr_pages);
-		__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
-			       nr_pages);
+		__this_cpu_sub(from->stat->count[NR_FILE_MAPPED], nr_pages);
+		__this_cpu_add(to->stat->count[NR_FILE_MAPPED], nr_pages);
 	}
 
 	/*
@@ -4519,18 +4522,16 @@ static int mem_cgroup_move_account(struct page *page,
 		struct address_space *mapping = page_mapping(page);
 
 		if (mapping_cap_account_dirty(mapping)) {
-			__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_DIRTY],
+			__this_cpu_sub(from->stat->count[NR_FILE_DIRTY],
 				       nr_pages);
-			__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_DIRTY],
+			__this_cpu_add(to->stat->count[NR_FILE_DIRTY],
 				       nr_pages);
 		}
 	}
 
 	if (PageWriteback(page)) {
-		__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
-			       nr_pages);
-		__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
-			       nr_pages);
+		__this_cpu_sub(from->stat->count[NR_WRITEBACK], nr_pages);
+		__this_cpu_add(to->stat->count[NR_WRITEBACK], nr_pages);
 	}
 
 	/*
@@ -5190,9 +5191,9 @@ static int memory_stat_show(struct seq_file *m, void *v)
 	tree_events(memcg, events);
 
 	seq_printf(m, "anon %llu\n",
-		   (u64)stat[MEM_CGROUP_STAT_RSS] * PAGE_SIZE);
+		   (u64)stat[MEMCG_RSS] * PAGE_SIZE);
 	seq_printf(m, "file %llu\n",
-		   (u64)stat[MEM_CGROUP_STAT_CACHE] * PAGE_SIZE);
+		   (u64)stat[MEMCG_CACHE] * PAGE_SIZE);
 	seq_printf(m, "kernel_stack %llu\n",
 		   (u64)stat[MEMCG_KERNEL_STACK_KB] * 1024);
 	seq_printf(m, "slab %llu\n",
@@ -5202,13 +5203,13 @@ static int memory_stat_show(struct seq_file *m, void *v)
 		   (u64)stat[MEMCG_SOCK] * PAGE_SIZE);
 
 	seq_printf(m, "shmem %llu\n",
-		   (u64)stat[MEM_CGROUP_STAT_SHMEM] * PAGE_SIZE);
+		   (u64)stat[NR_SHMEM] * PAGE_SIZE);
 	seq_printf(m, "file_mapped %llu\n",
-		   (u64)stat[MEM_CGROUP_STAT_FILE_MAPPED] * PAGE_SIZE);
+		   (u64)stat[NR_FILE_MAPPED] * PAGE_SIZE);
 	seq_printf(m, "file_dirty %llu\n",
-		   (u64)stat[MEM_CGROUP_STAT_DIRTY] * PAGE_SIZE);
+		   (u64)stat[NR_FILE_DIRTY] * PAGE_SIZE);
 	seq_printf(m, "file_writeback %llu\n",
-		   (u64)stat[MEM_CGROUP_STAT_WRITEBACK] * PAGE_SIZE);
+		   (u64)stat[NR_WRITEBACK] * PAGE_SIZE);
 
 	for (i = 0; i < NR_LRU_LISTS; i++) {
 		struct mem_cgroup *mi;
@@ -5231,11 +5232,11 @@ static int memory_stat_show(struct seq_file *m, void *v)
 	seq_printf(m, "pgmajfault %lu\n", events[PGMAJFAULT]);
 
 	seq_printf(m, "workingset_refault %lu\n",
-		   stat[MEMCG_WORKINGSET_REFAULT]);
+		   stat[WORKINGSET_REFAULT]);
 	seq_printf(m, "workingset_activate %lu\n",
-		   stat[MEMCG_WORKINGSET_ACTIVATE]);
+		   stat[WORKINGSET_ACTIVATE]);
 	seq_printf(m, "workingset_nodereclaim %lu\n",
-		   stat[MEMCG_WORKINGSET_NODERECLAIM]);
+		   stat[WORKINGSET_NODERECLAIM]);
 
 	return 0;
 }
@@ -5492,10 +5493,10 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
 	}
 
 	local_irq_save(flags);
-	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
-	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
-	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
-	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_SHMEM], nr_shmem);
+	__this_cpu_sub(memcg->stat->count[MEMCG_RSS], nr_anon);
+	__this_cpu_sub(memcg->stat->count[MEMCG_CACHE], nr_file);
+	__this_cpu_sub(memcg->stat->count[MEMCG_RSS_HUGE], nr_huge);
+	__this_cpu_sub(memcg->stat->count[NR_SHMEM], nr_shmem);
 	__this_cpu_add(memcg->stat->events[PGPGOUT], pgpgout);
 	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
 	memcg_check_events(memcg, dummy_page);
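
The cgroup1 interface keeps its historical stat names by pairing the new memcg1_stats[] index array with memcg1_stat_names[]; the BUILD_BUG_ON in memcg_stat_show() enforces that the two stay the same length. A small self-contained sketch of that parallel-array pattern (hypothetical values, plain C, not kernel code):

#include <stdio.h>

/* Hypothetical per-memcg counters, standing in for memcg->stat->count[]. */
static long count[] = { 3, 7, 1 };

static const unsigned int stats[] = { 2, 0, 1 };			/* which counters to report */
static const char *const stat_names[] = { "swap", "cache", "rss" };	/* names, same order as stats[] */

int main(void)
{
	size_t i;

	/* Same shape as the memcg1_stats/memcg1_stat_names loop in memcg_stat_show():
	 * iterate one array, print the name from the other, read the counter via the index. */
	for (i = 0; i < sizeof(stats) / sizeof(stats[0]); i++)
		printf("%s %ld\n", stat_names[i], count[stats[i]]);
	return 0;
}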
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 33df0583edb9..777711203809 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2427,7 +2427,7 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
 	inode_attach_wb(inode, page);
 	wb = inode_to_wb(inode);
 
-	mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_DIRTY);
+	mem_cgroup_inc_page_stat(page, NR_FILE_DIRTY);
 	__inc_node_page_state(page, NR_FILE_DIRTY);
 	__inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
 	__inc_node_page_state(page, NR_DIRTIED);
@@ -2449,7 +2449,7 @@ void account_page_cleaned(struct page *page, struct address_space *mapping,
 			  struct bdi_writeback *wb)
 {
 	if (mapping_cap_account_dirty(mapping)) {
-		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
+		mem_cgroup_dec_page_stat(page, NR_FILE_DIRTY);
 		dec_node_page_state(page, NR_FILE_DIRTY);
 		dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
 		dec_wb_stat(wb, WB_RECLAIMABLE);
@@ -2706,7 +2706,7 @@ int clear_page_dirty_for_io(struct page *page)
 	 */
 	wb = unlocked_inode_to_wb_begin(inode, &locked);
 	if (TestClearPageDirty(page)) {
-		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
+		mem_cgroup_dec_page_stat(page, NR_FILE_DIRTY);
 		dec_node_page_state(page, NR_FILE_DIRTY);
 		dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
 		dec_wb_stat(wb, WB_RECLAIMABLE);
@@ -2753,7 +2753,7 @@ int test_clear_page_writeback(struct page *page)
 		ret = TestClearPageWriteback(page);
 	}
 	if (ret) {
-		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
+		mem_cgroup_dec_page_stat(page, NR_WRITEBACK);
 		dec_node_page_state(page, NR_WRITEBACK);
 		dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
 		inc_node_page_state(page, NR_WRITTEN);
@@ -2808,7 +2808,7 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
 		ret = TestSetPageWriteback(page);
 	}
 	if (!ret) {
-		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
+		mem_cgroup_inc_page_stat(page, NR_WRITEBACK);
 		inc_node_page_state(page, NR_WRITEBACK);
 		inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
 	}
diff --git a/mm/rmap.c b/mm/rmap.c
index e303fdbee561..a6d018c4a13a 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1158,7 +1158,7 @@ void page_add_file_rmap(struct page *page, bool compound)
 		goto out;
 	}
 	__mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, nr);
-	mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, nr);
+	mem_cgroup_update_page_stat(page, NR_FILE_MAPPED, nr);
 out:
 	unlock_page_memcg(page);
 }
@@ -1198,7 +1198,7 @@ static void page_remove_file_rmap(struct page *page, bool compound)
 	 * pte lock(a spinlock) is held, which implies preemption disabled.
 	 */
 	__mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, -nr);
-	mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, -nr);
+	mem_cgroup_update_page_stat(page, NR_FILE_MAPPED, -nr);
 
 	if (unlikely(PageMlocked(page)))
 		clear_page_mlock(page);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index fbec74af2b69..417b6657e994 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2046,8 +2046,7 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
 	active = lruvec_lru_size(lruvec, active_lru, sc->reclaim_idx);
 
 	if (memcg)
-		refaults = mem_cgroup_read_stat(memcg,
-						MEMCG_WORKINGSET_ACTIVATE);
+		refaults = mem_cgroup_read_stat(memcg, WORKINGSET_ACTIVATE);
 	else
 		refaults = node_page_state(pgdat, WORKINGSET_ACTIVATE);
 
@@ -2735,7 +2734,7 @@ static void snapshot_refaults(struct mem_cgroup *root_memcg, pg_data_t *pgdat)
 
 		if (memcg)
 			refaults = mem_cgroup_read_stat(memcg,
-						MEMCG_WORKINGSET_ACTIVATE);
+						WORKINGSET_ACTIVATE);
 		else
 			refaults = node_page_state(pgdat, WORKINGSET_ACTIVATE);
 
diff --git a/mm/workingset.c b/mm/workingset.c
index 51c6f61d4cea..37fc1057cd86 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -289,11 +289,11 @@ bool workingset_refault(void *shadow)
 	refault_distance = (refault - eviction) & EVICTION_MASK;
 
 	inc_node_state(pgdat, WORKINGSET_REFAULT);
-	mem_cgroup_inc_stat(memcg, MEMCG_WORKINGSET_REFAULT);
+	mem_cgroup_inc_stat(memcg, WORKINGSET_REFAULT);
 
 	if (refault_distance <= active_file) {
 		inc_node_state(pgdat, WORKINGSET_ACTIVATE);
-		mem_cgroup_inc_stat(memcg, MEMCG_WORKINGSET_ACTIVATE);
+		mem_cgroup_inc_stat(memcg, WORKINGSET_ACTIVATE);
 		rcu_read_unlock();
 		return true;
 	}
@@ -475,8 +475,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
 	if (WARN_ON_ONCE(node->exceptional))
 		goto out_invalid;
 	inc_node_state(page_pgdat(virt_to_page(node)), WORKINGSET_NODERECLAIM);
-	mem_cgroup_inc_page_stat(virt_to_page(node),
-				 MEMCG_WORKINGSET_NODERECLAIM);
+	mem_cgroup_inc_page_stat(virt_to_page(node), WORKINGSET_NODERECLAIM);
 	__radix_tree_delete_node(&mapping->page_tree, node,
 				 workingset_update_node, mapping);
 
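
With the MEMCG_WORKINGSET_* aliases gone, a refault or node-reclaim event bumps the node counter and the memcg counter under the same WORKINGSET_* item. A rough illustration of that calling pattern with hypothetical stubs (not the kernel implementation):

#include <stdio.h>

enum stat_item { WORKINGSET_REFAULT, WORKINGSET_ACTIVATE, NR_ITEMS };

static long node_stat[NR_ITEMS];	/* stand-in for the per-node vmstat counters */
static long memcg_stat[NR_ITEMS];	/* stand-in for memcg->stat->count[] */

static void inc_node_state(enum stat_item idx)
{
	node_stat[idx]++;
}

static void mem_cgroup_inc_stat(enum stat_item idx)
{
	memcg_stat[idx]++;
}

int main(void)
{
	/* One shared item name feeds both accounting layers, as in workingset_refault(). */
	inc_node_state(WORKINGSET_REFAULT);
	mem_cgroup_inc_stat(WORKINGSET_REFAULT);
	printf("node=%ld memcg=%ld\n",
	       node_stat[WORKINGSET_REFAULT], memcg_stat[WORKINGSET_REFAULT]);
	return 0;
}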