-rw-r--r--  include/linux/memcontrol.h   73
-rw-r--r--  mm/memcontrol.c              38
-rw-r--r--  mm/page-writeback.c          10
-rw-r--r--  mm/rmap.c                     4
-rw-r--r--  mm/vmscan.c                   5
-rw-r--r--  mm/workingset.c               6
6 files changed, 68 insertions, 68 deletions
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 0fa1f5de6841..899949bbb2f9 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -472,8 +472,8 @@ extern int do_swap_account;
 void lock_page_memcg(struct page *page);
 void unlock_page_memcg(struct page *page);
 
-static inline unsigned long mem_cgroup_read_stat(struct mem_cgroup *memcg,
+static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
                                              enum memcg_stat_item idx)
 {
         long val = 0;
         int cpu;
@@ -487,27 +487,27 @@ static inline unsigned long mem_cgroup_read_stat(struct mem_cgroup *memcg,
         return val;
 }
 
-static inline void mem_cgroup_update_stat(struct mem_cgroup *memcg,
+static inline void mod_memcg_state(struct mem_cgroup *memcg,
                                     enum memcg_stat_item idx, int val)
 {
         if (!mem_cgroup_disabled())
                 this_cpu_add(memcg->stat->count[idx], val);
 }
 
-static inline void mem_cgroup_inc_stat(struct mem_cgroup *memcg,
+static inline void inc_memcg_state(struct mem_cgroup *memcg,
                                     enum memcg_stat_item idx)
 {
-        mem_cgroup_update_stat(memcg, idx, 1);
+        mod_memcg_state(memcg, idx, 1);
 }
 
-static inline void mem_cgroup_dec_stat(struct mem_cgroup *memcg,
+static inline void dec_memcg_state(struct mem_cgroup *memcg,
                                     enum memcg_stat_item idx)
 {
-        mem_cgroup_update_stat(memcg, idx, -1);
+        mod_memcg_state(memcg, idx, -1);
 }
 
 /**
- * mem_cgroup_update_page_stat - update page state statistics
+ * mod_memcg_page_state - update page state statistics
  * @page: the page
  * @idx: page state item to account
  * @val: number of pages (positive or negative)
@@ -518,28 +518,28 @@ static inline void mem_cgroup_dec_stat(struct mem_cgroup *memcg,
  *
  * lock_page(page) or lock_page_memcg(page)
  * if (TestClearPageState(page))
- *   mem_cgroup_update_page_stat(page, state, -1);
+ *   mod_memcg_page_state(page, state, -1);
  * unlock_page(page) or unlock_page_memcg(page)
  *
  * Kernel pages are an exception to this, since they'll never move.
  */
-static inline void mem_cgroup_update_page_stat(struct page *page,
+static inline void mod_memcg_page_state(struct page *page,
                                  enum memcg_stat_item idx, int val)
 {
         if (page->mem_cgroup)
-                mem_cgroup_update_stat(page->mem_cgroup, idx, val);
+                mod_memcg_state(page->mem_cgroup, idx, val);
 }
 
-static inline void mem_cgroup_inc_page_stat(struct page *page,
+static inline void inc_memcg_page_state(struct page *page,
                                  enum memcg_stat_item idx)
 {
-        mem_cgroup_update_page_stat(page, idx, 1);
+        mod_memcg_page_state(page, idx, 1);
 }
 
-static inline void mem_cgroup_dec_page_stat(struct page *page,
+static inline void dec_memcg_page_state(struct page *page,
                                  enum memcg_stat_item idx)
 {
-        mem_cgroup_update_page_stat(page, idx, -1);
+        mod_memcg_page_state(page, idx, -1);
 }
 
 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
@@ -739,40 +739,41 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
         return false;
 }
 
-static inline unsigned long mem_cgroup_read_stat(struct mem_cgroup *memcg,
-                                             enum mem_cgroup_stat_index idx)
+static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
+                                             enum memcg_stat_item idx)
 {
         return 0;
 }
 
-static inline void mem_cgroup_update_stat(struct mem_cgroup *memcg,
-                                    enum memcg_stat_item idx, int val)
+static inline void mod_memcg_state(struct mem_cgroup *memcg,
+                                    enum memcg_stat_item idx,
+                                    int nr)
 {
 }
 
-static inline void mem_cgroup_inc_stat(struct mem_cgroup *memcg,
+static inline void inc_memcg_state(struct mem_cgroup *memcg,
                                     enum memcg_stat_item idx)
 {
 }
 
-static inline void mem_cgroup_dec_stat(struct mem_cgroup *memcg,
+static inline void dec_memcg_state(struct mem_cgroup *memcg,
                                     enum memcg_stat_item idx)
 {
 }
 
-static inline void mem_cgroup_update_page_stat(struct page *page,
+static inline void mod_memcg_page_state(struct page *page,
                                  enum memcg_stat_item idx,
                                  int nr)
 {
 }
 
-static inline void mem_cgroup_inc_page_stat(struct page *page,
+static inline void inc_memcg_page_state(struct page *page,
                                  enum memcg_stat_item idx)
 {
 }
 
-static inline void mem_cgroup_dec_page_stat(struct page *page,
+static inline void dec_memcg_page_state(struct page *page,
                                  enum memcg_stat_item idx)
 {
 }
 
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6fe4c7fafbfc..ff73899af61a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -552,8 +552,8 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
  * implemented.
  */
 
-static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
+static unsigned long memcg_sum_events(struct mem_cgroup *memcg,
                                       enum memcg_event_item event)
 {
         unsigned long val = 0;
         int cpu;
@@ -1180,7 +1180,7 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
                 if (memcg1_stats[i] == MEMCG_SWAP && !do_swap_account)
                         continue;
                 pr_cont(" %s:%luKB", memcg1_stat_names[i],
-                        K(mem_cgroup_read_stat(iter, memcg1_stats[i])));
+                        K(memcg_page_state(iter, memcg1_stats[i])));
         }
 
         for (i = 0; i < NR_LRU_LISTS; i++)
@@ -2713,7 +2713,7 @@ static void tree_stat(struct mem_cgroup *memcg, unsigned long *stat)
 
         for_each_mem_cgroup_tree(iter, memcg) {
                 for (i = 0; i < MEMCG_NR_STAT; i++)
-                        stat[i] += mem_cgroup_read_stat(iter, i);
+                        stat[i] += memcg_page_state(iter, i);
         }
 }
 
@@ -2726,7 +2726,7 @@ static void tree_events(struct mem_cgroup *memcg, unsigned long *events)
 
         for_each_mem_cgroup_tree(iter, memcg) {
                 for (i = 0; i < MEMCG_NR_EVENTS; i++)
-                        events[i] += mem_cgroup_read_events(iter, i);
+                        events[i] += memcg_sum_events(iter, i);
         }
 }
 
@@ -2738,10 +2738,10 @@ static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
                 struct mem_cgroup *iter;
 
                 for_each_mem_cgroup_tree(iter, memcg) {
-                        val += mem_cgroup_read_stat(iter, MEMCG_CACHE);
-                        val += mem_cgroup_read_stat(iter, MEMCG_RSS);
+                        val += memcg_page_state(iter, MEMCG_CACHE);
+                        val += memcg_page_state(iter, MEMCG_RSS);
                         if (swap)
-                                val += mem_cgroup_read_stat(iter, MEMCG_SWAP);
+                                val += memcg_page_state(iter, MEMCG_SWAP);
                 }
         } else {
                 if (!swap)
@@ -3145,13 +3145,13 @@ static int memcg_stat_show(struct seq_file *m, void *v)
                 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
                         continue;
                 seq_printf(m, "%s %lu\n", memcg1_stat_names[i],
-                           mem_cgroup_read_stat(memcg, memcg1_stats[i]) *
+                           memcg_page_state(memcg, memcg1_stats[i]) *
                            PAGE_SIZE);
         }
 
         for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
                 seq_printf(m, "%s %lu\n", memcg1_event_names[i],
-                           mem_cgroup_read_events(memcg, memcg1_events[i]));
+                           memcg_sum_events(memcg, memcg1_events[i]));
 
         for (i = 0; i < NR_LRU_LISTS; i++)
                 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
@@ -3175,7 +3175,7 @@ static int memcg_stat_show(struct seq_file *m, void *v)
                 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
                         continue;
                 for_each_mem_cgroup_tree(mi, memcg)
-                        val += mem_cgroup_read_stat(mi, memcg1_stats[i]) *
+                        val += memcg_page_state(mi, memcg1_stats[i]) *
                         PAGE_SIZE;
                 seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], val);
         }
@@ -3184,7 +3184,7 @@ static int memcg_stat_show(struct seq_file *m, void *v)
                 unsigned long long val = 0;
 
                 for_each_mem_cgroup_tree(mi, memcg)
-                        val += mem_cgroup_read_events(mi, memcg1_events[i]);
+                        val += memcg_sum_events(mi, memcg1_events[i]);
                 seq_printf(m, "total_%s %llu\n", memcg1_event_names[i], val);
         }
 
@@ -3650,10 +3650,10 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
         struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
         struct mem_cgroup *parent;
 
-        *pdirty = mem_cgroup_read_stat(memcg, NR_FILE_DIRTY);
+        *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
 
         /* this should eventually include NR_UNSTABLE_NFS */
-        *pwriteback = mem_cgroup_read_stat(memcg, NR_WRITEBACK);
+        *pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
         *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
                         (1 << LRU_ACTIVE_FILE));
         *pheadroom = PAGE_COUNTER_MAX;
@@ -4515,7 +4515,7 @@ static int mem_cgroup_move_account(struct page *page,
 
         /*
          * move_lock grabbed above and caller set from->moving_account, so
-         * mem_cgroup_update_page_stat() will serialize updates to PageDirty.
+         * mod_memcg_page_state will serialize updates to PageDirty.
          * So mapping should be stable for dirty pages.
          */
         if (!anon && PageDirty(page)) {
@@ -5161,10 +5161,10 @@ static int memory_events_show(struct seq_file *m, void *v)
 {
         struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
 
-        seq_printf(m, "low %lu\n", mem_cgroup_read_events(memcg, MEMCG_LOW));
-        seq_printf(m, "high %lu\n", mem_cgroup_read_events(memcg, MEMCG_HIGH));
-        seq_printf(m, "max %lu\n", mem_cgroup_read_events(memcg, MEMCG_MAX));
-        seq_printf(m, "oom %lu\n", mem_cgroup_read_events(memcg, MEMCG_OOM));
+        seq_printf(m, "low %lu\n", memcg_sum_events(memcg, MEMCG_LOW));
+        seq_printf(m, "high %lu\n", memcg_sum_events(memcg, MEMCG_HIGH));
+        seq_printf(m, "max %lu\n", memcg_sum_events(memcg, MEMCG_MAX));
+        seq_printf(m, "oom %lu\n", memcg_sum_events(memcg, MEMCG_OOM));
 
         return 0;
 }
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 777711203809..2359608d2568 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2427,7 +2427,7 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
         inode_attach_wb(inode, page);
         wb = inode_to_wb(inode);
 
-        mem_cgroup_inc_page_stat(page, NR_FILE_DIRTY);
+        inc_memcg_page_state(page, NR_FILE_DIRTY);
         __inc_node_page_state(page, NR_FILE_DIRTY);
         __inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
         __inc_node_page_state(page, NR_DIRTIED);
@@ -2449,7 +2449,7 @@ void account_page_cleaned(struct page *page, struct address_space *mapping,
                           struct bdi_writeback *wb)
 {
         if (mapping_cap_account_dirty(mapping)) {
-                mem_cgroup_dec_page_stat(page, NR_FILE_DIRTY);
+                dec_memcg_page_state(page, NR_FILE_DIRTY);
                 dec_node_page_state(page, NR_FILE_DIRTY);
                 dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
                 dec_wb_stat(wb, WB_RECLAIMABLE);
@@ -2706,7 +2706,7 @@ int clear_page_dirty_for_io(struct page *page)
                  */
                 wb = unlocked_inode_to_wb_begin(inode, &locked);
                 if (TestClearPageDirty(page)) {
-                        mem_cgroup_dec_page_stat(page, NR_FILE_DIRTY);
+                        dec_memcg_page_state(page, NR_FILE_DIRTY);
                         dec_node_page_state(page, NR_FILE_DIRTY);
                         dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
                         dec_wb_stat(wb, WB_RECLAIMABLE);
@@ -2753,7 +2753,7 @@ int test_clear_page_writeback(struct page *page)
                 ret = TestClearPageWriteback(page);
         }
         if (ret) {
-                mem_cgroup_dec_page_stat(page, NR_WRITEBACK);
+                dec_memcg_page_state(page, NR_WRITEBACK);
                 dec_node_page_state(page, NR_WRITEBACK);
                 dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
                 inc_node_page_state(page, NR_WRITTEN);
@@ -2808,7 +2808,7 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
                 ret = TestSetPageWriteback(page);
         }
         if (!ret) {
-                mem_cgroup_inc_page_stat(page, NR_WRITEBACK);
+                inc_memcg_page_state(page, NR_WRITEBACK);
                 inc_node_page_state(page, NR_WRITEBACK);
                 inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
         }
diff --git a/mm/rmap.c b/mm/rmap.c
index a6d018c4a13a..3ff241f714eb 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1158,7 +1158,7 @@ void page_add_file_rmap(struct page *page, bool compound)
                 goto out;
         }
         __mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, nr);
-        mem_cgroup_update_page_stat(page, NR_FILE_MAPPED, nr);
+        mod_memcg_page_state(page, NR_FILE_MAPPED, nr);
 out:
         unlock_page_memcg(page);
 }
@@ -1198,7 +1198,7 @@ static void page_remove_file_rmap(struct page *page, bool compound)
          * pte lock(a spinlock) is held, which implies preemption disabled.
          */
         __mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, -nr);
-        mem_cgroup_update_page_stat(page, NR_FILE_MAPPED, -nr);
+        mod_memcg_page_state(page, NR_FILE_MAPPED, -nr);
 
         if (unlikely(PageMlocked(page)))
                 clear_page_mlock(page);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 417b6657e994..4e7ed65842af 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2046,7 +2046,7 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
         active = lruvec_lru_size(lruvec, active_lru, sc->reclaim_idx);
 
         if (memcg)
-                refaults = mem_cgroup_read_stat(memcg, WORKINGSET_ACTIVATE);
+                refaults = memcg_page_state(memcg, WORKINGSET_ACTIVATE);
         else
                 refaults = node_page_state(pgdat, WORKINGSET_ACTIVATE);
 
@@ -2733,8 +2733,7 @@ static void snapshot_refaults(struct mem_cgroup *root_memcg, pg_data_t *pgdat)
                 struct lruvec *lruvec;
 
                 if (memcg)
-                        refaults = mem_cgroup_read_stat(memcg,
-                                                WORKINGSET_ACTIVATE);
+                        refaults = memcg_page_state(memcg, WORKINGSET_ACTIVATE);
                 else
                         refaults = node_page_state(pgdat, WORKINGSET_ACTIVATE);
 
diff --git a/mm/workingset.c b/mm/workingset.c
index 37fc1057cd86..b8c9ab678479 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -289,11 +289,11 @@ bool workingset_refault(void *shadow)
         refault_distance = (refault - eviction) & EVICTION_MASK;
 
         inc_node_state(pgdat, WORKINGSET_REFAULT);
-        mem_cgroup_inc_stat(memcg, WORKINGSET_REFAULT);
+        inc_memcg_state(memcg, WORKINGSET_REFAULT);
 
         if (refault_distance <= active_file) {
                 inc_node_state(pgdat, WORKINGSET_ACTIVATE);
-                mem_cgroup_inc_stat(memcg, WORKINGSET_ACTIVATE);
+                inc_memcg_state(memcg, WORKINGSET_ACTIVATE);
                 rcu_read_unlock();
                 return true;
         }
@@ -475,7 +475,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
         if (WARN_ON_ONCE(node->exceptional))
                 goto out_invalid;
         inc_node_state(page_pgdat(virt_to_page(node)), WORKINGSET_NODERECLAIM);
-        mem_cgroup_inc_page_stat(virt_to_page(node), WORKINGSET_NODERECLAIM);
+        inc_memcg_page_state(virt_to_page(node), WORKINGSET_NODERECLAIM);
         __radix_tree_delete_node(&mapping->page_tree, node,
                                  workingset_update_node, mapping);
 
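
Usage note, not part of the patch: a minimal sketch of how a caller uses the renamed page-state helpers after this change, following the locking rule documented above mod_memcg_page_state() in memcontrol.h. The function example_clear_page_dirty() and its surrounding context are hypothetical; only the helper names come from this diff.

#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/page-flags.h>

/*
 * Hypothetical illustration: clear the dirty flag on a file page and keep
 * the per-memcg and per-node counters in sync. lock_page_memcg() keeps
 * page->mem_cgroup stable so dec_memcg_page_state() cannot race with
 * cgroup migration, as required by the comment in memcontrol.h.
 */
static void example_clear_page_dirty(struct page *page)
{
        lock_page_memcg(page);
        if (TestClearPageDirty(page)) {
                dec_memcg_page_state(page, NR_FILE_DIRTY); /* was mem_cgroup_dec_page_stat() */
                dec_node_page_state(page, NR_FILE_DIRTY);
        }
        unlock_page_memcg(page);
}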