author     Johannes Weiner <hannes@cmpxchg.org>            2016-03-15 17:57:04 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-03-15 19:55:16 -0400
commit     81f8c3a461d16f0355ced3d56d6d1bb5923207a1 (patch)
tree       5d821760ca548b4357221c0399b92b7154221c33
parent     0db2cb8da89d991762ec2aece45e55ceaee34664 (diff)
mm: memcontrol: generalize locking for the page->mem_cgroup binding
These patches tag the page cache radix tree eviction entries with the memcg an
evicted page belonged to, thus making per-cgroup LRU reclaim work properly and
be as adaptive to new cache workingsets as global reclaim already is.

This should have been part of the original thrash detection patch series, but
was deferred due to the complexity of those patches.

This patch (of 5):

So far the only sites that needed to exclude charge migration to stabilize
page->mem_cgroup have been per-cgroup page statistics, hence the name
mem_cgroup_begin_page_stat(). But per-cgroup thrash detection will add another
site that needs to ensure page->mem_cgroup lifetime.

Rename these locking functions to the more generic lock_page_memcg() and
unlock_page_memcg(). Since charge migration is a cgroup1 feature only, we
might be able to delete it at some point, and these now easy-to-identify
locking sites along with it.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Suggested-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
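The calling convention is unchanged by the rename; only the names become
generic. A minimal sketch of the pattern as it reads after this patch (the
dirty-page statistic is just one example of a page-state change that needs the
page->mem_cgroup binding held stable while it is updated):

	struct mem_cgroup *memcg;

	/* Exclude charge migration so page->mem_cgroup cannot change under us. */
	memcg = lock_page_memcg(page);
	if (TestClearPageDirty(page))
		mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_DIRTY);
	unlock_page_memcg(memcg);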
-rw-r--r--   fs/buffer.c                  14
-rw-r--r--   fs/xfs/xfs_aops.c             8
-rw-r--r--   include/linux/memcontrol.h   16
-rw-r--r--   mm/filemap.c                 12
-rw-r--r--   mm/memcontrol.c              34
-rw-r--r--   mm/page-writeback.c          28
-rw-r--r--   mm/rmap.c                     8
-rw-r--r--   mm/truncate.c                 6
-rw-r--r--   mm/vmscan.c                   8
9 files changed, 67 insertions, 67 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index e1632abb4ca9..dc991510bb06 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -621,7 +621,7 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
  * If warn is true, then emit a warning if the page is not uptodate and has
  * not been truncated.
  *
- * The caller must hold mem_cgroup_begin_page_stat() lock.
+ * The caller must hold lock_page_memcg().
  */
 static void __set_page_dirty(struct page *page, struct address_space *mapping,
 			     struct mem_cgroup *memcg, int warn)
@@ -683,17 +683,17 @@ int __set_page_dirty_buffers(struct page *page)
 		} while (bh != head);
 	}
 	/*
-	 * Use mem_group_begin_page_stat() to keep PageDirty synchronized with
-	 * per-memcg dirty page counters.
+	 * Lock out page->mem_cgroup migration to keep PageDirty
+	 * synchronized with per-memcg dirty page counters.
 	 */
-	memcg = mem_cgroup_begin_page_stat(page);
+	memcg = lock_page_memcg(page);
 	newly_dirty = !TestSetPageDirty(page);
 	spin_unlock(&mapping->private_lock);
 
 	if (newly_dirty)
 		__set_page_dirty(page, mapping, memcg, 1);
 
-	mem_cgroup_end_page_stat(memcg);
+	unlock_page_memcg(memcg);
 
 	if (newly_dirty)
 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
@@ -1169,13 +1169,13 @@ void mark_buffer_dirty(struct buffer_head *bh)
 	struct address_space *mapping = NULL;
 	struct mem_cgroup *memcg;
 
-	memcg = mem_cgroup_begin_page_stat(page);
+	memcg = lock_page_memcg(page);
 	if (!TestSetPageDirty(page)) {
 		mapping = page_mapping(page);
 		if (mapping)
 			__set_page_dirty(page, mapping, memcg, 0);
 	}
-	mem_cgroup_end_page_stat(memcg);
+	unlock_page_memcg(memcg);
 	if (mapping)
 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 }
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index a9ebabfe7587..5f85ebc52a98 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -1978,10 +1978,10 @@ xfs_vm_set_page_dirty(
 		} while (bh != head);
 	}
 	/*
-	 * Use mem_group_begin_page_stat() to keep PageDirty synchronized with
-	 * per-memcg dirty page counters.
+	 * Lock out page->mem_cgroup migration to keep PageDirty
+	 * synchronized with per-memcg dirty page counters.
 	 */
-	memcg = mem_cgroup_begin_page_stat(page);
+	memcg = lock_page_memcg(page);
 	newly_dirty = !TestSetPageDirty(page);
 	spin_unlock(&mapping->private_lock);
 
@@ -1998,7 +1998,7 @@ xfs_vm_set_page_dirty(
 		}
 		spin_unlock_irqrestore(&mapping->tree_lock, flags);
 	}
-	mem_cgroup_end_page_stat(memcg);
+	unlock_page_memcg(memcg);
 	if (newly_dirty)
 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 	return newly_dirty;
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 30b02e79610e..8502fd4144eb 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -429,8 +429,8 @@ bool mem_cgroup_oom_synchronize(bool wait);
 extern int do_swap_account;
 #endif
 
-struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page);
-void mem_cgroup_end_page_stat(struct mem_cgroup *memcg);
+struct mem_cgroup *lock_page_memcg(struct page *page);
+void unlock_page_memcg(struct mem_cgroup *memcg);
 
 /**
  * mem_cgroup_update_page_stat - update page state statistics
@@ -438,7 +438,13 @@ void mem_cgroup_end_page_stat(struct mem_cgroup *memcg);
  * @idx: page state item to account
  * @val: number of pages (positive or negative)
  *
- * See mem_cgroup_begin_page_stat() for locking requirements.
+ * Callers must use lock_page_memcg() to prevent double accounting
+ * when the page is concurrently being moved to another memcg:
+ *
+ *   memcg = lock_page_memcg(page);
+ *   if (TestClearPageState(page))
+ *     mem_cgroup_update_page_stat(memcg, state, -1);
+ *   unlock_page_memcg(memcg);
  */
 static inline void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
 				 enum mem_cgroup_stat_index idx, int val)
@@ -613,12 +619,12 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 {
 }
 
-static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page)
+static inline struct mem_cgroup *lock_page_memcg(struct page *page)
 {
 	return NULL;
 }
 
-static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg)
+static inline void unlock_page_memcg(struct mem_cgroup *memcg)
 {
 }
 
diff --git a/mm/filemap.c b/mm/filemap.c
index 4a0f5fa79dbd..ee8140cf935d 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -101,7 +101,7 @@
  *    ->tree_lock		(page_remove_rmap->set_page_dirty)
  *    bdi.wb->list_lock		(page_remove_rmap->set_page_dirty)
  *    ->inode->i_lock		(page_remove_rmap->set_page_dirty)
- *    ->memcg->move_lock	(page_remove_rmap->mem_cgroup_begin_page_stat)
+ *    ->memcg->move_lock	(page_remove_rmap->lock_page_memcg)
  *    bdi.wb->list_lock		(zap_pte_range->set_page_dirty)
  *    ->inode->i_lock		(zap_pte_range->set_page_dirty)
  *    ->private_lock		(zap_pte_range->__set_page_dirty_buffers)
@@ -177,7 +177,7 @@ static void page_cache_tree_delete(struct address_space *mapping,
  * Delete a page from the page cache and free it. Caller has to make
  * sure the page is locked and that nobody else uses it - or that usage
  * is safe. The caller must hold the mapping's tree_lock and
- * mem_cgroup_begin_page_stat().
+ * lock_page_memcg().
  */
 void __delete_from_page_cache(struct page *page, void *shadow,
 			      struct mem_cgroup *memcg)
@@ -263,11 +263,11 @@ void delete_from_page_cache(struct page *page)
 
 	freepage = mapping->a_ops->freepage;
 
-	memcg = mem_cgroup_begin_page_stat(page);
+	memcg = lock_page_memcg(page);
 	spin_lock_irqsave(&mapping->tree_lock, flags);
 	__delete_from_page_cache(page, NULL, memcg);
 	spin_unlock_irqrestore(&mapping->tree_lock, flags);
-	mem_cgroup_end_page_stat(memcg);
+	unlock_page_memcg(memcg);
 
 	if (freepage)
 		freepage(page);
@@ -561,7 +561,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 	new->mapping = mapping;
 	new->index = offset;
 
-	memcg = mem_cgroup_begin_page_stat(old);
+	memcg = lock_page_memcg(old);
 	spin_lock_irqsave(&mapping->tree_lock, flags);
 	__delete_from_page_cache(old, NULL, memcg);
 	error = radix_tree_insert(&mapping->page_tree, offset, new);
@@ -576,7 +576,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 		if (PageSwapBacked(new))
 			__inc_zone_page_state(new, NR_SHMEM);
 		spin_unlock_irqrestore(&mapping->tree_lock, flags);
-		mem_cgroup_end_page_stat(memcg);
+		unlock_page_memcg(memcg);
 		mem_cgroup_replace_page(old, new);
 		radix_tree_preload_end();
 		if (freepage)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d06cae2de783..953f0f984392 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1709,19 +1709,13 @@ cleanup:
 }
 
 /**
- * mem_cgroup_begin_page_stat - begin a page state statistics transaction
- * @page: page that is going to change accounted state
- *
- * This function must mark the beginning of an accounted page state
- * change to prevent double accounting when the page is concurrently
- * being moved to another memcg:
+ * lock_page_memcg - lock a page->mem_cgroup binding
+ * @page: the page
  *
- *   memcg = mem_cgroup_begin_page_stat(page);
- *   if (TestClearPageState(page))
- *     mem_cgroup_update_page_stat(memcg, state, -1);
- *   mem_cgroup_end_page_stat(memcg);
+ * This function protects unlocked LRU pages from being moved to
+ * another cgroup and stabilizes their page->mem_cgroup binding.
  */
-struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page)
+struct mem_cgroup *lock_page_memcg(struct page *page)
 {
 	struct mem_cgroup *memcg;
 	unsigned long flags;
@@ -1759,20 +1753,20 @@ again:
 	/*
 	 * When charge migration first begins, we can have locked and
 	 * unlocked page stat updates happening concurrently. Track
-	 * the task who has the lock for mem_cgroup_end_page_stat().
+	 * the task who has the lock for unlock_page_memcg().
 	 */
 	memcg->move_lock_task = current;
 	memcg->move_lock_flags = flags;
 
 	return memcg;
 }
-EXPORT_SYMBOL(mem_cgroup_begin_page_stat);
+EXPORT_SYMBOL(lock_page_memcg);
 
 /**
- * mem_cgroup_end_page_stat - finish a page state statistics transaction
- * @memcg: the memcg that was accounted against
+ * unlock_page_memcg - unlock a page->mem_cgroup binding
+ * @memcg: the memcg returned by lock_page_memcg()
  */
-void mem_cgroup_end_page_stat(struct mem_cgroup *memcg)
+void unlock_page_memcg(struct mem_cgroup *memcg)
 {
 	if (memcg && memcg->move_lock_task == current) {
 		unsigned long flags = memcg->move_lock_flags;
@@ -1785,7 +1779,7 @@ void mem_cgroup_end_page_stat(struct mem_cgroup *memcg)
 
 	rcu_read_unlock();
 }
-EXPORT_SYMBOL(mem_cgroup_end_page_stat);
+EXPORT_SYMBOL(unlock_page_memcg);
 
 /*
  * size of first charge trial. "32" comes from vmscan.c's magic value.
@@ -4923,9 +4917,9 @@ static void mem_cgroup_move_charge(struct mm_struct *mm)
 
 	lru_add_drain_all();
 	/*
-	 * Signal mem_cgroup_begin_page_stat() to take the memcg's
-	 * move_lock while we're moving its pages to another memcg.
-	 * Then wait for already started RCU-only updates to finish.
+	 * Signal lock_page_memcg() to take the memcg's move_lock
+	 * while we're moving its pages to another memcg. Then wait
+	 * for already started RCU-only updates to finish.
 	 */
 	atomic_inc(&mc.from->moving_account);
 	synchronize_rcu();
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index d782cbab735a..2b5ea1271e32 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2410,7 +2410,7 @@ int __set_page_dirty_no_writeback(struct page *page)
 /*
  * Helper function for set_page_dirty family.
  *
- * Caller must hold mem_cgroup_begin_page_stat().
+ * Caller must hold lock_page_memcg().
  *
  * NOTE: This relies on being atomic wrt interrupts.
  */
@@ -2442,7 +2442,7 @@ EXPORT_SYMBOL(account_page_dirtied);
 /*
  * Helper function for deaccounting dirty page without writeback.
  *
- * Caller must hold mem_cgroup_begin_page_stat().
+ * Caller must hold lock_page_memcg().
  */
 void account_page_cleaned(struct page *page, struct address_space *mapping,
 			  struct mem_cgroup *memcg, struct bdi_writeback *wb)
@@ -2471,13 +2471,13 @@ int __set_page_dirty_nobuffers(struct page *page)
 {
 	struct mem_cgroup *memcg;
 
-	memcg = mem_cgroup_begin_page_stat(page);
+	memcg = lock_page_memcg(page);
 	if (!TestSetPageDirty(page)) {
 		struct address_space *mapping = page_mapping(page);
 		unsigned long flags;
 
 		if (!mapping) {
-			mem_cgroup_end_page_stat(memcg);
+			unlock_page_memcg(memcg);
 			return 1;
 		}
 
@@ -2488,7 +2488,7 @@ int __set_page_dirty_nobuffers(struct page *page)
 		radix_tree_tag_set(&mapping->page_tree, page_index(page),
 				   PAGECACHE_TAG_DIRTY);
 		spin_unlock_irqrestore(&mapping->tree_lock, flags);
-		mem_cgroup_end_page_stat(memcg);
+		unlock_page_memcg(memcg);
 
 		if (mapping->host) {
 			/* !PageAnon && !swapper_space */
@@ -2496,7 +2496,7 @@ int __set_page_dirty_nobuffers(struct page *page)
 		}
 		return 1;
 	}
-	mem_cgroup_end_page_stat(memcg);
+	unlock_page_memcg(memcg);
 	return 0;
 }
 EXPORT_SYMBOL(__set_page_dirty_nobuffers);
@@ -2629,14 +2629,14 @@ void cancel_dirty_page(struct page *page)
 		struct mem_cgroup *memcg;
 		bool locked;
 
-		memcg = mem_cgroup_begin_page_stat(page);
+		memcg = lock_page_memcg(page);
 		wb = unlocked_inode_to_wb_begin(inode, &locked);
 
 		if (TestClearPageDirty(page))
 			account_page_cleaned(page, mapping, memcg, wb);
 
 		unlocked_inode_to_wb_end(inode, locked);
-		mem_cgroup_end_page_stat(memcg);
+		unlock_page_memcg(memcg);
 	} else {
 		ClearPageDirty(page);
 	}
@@ -2705,7 +2705,7 @@ int clear_page_dirty_for_io(struct page *page)
 		 * always locked coming in here, so we get the desired
 		 * exclusion.
 		 */
-		memcg = mem_cgroup_begin_page_stat(page);
+		memcg = lock_page_memcg(page);
 		wb = unlocked_inode_to_wb_begin(inode, &locked);
 		if (TestClearPageDirty(page)) {
 			mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_DIRTY);
@@ -2714,7 +2714,7 @@ int clear_page_dirty_for_io(struct page *page)
 			ret = 1;
 		}
 		unlocked_inode_to_wb_end(inode, locked);
-		mem_cgroup_end_page_stat(memcg);
+		unlock_page_memcg(memcg);
 		return ret;
 	}
 	return TestClearPageDirty(page);
@@ -2727,7 +2727,7 @@ int test_clear_page_writeback(struct page *page)
 	struct mem_cgroup *memcg;
 	int ret;
 
-	memcg = mem_cgroup_begin_page_stat(page);
+	memcg = lock_page_memcg(page);
 	if (mapping) {
 		struct inode *inode = mapping->host;
 		struct backing_dev_info *bdi = inode_to_bdi(inode);
@@ -2755,7 +2755,7 @@ int test_clear_page_writeback(struct page *page)
 		dec_zone_page_state(page, NR_WRITEBACK);
 		inc_zone_page_state(page, NR_WRITTEN);
 	}
-	mem_cgroup_end_page_stat(memcg);
+	unlock_page_memcg(memcg);
 	return ret;
 }
 
@@ -2765,7 +2765,7 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
 	struct mem_cgroup *memcg;
 	int ret;
 
-	memcg = mem_cgroup_begin_page_stat(page);
+	memcg = lock_page_memcg(page);
 	if (mapping) {
 		struct inode *inode = mapping->host;
 		struct backing_dev_info *bdi = inode_to_bdi(inode);
@@ -2796,7 +2796,7 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
 		mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
 		inc_zone_page_state(page, NR_WRITEBACK);
 	}
-	mem_cgroup_end_page_stat(memcg);
+	unlock_page_memcg(memcg);
 	return ret;
 
 }
diff --git a/mm/rmap.c b/mm/rmap.c
index 79f3bf047f38..2871e7d3cced 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1289,19 +1289,19 @@ void page_add_file_rmap(struct page *page)
 {
 	struct mem_cgroup *memcg;
 
-	memcg = mem_cgroup_begin_page_stat(page);
+	memcg = lock_page_memcg(page);
 	if (atomic_inc_and_test(&page->_mapcount)) {
 		__inc_zone_page_state(page, NR_FILE_MAPPED);
 		mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
 	}
-	mem_cgroup_end_page_stat(memcg);
+	unlock_page_memcg(memcg);
 }
 
 static void page_remove_file_rmap(struct page *page)
 {
 	struct mem_cgroup *memcg;
 
-	memcg = mem_cgroup_begin_page_stat(page);
+	memcg = lock_page_memcg(page);
 
 	/* Hugepages are not counted in NR_FILE_MAPPED for now. */
 	if (unlikely(PageHuge(page))) {
@@ -1325,7 +1325,7 @@ static void page_remove_file_rmap(struct page *page)
 	if (unlikely(PageMlocked(page)))
 		clear_page_mlock(page);
 out:
-	mem_cgroup_end_page_stat(memcg);
+	unlock_page_memcg(memcg);
 }
 
 static void page_remove_anon_compound_rmap(struct page *page)
diff --git a/mm/truncate.c b/mm/truncate.c
index e3ee0e27cd17..51a24f6a555d 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -528,7 +528,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
 	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
 		return 0;
 
-	memcg = mem_cgroup_begin_page_stat(page);
+	memcg = lock_page_memcg(page);
 	spin_lock_irqsave(&mapping->tree_lock, flags);
 	if (PageDirty(page))
 		goto failed;
@@ -536,7 +536,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
 	BUG_ON(page_has_private(page));
 	__delete_from_page_cache(page, NULL, memcg);
 	spin_unlock_irqrestore(&mapping->tree_lock, flags);
-	mem_cgroup_end_page_stat(memcg);
+	unlock_page_memcg(memcg);
 
 	if (mapping->a_ops->freepage)
 		mapping->a_ops->freepage(page);
@@ -545,7 +545,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
 	return 1;
 failed:
 	spin_unlock_irqrestore(&mapping->tree_lock, flags);
-	mem_cgroup_end_page_stat(memcg);
+	unlock_page_memcg(memcg);
 	return 0;
 }
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 039f08d369a5..08547a7136d3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -608,7 +608,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 	BUG_ON(!PageLocked(page));
 	BUG_ON(mapping != page_mapping(page));
 
-	memcg = mem_cgroup_begin_page_stat(page);
+	memcg = lock_page_memcg(page);
 	spin_lock_irqsave(&mapping->tree_lock, flags);
 	/*
 	 * The non racy check for a busy page.
@@ -648,7 +648,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 		mem_cgroup_swapout(page, swap);
 		__delete_from_swap_cache(page);
 		spin_unlock_irqrestore(&mapping->tree_lock, flags);
-		mem_cgroup_end_page_stat(memcg);
+		unlock_page_memcg(memcg);
 		swapcache_free(swap);
 	} else {
 		void (*freepage)(struct page *);
@@ -676,7 +676,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 		shadow = workingset_eviction(mapping, page);
 		__delete_from_page_cache(page, shadow, memcg);
 		spin_unlock_irqrestore(&mapping->tree_lock, flags);
-		mem_cgroup_end_page_stat(memcg);
+		unlock_page_memcg(memcg);
 
 		if (freepage != NULL)
 			freepage(page);
@@ -686,7 +686,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 
 cannot_free:
 	spin_unlock_irqrestore(&mapping->tree_lock, flags);
-	mem_cgroup_end_page_stat(memcg);
+	unlock_page_memcg(memcg);
 	return 0;
 }
 