author	Johannes Weiner <hannes@cmpxchg.org>	2016-03-15 17:57:22 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-03-15 19:55:16 -0400
commit	62cccb8c8e7a3ca233f49d5e7dcb1557d25465cd (patch)
tree	43a902faf461c65393a4efebf9ff9622017b92b1 /mm
parent	6a93ca8fde3cfce0f00f02281139a377c83e8d8c (diff)
mm: simplify lock_page_memcg()
Now that migration doesn't clear page->mem_cgroup of live pages anymore,
it's safe to make lock_page_memcg() and the memcg stat functions take
pages, and spare the callers from memcg objects.

[akpm@linux-foundation.org: fix warnings]
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Suggested-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Cc: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
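The net effect at each call site is the pattern below, a minimal sketch distilled from the delete_from_page_cache() hunk in this diff and shown out of context for illustration (mapping and flags are locals of that function):

/*
 * Caller pattern after this patch: lock_page_memcg() takes the page
 * itself and returns void, and unlock_page_memcg() takes the same
 * page, so callers no longer have to carry a mem_cgroup pointer.
 */
lock_page_memcg(page);
spin_lock_irqsave(&mapping->tree_lock, flags);
__delete_from_page_cache(page, NULL);	/* memcg argument is gone */
spin_unlock_irqrestore(&mapping->tree_lock, flags);
unlock_page_memcg(page);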
Diffstat (limited to 'mm')
-rw-r--r--	mm/filemap.c		20
-rw-r--r--	mm/memcontrol.c		23
-rw-r--r--	mm/page-writeback.c	49
-rw-r--r--	mm/rmap.c		16
-rw-r--r--	mm/truncate.c		9
-rw-r--r--	mm/vmscan.c		11
-rw-r--r--	mm/workingset.c		9
7 files changed, 57 insertions(+), 80 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index d8317caffe85..8e629c4ef0c8 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -179,8 +179,7 @@ static void page_cache_tree_delete(struct address_space *mapping,
  * is safe. The caller must hold the mapping's tree_lock and
  * lock_page_memcg().
  */
-void __delete_from_page_cache(struct page *page, void *shadow,
-			      struct mem_cgroup *memcg)
+void __delete_from_page_cache(struct page *page, void *shadow)
 {
 	struct address_space *mapping = page->mapping;
 
@@ -239,8 +238,7 @@ void __delete_from_page_cache(struct page *page, void *shadow,
 	 * anyway will be cleared before returning page into buddy allocator.
 	 */
 	if (WARN_ON_ONCE(PageDirty(page)))
-		account_page_cleaned(page, mapping, memcg,
-				     inode_to_wb(mapping->host));
+		account_page_cleaned(page, mapping, inode_to_wb(mapping->host));
 }
 
 /**
@@ -254,7 +252,6 @@ void __delete_from_page_cache(struct page *page, void *shadow,
 void delete_from_page_cache(struct page *page)
 {
 	struct address_space *mapping = page->mapping;
-	struct mem_cgroup *memcg;
 	unsigned long flags;
 
 	void (*freepage)(struct page *);
@@ -263,11 +260,11 @@ void delete_from_page_cache(struct page *page)
 
 	freepage = mapping->a_ops->freepage;
 
-	memcg = lock_page_memcg(page);
+	lock_page_memcg(page);
 	spin_lock_irqsave(&mapping->tree_lock, flags);
-	__delete_from_page_cache(page, NULL, memcg);
+	__delete_from_page_cache(page, NULL);
 	spin_unlock_irqrestore(&mapping->tree_lock, flags);
-	unlock_page_memcg(memcg);
+	unlock_page_memcg(page);
 
 	if (freepage)
 		freepage(page);
@@ -551,7 +548,6 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 	if (!error) {
 		struct address_space *mapping = old->mapping;
 		void (*freepage)(struct page *);
-		struct mem_cgroup *memcg;
 		unsigned long flags;
 
 		pgoff_t offset = old->index;
@@ -561,9 +557,9 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 		new->mapping = mapping;
 		new->index = offset;
 
-		memcg = lock_page_memcg(old);
+		lock_page_memcg(old);
 		spin_lock_irqsave(&mapping->tree_lock, flags);
-		__delete_from_page_cache(old, NULL, memcg);
+		__delete_from_page_cache(old, NULL);
 		error = radix_tree_insert(&mapping->page_tree, offset, new);
 		BUG_ON(error);
 		mapping->nrpages++;
@@ -576,7 +572,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 		if (PageSwapBacked(new))
 			__inc_zone_page_state(new, NR_SHMEM);
 		spin_unlock_irqrestore(&mapping->tree_lock, flags);
-		unlock_page_memcg(memcg);
+		unlock_page_memcg(old);
 		mem_cgroup_migrate(old, new);
 		radix_tree_preload_end();
 		if (freepage)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 64506b2eef34..3e4199830456 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1690,7 +1690,7 @@ cleanup:
  * This function protects unlocked LRU pages from being moved to
  * another cgroup and stabilizes their page->mem_cgroup binding.
  */
-struct mem_cgroup *lock_page_memcg(struct page *page)
+void lock_page_memcg(struct page *page)
 {
 	struct mem_cgroup *memcg;
 	unsigned long flags;
@@ -1699,25 +1699,18 @@ struct mem_cgroup *lock_page_memcg(struct page *page)
 	 * The RCU lock is held throughout the transaction. The fast
 	 * path can get away without acquiring the memcg->move_lock
 	 * because page moving starts with an RCU grace period.
-	 *
-	 * The RCU lock also protects the memcg from being freed when
-	 * the page state that is going to change is the only thing
-	 * preventing the page from being uncharged.
-	 * E.g. end-writeback clearing PageWriteback(), which allows
-	 * migration to go ahead and uncharge the page before the
-	 * account transaction might be complete.
 	 */
 	rcu_read_lock();
 
 	if (mem_cgroup_disabled())
-		return NULL;
+		return;
 again:
 	memcg = page->mem_cgroup;
 	if (unlikely(!memcg))
-		return NULL;
+		return;
 
 	if (atomic_read(&memcg->moving_account) <= 0)
-		return memcg;
+		return;
 
 	spin_lock_irqsave(&memcg->move_lock, flags);
 	if (memcg != page->mem_cgroup) {
@@ -1733,16 +1726,18 @@ again:
 	memcg->move_lock_task = current;
 	memcg->move_lock_flags = flags;
 
-	return memcg;
+	return;
 }
 EXPORT_SYMBOL(lock_page_memcg);
 
 /**
  * unlock_page_memcg - unlock a page->mem_cgroup binding
- * @memcg: the memcg returned by lock_page_memcg()
+ * @page: the page
  */
-void unlock_page_memcg(struct mem_cgroup *memcg)
+void unlock_page_memcg(struct page *page)
 {
+	struct mem_cgroup *memcg = page->mem_cgroup;
+
 	if (memcg && memcg->move_lock_task == current) {
 		unsigned long flags = memcg->move_lock_flags;
 
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 2b5ea1271e32..d7cf2c53d125 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2414,8 +2414,7 @@ int __set_page_dirty_no_writeback(struct page *page)
  *
  * NOTE: This relies on being atomic wrt interrupts.
  */
-void account_page_dirtied(struct page *page, struct address_space *mapping,
-			  struct mem_cgroup *memcg)
+void account_page_dirtied(struct page *page, struct address_space *mapping)
 {
 	struct inode *inode = mapping->host;
 
@@ -2427,7 +2426,7 @@ void account_page_dirtied(struct page *page, struct address_space *mapping,
 		inode_attach_wb(inode, page);
 		wb = inode_to_wb(inode);
 
-		mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_DIRTY);
+		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_DIRTY);
 		__inc_zone_page_state(page, NR_FILE_DIRTY);
 		__inc_zone_page_state(page, NR_DIRTIED);
 		__inc_wb_stat(wb, WB_RECLAIMABLE);
@@ -2445,10 +2444,10 @@ EXPORT_SYMBOL(account_page_dirtied);
  * Caller must hold lock_page_memcg().
  */
 void account_page_cleaned(struct page *page, struct address_space *mapping,
-			  struct mem_cgroup *memcg, struct bdi_writeback *wb)
+			  struct bdi_writeback *wb)
 {
 	if (mapping_cap_account_dirty(mapping)) {
-		mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_DIRTY);
+		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
 		dec_zone_page_state(page, NR_FILE_DIRTY);
 		dec_wb_stat(wb, WB_RECLAIMABLE);
 		task_io_account_cancelled_write(PAGE_CACHE_SIZE);
@@ -2469,26 +2468,24 @@ void account_page_cleaned(struct page *page, struct address_space *mapping,
  */
 int __set_page_dirty_nobuffers(struct page *page)
 {
-	struct mem_cgroup *memcg;
-
-	memcg = lock_page_memcg(page);
+	lock_page_memcg(page);
 	if (!TestSetPageDirty(page)) {
 		struct address_space *mapping = page_mapping(page);
 		unsigned long flags;
 
 		if (!mapping) {
-			unlock_page_memcg(memcg);
+			unlock_page_memcg(page);
 			return 1;
 		}
 
 		spin_lock_irqsave(&mapping->tree_lock, flags);
 		BUG_ON(page_mapping(page) != mapping);
 		WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
-		account_page_dirtied(page, mapping, memcg);
+		account_page_dirtied(page, mapping);
 		radix_tree_tag_set(&mapping->page_tree, page_index(page),
 				   PAGECACHE_TAG_DIRTY);
 		spin_unlock_irqrestore(&mapping->tree_lock, flags);
-		unlock_page_memcg(memcg);
+		unlock_page_memcg(page);
 
 		if (mapping->host) {
 			/* !PageAnon && !swapper_space */
@@ -2496,7 +2493,7 @@ int __set_page_dirty_nobuffers(struct page *page)
 		}
 		return 1;
 	}
-	unlock_page_memcg(memcg);
+	unlock_page_memcg(page);
 	return 0;
 }
 EXPORT_SYMBOL(__set_page_dirty_nobuffers);
@@ -2626,17 +2623,16 @@ void cancel_dirty_page(struct page *page)
 	if (mapping_cap_account_dirty(mapping)) {
 		struct inode *inode = mapping->host;
 		struct bdi_writeback *wb;
-		struct mem_cgroup *memcg;
 		bool locked;
 
-		memcg = lock_page_memcg(page);
+		lock_page_memcg(page);
 		wb = unlocked_inode_to_wb_begin(inode, &locked);
 
 		if (TestClearPageDirty(page))
-			account_page_cleaned(page, mapping, memcg, wb);
+			account_page_cleaned(page, mapping, wb);
 
 		unlocked_inode_to_wb_end(inode, locked);
-		unlock_page_memcg(memcg);
+		unlock_page_memcg(page);
 	} else {
 		ClearPageDirty(page);
 	}
@@ -2667,7 +2663,6 @@ int clear_page_dirty_for_io(struct page *page)
 	if (mapping && mapping_cap_account_dirty(mapping)) {
 		struct inode *inode = mapping->host;
 		struct bdi_writeback *wb;
-		struct mem_cgroup *memcg;
 		bool locked;
 
 		/*
@@ -2705,16 +2700,16 @@ int clear_page_dirty_for_io(struct page *page)
 		 * always locked coming in here, so we get the desired
 		 * exclusion.
 		 */
-		memcg = lock_page_memcg(page);
+		lock_page_memcg(page);
 		wb = unlocked_inode_to_wb_begin(inode, &locked);
 		if (TestClearPageDirty(page)) {
-			mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_DIRTY);
+			mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
 			dec_zone_page_state(page, NR_FILE_DIRTY);
 			dec_wb_stat(wb, WB_RECLAIMABLE);
 			ret = 1;
 		}
 		unlocked_inode_to_wb_end(inode, locked);
-		unlock_page_memcg(memcg);
+		unlock_page_memcg(page);
 		return ret;
 	}
 	return TestClearPageDirty(page);
@@ -2724,10 +2719,9 @@ EXPORT_SYMBOL(clear_page_dirty_for_io);
 int test_clear_page_writeback(struct page *page)
 {
 	struct address_space *mapping = page_mapping(page);
-	struct mem_cgroup *memcg;
 	int ret;
 
-	memcg = lock_page_memcg(page);
+	lock_page_memcg(page);
 	if (mapping) {
 		struct inode *inode = mapping->host;
 		struct backing_dev_info *bdi = inode_to_bdi(inode);
@@ -2751,21 +2745,20 @@ int test_clear_page_writeback(struct page *page)
 		ret = TestClearPageWriteback(page);
 	}
 	if (ret) {
-		mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
+		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
 		dec_zone_page_state(page, NR_WRITEBACK);
 		inc_zone_page_state(page, NR_WRITTEN);
 	}
-	unlock_page_memcg(memcg);
+	unlock_page_memcg(page);
 	return ret;
 }
 
 int __test_set_page_writeback(struct page *page, bool keep_write)
 {
 	struct address_space *mapping = page_mapping(page);
-	struct mem_cgroup *memcg;
 	int ret;
 
-	memcg = lock_page_memcg(page);
+	lock_page_memcg(page);
 	if (mapping) {
 		struct inode *inode = mapping->host;
 		struct backing_dev_info *bdi = inode_to_bdi(inode);
@@ -2793,10 +2786,10 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
 		ret = TestSetPageWriteback(page);
 	}
 	if (!ret) {
-		mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
+		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
 		inc_zone_page_state(page, NR_WRITEBACK);
 	}
-	unlock_page_memcg(memcg);
+	unlock_page_memcg(page);
 	return ret;
 
 }
diff --git a/mm/rmap.c b/mm/rmap.c
index 2871e7d3cced..02f0bfc3c80a 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1287,21 +1287,17 @@ void page_add_new_anon_rmap(struct page *page,
  */
 void page_add_file_rmap(struct page *page)
 {
-	struct mem_cgroup *memcg;
-
-	memcg = lock_page_memcg(page);
+	lock_page_memcg(page);
 	if (atomic_inc_and_test(&page->_mapcount)) {
 		__inc_zone_page_state(page, NR_FILE_MAPPED);
-		mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
+		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
 	}
-	unlock_page_memcg(memcg);
+	unlock_page_memcg(page);
 }
 
 static void page_remove_file_rmap(struct page *page)
 {
-	struct mem_cgroup *memcg;
-
-	memcg = lock_page_memcg(page);
+	lock_page_memcg(page);
 
 	/* Hugepages are not counted in NR_FILE_MAPPED for now. */
 	if (unlikely(PageHuge(page))) {
@@ -1320,12 +1316,12 @@ static void page_remove_file_rmap(struct page *page)
 	 * pte lock(a spinlock) is held, which implies preemption disabled.
 	 */
 	__dec_zone_page_state(page, NR_FILE_MAPPED);
-	mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
+	mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
 
 	if (unlikely(PageMlocked(page)))
 		clear_page_mlock(page);
 out:
-	unlock_page_memcg(memcg);
+	unlock_page_memcg(page);
 }
 
 static void page_remove_anon_compound_rmap(struct page *page)
diff --git a/mm/truncate.c b/mm/truncate.c
index 51a24f6a555d..87311af936f2 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -519,7 +519,6 @@ EXPORT_SYMBOL(invalidate_mapping_pages);
 static int
 invalidate_complete_page2(struct address_space *mapping, struct page *page)
 {
-	struct mem_cgroup *memcg;
 	unsigned long flags;
 
 	if (page->mapping != mapping)
@@ -528,15 +527,15 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
 	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
 		return 0;
 
-	memcg = lock_page_memcg(page);
+	lock_page_memcg(page);
 	spin_lock_irqsave(&mapping->tree_lock, flags);
 	if (PageDirty(page))
 		goto failed;
 
 	BUG_ON(page_has_private(page));
-	__delete_from_page_cache(page, NULL, memcg);
+	__delete_from_page_cache(page, NULL);
 	spin_unlock_irqrestore(&mapping->tree_lock, flags);
-	unlock_page_memcg(memcg);
+	unlock_page_memcg(page);
 
 	if (mapping->a_ops->freepage)
 		mapping->a_ops->freepage(page);
@@ -545,7 +544,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
 	return 1;
 failed:
 	spin_unlock_irqrestore(&mapping->tree_lock, flags);
-	unlock_page_memcg(memcg);
+	unlock_page_memcg(page);
 	return 0;
 }
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index fd434cc89bea..34f7e2dae0a0 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -603,12 +603,11 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 			    bool reclaimed)
 {
 	unsigned long flags;
-	struct mem_cgroup *memcg;
 
 	BUG_ON(!PageLocked(page));
 	BUG_ON(mapping != page_mapping(page));
 
-	memcg = lock_page_memcg(page);
+	lock_page_memcg(page);
 	spin_lock_irqsave(&mapping->tree_lock, flags);
 	/*
 	 * The non racy check for a busy page.
@@ -648,7 +647,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 		mem_cgroup_swapout(page, swap);
 		__delete_from_swap_cache(page);
 		spin_unlock_irqrestore(&mapping->tree_lock, flags);
-		unlock_page_memcg(memcg);
+		unlock_page_memcg(page);
 		swapcache_free(swap);
 	} else {
 		void (*freepage)(struct page *);
@@ -674,9 +673,9 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 		if (reclaimed && page_is_file_cache(page) &&
 		    !mapping_exiting(mapping) && !dax_mapping(mapping))
 			shadow = workingset_eviction(mapping, page);
-		__delete_from_page_cache(page, shadow, memcg);
+		__delete_from_page_cache(page, shadow);
 		spin_unlock_irqrestore(&mapping->tree_lock, flags);
-		unlock_page_memcg(memcg);
+		unlock_page_memcg(page);
 
 		if (freepage != NULL)
 			freepage(page);
@@ -686,7 +685,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 
 cannot_free:
 	spin_unlock_irqrestore(&mapping->tree_lock, flags);
-	unlock_page_memcg(memcg);
+	unlock_page_memcg(page);
 	return 0;
 }
 
diff --git a/mm/workingset.c b/mm/workingset.c
index 14bc23a7779b..6130ba0b2641 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -305,10 +305,9 @@ bool workingset_refault(void *shadow)
  */
 void workingset_activation(struct page *page)
 {
-	struct mem_cgroup *memcg;
 	struct lruvec *lruvec;
 
-	memcg = lock_page_memcg(page);
+	lock_page_memcg(page);
 	/*
 	 * Filter non-memcg pages here, e.g. unmap can call
 	 * mark_page_accessed() on VDSO pages.
@@ -316,12 +315,12 @@ void workingset_activation(struct page *page)
 	 * XXX: See workingset_refault() - this should return
 	 * root_mem_cgroup even for !CONFIG_MEMCG.
 	 */
-	if (!mem_cgroup_disabled() && !memcg)
+	if (!mem_cgroup_disabled() && !page_memcg(page))
 		goto out;
-	lruvec = mem_cgroup_zone_lruvec(page_zone(page), memcg);
+	lruvec = mem_cgroup_zone_lruvec(page_zone(page), page_memcg(page));
 	atomic_long_inc(&lruvec->inactive_age);
 out:
-	unlock_page_memcg(memcg);
+	unlock_page_memcg(page);
 }
 
 /*
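Note: the workingset.c hunk reads the binding through page_memcg(). That helper is defined in include/linux/memcontrol.h and so does not appear in this diffstat, which is limited to 'mm'. As a sketch of what it amounts to (an illustration, not part of this patch):

/*
 * Sketch of the helper used by the workingset.c hunk above; the real
 * definition lives in include/linux/memcontrol.h, outside this
 * 'mm'-limited diff.
 */
static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return page->mem_cgroup;
}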