author    Johannes Weiner <hannes@cmpxchg.org>    2016-03-15 17:57:22 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2016-03-15 19:55:16 -0400
commit    62cccb8c8e7a3ca233f49d5e7dcb1557d25465cd (patch)
tree      43a902faf461c65393a4efebf9ff9622017b92b1 /mm/page-writeback.c
parent    6a93ca8fde3cfce0f00f02281139a377c83e8d8c (diff)
mm: simplify lock_page_memcg()
Now that migration doesn't clear page->mem_cgroup of live pages anymore,
it's safe to make lock_page_memcg() and the memcg stat functions take
pages, and spare the callers from memcg objects.

[akpm@linux-foundation.org: fix warnings]
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Suggested-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Cc: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
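For illustration, the caller-side simplification boils down to the following
minimal before/after sketch, distilled from the hunks below (the
clear_page_dirty_for_io() pattern is used as the representative caller):

    /* Before: callers had to carry the memcg handle returned by the lock. */
    struct mem_cgroup *memcg;

    memcg = lock_page_memcg(page);
    if (TestClearPageDirty(page))
            mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_DIRTY);
    unlock_page_memcg(memcg);

    /* After: the page is the only handle; the memcg lookup stays internal. */
    lock_page_memcg(page);
    if (TestClearPageDirty(page))
            mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
    unlock_page_memcg(page);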
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--    mm/page-writeback.c    49
1 file changed, 21 insertions(+), 28 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 2b5ea1271e32..d7cf2c53d125 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2414,8 +2414,7 @@ int __set_page_dirty_no_writeback(struct page *page)
  *
  * NOTE: This relies on being atomic wrt interrupts.
  */
-void account_page_dirtied(struct page *page, struct address_space *mapping,
-                          struct mem_cgroup *memcg)
+void account_page_dirtied(struct page *page, struct address_space *mapping)
 {
         struct inode *inode = mapping->host;
 
@@ -2427,7 +2426,7 @@ void account_page_dirtied(struct page *page, struct address_space *mapping,
                 inode_attach_wb(inode, page);
                 wb = inode_to_wb(inode);
 
-                mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_DIRTY);
+                mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_DIRTY);
                 __inc_zone_page_state(page, NR_FILE_DIRTY);
                 __inc_zone_page_state(page, NR_DIRTIED);
                 __inc_wb_stat(wb, WB_RECLAIMABLE);
@@ -2445,10 +2444,10 @@ EXPORT_SYMBOL(account_page_dirtied);
  * Caller must hold lock_page_memcg().
  */
 void account_page_cleaned(struct page *page, struct address_space *mapping,
-                          struct mem_cgroup *memcg, struct bdi_writeback *wb)
+                          struct bdi_writeback *wb)
 {
         if (mapping_cap_account_dirty(mapping)) {
-                mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_DIRTY);
+                mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
                 dec_zone_page_state(page, NR_FILE_DIRTY);
                 dec_wb_stat(wb, WB_RECLAIMABLE);
                 task_io_account_cancelled_write(PAGE_CACHE_SIZE);
@@ -2469,26 +2468,24 @@ void account_page_cleaned(struct page *page, struct address_space *mapping,
  */
 int __set_page_dirty_nobuffers(struct page *page)
 {
-        struct mem_cgroup *memcg;
-
-        memcg = lock_page_memcg(page);
+        lock_page_memcg(page);
         if (!TestSetPageDirty(page)) {
                 struct address_space *mapping = page_mapping(page);
                 unsigned long flags;
 
                 if (!mapping) {
-                        unlock_page_memcg(memcg);
+                        unlock_page_memcg(page);
                         return 1;
                 }
 
                 spin_lock_irqsave(&mapping->tree_lock, flags);
                 BUG_ON(page_mapping(page) != mapping);
                 WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
-                account_page_dirtied(page, mapping, memcg);
+                account_page_dirtied(page, mapping);
                 radix_tree_tag_set(&mapping->page_tree, page_index(page),
                                    PAGECACHE_TAG_DIRTY);
                 spin_unlock_irqrestore(&mapping->tree_lock, flags);
-                unlock_page_memcg(memcg);
+                unlock_page_memcg(page);
 
                 if (mapping->host) {
                         /* !PageAnon && !swapper_space */
@@ -2496,7 +2493,7 @@ int __set_page_dirty_nobuffers(struct page *page)
                 }
                 return 1;
         }
-        unlock_page_memcg(memcg);
+        unlock_page_memcg(page);
         return 0;
 }
 EXPORT_SYMBOL(__set_page_dirty_nobuffers);
@@ -2626,17 +2623,16 @@ void cancel_dirty_page(struct page *page)
         if (mapping_cap_account_dirty(mapping)) {
                 struct inode *inode = mapping->host;
                 struct bdi_writeback *wb;
-                struct mem_cgroup *memcg;
                 bool locked;
 
-                memcg = lock_page_memcg(page);
+                lock_page_memcg(page);
                 wb = unlocked_inode_to_wb_begin(inode, &locked);
 
                 if (TestClearPageDirty(page))
-                        account_page_cleaned(page, mapping, memcg, wb);
+                        account_page_cleaned(page, mapping, wb);
 
                 unlocked_inode_to_wb_end(inode, locked);
-                unlock_page_memcg(memcg);
+                unlock_page_memcg(page);
         } else {
                 ClearPageDirty(page);
         }
@@ -2667,7 +2663,6 @@ int clear_page_dirty_for_io(struct page *page)
         if (mapping && mapping_cap_account_dirty(mapping)) {
                 struct inode *inode = mapping->host;
                 struct bdi_writeback *wb;
-                struct mem_cgroup *memcg;
                 bool locked;
 
                 /*
@@ -2705,16 +2700,16 @@ int clear_page_dirty_for_io(struct page *page)
                  * always locked coming in here, so we get the desired
                  * exclusion.
                  */
-                memcg = lock_page_memcg(page);
+                lock_page_memcg(page);
                 wb = unlocked_inode_to_wb_begin(inode, &locked);
                 if (TestClearPageDirty(page)) {
-                        mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_DIRTY);
+                        mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
                         dec_zone_page_state(page, NR_FILE_DIRTY);
                         dec_wb_stat(wb, WB_RECLAIMABLE);
                         ret = 1;
                 }
                 unlocked_inode_to_wb_end(inode, locked);
-                unlock_page_memcg(memcg);
+                unlock_page_memcg(page);
                 return ret;
         }
         return TestClearPageDirty(page);
@@ -2724,10 +2719,9 @@ EXPORT_SYMBOL(clear_page_dirty_for_io);
 int test_clear_page_writeback(struct page *page)
 {
         struct address_space *mapping = page_mapping(page);
-        struct mem_cgroup *memcg;
         int ret;
 
-        memcg = lock_page_memcg(page);
+        lock_page_memcg(page);
         if (mapping) {
                 struct inode *inode = mapping->host;
                 struct backing_dev_info *bdi = inode_to_bdi(inode);
@@ -2751,21 +2745,20 @@ int test_clear_page_writeback(struct page *page)
                 ret = TestClearPageWriteback(page);
         }
         if (ret) {
-                mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
+                mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
                 dec_zone_page_state(page, NR_WRITEBACK);
                 inc_zone_page_state(page, NR_WRITTEN);
         }
-        unlock_page_memcg(memcg);
+        unlock_page_memcg(page);
         return ret;
 }
 
 int __test_set_page_writeback(struct page *page, bool keep_write)
 {
         struct address_space *mapping = page_mapping(page);
-        struct mem_cgroup *memcg;
         int ret;
 
-        memcg = lock_page_memcg(page);
+        lock_page_memcg(page);
         if (mapping) {
                 struct inode *inode = mapping->host;
                 struct backing_dev_info *bdi = inode_to_bdi(inode);
@@ -2793,10 +2786,10 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
                 ret = TestSetPageWriteback(page);
         }
         if (!ret) {
-                mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
+                mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
                 inc_zone_page_state(page, NR_WRITEBACK);
         }
-        unlock_page_memcg(memcg);
+        unlock_page_memcg(page);
         return ret;
 
 }