about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
authorJohannes Weiner <hannes@cmpxchg.org>2016-03-15 17:57:25 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-03-15 19:55:16 -0400
commitfdf1cdb91b6ab7a8a91df68c384f36b8a0909cab (patch)
tree828ad38946acad9990dcc733a10ebdeed0c1fbd5 /mm
parent62cccb8c8e7a3ca233f49d5e7dcb1557d25465cd (diff)
mm: remove unnecessary uses of lock_page_memcg()
There are several users that nest lock_page_memcg() inside lock_page() to prevent page->mem_cgroup from changing. But the page lock prevents pages from moving between cgroups, so that is unnecessary overhead. Remove lock_page_memcg() in contexts with locked pages and fix the debug code in the page stat functions to be okay with the page lock.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Cc: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/filemap.c7
-rw-r--r--mm/page-writeback.c2
-rw-r--r--mm/truncate.c3
-rw-r--r--mm/vmscan.c4
4 files changed, 1 insertion(+), 15 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 8e629c4ef0c8..61b441b191ad 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -176,8 +176,7 @@ static void page_cache_tree_delete(struct address_space *mapping,
176/* 176/*
177 * Delete a page from the page cache and free it. Caller has to make 177 * Delete a page from the page cache and free it. Caller has to make
178 * sure the page is locked and that nobody else uses it - or that usage 178 * sure the page is locked and that nobody else uses it - or that usage
179 * is safe. The caller must hold the mapping's tree_lock and 179 * is safe. The caller must hold the mapping's tree_lock.
180 * lock_page_memcg().
181 */ 180 */
182void __delete_from_page_cache(struct page *page, void *shadow) 181void __delete_from_page_cache(struct page *page, void *shadow)
183{ 182{
@@ -260,11 +259,9 @@ void delete_from_page_cache(struct page *page)
260 259
261 freepage = mapping->a_ops->freepage; 260 freepage = mapping->a_ops->freepage;
262 261
263 lock_page_memcg(page);
264 spin_lock_irqsave(&mapping->tree_lock, flags); 262 spin_lock_irqsave(&mapping->tree_lock, flags);
265 __delete_from_page_cache(page, NULL); 263 __delete_from_page_cache(page, NULL);
266 spin_unlock_irqrestore(&mapping->tree_lock, flags); 264 spin_unlock_irqrestore(&mapping->tree_lock, flags);
267 unlock_page_memcg(page);
268 265
269 if (freepage) 266 if (freepage)
270 freepage(page); 267 freepage(page);
@@ -557,7 +554,6 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
557 new->mapping = mapping; 554 new->mapping = mapping;
558 new->index = offset; 555 new->index = offset;
559 556
560 lock_page_memcg(old);
561 spin_lock_irqsave(&mapping->tree_lock, flags); 557 spin_lock_irqsave(&mapping->tree_lock, flags);
562 __delete_from_page_cache(old, NULL); 558 __delete_from_page_cache(old, NULL);
563 error = radix_tree_insert(&mapping->page_tree, offset, new); 559 error = radix_tree_insert(&mapping->page_tree, offset, new);
@@ -572,7 +568,6 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
572 if (PageSwapBacked(new)) 568 if (PageSwapBacked(new))
573 __inc_zone_page_state(new, NR_SHMEM); 569 __inc_zone_page_state(new, NR_SHMEM);
574 spin_unlock_irqrestore(&mapping->tree_lock, flags); 570 spin_unlock_irqrestore(&mapping->tree_lock, flags);
575 unlock_page_memcg(old);
576 mem_cgroup_migrate(old, new); 571 mem_cgroup_migrate(old, new);
577 radix_tree_preload_end(); 572 radix_tree_preload_end();
578 if (freepage) 573 if (freepage)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index d7cf2c53d125..11ff8f758631 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2700,7 +2700,6 @@ int clear_page_dirty_for_io(struct page *page)
2700 * always locked coming in here, so we get the desired 2700 * always locked coming in here, so we get the desired
2701 * exclusion. 2701 * exclusion.
2702 */ 2702 */
2703 lock_page_memcg(page);
2704 wb = unlocked_inode_to_wb_begin(inode, &locked); 2703 wb = unlocked_inode_to_wb_begin(inode, &locked);
2705 if (TestClearPageDirty(page)) { 2704 if (TestClearPageDirty(page)) {
2706 mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY); 2705 mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
@@ -2709,7 +2708,6 @@ int clear_page_dirty_for_io(struct page *page)
2709 ret = 1; 2708 ret = 1;
2710 } 2709 }
2711 unlocked_inode_to_wb_end(inode, locked); 2710 unlocked_inode_to_wb_end(inode, locked);
2712 unlock_page_memcg(page);
2713 return ret; 2711 return ret;
2714 } 2712 }
2715 return TestClearPageDirty(page); 2713 return TestClearPageDirty(page);
diff --git a/mm/truncate.c b/mm/truncate.c
index 87311af936f2..7598b552ae03 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -527,7 +527,6 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
527 if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL)) 527 if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
528 return 0; 528 return 0;
529 529
530 lock_page_memcg(page);
531 spin_lock_irqsave(&mapping->tree_lock, flags); 530 spin_lock_irqsave(&mapping->tree_lock, flags);
532 if (PageDirty(page)) 531 if (PageDirty(page))
533 goto failed; 532 goto failed;
@@ -535,7 +534,6 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
535 BUG_ON(page_has_private(page)); 534 BUG_ON(page_has_private(page));
536 __delete_from_page_cache(page, NULL); 535 __delete_from_page_cache(page, NULL);
537 spin_unlock_irqrestore(&mapping->tree_lock, flags); 536 spin_unlock_irqrestore(&mapping->tree_lock, flags);
538 unlock_page_memcg(page);
539 537
540 if (mapping->a_ops->freepage) 538 if (mapping->a_ops->freepage)
541 mapping->a_ops->freepage(page); 539 mapping->a_ops->freepage(page);
@@ -544,7 +542,6 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
544 return 1; 542 return 1;
545failed: 543failed:
546 spin_unlock_irqrestore(&mapping->tree_lock, flags); 544 spin_unlock_irqrestore(&mapping->tree_lock, flags);
547 unlock_page_memcg(page);
548 return 0; 545 return 0;
549} 546}
550 547
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 34f7e2dae0a0..dd984470248f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -607,7 +607,6 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
607 BUG_ON(!PageLocked(page)); 607 BUG_ON(!PageLocked(page));
608 BUG_ON(mapping != page_mapping(page)); 608 BUG_ON(mapping != page_mapping(page));
609 609
610 lock_page_memcg(page);
611 spin_lock_irqsave(&mapping->tree_lock, flags); 610 spin_lock_irqsave(&mapping->tree_lock, flags);
612 /* 611 /*
613 * The non racy check for a busy page. 612 * The non racy check for a busy page.
@@ -647,7 +646,6 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
647 mem_cgroup_swapout(page, swap); 646 mem_cgroup_swapout(page, swap);
648 __delete_from_swap_cache(page); 647 __delete_from_swap_cache(page);
649 spin_unlock_irqrestore(&mapping->tree_lock, flags); 648 spin_unlock_irqrestore(&mapping->tree_lock, flags);
650 unlock_page_memcg(page);
651 swapcache_free(swap); 649 swapcache_free(swap);
652 } else { 650 } else {
653 void (*freepage)(struct page *); 651 void (*freepage)(struct page *);
@@ -675,7 +673,6 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
675 shadow = workingset_eviction(mapping, page); 673 shadow = workingset_eviction(mapping, page);
676 __delete_from_page_cache(page, shadow); 674 __delete_from_page_cache(page, shadow);
677 spin_unlock_irqrestore(&mapping->tree_lock, flags); 675 spin_unlock_irqrestore(&mapping->tree_lock, flags);
678 unlock_page_memcg(page);
679 676
680 if (freepage != NULL) 677 if (freepage != NULL)
681 freepage(page); 678 freepage(page);
@@ -685,7 +682,6 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
685 682
686cannot_free: 683cannot_free:
687 spin_unlock_irqrestore(&mapping->tree_lock, flags); 684 spin_unlock_irqrestore(&mapping->tree_lock, flags);
688 unlock_page_memcg(page);
689 return 0; 685 return 0;
690} 686}
691 687