author	Johannes Weiner <hannes@cmpxchg.org>	2016-03-15 17:57:04 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-03-15 19:55:16 -0400
commit	81f8c3a461d16f0355ced3d56d6d1bb5923207a1 (patch)
tree	5d821760ca548b4357221c0399b92b7154221c33 /mm/truncate.c
parent	0db2cb8da89d991762ec2aece45e55ceaee34664 (diff)
mm: memcontrol: generalize locking for the page->mem_cgroup binding
These patches tag the page cache radix tree eviction entries with the memcg an evicted page belonged to, thus making per-cgroup LRU reclaim work properly and be as adaptive to new cache workingsets as global reclaim already is.

This should have been part of the original thrash detection patch series, but was deferred due to the complexity of those patches.

This patch (of 5):

So far, the only sites that needed to exclude charge migration to stabilize page->mem_cgroup have been per-cgroup page statistics, hence the name mem_cgroup_begin_page_stat(). But per-cgroup thrash detection will add another site that needs to keep page->mem_cgroup stable.

Rename these locking functions to the more generic lock_page_memcg() and unlock_page_memcg(). Since charge migration is a cgroup1-only feature, we might be able to delete it at some point, and these now-easy-to-identify locking sites along with it.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Suggested-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
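As a rough sketch of the caller pattern this rename targets (illustrative only, not part of the patch; the function name example_update_page_state and its body comments are hypothetical), a site that must keep page->mem_cgroup stable brackets its access like this. At this point in the series, lock_page_memcg() still returns the memcg it pinned, which the caller hands back to unlock_page_memcg():

#include <linux/memcontrol.h>
#include <linux/mm_types.h>

/*
 * Hypothetical caller sketch, not from this patch: the locking pattern
 * the renamed API provides.
 */
static void example_update_page_state(struct page *page)
{
	struct mem_cgroup *memcg;

	/* Exclude charge migration so page->mem_cgroup cannot change. */
	memcg = lock_page_memcg(page);

	/* ... read or update state keyed by page->mem_cgroup ... */

	unlock_page_memcg(memcg);
}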
Diffstat (limited to 'mm/truncate.c')
-rw-r--r--	mm/truncate.c	6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/mm/truncate.c b/mm/truncate.c
index e3ee0e27cd17..51a24f6a555d 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -528,7 +528,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
 	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
 		return 0;
 
-	memcg = mem_cgroup_begin_page_stat(page);
+	memcg = lock_page_memcg(page);
 	spin_lock_irqsave(&mapping->tree_lock, flags);
 	if (PageDirty(page))
 		goto failed;
@@ -536,7 +536,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
 	BUG_ON(page_has_private(page));
 	__delete_from_page_cache(page, NULL, memcg);
 	spin_unlock_irqrestore(&mapping->tree_lock, flags);
-	mem_cgroup_end_page_stat(memcg);
+	unlock_page_memcg(memcg);
 
 	if (mapping->a_ops->freepage)
 		mapping->a_ops->freepage(page);
@@ -545,7 +545,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
 	return 1;
 failed:
 	spin_unlock_irqrestore(&mapping->tree_lock, flags);
-	mem_cgroup_end_page_stat(memcg);
+	unlock_page_memcg(memcg);
 	return 0;
 }
 