author     Johannes Weiner <hannes@cmpxchg.org>            2016-03-15 17:57:22 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-03-15 19:55:16 -0400
commit     62cccb8c8e7a3ca233f49d5e7dcb1557d25465cd (patch)
tree       43a902faf461c65393a4efebf9ff9622017b92b1 /fs
parent     6a93ca8fde3cfce0f00f02281139a377c83e8d8c (diff)
mm: simplify lock_page_memcg()
Now that migration doesn't clear page->mem_cgroup of live pages anymore,
it's safe to make lock_page_memcg() and the memcg stat functions take
pages, and spare the callers from memcg objects.

[akpm@linux-foundation.org: fix warnings]
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Suggested-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Cc: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
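For illustration, a condensed before/after sketch of the caller-side pattern this
patch changes, pieced together from the fs/buffer.c hunks below (not a buildable
excerpt; unrelated lines are elided with ...):

    /* Before: lock_page_memcg() returned the memcg and callers passed it along. */
    struct mem_cgroup *memcg;

    memcg = lock_page_memcg(page);
    ...
    account_page_dirtied(page, mapping, memcg);
    __set_page_dirty(page, mapping, memcg, 1);
    unlock_page_memcg(memcg);

    /* After: the page is the only handle; callers never see a memcg object. */
    lock_page_memcg(page);
    ...
    account_page_dirtied(page, mapping);
    __set_page_dirty(page, mapping, 1);
    unlock_page_memcg(page);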
Diffstat (limited to 'fs')
-rw-r--r--  fs/buffer.c        18
-rw-r--r--  fs/xfs/xfs_aops.c   7
2 files changed, 11 insertions(+), 14 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index dc991510bb06..33be29675358 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -624,14 +624,14 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
  * The caller must hold lock_page_memcg().
  */
 static void __set_page_dirty(struct page *page, struct address_space *mapping,
-			     struct mem_cgroup *memcg, int warn)
+			     int warn)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&mapping->tree_lock, flags);
 	if (page->mapping) {	/* Race with truncate? */
 		WARN_ON_ONCE(warn && !PageUptodate(page));
-		account_page_dirtied(page, mapping, memcg);
+		account_page_dirtied(page, mapping);
 		radix_tree_tag_set(&mapping->page_tree,
 				page_index(page), PAGECACHE_TAG_DIRTY);
 	}
@@ -666,7 +666,6 @@ static void __set_page_dirty(struct page *page, struct address_space *mapping,
 int __set_page_dirty_buffers(struct page *page)
 {
 	int newly_dirty;
-	struct mem_cgroup *memcg;
 	struct address_space *mapping = page_mapping(page);
 
 	if (unlikely(!mapping))
@@ -686,14 +685,14 @@ int __set_page_dirty_buffers(struct page *page)
 	 * Lock out page->mem_cgroup migration to keep PageDirty
 	 * synchronized with per-memcg dirty page counters.
 	 */
-	memcg = lock_page_memcg(page);
+	lock_page_memcg(page);
 	newly_dirty = !TestSetPageDirty(page);
 	spin_unlock(&mapping->private_lock);
 
 	if (newly_dirty)
-		__set_page_dirty(page, mapping, memcg, 1);
+		__set_page_dirty(page, mapping, 1);
 
-	unlock_page_memcg(memcg);
+	unlock_page_memcg(page);
 
 	if (newly_dirty)
 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
@@ -1167,15 +1166,14 @@ void mark_buffer_dirty(struct buffer_head *bh)
 	if (!test_set_buffer_dirty(bh)) {
 		struct page *page = bh->b_page;
 		struct address_space *mapping = NULL;
-		struct mem_cgroup *memcg;
 
-		memcg = lock_page_memcg(page);
+		lock_page_memcg(page);
 		if (!TestSetPageDirty(page)) {
 			mapping = page_mapping(page);
 			if (mapping)
-				__set_page_dirty(page, mapping, memcg, 0);
+				__set_page_dirty(page, mapping, 0);
 		}
-		unlock_page_memcg(memcg);
+		unlock_page_memcg(page);
 		if (mapping)
 			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 	}
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 5f85ebc52a98..5c57b7b40728 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -1957,7 +1957,6 @@ xfs_vm_set_page_dirty(
 	loff_t			end_offset;
 	loff_t			offset;
 	int			newly_dirty;
-	struct mem_cgroup	*memcg;
 
 	if (unlikely(!mapping))
 		return !TestSetPageDirty(page);
@@ -1981,7 +1980,7 @@ xfs_vm_set_page_dirty(
 	 * Lock out page->mem_cgroup migration to keep PageDirty
 	 * synchronized with per-memcg dirty page counters.
 	 */
-	memcg = lock_page_memcg(page);
+	lock_page_memcg(page);
 	newly_dirty = !TestSetPageDirty(page);
 	spin_unlock(&mapping->private_lock);
 
@@ -1992,13 +1991,13 @@ xfs_vm_set_page_dirty(
 		spin_lock_irqsave(&mapping->tree_lock, flags);
 		if (page->mapping) {	/* Race with truncate? */
 			WARN_ON_ONCE(!PageUptodate(page));
-			account_page_dirtied(page, mapping, memcg);
+			account_page_dirtied(page, mapping);
 			radix_tree_tag_set(&mapping->page_tree,
 					page_index(page), PAGECACHE_TAG_DIRTY);
 		}
 		spin_unlock_irqrestore(&mapping->tree_lock, flags);
 	}
-	unlock_page_memcg(memcg);
+	unlock_page_memcg(page);
 	if (newly_dirty)
 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 	return newly_dirty;