author      Johannes Weiner <hannes@cmpxchg.org>            2016-03-15 17:57:04 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>  2016-03-15 19:55:16 -0400
commit      81f8c3a461d16f0355ced3d56d6d1bb5923207a1 (patch)
tree        5d821760ca548b4357221c0399b92b7154221c33 /fs
parent      0db2cb8da89d991762ec2aece45e55ceaee34664 (diff)
mm: memcontrol: generalize locking for the page->mem_cgroup binding
These patches tag the page cache radix tree eviction entries with the memcg an evicted page belonged to, thus making per-cgroup LRU reclaim work properly and be as adaptive to new cache workingsets as global reclaim already is.

This should have been part of the original thrash detection patch series, but was deferred due to the complexity of those patches.

This patch (of 5):

So far the only sites that needed to exclude charge migration to stabilize page->mem_cgroup have been per-cgroup page statistics, hence the name mem_cgroup_begin_page_stat(). But per-cgroup thrash detection will add another site that needs to ensure page->mem_cgroup lifetime.

Rename these locking functions to the more generic lock_page_memcg() and unlock_page_memcg(). Since charge migration is a cgroup1 feature only, we might be able to delete it at some point, along with these now easy-to-identify locking sites.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Suggested-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
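For orientation before the diff, here is a minimal sketch of the caller pattern this rename establishes, modeled on the fs/buffer.c hunks below. The wrapper function example_set_page_dirty() is hypothetical; lock_page_memcg(), unlock_page_memcg(), TestSetPageDirty(), __set_page_dirty() and __mark_inode_dirty() are the identifiers used in the patch, and at this point in the series lock_page_memcg() still returns the page's memcg, which unlock_page_memcg() takes back.

    /*
     * Hypothetical wrapper illustrating the locking pattern from this patch:
     * pin page->mem_cgroup against charge migration around the PageDirty
     * update so the per-memcg dirty counter stays in sync with PageDirty.
     */
    static int example_set_page_dirty(struct page *page,
                                      struct address_space *mapping)
    {
            struct mem_cgroup *memcg;
            int newly_dirty;

            memcg = lock_page_memcg(page);      /* was mem_cgroup_begin_page_stat() */
            newly_dirty = !TestSetPageDirty(page);
            if (newly_dirty)
                    __set_page_dirty(page, mapping, memcg, 1);
            unlock_page_memcg(memcg);           /* was mem_cgroup_end_page_stat() */

            if (newly_dirty)
                    __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
            return newly_dirty;
    }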
Diffstat (limited to 'fs')
-rw-r--r--   fs/buffer.c        14
-rw-r--r--   fs/xfs/xfs_aops.c   8
2 files changed, 11 insertions, 11 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index e1632abb4ca9..dc991510bb06 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -621,7 +621,7 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
  * If warn is true, then emit a warning if the page is not uptodate and has
  * not been truncated.
  *
- * The caller must hold mem_cgroup_begin_page_stat() lock.
+ * The caller must hold lock_page_memcg().
  */
 static void __set_page_dirty(struct page *page, struct address_space *mapping,
                              struct mem_cgroup *memcg, int warn)
@@ -683,17 +683,17 @@ int __set_page_dirty_buffers(struct page *page)
                 } while (bh != head);
         }
         /*
-         * Use mem_group_begin_page_stat() to keep PageDirty synchronized with
-         * per-memcg dirty page counters.
+         * Lock out page->mem_cgroup migration to keep PageDirty
+         * synchronized with per-memcg dirty page counters.
          */
-        memcg = mem_cgroup_begin_page_stat(page);
+        memcg = lock_page_memcg(page);
         newly_dirty = !TestSetPageDirty(page);
         spin_unlock(&mapping->private_lock);
 
         if (newly_dirty)
                 __set_page_dirty(page, mapping, memcg, 1);
 
-        mem_cgroup_end_page_stat(memcg);
+        unlock_page_memcg(memcg);
 
         if (newly_dirty)
                 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
@@ -1169,13 +1169,13 @@ void mark_buffer_dirty(struct buffer_head *bh)
                 struct address_space *mapping = NULL;
                 struct mem_cgroup *memcg;
 
-                memcg = mem_cgroup_begin_page_stat(page);
+                memcg = lock_page_memcg(page);
                 if (!TestSetPageDirty(page)) {
                         mapping = page_mapping(page);
                         if (mapping)
                                 __set_page_dirty(page, mapping, memcg, 0);
                 }
-                mem_cgroup_end_page_stat(memcg);
+                unlock_page_memcg(memcg);
                 if (mapping)
                         __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
         }
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index a9ebabfe7587..5f85ebc52a98 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -1978,10 +1978,10 @@ xfs_vm_set_page_dirty(
                 } while (bh != head);
         }
         /*
-         * Use mem_group_begin_page_stat() to keep PageDirty synchronized with
-         * per-memcg dirty page counters.
+         * Lock out page->mem_cgroup migration to keep PageDirty
+         * synchronized with per-memcg dirty page counters.
          */
-        memcg = mem_cgroup_begin_page_stat(page);
+        memcg = lock_page_memcg(page);
         newly_dirty = !TestSetPageDirty(page);
         spin_unlock(&mapping->private_lock);
 
@@ -1998,7 +1998,7 @@ xfs_vm_set_page_dirty(
                 }
                 spin_unlock_irqrestore(&mapping->tree_lock, flags);
         }
-        mem_cgroup_end_page_stat(memcg);
+        unlock_page_memcg(memcg);
         if (newly_dirty)
                 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
         return newly_dirty;