Diffstat (limited to 'fs')
-rw-r--r--  fs/buffer.c       | 14
-rw-r--r--  fs/xfs/xfs_aops.c |  8
2 files changed, 11 insertions, 11 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index e1632abb4ca9..dc991510bb06 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -621,7 +621,7 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
  * If warn is true, then emit a warning if the page is not uptodate and has
  * not been truncated.
  *
- * The caller must hold mem_cgroup_begin_page_stat() lock.
+ * The caller must hold lock_page_memcg().
  */
 static void __set_page_dirty(struct page *page, struct address_space *mapping,
 			     struct mem_cgroup *memcg, int warn)
@@ -683,17 +683,17 @@ int __set_page_dirty_buffers(struct page *page)
 	} while (bh != head);
 	}
 	/*
-	 * Use mem_group_begin_page_stat() to keep PageDirty synchronized with
-	 * per-memcg dirty page counters.
+	 * Lock out page->mem_cgroup migration to keep PageDirty
+	 * synchronized with per-memcg dirty page counters.
 	 */
-	memcg = mem_cgroup_begin_page_stat(page);
+	memcg = lock_page_memcg(page);
 	newly_dirty = !TestSetPageDirty(page);
 	spin_unlock(&mapping->private_lock);
 
 	if (newly_dirty)
 		__set_page_dirty(page, mapping, memcg, 1);
 
-	mem_cgroup_end_page_stat(memcg);
+	unlock_page_memcg(memcg);
 
 	if (newly_dirty)
 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
@@ -1169,13 +1169,13 @@ void mark_buffer_dirty(struct buffer_head *bh)
 		struct address_space *mapping = NULL;
 		struct mem_cgroup *memcg;
 
-		memcg = mem_cgroup_begin_page_stat(page);
+		memcg = lock_page_memcg(page);
 		if (!TestSetPageDirty(page)) {
 			mapping = page_mapping(page);
 			if (mapping)
 				__set_page_dirty(page, mapping, memcg, 0);
 		}
-		mem_cgroup_end_page_stat(memcg);
+		unlock_page_memcg(memcg);
 		if (mapping)
 			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 	}
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index a9ebabfe7587..5f85ebc52a98 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -1978,10 +1978,10 @@ xfs_vm_set_page_dirty(
 	} while (bh != head);
 	}
 	/*
-	 * Use mem_group_begin_page_stat() to keep PageDirty synchronized with
-	 * per-memcg dirty page counters.
+	 * Lock out page->mem_cgroup migration to keep PageDirty
+	 * synchronized with per-memcg dirty page counters.
 	 */
-	memcg = mem_cgroup_begin_page_stat(page);
+	memcg = lock_page_memcg(page);
 	newly_dirty = !TestSetPageDirty(page);
 	spin_unlock(&mapping->private_lock);
 
@@ -1998,7 +1998,7 @@ xfs_vm_set_page_dirty(
 		}
 		spin_unlock_irqrestore(&mapping->tree_lock, flags);
 	}
-	mem_cgroup_end_page_stat(memcg);
+	unlock_page_memcg(memcg);
 	if (newly_dirty)
 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 	return newly_dirty;
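
For context, a minimal sketch of the calling convention the converted sites follow after this patch. The helper name example_set_page_dirty() is hypothetical and not part of the patch; it simply mirrors the __set_page_dirty_buffers() pattern in the fs/buffer.c hunks above (omitting mapping->private_lock), at a point in the API where lock_page_memcg() still returns the struct mem_cgroup * that unlock_page_memcg() takes back:

/* Illustrative sketch only, not part of this patch: the lock/dirty/unlock
 * pattern used by the converted sites in fs/buffer.c and fs/xfs/xfs_aops.c.
 * __set_page_dirty() here is the fs/buffer.c internal helper shown above. */
static int example_set_page_dirty(struct page *page,
				  struct address_space *mapping)
{
	struct mem_cgroup *memcg;
	int newly_dirty;

	/* Pin page->mem_cgroup so PageDirty stays coherent with the
	 * per-memcg dirty counters (was mem_cgroup_begin_page_stat()). */
	memcg = lock_page_memcg(page);
	newly_dirty = !TestSetPageDirty(page);
	if (newly_dirty)
		__set_page_dirty(page, mapping, memcg, 1);
	unlock_page_memcg(memcg);	/* was mem_cgroup_end_page_stat() */

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return newly_dirty;
}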