Diffstat (limited to 'fs/xfs/xfs_aops.c')
-rw-r--r--  fs/xfs/xfs_aops.c  |  8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index a9ebabfe7587..5f85ebc52a98 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -1978,10 +1978,10 @@ xfs_vm_set_page_dirty(
 		} while (bh != head);
 	}
 	/*
-	 * Use mem_group_begin_page_stat() to keep PageDirty synchronized with
-	 * per-memcg dirty page counters.
+	 * Lock out page->mem_cgroup migration to keep PageDirty
+	 * synchronized with per-memcg dirty page counters.
 	 */
-	memcg = mem_cgroup_begin_page_stat(page);
+	memcg = lock_page_memcg(page);
 	newly_dirty = !TestSetPageDirty(page);
 	spin_unlock(&mapping->private_lock);
 
@@ -1998,7 +1998,7 @@ xfs_vm_set_page_dirty(
 		}
 		spin_unlock_irqrestore(&mapping->tree_lock, flags);
 	}
-	mem_cgroup_end_page_stat(memcg);
+	unlock_page_memcg(memcg);
 	if (newly_dirty)
 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 	return newly_dirty;
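As the hunks above suggest, the rename does not change the calling convention at this point in the series: lock_page_memcg() still returns the struct mem_cgroup * that the matching unlock_page_memcg() takes, just as mem_cgroup_begin_page_stat()/mem_cgroup_end_page_stat() did. The following is a minimal sketch of that pattern, not XFS code: the function name is hypothetical, and the per-memcg dirty accounting done under mapping->tree_lock in the real function is elided.

#include <linux/fs.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * Sketch only, condensed from the hunks above: hold the page->mem_cgroup
 * binding stable while PageDirty is set so the flag stays synchronized
 * with the per-memcg dirty page counters.
 */
static int example_set_page_dirty(struct page *page)	/* hypothetical helper */
{
	struct address_space *mapping = page->mapping;
	struct mem_cgroup *memcg;
	int newly_dirty;

	/* was: memcg = mem_cgroup_begin_page_stat(page); */
	memcg = lock_page_memcg(page);
	newly_dirty = !TestSetPageDirty(page);

	/* ... per-memcg dirty accounting under mapping->tree_lock elided ... */

	/* was: mem_cgroup_end_page_stat(memcg); */
	unlock_page_memcg(memcg);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return newly_dirty;
}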