diff options
author	Matthew Wilcox <mawilcox@microsoft.com>	2018-04-11 01:39:01 -0400
committer	Darrick J. Wong <darrick.wong@oracle.com>	2018-04-11 01:39:01 -0400
commit	fbbb4509048cf4e41a1254978859b588e0c86eab (patch)
tree	6062070516fbb8cbcbca0b5117642957ec24a181 /fs
parent	4919d42ab69a4e4601f3cd20a9540f3835e0dd48 (diff)
Export __set_page_dirty
XFS currently contains a copy-and-paste of __set_page_dirty(). Export
it from buffer.c instead.
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Acked-by: Jeff Layton <jlayton@kernel.org>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Diffstat (limited to 'fs')
 fs/buffer.c       |  3 ++-
 fs/xfs/xfs_aops.c | 15 ++-------------
 2 files changed, 4 insertions(+), 14 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 9a73924db22f..0b487cdb7124 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -594,7 +594,7 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
  *
  * The caller must hold lock_page_memcg().
  */
-static void __set_page_dirty(struct page *page, struct address_space *mapping,
+void __set_page_dirty(struct page *page, struct address_space *mapping,
 			     int warn)
 {
 	unsigned long flags;
@@ -608,6 +608,7 @@ static void __set_page_dirty(struct page *page, struct address_space *mapping,
 	}
 	spin_unlock_irqrestore(&mapping->tree_lock, flags);
 }
+EXPORT_SYMBOL_GPL(__set_page_dirty);
 
 /*
  * Add a page to the dirty page list.
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 19eadc807056..c67683ebfe68 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -1472,19 +1472,8 @@ xfs_vm_set_page_dirty(
 	newly_dirty = !TestSetPageDirty(page);
 	spin_unlock(&mapping->private_lock);
 
-	if (newly_dirty) {
-		/* sigh - __set_page_dirty() is static, so copy it here, too */
-		unsigned long flags;
-
-		spin_lock_irqsave(&mapping->tree_lock, flags);
-		if (page->mapping) { /* Race with truncate? */
-			WARN_ON_ONCE(!PageUptodate(page));
-			account_page_dirtied(page, mapping);
-			radix_tree_tag_set(&mapping->page_tree,
-					page_index(page), PAGECACHE_TAG_DIRTY);
-		}
-		spin_unlock_irqrestore(&mapping->tree_lock, flags);
-	}
+	if (newly_dirty)
+		__set_page_dirty(page, mapping, 1);
 	unlock_page_memcg(page);
 	if (newly_dirty)
 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);