author     Filipe Manana <fdmanana@suse.com>  2014-10-10 04:43:11 -0400
committer  Chris Mason <clm@fb.com>           2014-11-20 20:14:28 -0500
commit     728404dacfddb5364d7256d821a2ea482159cbe7 (patch)
tree       adb45931cbe6104ad888f4cc2d43aa0951f8b8d0
parent     075bdbdbe9f21d68950ba5b187f80a4a23105365 (diff)
Btrfs: add helper btrfs_fdatawrite_range
Add this helper to avoid duplicating the double filemap_fdatawrite_range() call needed for inodes with async extents (compressed writes) at so many call sites.

Signed-off-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Chris Mason <clm@fb.com>
-rw-r--r--  fs/btrfs/ctree.h         |  1
-rw-r--r--  fs/btrfs/file.c          | 39
-rw-r--r--  fs/btrfs/inode.c         |  9
-rw-r--r--  fs/btrfs/ordered-data.c  | 24
4 files changed, 34 insertions(+), 39 deletions(-)
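
For readers skimming the patch, each converted call site collapses from an open-coded double write to a single helper call. This is an editorial paraphrase of the hunks below, not part of the patch itself:

        /* Before: repeated at every call site that may have compressed writes. */
        ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
        if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
                             &BTRFS_I(inode)->runtime_flags))
                ret = filemap_fdatawrite_range(inode->i_mapping, start, end);

        /* After: the double write lives in one place. */
        ret = btrfs_fdatawrite_range(inode, start, end);
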
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index fe69edda11fb..b72b35867a7f 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -3901,6 +3901,7 @@ int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
                      struct page **pages, size_t num_pages,
                      loff_t pos, size_t write_bytes,
                      struct extent_state **cached);
+int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end);
 
 /* tree-defrag.c */
 int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index f5a868ab60f3..0fbf0e7bc606 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1676,6 +1676,7 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb,
                                     loff_t pos)
 {
         struct file *file = iocb->ki_filp;
+        struct inode *inode = file_inode(file);
         ssize_t written;
         ssize_t written_buffered;
         loff_t endbyte;
@@ -1697,13 +1698,10 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb,
          * able to read what was just written.
          */
         endbyte = pos + written_buffered - 1;
-        err = filemap_fdatawrite_range(file->f_mapping, pos, endbyte);
-        if (!err && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
-                             &BTRFS_I(file_inode(file))->runtime_flags))
-                err = filemap_fdatawrite_range(file->f_mapping, pos, endbyte);
+        err = btrfs_fdatawrite_range(inode, pos, endbyte);
         if (err)
                 goto out;
-        err = filemap_fdatawait_range(file->f_mapping, pos, endbyte);
+        err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
         if (err)
                 goto out;
         written += written_buffered;
@@ -1864,10 +1862,7 @@ static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
         int ret;
 
         atomic_inc(&BTRFS_I(inode)->sync_writers);
-        ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
-        if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
-                             &BTRFS_I(inode)->runtime_flags))
-                ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
+        ret = btrfs_fdatawrite_range(inode, start, end);
         atomic_dec(&BTRFS_I(inode)->sync_writers);
 
         return ret;
@@ -2820,3 +2815,29 @@ int btrfs_auto_defrag_init(void)
 
         return 0;
 }
+
+int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
+{
+        int ret;
+
+        /*
+         * So with compression we will find and lock a dirty page and clear the
+         * first one as dirty, setup an async extent, and immediately return
+         * with the entire range locked but with nobody actually marked with
+         * writeback. So we can't just filemap_write_and_wait_range() and
+         * expect it to work since it will just kick off a thread to do the
+         * actual work. So we need to call filemap_fdatawrite_range _again_
+         * since it will wait on the page lock, which won't be unlocked until
+         * after the pages have been marked as writeback and so we're good to go
+         * from there. We have to do this otherwise we'll miss the ordered
+         * extents and that results in badness. Please Josef, do not think you
+         * know better and pull this out at some point in the future, it is
+         * right and you are wrong.
+         */
+        ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
+        if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
+                             &BTRFS_I(inode)->runtime_flags))
+                ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
+
+        return ret;
+}
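
The comment above documents why the second filemap_fdatawrite_range() pass is needed for compressed writes. Callers that also need the writeback to complete pair the helper with a wait, as the remaining hunks show. A minimal sketch of that resulting pattern (the wrapper name flush_and_wait_range is illustrative only and does not appear in the patch):

        /* Illustrative only: the write-then-wait shape used by the converted callers. */
        static int flush_and_wait_range(struct inode *inode, loff_t start, loff_t end)
        {
                int ret;

                /* Start writeback, issuing the write twice for async (compressed) extents. */
                ret = btrfs_fdatawrite_range(inode, start, end);
                if (ret)
                        return ret;

                /* Wait for the writeback started above to finish. */
                return filemap_fdatawait_range(inode->i_mapping, start, end);
        }
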
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 01d223e22bb1..95f06936bc6e 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7015,14 +7015,7 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
                         btrfs_put_ordered_extent(ordered);
                 } else {
                         /* Screw you mmap */
-                        ret = filemap_fdatawrite_range(inode->i_mapping,
-                                                       lockstart,
-                                                       lockend);
-                        if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
-                                             &BTRFS_I(inode)->runtime_flags))
-                                ret = filemap_fdatawrite_range(inode->i_mapping,
-                                                               lockstart,
-                                                               lockend);
+                        ret = btrfs_fdatawrite_range(inode, lockstart, lockend);
                         if (ret)
                                 break;
                         ret = filemap_fdatawait_range(inode->i_mapping,
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index ac734ec4cc20..1401b1af4f06 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -725,30 +725,10 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
         /* start IO across the range first to instantiate any delalloc
          * extents
          */
-        ret = filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
+        ret = btrfs_fdatawrite_range(inode, start, orig_end);
         if (ret)
                 return ret;
-        /*
-         * So with compression we will find and lock a dirty page and clear the
-         * first one as dirty, setup an async extent, and immediately return
-         * with the entire range locked but with nobody actually marked with
-         * writeback. So we can't just filemap_write_and_wait_range() and
-         * expect it to work since it will just kick off a thread to do the
-         * actual work. So we need to call filemap_fdatawrite_range _again_
-         * since it will wait on the page lock, which won't be unlocked until
-         * after the pages have been marked as writeback and so we're good to go
-         * from there. We have to do this otherwise we'll miss the ordered
-         * extents and that results in badness. Please Josef, do not think you
-         * know better and pull this out at some point in the future, it is
-         * right and you are wrong.
-         */
-        if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
-                     &BTRFS_I(inode)->runtime_flags)) {
-                ret = filemap_fdatawrite_range(inode->i_mapping, start,
-                                               orig_end);
-                if (ret)
-                        return ret;
-        }
+
         ret = filemap_fdatawait_range(inode->i_mapping, start, orig_end);
         if (ret)
                 return ret;