 fs/btrfs/disk-io.c          |  12
 fs/btrfs/extent_io.c        |  15
 fs/btrfs/extent_io.h        |   5
 fs/btrfs/free-space-cache.c |   4
 fs/btrfs/inode.c            | 177
 fs/btrfs/ordered-data.c     | 129
 fs/btrfs/ordered-data.h     |  13
 7 files changed, 164 insertions, 191 deletions
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index a7ffc88a7dbe..19f5b450f405 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3671,17 +3671,6 @@ int btrfs_cleanup_transaction(struct btrfs_root *root)
 	return 0;
 }
 
-static int btree_writepage_io_failed_hook(struct bio *bio, struct page *page,
-					  u64 start, u64 end,
-					  struct extent_state *state)
-{
-	struct super_block *sb = page->mapping->host->i_sb;
-	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
-	btrfs_error(fs_info, -EIO,
-		    "Error occured while writing out btree at %llu", start);
-	return -EIO;
-}
-
 static struct extent_io_ops btree_extent_io_ops = {
 	.write_cache_pages_lock_hook = btree_lock_page_hook,
 	.readpage_end_io_hook = btree_readpage_end_io_hook,
@@ -3689,5 +3678,4 @@ static struct extent_io_ops btree_extent_io_ops = {
 	.submit_bio_hook = btree_submit_bio_hook,
 	/* note we're sharing with inode.c for the merge bio hook */
 	.merge_bio_hook = btrfs_merge_bio_hook,
-	.writepage_io_failed_hook = btree_writepage_io_failed_hook,
 };
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 836fc37a437a..7af93435cee0 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1172,9 +1172,8 @@ int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
 			      cached_state, mask);
 }
 
-static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
-				 u64 end, struct extent_state **cached_state,
-				 gfp_t mask)
+int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
+			  struct extent_state **cached_state, gfp_t mask)
 {
 	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
 				cached_state, mask);
@@ -2221,17 +2220,7 @@ int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
 		uptodate = 0;
 	}
 
-	if (!uptodate && tree->ops &&
-	    tree->ops->writepage_io_failed_hook) {
-		ret = tree->ops->writepage_io_failed_hook(NULL, page,
-							  start, end, NULL);
-		/* Writeback already completed */
-		if (ret == 0)
-			return 1;
-	}
-
 	if (!uptodate) {
-		clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS);
 		ClearPageUptodate(page);
 		SetPageError(page);
 	}
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index b516c3b8dec6..4d8124b64577 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -75,9 +75,6 @@ struct extent_io_ops {
 			       unsigned long bio_flags);
 	int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
 	int (*readpage_io_failed_hook)(struct page *page, int failed_mirror);
-	int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
-					u64 start, u64 end,
-					struct extent_state *state);
 	int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
 				    struct extent_state *state, int mirror);
 	int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
@@ -225,6 +222,8 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 		   struct extent_state **cached_state, gfp_t mask);
 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
 			struct extent_state **cached_state, gfp_t mask);
+int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
+			  struct extent_state **cached_state, gfp_t mask);
 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
 		   gfp_t mask);
 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 202008ec367d..cecf8df62481 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -972,9 +972,7 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 		goto out;
 
 
-	ret = filemap_write_and_wait(inode->i_mapping);
-	if (ret)
-		goto out;
+	btrfs_wait_ordered_range(inode, 0, (u64)-1);
 
 	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
 	key.offset = offset;
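The free-space-cache.c change follows from the rest of the series: once ordered-extent completion is deferred to a worker, filemap_write_and_wait() only guarantees that page writeback has ended, not that the ordered extent's metadata updates have run, so the cache writer now has to wait on the ordered range itself. As a rough, self-contained sketch of what that wait boils down to per extent (the demo_* names are invented here, not from the patch):

/*
 * Illustrative sketch only -- not from this commit.  "Writeback finished"
 * is now a weaker condition than "ordered extent finished", so a caller
 * that needs the metadata updates must wait on the ordered extent itself.
 */
#include <linux/wait.h>
#include <linux/bitops.h>

#define DEMO_ORDERED_COMPLETE	0

struct demo_ordered {
	unsigned long flags;		/* completion bit, like BTRFS_ORDERED_COMPLETE */
	wait_queue_head_t wait;
};

static void demo_wait_ordered(struct demo_ordered *ordered)
{
	/* roughly what waiting on an ordered range boils down to per extent */
	wait_event(ordered->wait,
		   test_bit(DEMO_ORDERED_COMPLETE, &ordered->flags));
}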
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 41a62e6954c2..9a1b96fd672a 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -89,7 +89,7 @@ static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
 
 static int btrfs_setsize(struct inode *inode, loff_t newsize);
 static int btrfs_truncate(struct inode *inode);
-static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
+static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
 static noinline int cow_file_range(struct inode *inode,
 				   struct page *locked_page,
 				   u64 start, u64 end, int *page_started,
@@ -1572,11 +1572,11 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
 	if (btrfs_is_free_space_inode(root, inode))
 		metadata = 2;
 
-	ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
-	if (ret)
-		return ret;
-
 	if (!(rw & REQ_WRITE)) {
+		ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
+		if (ret)
+			return ret;
+
 		if (bio_flags & EXTENT_BIO_COMPRESSED) {
 			return btrfs_submit_compressed_read(inode, bio,
 						   mirror_num, bio_flags);
@@ -1815,25 +1815,24 @@ out:
  * an ordered extent if the range of bytes in the file it covers are
  * fully written.
  */
-static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
+static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
 {
+	struct inode *inode = ordered_extent->inode;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_trans_handle *trans = NULL;
-	struct btrfs_ordered_extent *ordered_extent = NULL;
 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
 	struct extent_state *cached_state = NULL;
 	int compress_type = 0;
 	int ret;
 	bool nolock;
 
-	ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
-					     end - start + 1);
-	if (!ret)
-		return 0;
-	BUG_ON(!ordered_extent); /* Logic error */
-
 	nolock = btrfs_is_free_space_inode(root, inode);
 
+	if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
+		ret = -EIO;
+		goto out;
+	}
+
 	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
 		BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
 		ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
@@ -1889,12 +1888,10 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
 					   ordered_extent->file_offset,
 					   ordered_extent->len);
 	}
-	unlock_extent_cached(io_tree, ordered_extent->file_offset,
-			     ordered_extent->file_offset +
-			     ordered_extent->len - 1, &cached_state, GFP_NOFS);
+
 	if (ret < 0) {
 		btrfs_abort_transaction(trans, root, ret);
-		goto out;
+		goto out_unlock;
 	}
 
 	add_pending_csums(trans, inode, ordered_extent->file_offset,
@@ -1905,10 +1902,14 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
 		ret = btrfs_update_inode_fallback(trans, root, inode);
 		if (ret) { /* -ENOMEM or corruption */
 			btrfs_abort_transaction(trans, root, ret);
-			goto out;
+			goto out_unlock;
 		}
 	}
 	ret = 0;
+out_unlock:
+	unlock_extent_cached(io_tree, ordered_extent->file_offset,
+			     ordered_extent->file_offset +
+			     ordered_extent->len - 1, &cached_state, GFP_NOFS);
 out:
 	if (root != root->fs_info->tree_root)
 		btrfs_delalloc_release_metadata(inode, ordered_extent->len);
@@ -1919,26 +1920,57 @@ out:
 		btrfs_end_transaction(trans, root);
 	}
 
+	if (ret)
+		clear_extent_uptodate(io_tree, ordered_extent->file_offset,
+				      ordered_extent->file_offset +
+				      ordered_extent->len - 1, NULL, GFP_NOFS);
+
+	/*
+	 * This needs to be dont to make sure anybody waiting knows we are done
+	 * upating everything for this ordered extent.
+	 */
+	btrfs_remove_ordered_extent(inode, ordered_extent);
+
 	/* once for us */
 	btrfs_put_ordered_extent(ordered_extent);
 	/* once for the tree */
 	btrfs_put_ordered_extent(ordered_extent);
 
-	return 0;
-out_unlock:
-	unlock_extent_cached(io_tree, ordered_extent->file_offset,
-			     ordered_extent->file_offset +
-			     ordered_extent->len - 1, &cached_state, GFP_NOFS);
-	goto out;
+	return ret;
+}
+
+static void finish_ordered_fn(struct btrfs_work *work)
+{
+	struct btrfs_ordered_extent *ordered_extent;
+	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
+	btrfs_finish_ordered_io(ordered_extent);
 }
 
 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
 				struct extent_state *state, int uptodate)
 {
+	struct inode *inode = page->mapping->host;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_ordered_extent *ordered_extent = NULL;
+	struct btrfs_workers *workers;
+
 	trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
 
 	ClearPagePrivate2(page);
-	return btrfs_finish_ordered_io(page->mapping->host, start, end);
+	if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
+					    end - start + 1, uptodate))
+		return 0;
+
+	ordered_extent->work.func = finish_ordered_fn;
+	ordered_extent->work.flags = 0;
+
+	if (btrfs_is_free_space_inode(root, inode))
+		workers = &root->fs_info->endio_freespace_worker;
+	else
+		workers = &root->fs_info->endio_write_workers;
+	btrfs_queue_worker(workers, &ordered_extent->work);
+
+	return 0;
 }
 
 /*
@@ -5909,9 +5941,7 @@ static void btrfs_endio_direct_write(struct bio *bio, int err)
 	struct btrfs_dio_private *dip = bio->bi_private;
 	struct inode *inode = dip->inode;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
-	struct btrfs_trans_handle *trans;
 	struct btrfs_ordered_extent *ordered = NULL;
-	struct extent_state *cached_state = NULL;
 	u64 ordered_offset = dip->logical_offset;
 	u64 ordered_bytes = dip->bytes;
 	int ret;
@@ -5921,73 +5951,14 @@ static void btrfs_endio_direct_write(struct bio *bio, int err)
 again:
 	ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
 						   &ordered_offset,
-						   ordered_bytes);
+						   ordered_bytes, !err);
 	if (!ret)
 		goto out_test;
 
-	BUG_ON(!ordered);
-
-	trans = btrfs_join_transaction(root);
-	if (IS_ERR(trans)) {
-		err = -ENOMEM;
-		goto out;
-	}
-	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
-
-	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
-		ret = btrfs_ordered_update_i_size(inode, 0, ordered);
-		if (!ret)
-			err = btrfs_update_inode_fallback(trans, root, inode);
-		goto out;
-	}
-
-	lock_extent_bits(&BTRFS_I(inode)->io_tree, ordered->file_offset,
-			 ordered->file_offset + ordered->len - 1, 0,
-			 &cached_state);
-
-	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
-		ret = btrfs_mark_extent_written(trans, inode,
-						ordered->file_offset,
-						ordered->file_offset +
-						ordered->len);
-		if (ret) {
-			err = ret;
-			goto out_unlock;
-		}
-	} else {
-		ret = insert_reserved_file_extent(trans, inode,
-						  ordered->file_offset,
-						  ordered->start,
-						  ordered->disk_len,
-						  ordered->len,
-						  ordered->len,
-						  0, 0, 0,
-						  BTRFS_FILE_EXTENT_REG);
-		unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
-				   ordered->file_offset, ordered->len);
-		if (ret) {
-			err = ret;
-			WARN_ON(1);
-			goto out_unlock;
-		}
-	}
-
-	add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);
-	ret = btrfs_ordered_update_i_size(inode, 0, ordered);
-	if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags))
-		btrfs_update_inode_fallback(trans, root, inode);
-	ret = 0;
-out_unlock:
-	unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset,
-			     ordered->file_offset + ordered->len - 1,
-			     &cached_state, GFP_NOFS);
-out:
-	btrfs_delalloc_release_metadata(inode, ordered->len);
-	btrfs_end_transaction(trans, root);
-	ordered_offset = ordered->file_offset + ordered->len;
-	btrfs_put_ordered_extent(ordered);
-	btrfs_put_ordered_extent(ordered);
-
+	ordered->work.func = finish_ordered_fn;
+	ordered->work.flags = 0;
+	btrfs_queue_worker(&root->fs_info->endio_write_workers,
+			   &ordered->work);
 out_test:
 	/*
 	 * our bio might span multiple ordered extents. If we haven't
@@ -5996,12 +5967,12 @@ out_test:
 	if (ordered_offset < dip->logical_offset + dip->bytes) {
 		ordered_bytes = dip->logical_offset + dip->bytes -
 			ordered_offset;
+		ordered = NULL;
 		goto again;
 	}
 out_done:
 	bio->bi_private = dip->private;
 
-	kfree(dip->csums);
 	kfree(dip);
 
 	/* If we had an error make sure to clear the uptodate flag */
@@ -6069,9 +6040,12 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
 	int ret;
 
 	bio_get(bio);
-	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
-	if (ret)
-		goto err;
+
+	if (!write) {
+		ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
+		if (ret)
+			goto err;
+	}
 
 	if (skip_sum)
 		goto map;
@@ -6491,13 +6465,13 @@ static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
 
 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
 {
+	struct inode *inode = page->mapping->host;
 	struct extent_io_tree *tree;
 	struct btrfs_ordered_extent *ordered;
 	struct extent_state *cached_state = NULL;
 	u64 page_start = page_offset(page);
 	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
 
-
 	/*
 	 * we have the page locked, so new writeback can't start,
 	 * and the dirty bit won't be cleared while we are here.
@@ -6507,13 +6481,13 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
 	 */
 	wait_on_page_writeback(page);
 
-	tree = &BTRFS_I(page->mapping->host)->io_tree;
+	tree = &BTRFS_I(inode)->io_tree;
 	if (offset) {
 		btrfs_releasepage(page, GFP_NOFS);
 		return;
 	}
 	lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
-	ordered = btrfs_lookup_ordered_extent(page->mapping->host,
+	ordered = btrfs_lookup_ordered_extent(inode,
 					   page_offset(page));
 	if (ordered) {
 		/*
@@ -6528,9 +6502,10 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
 		 * whoever cleared the private bit is responsible
 		 * for the finish_ordered_io
 		 */
-		if (TestClearPagePrivate2(page)) {
-			btrfs_finish_ordered_io(page->mapping->host,
-						page_start, page_end);
+		if (TestClearPagePrivate2(page) &&
+		    btrfs_dec_test_ordered_pending(inode, &ordered, page_start,
+						   PAGE_CACHE_SIZE, 1)) {
+			btrfs_finish_ordered_io(ordered);
 		}
 		btrfs_put_ordered_extent(ordered);
 		cached_state = NULL;
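Taken together, the inode.c hunks move all of the transaction work out of bio completion: the end_io hooks now only decrement bytes_left and, once the ordered extent is fully written, queue finish_ordered_fn() on the endio write workers, so btrfs_finish_ordered_io() always runs in process context. The patch uses btrfs's own worker pool (the embedded btrfs_work plus btrfs_queue_worker()); the sketch below shows the same hand-off pattern with the generic kernel workqueue API instead, with invented demo_* names, purely as an illustration:

/*
 * Illustrative sketch only -- not from this commit: defer completion work
 * from end_io (interrupt) context to process context, the way the patch
 * does with ordered_extent->work and btrfs_queue_worker().
 */
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

struct demo_ordered {
	u64 file_offset;
	u64 len;
	struct work_struct work;	/* embedded, like ordered->work */
};

static void demo_finish_fn(struct work_struct *work)
{
	/* recover the containing object, as finish_ordered_fn() does */
	struct demo_ordered *ordered =
		container_of(work, struct demo_ordered, work);

	/* safe to join a transaction, take sleeping locks, etc. here */
	pr_info("finishing ordered extent at %llu, len %llu\n",
		(unsigned long long)ordered->file_offset,
		(unsigned long long)ordered->len);
	kfree(ordered);
}

/* called from bio end_io context: do nothing that can sleep */
static void demo_endio(struct demo_ordered *ordered)
{
	INIT_WORK(&ordered->work, demo_finish_fn);
	schedule_work(&ordered->work);
}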
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 9565c0289164..9e138cdc36c5 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -196,7 +196,7 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
 	entry->len = len;
 	entry->disk_len = disk_len;
 	entry->bytes_left = len;
-	entry->inode = inode;
+	entry->inode = igrab(inode);
 	entry->compress_type = compress_type;
 	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
 		set_bit(type, &entry->flags);
@@ -212,12 +212,12 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
 
 	trace_btrfs_ordered_extent_add(inode, entry);
 
-	spin_lock(&tree->lock);
+	spin_lock_irq(&tree->lock);
 	node = tree_insert(&tree->tree, file_offset,
 			   &entry->rb_node);
 	if (node)
 		ordered_data_tree_panic(inode, -EEXIST, file_offset);
-	spin_unlock(&tree->lock);
+	spin_unlock_irq(&tree->lock);
 
 	spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
 	list_add_tail(&entry->root_extent_list,
@@ -264,9 +264,9 @@ void btrfs_add_ordered_sum(struct inode *inode,
 	struct btrfs_ordered_inode_tree *tree;
 
 	tree = &BTRFS_I(inode)->ordered_tree;
-	spin_lock(&tree->lock);
+	spin_lock_irq(&tree->lock);
 	list_add_tail(&sum->list, &entry->list);
-	spin_unlock(&tree->lock);
+	spin_unlock_irq(&tree->lock);
 }
 
 /*
@@ -283,18 +283,19 @@ void btrfs_add_ordered_sum(struct inode *inode,
  */
 int btrfs_dec_test_first_ordered_pending(struct inode *inode,
 				   struct btrfs_ordered_extent **cached,
-				   u64 *file_offset, u64 io_size)
+				   u64 *file_offset, u64 io_size, int uptodate)
 {
 	struct btrfs_ordered_inode_tree *tree;
 	struct rb_node *node;
 	struct btrfs_ordered_extent *entry = NULL;
 	int ret;
+	unsigned long flags;
 	u64 dec_end;
 	u64 dec_start;
 	u64 to_dec;
 
 	tree = &BTRFS_I(inode)->ordered_tree;
-	spin_lock(&tree->lock);
+	spin_lock_irqsave(&tree->lock, flags);
 	node = tree_search(tree, *file_offset);
 	if (!node) {
 		ret = 1;
@@ -323,6 +324,9 @@ int btrfs_dec_test_first_ordered_pending(struct inode *inode,
 			       (unsigned long long)to_dec);
 	}
 	entry->bytes_left -= to_dec;
+	if (!uptodate)
+		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);
+
 	if (entry->bytes_left == 0)
 		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
 	else
@@ -332,7 +336,7 @@ out:
 		*cached = entry;
 		atomic_inc(&entry->refs);
 	}
-	spin_unlock(&tree->lock);
+	spin_unlock_irqrestore(&tree->lock, flags);
 	return ret == 0;
 }
 
@@ -347,15 +351,21 @@ out:
  */
 int btrfs_dec_test_ordered_pending(struct inode *inode,
 				   struct btrfs_ordered_extent **cached,
-				   u64 file_offset, u64 io_size)
+				   u64 file_offset, u64 io_size, int uptodate)
 {
 	struct btrfs_ordered_inode_tree *tree;
 	struct rb_node *node;
 	struct btrfs_ordered_extent *entry = NULL;
+	unsigned long flags;
 	int ret;
 
 	tree = &BTRFS_I(inode)->ordered_tree;
-	spin_lock(&tree->lock);
+	spin_lock_irqsave(&tree->lock, flags);
+	if (cached && *cached) {
+		entry = *cached;
+		goto have_entry;
+	}
+
 	node = tree_search(tree, file_offset);
 	if (!node) {
 		ret = 1;
@@ -363,6 +373,7 @@ int btrfs_dec_test_ordered_pending(struct inode *inode,
 	}
 
 	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
+have_entry:
 	if (!offset_in_entry(entry, file_offset)) {
 		ret = 1;
 		goto out;
@@ -374,6 +385,9 @@ int btrfs_dec_test_ordered_pending(struct inode *inode,
 			       (unsigned long long)io_size);
 	}
 	entry->bytes_left -= io_size;
+	if (!uptodate)
+		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);
+
 	if (entry->bytes_left == 0)
 		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
 	else
@@ -383,7 +397,7 @@ out:
 		*cached = entry;
 		atomic_inc(&entry->refs);
 	}
-	spin_unlock(&tree->lock);
+	spin_unlock_irqrestore(&tree->lock, flags);
 	return ret == 0;
 }
 
@@ -399,6 +413,8 @@ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
 	trace_btrfs_ordered_extent_put(entry->inode, entry);
 
 	if (atomic_dec_and_test(&entry->refs)) {
+		if (entry->inode)
+			btrfs_add_delayed_iput(entry->inode);
 		while (!list_empty(&entry->list)) {
 			cur = entry->list.next;
 			sum = list_entry(cur, struct btrfs_ordered_sum, list);
@@ -411,21 +427,22 @@ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
 
 /*
  * remove an ordered extent from the tree. No references are dropped
- * and you must wake_up entry->wait. You must hold the tree lock
- * while you call this function.
+ * and waiters are woken up.
  */
-static void __btrfs_remove_ordered_extent(struct inode *inode,
-					  struct btrfs_ordered_extent *entry)
+void btrfs_remove_ordered_extent(struct inode *inode,
+				 struct btrfs_ordered_extent *entry)
 {
 	struct btrfs_ordered_inode_tree *tree;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct rb_node *node;
 
 	tree = &BTRFS_I(inode)->ordered_tree;
+	spin_lock_irq(&tree->lock);
 	node = &entry->rb_node;
 	rb_erase(node, &tree->tree);
 	tree->last = NULL;
 	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
+	spin_unlock_irq(&tree->lock);
 
 	spin_lock(&root->fs_info->ordered_extent_lock);
 	list_del_init(&entry->root_extent_list);
@@ -442,21 +459,6 @@ static void __btrfs_remove_ordered_extent(struct inode *inode,
 		list_del_init(&BTRFS_I(inode)->ordered_operations);
 	}
 	spin_unlock(&root->fs_info->ordered_extent_lock);
-}
-
-/*
- * remove an ordered extent from the tree. No references are dropped
- * but any waiters are woken.
- */
-void btrfs_remove_ordered_extent(struct inode *inode,
-				 struct btrfs_ordered_extent *entry)
-{
-	struct btrfs_ordered_inode_tree *tree;
-
-	tree = &BTRFS_I(inode)->ordered_tree;
-	spin_lock(&tree->lock);
-	__btrfs_remove_ordered_extent(inode, entry);
-	spin_unlock(&tree->lock);
 	wake_up(&entry->wait);
 }
 
@@ -663,7 +665,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
 	struct btrfs_ordered_extent *entry = NULL;
 
 	tree = &BTRFS_I(inode)->ordered_tree;
-	spin_lock(&tree->lock);
+	spin_lock_irq(&tree->lock);
 	node = tree_search(tree, file_offset);
 	if (!node)
 		goto out;
@@ -674,7 +676,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
 	if (entry)
 		atomic_inc(&entry->refs);
 out:
-	spin_unlock(&tree->lock);
+	spin_unlock_irq(&tree->lock);
 	return entry;
 }
 
@@ -690,7 +692,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
 	struct btrfs_ordered_extent *entry = NULL;
 
 	tree = &BTRFS_I(inode)->ordered_tree;
-	spin_lock(&tree->lock);
+	spin_lock_irq(&tree->lock);
 	node = tree_search(tree, file_offset);
 	if (!node) {
 		node = tree_search(tree, file_offset + len);
@@ -715,7 +717,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
 out:
 	if (entry)
 		atomic_inc(&entry->refs);
-	spin_unlock(&tree->lock);
+	spin_unlock_irq(&tree->lock);
 	return entry;
 }
 
@@ -731,7 +733,7 @@ btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
 	struct btrfs_ordered_extent *entry = NULL;
 
 	tree = &BTRFS_I(inode)->ordered_tree;
-	spin_lock(&tree->lock);
+	spin_lock_irq(&tree->lock);
 	node = tree_search(tree, file_offset);
 	if (!node)
 		goto out;
@@ -739,7 +741,7 @@ btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
 	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 	atomic_inc(&entry->refs);
 out:
-	spin_unlock(&tree->lock);
+	spin_unlock_irq(&tree->lock);
 	return entry;
 }
 
@@ -765,7 +767,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
 	else
 		offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);
 
-	spin_lock(&tree->lock);
+	spin_lock_irq(&tree->lock);
 	disk_i_size = BTRFS_I(inode)->disk_i_size;
 
 	/* truncate file */
@@ -803,15 +805,18 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
 		}
 		node = prev;
 	}
-	while (node) {
+	for (; node; node = rb_prev(node)) {
 		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
+
+		/* We treat this entry as if it doesnt exist */
+		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
+			continue;
 		if (test->file_offset + test->len <= disk_i_size)
 			break;
 		if (test->file_offset >= i_size)
 			break;
 		if (test->file_offset >= disk_i_size)
 			goto out;
-		node = rb_prev(node);
 	}
 	new_i_size = min_t(u64, offset, i_size);
 
@@ -829,17 +834,27 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
 		else
 			node = rb_first(&tree->tree);
 	}
-	i_size_test = 0;
-	if (node) {
-		/*
-		 * do we have an area where IO might have finished
-		 * between our ordered extent and the next one.
-		 */
+
+	/*
+	 * We are looking for an area between our current extent and the next
+	 * ordered extent to update the i_size to. There are 3 cases here
+	 *
+	 * 1) We don't actually have anything and we can update to i_size.
+	 * 2) We have stuff but they already did their i_size update so again we
+	 * can just update to i_size.
+	 * 3) We have an outstanding ordered extent so the most we can update
+	 * our disk_i_size to is the start of the next offset.
+	 */
+	i_size_test = i_size;
+	for (; node; node = rb_next(node)) {
 		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
-		if (test->file_offset > offset)
+
+		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
+			continue;
+		if (test->file_offset > offset) {
 			i_size_test = test->file_offset;
-	} else {
-		i_size_test = i_size;
+			break;
+		}
 	}
 
 	/*
@@ -853,15 +868,15 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
 	ret = 0;
 out:
 	/*
-	 * we need to remove the ordered extent with the tree lock held
-	 * so that other people calling this function don't find our fully
-	 * processed ordered entry and skip updating the i_size
+	 * We need to do this because we can't remove ordered extents until
+	 * after the i_disk_size has been updated and then the inode has been
+	 * updated to reflect the change, so we need to tell anybody who finds
+	 * this ordered extent that we've already done all the real work, we
+	 * just haven't completed all the other work.
 	 */
 	if (ordered)
-		__btrfs_remove_ordered_extent(inode, ordered);
-	spin_unlock(&tree->lock);
-	if (ordered)
-		wake_up(&ordered->wait);
+		set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
+	spin_unlock_irq(&tree->lock);
 	return ret;
 }
 
@@ -886,7 +901,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
 	if (!ordered)
 		return 1;
 
-	spin_lock(&tree->lock);
+	spin_lock_irq(&tree->lock);
 	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
 		if (disk_bytenr >= ordered_sum->bytenr) {
 			num_sectors = ordered_sum->len / sectorsize;
@@ -901,7 +916,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
 		}
 	}
 out:
-	spin_unlock(&tree->lock);
+	spin_unlock_irq(&tree->lock);
 	btrfs_put_ordered_extent(ordered);
 	return ret;
 }
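All of the tree->lock conversions above exist because btrfs_dec_test_ordered_pending() and btrfs_dec_test_first_ordered_pending() can now be called straight from bio end_io, i.e. possibly in interrupt context, so every other acquirer of the lock must disable interrupts or the end_io path could deadlock on a lock held by the code it interrupted. A minimal, self-contained sketch of that rule (identifiers invented for illustration, not from the patch):

/*
 * Illustrative sketch only.  Once a lock may be taken from interrupt
 * context, every other acquisition has to disable interrupts too.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_tree_lock);

/* process context, interrupts known to be enabled: _irq is enough */
static void demo_lookup(void)
{
	spin_lock_irq(&demo_tree_lock);
	/* ... walk the rbtree, grab a reference ... */
	spin_unlock_irq(&demo_tree_lock);
}

/* may run in any context, including end_io: save and restore irq state */
static int demo_dec_test(unsigned long to_dec, unsigned long *bytes_left)
{
	unsigned long flags;
	int done;

	spin_lock_irqsave(&demo_tree_lock, flags);
	*bytes_left -= to_dec;		/* like entry->bytes_left -= to_dec */
	done = (*bytes_left == 0);
	spin_unlock_irqrestore(&demo_tree_lock, flags);
	return done;
}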
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index c355ad4dc1a6..e03c560d2997 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -74,6 +74,12 @@ struct btrfs_ordered_sum {
 
 #define BTRFS_ORDERED_DIRECT 5 /* set when we're doing DIO with this extent */
 
+#define BTRFS_ORDERED_IOERR 6 /* We had an io error when writing this out */
+
+#define BTRFS_ORDERED_UPDATED_ISIZE 7 /* indicates wether this ordered extent
+				       * has done its due diligence in updating
+				       * the isize. */
+
 struct btrfs_ordered_extent {
 	/* logical offset in the file */
 	u64 file_offset;
@@ -113,6 +119,8 @@ struct btrfs_ordered_extent {
 
 	/* a per root list of all the pending ordered extents */
 	struct list_head root_extent_list;
+
+	struct btrfs_work work;
 };
 
 
@@ -143,10 +151,11 @@ void btrfs_remove_ordered_extent(struct inode *inode,
 			       struct btrfs_ordered_extent *entry);
 int btrfs_dec_test_ordered_pending(struct inode *inode,
 				   struct btrfs_ordered_extent **cached,
-				   u64 file_offset, u64 io_size);
+				   u64 file_offset, u64 io_size, int uptodate);
 int btrfs_dec_test_first_ordered_pending(struct inode *inode,
 				   struct btrfs_ordered_extent **cached,
-				   u64 *file_offset, u64 io_size);
+				   u64 *file_offset, u64 io_size,
+				   int uptodate);
 int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
 			     u64 start, u64 len, u64 disk_len, int type);
 int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
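Two of the header additions follow directly from making completion asynchronous: the embedded btrfs_work is what the end_io hooks queue, and the ordered extent now pins its inode (igrab() in __btrfs_add_ordered_extent(), btrfs_add_delayed_iput() on the final put) because nothing else is guaranteed to hold the inode by the time the deferred work runs. A rough sketch of that lifetime rule, with hypothetical names and a plain iput() standing in for the delayed iput the patch uses:

/*
 * Illustrative sketch only: an object that outlives the I/O that created
 * it must hold its own inode reference.
 */
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/atomic.h>

struct demo_entry {
	struct inode *inode;
	atomic_t refs;
};

static struct demo_entry *demo_entry_alloc(struct inode *inode)
{
	struct demo_entry *entry = kzalloc(sizeof(*entry), GFP_NOFS);

	if (!entry)
		return NULL;
	/* igrab() returns NULL if the inode is already being freed */
	entry->inode = igrab(inode);
	atomic_set(&entry->refs, 1);
	return entry;
}

static void demo_entry_put(struct demo_entry *entry)
{
	if (atomic_dec_and_test(&entry->refs)) {
		if (entry->inode)
			iput(entry->inode);	/* the patch defers this via
						 * btrfs_add_delayed_iput() */
		kfree(entry);
	}
}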