author		Chris Mason <chris.mason@oracle.com>	2009-09-11 12:27:37 -0400
committer	Chris Mason <chris.mason@oracle.com>	2009-09-11 13:31:07 -0400
commit		a1ed835e1ab5795f91b198d08c43e2f56848dcf3 (patch)
tree		ac3b370823fa76c5be7698e3663306badbbd622d	/fs/btrfs/file.c
parent		8b62b72b26bcd72082c4a69d179dd906bcc22200 (diff)
Btrfs: Fix extent replacement race
Data COW means that whenever we write to a file, we replace any old extent pointers with new ones. There was a window in which a readpage could find the old extent pointers on disk and cache them in the extent_map tree in RAM while a write was replacing them. Even though both the readpage and the write had their respective bytes in the file locked, the extent inserts done by readpage may cover more bytes than it had locked down.

This commit closes the race by keeping the new extent pinned in the extent_map tree until after the on-disk btree is properly set up with the new extent pointers.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
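As a conceptual illustration (not part of the patch), the sketch below is a minimal user-space model of the pinning idea. The names struct toy_em, TOY_FLAG_PINNED and toy_drop_extent_cache are invented for this example; only the skip_pinned behaviour mirrors what btrfs_drop_extent_cache does in the patch. A mapping flagged as pinned survives a cache drop, which is how the fix keeps the replacement extent visible until the btree holds the new pointers.

/*
 * User-space toy model, not kernel code: a pinned mapping must survive
 * a cache drop until the on-disk btree has the new extent pointers.
 * All names here are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

#define TOY_FLAG_PINNED 0x1

struct toy_em {
	unsigned long start;
	unsigned long len;
	unsigned long flags;
	bool cached;
};

/* Drop cached mappings overlapping [start, end]; optionally skip pinned ones. */
static void toy_drop_extent_cache(struct toy_em *ems, int n,
				  unsigned long start, unsigned long end,
				  int skip_pinned)
{
	for (int i = 0; i < n; i++) {
		struct toy_em *em = &ems[i];

		if (!em->cached || em->start > end ||
		    em->start + em->len - 1 < start)
			continue;
		if (skip_pinned && (em->flags & TOY_FLAG_PINNED))
			continue;	/* replacement extent stays cached */
		em->cached = false;	/* stale mapping is evicted */
	}
}

int main(void)
{
	struct toy_em ems[] = {
		{ .start = 0,    .len = 4096, .flags = 0,               .cached = true },
		{ .start = 4096, .len = 4096, .flags = TOY_FLAG_PINNED, .cached = true },
	};

	/* A concurrent drop over the whole range must not evict the pinned mapping. */
	toy_drop_extent_cache(ems, 2, 0, 8191, 1);
	printf("stale cached=%d pinned cached=%d\n", ems[0].cached, ems[1].cached);
	return 0;
}

Running it prints "stale cached=0 pinned cached=1": only the un-pinned mapping is evicted, while the pinned one stays until the caller clears the flag.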
Diffstat (limited to 'fs/btrfs/file.c')
-rw-r--r--	fs/btrfs/file.c	8
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index ef66c3d989b9..4123db9d5141 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -177,10 +177,10 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
 		}
 		flags = em->flags;
 		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
-			write_unlock(&em_tree->lock);
 			if (em->start <= start &&
 			    (!testend || em->start + em->len >= start + len)) {
 				free_extent_map(em);
+				write_unlock(&em_tree->lock);
 				break;
 			}
 			if (start < em->start) {
@@ -190,6 +190,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
 				start = em->start + em->len;
 			}
 			free_extent_map(em);
+			write_unlock(&em_tree->lock);
 			continue;
 		}
 		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
@@ -269,7 +270,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
 noinline int btrfs_drop_extents(struct btrfs_trans_handle *trans,
 		       struct btrfs_root *root, struct inode *inode,
 		       u64 start, u64 end, u64 locked_end,
-		       u64 inline_limit, u64 *hint_byte)
+		       u64 inline_limit, u64 *hint_byte, int drop_cache)
 {
 	u64 extent_end = 0;
 	u64 search_start = start;
@@ -294,7 +295,8 @@ noinline int btrfs_drop_extents(struct btrfs_trans_handle *trans,
 	int ret;
 
 	inline_limit = 0;
-	btrfs_drop_extent_cache(inode, start, end - 1, 0);
+	if (drop_cache)
+		btrfs_drop_extent_cache(inode, start, end - 1, 0);
 
 	path = btrfs_alloc_path();
 	if (!path)