author     Chris Mason <chris.mason@oracle.com>   2008-07-17 12:54:40 -0400
committer  Chris Mason <chris.mason@oracle.com>   2008-09-25 11:04:04 -0400
commit     ee6e6504e147a59a9f4d582662c105e9d72ae638 (patch)
tree       be932cc04abb860ed76ab8d4c4c28353ed50bebb /fs/btrfs/file.c
parent     ba1da2f442ec91a1534afa893f9bef7e33056ace (diff)
Add a per-inode lock around btrfs_drop_extents
btrfs_drop_extents is always called with a range lock held on the inode. But it may operate on extents outside that range as it drops and splits them.

This patch adds a per-inode mutex that is held while calling btrfs_drop_extents and while inserting new extents into the tree. It prevents races between two procs working against adjacent ranges in the tree.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
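The locking shape is small but easy to get wrong, so here is a minimal userspace sketch of the same idea, assuming nothing beyond POSIX threads: a per-"inode" mutex is held across the whole drop-then-insert sequence so that two threads replacing adjacent ranges never interleave. The names fake_inode, drop_extents, insert_extent and replace_range are illustrative stand-ins, not btrfs APIs; in the patch itself the lock is BTRFS_I(inode)->extent_mutex.

/* Build with: cc -pthread sketch.c -o sketch */
#include <pthread.h>
#include <stdio.h>

#define NR_SLOTS 16

struct fake_inode {
	pthread_mutex_t extent_mutex;         /* plays the role of BTRFS_I(inode)->extent_mutex */
	unsigned long long extents[NR_SLOTS]; /* toy stand-in for the extent tree; 0 = empty slot */
};

/* Like btrfs_drop_extents, this may walk state beyond [start, end) while it works. */
static void drop_extents(struct fake_inode *inode,
			 unsigned long long start, unsigned long long end)
{
	for (int i = 0; i < NR_SLOTS; i++) {
		if (inode->extents[i] >= start && inode->extents[i] < end)
			inode->extents[i] = 0;
	}
}

static void insert_extent(struct fake_inode *inode, unsigned long long start)
{
	for (int i = 0; i < NR_SLOTS; i++) {
		if (inode->extents[i] == 0) {
			inode->extents[i] = start;
			return;
		}
	}
}

/* The pattern from the patch: hold the per-inode mutex across drop + insert. */
static void replace_range(struct fake_inode *inode,
			  unsigned long long start, unsigned long long end)
{
	pthread_mutex_lock(&inode->extent_mutex);
	drop_extents(inode, start, end);
	insert_extent(inode, start);
	pthread_mutex_unlock(&inode->extent_mutex);
}

struct range_arg {
	struct fake_inode *inode;
	unsigned long long start, end;
};

static void *worker(void *p)
{
	struct range_arg *a = p;
	replace_range(a->inode, a->start, a->end);
	return NULL;
}

int main(void)
{
	struct fake_inode inode = { .extent_mutex = PTHREAD_MUTEX_INITIALIZER };
	struct range_arg a = { &inode, 4096, 8192 };
	struct range_arg b = { &inode, 8192, 12288 };
	pthread_t t1, t2;

	/* Two workers against adjacent ranges; the mutex keeps each drop+insert atomic. */
	pthread_create(&t1, NULL, worker, &a);
	pthread_create(&t2, NULL, worker, &b);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);

	for (int i = 0; i < NR_SLOTS; i++)
		if (inode.extents[i])
			printf("extent at %llu\n", inode.extents[i]);
	return 0;
}

In the kernel patch the same shape appears twice in dirty_and_release_pages(): mutex_lock(&BTRFS_I(inode)->extent_mutex) is taken before btrfs_drop_extents(), and mutex_unlock() is issued only after the replacement extent (hole or inline data) has gone into the tree.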
Diffstat (limited to 'fs/btrfs/file.c')
-rw-r--r--   fs/btrfs/file.c   8
1 file changed, 8 insertions(+), 0 deletions(-)
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 3e4e5c227c0c..40ad1b2958cb 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -242,6 +242,7 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
 	u64 end_of_last_block;
 	u64 end_pos = pos + write_bytes;
 	u64 inline_size;
+	int did_inline = 0;
 	loff_t isize = i_size_read(inode);
 
 	start_pos = pos & ~((u64)root->sectorsize - 1);
@@ -275,6 +276,7 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
 		if (hole_size > 0) {
 			btrfs_wait_ordered_range(inode, last_pos_in_file,
 						 last_pos_in_file + hole_size);
+			mutex_lock(&BTRFS_I(inode)->extent_mutex);
 			err = btrfs_drop_extents(trans, root, inode,
 						 last_pos_in_file,
 						 last_pos_in_file + hole_size,
@@ -289,6 +291,7 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
 					       0, 0, hole_size, 0);
 			btrfs_drop_extent_cache(inode, last_pos_in_file,
 					last_pos_in_file + hole_size -1);
+			mutex_unlock(&BTRFS_I(inode)->extent_mutex);
 			btrfs_check_file(root, inode);
 		}
 		if (err)
@@ -321,6 +324,7 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
 		/* step one, delete the existing extents in this range */
 		aligned_end = (pos + write_bytes + root->sectorsize - 1) &
 			~((u64)root->sectorsize - 1);
+		mutex_lock(&BTRFS_I(inode)->extent_mutex);
 		err = btrfs_drop_extents(trans, root, inode, start_pos,
 					 aligned_end, aligned_end, &hint_byte);
 		if (err)
@@ -332,9 +336,13 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
 				   inline_size, pages, 0, num_pages);
 		btrfs_drop_extent_cache(inode, start_pos, aligned_end - 1);
 		BUG_ON(err);
+		mutex_unlock(&BTRFS_I(inode)->extent_mutex);
+		did_inline = 1;
 	}
 	if (end_pos > isize) {
 		i_size_write(inode, end_pos);
+		if (did_inline)
+			BTRFS_I(inode)->disk_i_size = end_pos;
 		btrfs_update_inode(trans, root, inode);
 	}
 failed: