author		Chris Mason <chris.mason@oracle.com>	2009-02-04 09:24:25 -0500
committer	Chris Mason <chris.mason@oracle.com>	2009-02-04 09:24:25 -0500
commit		c487685d7c18a8481900755aa5c56a7a74193101 (patch)
tree		f2ff6c059e24d7fdf2e1fff498bbef0bb90bfbfc	/fs/btrfs/ctree.c
parent		3935127c50c84106d654ef14962cff28c660bc62 (diff)
Btrfs: hash_lock is no longer needed
Before metadata is written to disk, it is updated to reflect that writeout has begun. Once this update is done, the block must be cow'd before it can be modified again.

This update was originally synchronized by using a per-fs spinlock. Today the buffers for the metadata blocks are locked before writeout begins, and everyone that tests the flag has the buffer locked as well. So, the per-fs spinlock (called hash_lock for no good reason) is no longer required.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
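For illustration only (not part of this commit): a minimal sketch of the reader-side test the patch leaves behind, assuming the caller already holds the tree lock on the buffer. should_cow_block() is a hypothetical helper name; in this commit the check remains open-coded in btrfs_cow_block() and btrfs_search_slot(), as the hunks below show.

/*
 * Illustrative sketch, not from the patch: should_cow_block() is a
 * hypothetical helper; the real test is open-coded in ctree.c.
 * Assumption: the caller holds the buffer's own lock, which is also
 * held while BTRFS_HEADER_FLAG_WRITTEN is set at writeout time, so no
 * fs-wide hash_lock is needed around the test.
 */
static int should_cow_block(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct extent_buffer *buf)
{
	if (btrfs_header_generation(buf) == trans->transid &&
	    btrfs_header_owner(buf) == root->root_key.objectid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN))
		return 0;	/* block belongs to this transaction and has not started writeout; reuse it */
	return 1;		/* otherwise it must be COW'd before it is modified */
}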
Diffstat (limited to 'fs/btrfs/ctree.c')
-rw-r--r--	fs/btrfs/ctree.c	7
1 file changed, 1 insertion, 6 deletions
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 2603ee539b7..3b6e35aafc9 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -388,16 +388,14 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
 		WARN_ON(1);
 	}
 
-	spin_lock(&root->fs_info->hash_lock);
 	if (btrfs_header_generation(buf) == trans->transid &&
 	    btrfs_header_owner(buf) == root->root_key.objectid &&
 	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
 		*cow_ret = buf;
-		spin_unlock(&root->fs_info->hash_lock);
 		WARN_ON(prealloc_dest);
 		return 0;
 	}
-	spin_unlock(&root->fs_info->hash_lock);
+
 	search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
 	ret = __btrfs_cow_block(trans, root, buf, parent,
 				 parent_slot, cow_ret, search_start, 0,
@@ -1376,14 +1374,11 @@ again:
 		int wret;
 
 		/* is a cow on this block not required */
-		spin_lock(&root->fs_info->hash_lock);
 		if (btrfs_header_generation(b) == trans->transid &&
 		    btrfs_header_owner(b) == root->root_key.objectid &&
 		    !btrfs_header_flag(b, BTRFS_HEADER_FLAG_WRITTEN)) {
-			spin_unlock(&root->fs_info->hash_lock);
 			goto cow_done;
 		}
-		spin_unlock(&root->fs_info->hash_lock);
 
 		/* ok, we have to cow, is our old prealloc the right
 		 * size?
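For completeness, an illustrative sketch (again not part of this commit) of the writeout-side ordering the commit message relies on. The helper name and the specific lock/unlock calls are assumptions; the point is only that BTRFS_HEADER_FLAG_WRITTEN is set while the block's buffer is held locked, so the checks above observe it under that same per-buffer lock instead of a fs-wide spinlock.

/*
 * Illustrative sketch, not from the patch: mark_written_for_io() is a
 * hypothetical name.  The real writeout path may lock the buffer's
 * pages rather than take the tree lock; which lock is used is an
 * assumption here, the invariant is that setting and testing the flag
 * are serialized by the buffer's own lock.
 */
static void mark_written_for_io(struct extent_buffer *buf)
{
	btrfs_tree_lock(buf);	/* hold the buffer's lock while flagging it */
	btrfs_set_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN);
	btrfs_tree_unlock(buf);
	/* from here on, any modification of this block must COW first */
}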