aboutsummaryrefslogtreecommitdiffstats
path: root/fs/btrfs/extent-tree.c
diff options
context:
space:
mode:
Diffstat (limited to 'fs/btrfs/extent-tree.c')
-rw-r--r--  fs/btrfs/extent-tree.c  |  18 ++++++------------
1 file changed, 6 insertions(+), 12 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 7b24f1511654..0e294cfaa60c 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -1004,8 +1004,6 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 		goto out;
 	}
 
-	btrfs_item_key_to_cpu(buf, &ref->key, 0);
-
 	ref->bytenr = buf->start;
 	ref->owner = btrfs_header_owner(buf);
 	ref->generation = btrfs_header_generation(buf);
@@ -2387,19 +2385,15 @@ static void noinline reada_walk_down(struct btrfs_root *root,
 	}
 }
 
-/*
- * we want to avoid as much random IO as we can with the alloc mutex
- * held, so drop the lock and do the lookup, then do it again with the
- * lock held.
- */
 int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len,
 			      u32 *refs)
 {
+	int ret;
 	mutex_unlock(&root->fs_info->alloc_mutex);
-	lookup_extent_ref(NULL, root, start, len, refs);
+	ret = lookup_extent_ref(NULL, root, start, len, refs);
 	cond_resched();
 	mutex_lock(&root->fs_info->alloc_mutex);
-	return lookup_extent_ref(NULL, root, start, len, refs);
+	return ret;
 }
 
 /*
@@ -2468,11 +2462,11 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
 			BUG_ON(ret);
 			continue;
 		}
 
 		if (*level == 1) {
 			struct btrfs_key key;
 			btrfs_node_key_to_cpu(cur, &key, path->slots[*level]);
-			ref = btrfs_lookup_leaf_ref(root, &key);
+			ref = btrfs_lookup_leaf_ref(root, bytenr);
 			if (ref) {
 				ret = drop_leaf_ref(trans, root, ref);
 				BUG_ON(ret);
@@ -2482,7 +2476,6 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
 				break;
 			}
 		}
-
 		next = btrfs_find_tree_block(root, bytenr, blocksize);
 		if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
 			free_extent_buffer(next);
@@ -2672,6 +2665,7 @@ int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
 			ret = -EAGAIN;
 			break;
 		}
+		wake_up(&root->fs_info->transaction_throttle);
 	}
 	for (i = 0; i <= orig_level; i++) {
 		if (path->nodes[i]) {