path: root/fs/btrfs/extent-tree.c
author     Yan Zheng <zheng.yan@oracle.com>      2009-06-27 21:07:35 -0400
committer  Chris Mason <chris.mason@oracle.com>  2009-07-02 13:41:17 -0400
commit     2c47e605a91dde6b0514f689645e7ab336c8592a (patch)
tree       d541e61440f06f37d17e2d14cbaa78fc0cd41ff2 /fs/btrfs/extent-tree.c
parent     a970b0a16cc416a509d5ae8b1d70978664e6f4fe (diff)
Btrfs: update backrefs while dropping snapshot
The new backref format places a restriction on the type of backref item: if a tree block is not referenced by its owner tree, full backrefs must be used for the pointers in it. When a tree block loses its owner tree's reference, the backrefs for the pointers in it therefore have to be updated to full backrefs.

The current btrfs_drop_snapshot misses the code that updates backrefs, so it is unsafe for general use. This patch adds the backref update code to btrfs_drop_snapshot. This is not a problem for the restricted way btrfs_drop_snapshot is used today, but the update is required for general snapshot deletion.

Signed-off-by: Yan Zheng <zheng.yan@oracle.com>
Signed-off-by: Chris Mason <chris.mason@oracle.com>
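For readers new to the backref code, the conversion described above is what the patch's UPDATE_BACKREF stage does inside walk_down_proc() further down in the diff. The following is a condensed, illustrative sketch of just that step; the helper name is made up for this note, the walk_control bookkeeping is reduced to a single flags word, and error handling follows the patch's BUG_ON style:

/*
 * Illustrative sketch only (hypothetical helper): the backref conversion
 * performed by the patch's UPDATE_BACKREF stage.  'block_flags' stands in
 * for wc->flags[level] in the real code.
 */
static void convert_to_full_backrefs(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct extent_buffer *eb,
				     u64 *block_flags)
{
	u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	int ret;

	if (*block_flags & flag)
		return;		/* once set, the flag is never cleared */

	/* add a full backref for every pointer in this block */
	ret = btrfs_inc_ref(trans, root, eb, 1);
	BUG_ON(ret);
	/* drop the owner-keyed backrefs for those same pointers */
	ret = btrfs_dec_ref(trans, root, eb, 0);
	BUG_ON(ret);
	/* record the conversion in the extent item so it happens only once */
	ret = btrfs_set_disk_extent_flags(trans, root, eb->start, eb->len,
					  flag, 0);
	BUG_ON(ret);
	*block_flags |= flag;
}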
Diffstat (limited to 'fs/btrfs/extent-tree.c')
-rw-r--r--  fs/btrfs/extent-tree.c | 564
1 file changed, 390 insertions(+), 174 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index edc7d208c5ce..cd64cfc14f7f 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -990,15 +990,13 @@ static inline int extent_ref_type(u64 parent, u64 owner)
 	return type;
 }
 
-static int find_next_key(struct btrfs_path *path, struct btrfs_key *key)
+static int find_next_key(struct btrfs_path *path, int level,
+			 struct btrfs_key *key)
 
 {
-	int level;
-	BUG_ON(!path->keep_locks);
-	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
+	for (; level < BTRFS_MAX_LEVEL; level++) {
 		if (!path->nodes[level])
 			break;
-		btrfs_assert_tree_locked(path->nodes[level]);
 		if (path->slots[level] + 1 >=
 		    btrfs_header_nritems(path->nodes[level]))
 			continue;
@@ -1158,7 +1156,8 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
 		 * For simplicity, we just do not add new inline back
 		 * ref if there is any kind of item for this block
 		 */
-		if (find_next_key(path, &key) == 0 && key.objectid == bytenr &&
+		if (find_next_key(path, 0, &key) == 0 &&
+		    key.objectid == bytenr &&
 		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
 			err = -EAGAIN;
 			goto out;
@@ -4128,6 +4127,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
 	return buf;
 }
 
+#if 0
 int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
 			struct btrfs_root *root, struct extent_buffer *leaf)
 {
@@ -4171,8 +4171,6 @@ int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
 	return 0;
 }
 
-#if 0
-
 static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
 					struct btrfs_root *root,
 					struct btrfs_leaf_ref *ref)
@@ -4553,262 +4551,471 @@ out:
 }
 #endif
 
+struct walk_control {
+	u64 refs[BTRFS_MAX_LEVEL];
+	u64 flags[BTRFS_MAX_LEVEL];
+	struct btrfs_key update_progress;
+	int stage;
+	int level;
+	int shared_level;
+	int update_ref;
+	int keep_locks;
+};
+
+#define DROP_REFERENCE	1
+#define UPDATE_BACKREF	2
+
 /*
- * helper function for drop_subtree, this function is similar to
- * walk_down_tree. The main difference is that it checks reference
- * counts while tree blocks are locked.
+ * hepler to process tree block while walking down the tree.
+ *
+ * when wc->stage == DROP_REFERENCE, this function checks
+ * reference count of the block. if the block is shared and
+ * we need update back refs for the subtree rooted at the
+ * block, this function changes wc->stage to UPDATE_BACKREF
+ *
+ * when wc->stage == UPDATE_BACKREF, this function updates
+ * back refs for pointers in the block.
+ *
+ * NOTE: return value 1 means we should stop walking down.
  */
-static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
+static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
 				   struct btrfs_root *root,
-				   struct btrfs_path *path, int *level)
+				   struct btrfs_path *path,
+				   struct walk_control *wc)
 {
-	struct extent_buffer *next;
-	struct extent_buffer *cur;
-	struct extent_buffer *parent;
-	u64 bytenr;
-	u64 ptr_gen;
-	u64 refs;
-	u64 flags;
-	u32 blocksize;
+	int level = wc->level;
+	struct extent_buffer *eb = path->nodes[level];
+	struct btrfs_key key;
+	u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
 	int ret;
 
-	cur = path->nodes[*level];
-	ret = btrfs_lookup_extent_info(trans, root, cur->start, cur->len,
-				       &refs, &flags);
-	BUG_ON(ret);
-	if (refs > 1)
-		goto out;
+	if (wc->stage == UPDATE_BACKREF &&
+	    btrfs_header_owner(eb) != root->root_key.objectid)
+		return 1;
 
-	BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
+	/*
+	 * when reference count of tree block is 1, it won't increase
+	 * again. once full backref flag is set, we never clear it.
+	 */
+	if ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
+	    (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag))) {
+		BUG_ON(!path->locks[level]);
+		ret = btrfs_lookup_extent_info(trans, root,
+					       eb->start, eb->len,
+					       &wc->refs[level],
+					       &wc->flags[level]);
+		BUG_ON(ret);
+		BUG_ON(wc->refs[level] == 0);
+	}
 
-	while (*level >= 0) {
-		cur = path->nodes[*level];
-		if (*level == 0) {
-			ret = btrfs_drop_leaf_ref(trans, root, cur);
-			BUG_ON(ret);
-			clean_tree_block(trans, root, cur);
-			break;
-		}
-		if (path->slots[*level] >= btrfs_header_nritems(cur)) {
-			clean_tree_block(trans, root, cur);
-			break;
+	if (wc->stage == DROP_REFERENCE &&
+	    wc->update_ref && wc->refs[level] > 1) {
+		BUG_ON(eb == root->node);
+		BUG_ON(path->slots[level] > 0);
+		if (level == 0)
+			btrfs_item_key_to_cpu(eb, &key, path->slots[level]);
+		else
+			btrfs_node_key_to_cpu(eb, &key, path->slots[level]);
+		if (btrfs_header_owner(eb) == root->root_key.objectid &&
+		    btrfs_comp_cpu_keys(&key, &wc->update_progress) >= 0) {
+			wc->stage = UPDATE_BACKREF;
+			wc->shared_level = level;
 		}
+	}
 
-		bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
-		blocksize = btrfs_level_size(root, *level - 1);
-		ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
+	if (wc->stage == DROP_REFERENCE) {
+		if (wc->refs[level] > 1)
+			return 1;
 
-		next = read_tree_block(root, bytenr, blocksize, ptr_gen);
-		btrfs_tree_lock(next);
-		btrfs_set_lock_blocking(next);
+		if (path->locks[level] && !wc->keep_locks) {
+			btrfs_tree_unlock(eb);
+			path->locks[level] = 0;
+		}
+		return 0;
+	}
 
-		ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
-					       &refs, &flags);
+	/* wc->stage == UPDATE_BACKREF */
+	if (!(wc->flags[level] & flag)) {
+		BUG_ON(!path->locks[level]);
+		ret = btrfs_inc_ref(trans, root, eb, 1);
 		BUG_ON(ret);
-		if (refs > 1) {
-			parent = path->nodes[*level];
-			ret = btrfs_free_extent(trans, root, bytenr,
-						blocksize, parent->start,
-						btrfs_header_owner(parent),
-						*level - 1, 0);
+		ret = btrfs_dec_ref(trans, root, eb, 0);
+		BUG_ON(ret);
+		ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
+						  eb->len, flag, 0);
+		BUG_ON(ret);
+		wc->flags[level] |= flag;
+	}
+
+	/*
+	 * the block is shared by multiple trees, so it's not good to
+	 * keep the tree lock
+	 */
+	if (path->locks[level] && level > 0) {
+		btrfs_tree_unlock(eb);
+		path->locks[level] = 0;
+	}
+	return 0;
+}
+
+/*
+ * hepler to process tree block while walking up the tree.
+ *
+ * when wc->stage == DROP_REFERENCE, this function drops
+ * reference count on the block.
+ *
+ * when wc->stage == UPDATE_BACKREF, this function changes
+ * wc->stage back to DROP_REFERENCE if we changed wc->stage
+ * to UPDATE_BACKREF previously while processing the block.
+ *
+ * NOTE: return value 1 means we should stop walking up.
+ */
+static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
+				 struct btrfs_root *root,
+				 struct btrfs_path *path,
+				 struct walk_control *wc)
+{
+	int ret = 0;
+	int level = wc->level;
+	struct extent_buffer *eb = path->nodes[level];
+	u64 parent = 0;
+
+	if (wc->stage == UPDATE_BACKREF) {
+		BUG_ON(wc->shared_level < level);
+		if (level < wc->shared_level)
+			goto out;
+
+		BUG_ON(wc->refs[level] <= 1);
+		ret = find_next_key(path, level + 1, &wc->update_progress);
+		if (ret > 0)
+			wc->update_ref = 0;
+
+		wc->stage = DROP_REFERENCE;
+		wc->shared_level = -1;
+		path->slots[level] = 0;
+
+		/*
+		 * check reference count again if the block isn't locked.
+		 * we should start walking down the tree again if reference
+		 * count is one.
+		 */
+		if (!path->locks[level]) {
+			BUG_ON(level == 0);
+			btrfs_tree_lock(eb);
+			btrfs_set_lock_blocking(eb);
+			path->locks[level] = 1;
+
+			ret = btrfs_lookup_extent_info(trans, root,
+						       eb->start, eb->len,
+						       &wc->refs[level],
+						       &wc->flags[level]);
 			BUG_ON(ret);
-			path->slots[*level]++;
-			btrfs_tree_unlock(next);
-			free_extent_buffer(next);
-			continue;
+			BUG_ON(wc->refs[level] == 0);
+			if (wc->refs[level] == 1) {
+				btrfs_tree_unlock(eb);
+				path->locks[level] = 0;
+				return 1;
+			}
+		} else {
+			BUG_ON(level != 0);
 		}
+	}
 
-		BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
+	/* wc->stage == DROP_REFERENCE */
+	BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
 
-		*level = btrfs_header_level(next);
-		path->nodes[*level] = next;
-		path->slots[*level] = 0;
-		path->locks[*level] = 1;
-		cond_resched();
+	if (wc->refs[level] == 1) {
+		if (level == 0) {
+			if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
+				ret = btrfs_dec_ref(trans, root, eb, 1);
+			else
+				ret = btrfs_dec_ref(trans, root, eb, 0);
+			BUG_ON(ret);
+		}
+		/* make block locked assertion in clean_tree_block happy */
+		if (!path->locks[level] &&
+		    btrfs_header_generation(eb) == trans->transid) {
+			btrfs_tree_lock(eb);
+			btrfs_set_lock_blocking(eb);
+			path->locks[level] = 1;
+		}
+		clean_tree_block(trans, root, eb);
+	}
+
+	if (eb == root->node) {
+		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
+			parent = eb->start;
+		else
+			BUG_ON(root->root_key.objectid !=
+			       btrfs_header_owner(eb));
+	} else {
+		if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
+			parent = path->nodes[level + 1]->start;
+		else
+			BUG_ON(root->root_key.objectid !=
+			       btrfs_header_owner(path->nodes[level + 1]));
 	}
-out:
-	if (path->nodes[*level] == root->node)
-		parent = path->nodes[*level];
-	else
-		parent = path->nodes[*level + 1];
-	bytenr = path->nodes[*level]->start;
-	blocksize = path->nodes[*level]->len;
 
-	ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent->start,
-				btrfs_header_owner(parent), *level, 0);
+	ret = btrfs_free_extent(trans, root, eb->start, eb->len, parent,
+				root->root_key.objectid, level, 0);
 	BUG_ON(ret);
+out:
+	wc->refs[level] = 0;
+	wc->flags[level] = 0;
+	return ret;
+}
+
+static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
+				   struct btrfs_root *root,
+				   struct btrfs_path *path,
+				   struct walk_control *wc)
+{
+	struct extent_buffer *next;
+	struct extent_buffer *cur;
+	u64 bytenr;
+	u64 ptr_gen;
+	u32 blocksize;
+	int level = wc->level;
+	int ret;
+
+	while (level >= 0) {
+		cur = path->nodes[level];
+		BUG_ON(path->slots[level] >= btrfs_header_nritems(cur));
 
-	if (path->locks[*level]) {
-		btrfs_tree_unlock(path->nodes[*level]);
-		path->locks[*level] = 0;
+		ret = walk_down_proc(trans, root, path, wc);
+		if (ret > 0)
+			break;
+
+		if (level == 0)
+			break;
+
+		bytenr = btrfs_node_blockptr(cur, path->slots[level]);
+		blocksize = btrfs_level_size(root, level - 1);
+		ptr_gen = btrfs_node_ptr_generation(cur, path->slots[level]);
+
+		next = read_tree_block(root, bytenr, blocksize, ptr_gen);
+		btrfs_tree_lock(next);
+		btrfs_set_lock_blocking(next);
+
+		level--;
+		BUG_ON(level != btrfs_header_level(next));
+		path->nodes[level] = next;
+		path->slots[level] = 0;
+		path->locks[level] = 1;
+		wc->level = level;
 	}
-	free_extent_buffer(path->nodes[*level]);
-	path->nodes[*level] = NULL;
-	*level += 1;
-	cond_resched();
 	return 0;
 }
 
-/*
- * helper for dropping snapshots. This walks back up the tree in the path
- * to find the first node higher up where we haven't yet gone through
- * all the slots
- */
 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
 				 struct btrfs_root *root,
 				 struct btrfs_path *path,
-				 int *level, int max_level)
+				 struct walk_control *wc, int max_level)
 {
-	struct btrfs_root_item *root_item = &root->root_item;
-	int i;
-	int slot;
+	int level = wc->level;
 	int ret;
 
-	for (i = *level; i < max_level && path->nodes[i]; i++) {
-		slot = path->slots[i];
-		if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
-			/*
-			 * there is more work to do in this level.
-			 * Update the drop_progress marker to reflect
-			 * the work we've done so far, and then bump
-			 * the slot number
-			 */
-			path->slots[i]++;
-			WARN_ON(*level == 0);
-			if (max_level == BTRFS_MAX_LEVEL) {
-				btrfs_node_key(path->nodes[i],
-					       &root_item->drop_progress,
-					       path->slots[i]);
-				root_item->drop_level = i;
-			}
-			*level = i;
+	path->slots[level] = btrfs_header_nritems(path->nodes[level]);
+	while (level < max_level && path->nodes[level]) {
+		wc->level = level;
+		if (path->slots[level] + 1 <
+		    btrfs_header_nritems(path->nodes[level])) {
+			path->slots[level]++;
 			return 0;
 		} else {
-			struct extent_buffer *parent;
-
-			/*
-			 * this whole node is done, free our reference
-			 * on it and go up one level
-			 */
-			if (path->nodes[*level] == root->node)
-				parent = path->nodes[*level];
-			else
-				parent = path->nodes[*level + 1];
+			ret = walk_up_proc(trans, root, path, wc);
+			if (ret > 0)
+				return 0;
 
-			clean_tree_block(trans, root, path->nodes[i]);
-			ret = btrfs_free_extent(trans, root,
-						path->nodes[i]->start,
-						path->nodes[i]->len,
-						parent->start,
-						btrfs_header_owner(parent),
-						*level, 0);
-			BUG_ON(ret);
-			if (path->locks[*level]) {
-				btrfs_tree_unlock(path->nodes[i]);
-				path->locks[i] = 0;
+			if (path->locks[level]) {
+				btrfs_tree_unlock(path->nodes[level]);
+				path->locks[level] = 0;
 			}
-			free_extent_buffer(path->nodes[i]);
-			path->nodes[i] = NULL;
-			*level = i + 1;
+			free_extent_buffer(path->nodes[level]);
+			path->nodes[level] = NULL;
+			level++;
 		}
 	}
 	return 1;
 }
 
 /*
- * drop the reference count on the tree rooted at 'snap'. This traverses
- * the tree freeing any blocks that have a ref count of zero after being
- * decremented.
+ * drop a subvolume tree.
+ *
+ * this function traverses the tree freeing any blocks that only
+ * referenced by the tree.
+ *
+ * when a shared tree block is found. this function decreases its
+ * reference count by one. if update_ref is true, this function
+ * also make sure backrefs for the shared block and all lower level
+ * blocks are properly updated.
  */
-int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
-			*root)
+int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref)
 {
-	int ret = 0;
-	int wret;
-	int level;
 	struct btrfs_path *path;
-	int update_count;
+	struct btrfs_trans_handle *trans;
+	struct btrfs_root *tree_root = root->fs_info->tree_root;
 	struct btrfs_root_item *root_item = &root->root_item;
+	struct walk_control *wc;
+	struct btrfs_key key;
+	int err = 0;
+	int ret;
+	int level;
 
 	path = btrfs_alloc_path();
 	BUG_ON(!path);
 
-	level = btrfs_header_level(root->node);
+	wc = kzalloc(sizeof(*wc), GFP_NOFS);
+	BUG_ON(!wc);
+
+	trans = btrfs_start_transaction(tree_root, 1);
+
 	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
+		level = btrfs_header_level(root->node);
 		path->nodes[level] = btrfs_lock_root_node(root);
 		btrfs_set_lock_blocking(path->nodes[level]);
 		path->slots[level] = 0;
 		path->locks[level] = 1;
+		memset(&wc->update_progress, 0,
+		       sizeof(wc->update_progress));
 	} else {
-		struct btrfs_key key;
-		struct btrfs_disk_key found_key;
-		struct extent_buffer *node;
-
 		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
+		memcpy(&wc->update_progress, &key,
+		       sizeof(wc->update_progress));
+
 		level = root_item->drop_level;
+		BUG_ON(level == 0);
 		path->lowest_level = level;
-		wret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
-		if (wret < 0) {
-			ret = wret;
+		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+		path->lowest_level = 0;
+		if (ret < 0) {
+			err = ret;
 			goto out;
 		}
-		node = path->nodes[level];
-		btrfs_node_key(node, &found_key, path->slots[level]);
-		WARN_ON(memcmp(&found_key, &root_item->drop_progress,
-			       sizeof(found_key)));
+		btrfs_node_key_to_cpu(path->nodes[level], &key,
+				      path->slots[level]);
+		WARN_ON(memcmp(&key, &wc->update_progress, sizeof(key)));
+
 		/*
 		 * unlock our path, this is safe because only this
 		 * function is allowed to delete this snapshot
 		 */
 		btrfs_unlock_up_safe(path, 0);
+
+		level = btrfs_header_level(root->node);
+		while (1) {
+			btrfs_tree_lock(path->nodes[level]);
+			btrfs_set_lock_blocking(path->nodes[level]);
+
+			ret = btrfs_lookup_extent_info(trans, root,
+						path->nodes[level]->start,
+						path->nodes[level]->len,
+						&wc->refs[level],
+						&wc->flags[level]);
+			BUG_ON(ret);
+			BUG_ON(wc->refs[level] == 0);
+
+			if (level == root_item->drop_level)
+				break;
+
+			btrfs_tree_unlock(path->nodes[level]);
+			WARN_ON(wc->refs[level] != 1);
+			level--;
+		}
 	}
+
+	wc->level = level;
+	wc->shared_level = -1;
+	wc->stage = DROP_REFERENCE;
+	wc->update_ref = update_ref;
+	wc->keep_locks = 0;
+
 	while (1) {
-		unsigned long update;
-		wret = walk_down_tree(trans, root, path, &level);
-		if (wret > 0)
+		ret = walk_down_tree(trans, root, path, wc);
+		if (ret < 0) {
+			err = ret;
 			break;
-		if (wret < 0)
-			ret = wret;
+		}
 
-		wret = walk_up_tree(trans, root, path, &level,
-				    BTRFS_MAX_LEVEL);
-		if (wret > 0)
+		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
+		if (ret < 0) {
+			err = ret;
 			break;
-		if (wret < 0)
-			ret = wret;
-		if (trans->transaction->in_commit ||
-		    trans->transaction->delayed_refs.flushing) {
-			ret = -EAGAIN;
+		}
+
+		if (ret > 0) {
+			BUG_ON(wc->stage != DROP_REFERENCE);
 			break;
 		}
-		for (update_count = 0; update_count < 16; update_count++) {
+
+		if (wc->stage == DROP_REFERENCE) {
+			level = wc->level;
+			btrfs_node_key(path->nodes[level],
+				       &root_item->drop_progress,
+				       path->slots[level]);
+			root_item->drop_level = level;
+		}
+
+		BUG_ON(wc->level == 0);
+		if (trans->transaction->in_commit ||
+		    trans->transaction->delayed_refs.flushing) {
+			ret = btrfs_update_root(trans, tree_root,
+						&root->root_key,
+						root_item);
+			BUG_ON(ret);
+
+			btrfs_end_transaction(trans, tree_root);
+			trans = btrfs_start_transaction(tree_root, 1);
+		} else {
+			unsigned long update;
 			update = trans->delayed_ref_updates;
 			trans->delayed_ref_updates = 0;
 			if (update)
-				btrfs_run_delayed_refs(trans, root, update);
-			else
-				break;
+				btrfs_run_delayed_refs(trans, tree_root,
+						       update);
 		}
 	}
+	btrfs_release_path(root, path);
+	BUG_ON(err);
+
+	ret = btrfs_del_root(trans, tree_root, &root->root_key);
+	BUG_ON(ret);
+
+	free_extent_buffer(root->node);
+	free_extent_buffer(root->commit_root);
+	kfree(root);
 out:
+	btrfs_end_transaction(trans, tree_root);
+	kfree(wc);
 	btrfs_free_path(path);
-	return ret;
+	return err;
 }
 
+/*
+ * drop subtree rooted at tree block 'node'.
+ *
+ * NOTE: this function will unlock and release tree block 'node'
+ */
 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
 		       struct btrfs_root *root,
 		       struct extent_buffer *node,
 		       struct extent_buffer *parent)
 {
 	struct btrfs_path *path;
+	struct walk_control *wc;
 	int level;
 	int parent_level;
 	int ret = 0;
 	int wret;
 
+	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
+
 	path = btrfs_alloc_path();
 	BUG_ON(!path);
 
+	wc = kzalloc(sizeof(*wc), GFP_NOFS);
+	BUG_ON(!wc);
+
 	btrfs_assert_tree_locked(parent);
 	parent_level = btrfs_header_level(parent);
 	extent_buffer_get(parent);
@@ -4817,24 +5024,33 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
 
 	btrfs_assert_tree_locked(node);
 	level = btrfs_header_level(node);
-	extent_buffer_get(node);
 	path->nodes[level] = node;
 	path->slots[level] = 0;
+	path->locks[level] = 1;
+
+	wc->refs[parent_level] = 1;
+	wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
+	wc->level = level;
+	wc->shared_level = -1;
+	wc->stage = DROP_REFERENCE;
+	wc->update_ref = 0;
+	wc->keep_locks = 1;
 
 	while (1) {
-		wret = walk_down_tree(trans, root, path, &level);
-		if (wret < 0)
+		wret = walk_down_tree(trans, root, path, wc);
+		if (wret < 0) {
 			ret = wret;
-		if (wret != 0)
 			break;
+		}
 
-		wret = walk_up_tree(trans, root, path, &level, parent_level);
+		wret = walk_up_tree(trans, root, path, wc, parent_level);
 		if (wret < 0)
 			ret = wret;
 		if (wret != 0)
 			break;
 	}
 
+	kfree(wc);
 	btrfs_free_path(path);
 	return ret;
 }
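A note on the interface change above: btrfs_drop_snapshot() no longer takes a transaction handle; it now starts and ends transactions against the tree root itself and takes an update_ref flag that requests the backref updates described in the commit message. A hypothetical caller (not taken from this patch) would look like:

/*
 * Hypothetical caller sketch for the reworked entry point.  update_ref == 0
 * asks for plain snapshot deletion; update_ref == 1 additionally keeps
 * backrefs of shared blocks and their subtrees up to date.
 */
static void drop_dead_root(struct btrfs_root *root)
{
	btrfs_drop_snapshot(root, 0);
}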