about summary refs log tree commit diff stats
path: root/fs/btrfs/delayed-ref.c
diff options
context:
space:
mode:
Diffstat (limited to 'fs/btrfs/delayed-ref.c')
-rw-r--r--  fs/btrfs/delayed-ref.c  114
1 file changed, 0 insertions(+), 114 deletions(-)
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index bce28f653899..125cf76fcd08 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -281,44 +281,6 @@ again:
281} 281}
282 282
283/* 283/*
284 * This checks to see if there are any delayed refs in the
285 * btree for a given bytenr. It returns one if it finds any
286 * and zero otherwise.
287 *
288 * If it only finds a head node, it returns 0.
289 *
290 * The idea is to use this when deciding if you can safely delete an
291 * extent from the extent allocation tree. There may be a pending
292 * ref in the rbtree that adds or removes references, so as long as this
293 * returns one you need to leave the BTRFS_EXTENT_ITEM in the extent
294 * allocation tree.
295 */
296int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr)
297{
298 struct btrfs_delayed_ref_node *ref;
299 struct btrfs_delayed_ref_root *delayed_refs;
300 struct rb_node *prev_node;
301 int ret = 0;
302
303 delayed_refs = &trans->transaction->delayed_refs;
304 spin_lock(&delayed_refs->lock);
305
306 ref = find_ref_head(&delayed_refs->root, bytenr, NULL);
307 if (ref) {
308 prev_node = rb_prev(&ref->rb_node);
309 if (!prev_node)
310 goto out;
311 ref = rb_entry(prev_node, struct btrfs_delayed_ref_node,
312 rb_node);
313 if (ref->bytenr == bytenr)
314 ret = 1;
315 }
316out:
317 spin_unlock(&delayed_refs->lock);
318 return ret;
319}
320
321/*
322 * helper function to update an extent delayed ref in the 284 * helper function to update an extent delayed ref in the
323 * rbtree. existing and update must both have the same 285 * rbtree. existing and update must both have the same
324 * bytenr and parent 286 * bytenr and parent
@@ -747,79 +709,3 @@ btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
747 return btrfs_delayed_node_to_head(ref); 709 return btrfs_delayed_node_to_head(ref);
748 return NULL; 710 return NULL;
749} 711}
750
751/*
752 * add a delayed ref to the tree. This does all of the accounting required
753 * to make sure the delayed ref is eventually processed before this
754 * transaction commits.
755 *
756 * The main point of this call is to add and remove a backreference in a single
757 * shot, taking the lock only once, and only searching for the head node once.
758 *
759 * It is the same as doing a ref add and delete in two separate calls.
760 */
761#if 0
762int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans,
763 u64 bytenr, u64 num_bytes, u64 orig_parent,
764 u64 parent, u64 orig_ref_root, u64 ref_root,
765 u64 orig_ref_generation, u64 ref_generation,
766 u64 owner_objectid, int pin)
767{
768 struct btrfs_delayed_ref *ref;
769 struct btrfs_delayed_ref *old_ref;
770 struct btrfs_delayed_ref_head *head_ref;
771 struct btrfs_delayed_ref_root *delayed_refs;
772 int ret;
773
774 ref = kmalloc(sizeof(*ref), GFP_NOFS);
775 if (!ref)
776 return -ENOMEM;
777
778 old_ref = kmalloc(sizeof(*old_ref), GFP_NOFS);
779 if (!old_ref) {
780 kfree(ref);
781 return -ENOMEM;
782 }
783
784 /*
785 * the parent = 0 case comes from cases where we don't actually
786 * know the parent yet. It will get updated later via a add/drop
787 * pair.
788 */
789 if (parent == 0)
790 parent = bytenr;
791 if (orig_parent == 0)
792 orig_parent = bytenr;
793
794 head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
795 if (!head_ref) {
796 kfree(ref);
797 kfree(old_ref);
798 return -ENOMEM;
799 }
800 delayed_refs = &trans->transaction->delayed_refs;
801 spin_lock(&delayed_refs->lock);
802
803 /*
804 * insert both the head node and the new ref without dropping
805 * the spin lock
806 */
807 ret = __btrfs_add_delayed_ref(trans, &head_ref->node, bytenr, num_bytes,
808 (u64)-1, 0, 0, 0,
809 BTRFS_UPDATE_DELAYED_HEAD, 0);
810 BUG_ON(ret);
811
812 ret = __btrfs_add_delayed_ref(trans, &ref->node, bytenr, num_bytes,
813 parent, ref_root, ref_generation,
814 owner_objectid, BTRFS_ADD_DELAYED_REF, 0);
815 BUG_ON(ret);
816
817 ret = __btrfs_add_delayed_ref(trans, &old_ref->node, bytenr, num_bytes,
818 orig_parent, orig_ref_root,
819 orig_ref_generation, owner_objectid,
820 BTRFS_DROP_DELAYED_REF, pin);
821 BUG_ON(ret);
822 spin_unlock(&delayed_refs->lock);
823 return 0;
824}
825#endif