author		Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
committer	Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
commit		c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree		ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /fs/btrfs/delayed-ref.c
parent		ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent		6a00f206debf8a5c8899055726ad127dbeeed098 (diff)

Merge branch 'mpi-master' into wip-k-fmlp

Conflicts:
	litmus/sched_cedf.c
Diffstat (limited to 'fs/btrfs/delayed-ref.c')
-rw-r--r--	fs/btrfs/delayed-ref.c	120
1 file changed, 6 insertions(+), 114 deletions(-)
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index e807b143b857..125cf76fcd08 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -281,44 +281,6 @@ again:
 }
 
 /*
- * This checks to see if there are any delayed refs in the
- * btree for a given bytenr. It returns one if it finds any
- * and zero otherwise.
- *
- * If it only finds a head node, it returns 0.
- *
- * The idea is to use this when deciding if you can safely delete an
- * extent from the extent allocation tree. There may be a pending
- * ref in the rbtree that adds or removes references, so as long as this
- * returns one you need to leave the BTRFS_EXTENT_ITEM in the extent
- * allocation tree.
- */
-int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr)
-{
-	struct btrfs_delayed_ref_node *ref;
-	struct btrfs_delayed_ref_root *delayed_refs;
-	struct rb_node *prev_node;
-	int ret = 0;
-
-	delayed_refs = &trans->transaction->delayed_refs;
-	spin_lock(&delayed_refs->lock);
-
-	ref = find_ref_head(&delayed_refs->root, bytenr, NULL);
-	if (ref) {
-		prev_node = rb_prev(&ref->rb_node);
-		if (!prev_node)
-			goto out;
-		ref = rb_entry(prev_node, struct btrfs_delayed_ref_node,
-				rb_node);
-		if (ref->bytenr == bytenr)
-			ret = 1;
-	}
-out:
-	spin_unlock(&delayed_refs->lock);
-	return ret;
-}
-
-/*
  * helper function to update an extent delayed ref in the
  * rbtree. existing and update must both have the same
  * bytenr and parent
@@ -483,6 +445,8 @@ static noinline int add_delayed_ref_head(struct btrfs_trans_handle *trans,
 	INIT_LIST_HEAD(&head_ref->cluster);
 	mutex_init(&head_ref->mutex);
 
+	trace_btrfs_delayed_ref_head(ref, head_ref, action);
+
 	existing = tree_insert(&delayed_refs->root, &ref->rb_node);
 
 	if (existing) {
@@ -537,6 +501,8 @@ static noinline int add_delayed_tree_ref(struct btrfs_trans_handle *trans,
 	}
 	full_ref->level = level;
 
+	trace_btrfs_delayed_tree_ref(ref, full_ref, action);
+
 	existing = tree_insert(&delayed_refs->root, &ref->rb_node);
 
 	if (existing) {
@@ -591,6 +557,8 @@ static noinline int add_delayed_data_ref(struct btrfs_trans_handle *trans,
 	full_ref->objectid = owner;
 	full_ref->offset = offset;
 
+	trace_btrfs_delayed_data_ref(ref, full_ref, action);
+
 	existing = tree_insert(&delayed_refs->root, &ref->rb_node);
 
 	if (existing) {
@@ -741,79 +709,3 @@ btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
 		return btrfs_delayed_node_to_head(ref);
 	return NULL;
 }
-
-/*
- * add a delayed ref to the tree. This does all of the accounting required
- * to make sure the delayed ref is eventually processed before this
- * transaction commits.
- *
- * The main point of this call is to add and remove a backreference in a single
- * shot, taking the lock only once, and only searching for the head node once.
- *
- * It is the same as doing a ref add and delete in two separate calls.
- */
-#if 0
-int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans,
-			  u64 bytenr, u64 num_bytes, u64 orig_parent,
-			  u64 parent, u64 orig_ref_root, u64 ref_root,
-			  u64 orig_ref_generation, u64 ref_generation,
-			  u64 owner_objectid, int pin)
-{
-	struct btrfs_delayed_ref *ref;
-	struct btrfs_delayed_ref *old_ref;
-	struct btrfs_delayed_ref_head *head_ref;
-	struct btrfs_delayed_ref_root *delayed_refs;
-	int ret;
-
-	ref = kmalloc(sizeof(*ref), GFP_NOFS);
-	if (!ref)
-		return -ENOMEM;
-
-	old_ref = kmalloc(sizeof(*old_ref), GFP_NOFS);
-	if (!old_ref) {
-		kfree(ref);
-		return -ENOMEM;
-	}
-
-	/*
-	 * the parent = 0 case comes from cases where we don't actually
-	 * know the parent yet. It will get updated later via a add/drop
-	 * pair.
-	 */
-	if (parent == 0)
-		parent = bytenr;
-	if (orig_parent == 0)
-		orig_parent = bytenr;
-
-	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
-	if (!head_ref) {
-		kfree(ref);
-		kfree(old_ref);
-		return -ENOMEM;
-	}
-	delayed_refs = &trans->transaction->delayed_refs;
-	spin_lock(&delayed_refs->lock);
-
-	/*
-	 * insert both the head node and the new ref without dropping
-	 * the spin lock
-	 */
-	ret = __btrfs_add_delayed_ref(trans, &head_ref->node, bytenr, num_bytes,
-				      (u64)-1, 0, 0, 0,
-				      BTRFS_UPDATE_DELAYED_HEAD, 0);
-	BUG_ON(ret);
-
-	ret = __btrfs_add_delayed_ref(trans, &ref->node, bytenr, num_bytes,
-				      parent, ref_root, ref_generation,
-				      owner_objectid, BTRFS_ADD_DELAYED_REF, 0);
-	BUG_ON(ret);
-
-	ret = __btrfs_add_delayed_ref(trans, &old_ref->node, bytenr, num_bytes,
-				      orig_parent, orig_ref_root,
-				      orig_ref_generation, owner_objectid,
-				      BTRFS_DROP_DELAYED_REF, pin);
-	BUG_ON(ret);
-	spin_unlock(&delayed_refs->lock);
-	return 0;
-}
-#endif
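
For context on the calls added above: trace_btrfs_delayed_ref_head(), trace_btrfs_delayed_tree_ref() and trace_btrfs_delayed_data_ref() are kernel tracepoints. The sketch below shows, in simplified form, how such a tracepoint is typically declared with the TRACE_EVENT() macro; the event name and fields here are hypothetical and are not the actual definitions, which live in include/trace/events/btrfs.h.

/* Simplified, hypothetical tracepoint declaration (not the real btrfs one). */
#include <linux/tracepoint.h>

TRACE_EVENT(example_delayed_ref_head,

	/* prototype and arguments of the generated trace_example_delayed_ref_head() call */
	TP_PROTO(u64 bytenr, u64 num_bytes, int action),
	TP_ARGS(bytenr, num_bytes, action),

	/* fields stored in the ring buffer for each hit */
	TP_STRUCT__entry(
		__field(u64, bytenr)
		__field(u64, num_bytes)
		__field(int, action)
	),

	/* copy the arguments into the ring-buffer entry */
	TP_fast_assign(
		__entry->bytenr = bytenr;
		__entry->num_bytes = num_bytes;
		__entry->action = action;
	),

	/* how the event is rendered in the trace output */
	TP_printk("bytenr=%llu num_bytes=%llu action=%d",
		  __entry->bytenr, __entry->num_bytes, __entry->action)
);

In the real tree the declaration sits in a trace header that is instantiated once with CREATE_TRACE_POINTS, and the delayed-ref code in this diff only calls the generated trace_*() helpers; the resulting events can then be enabled at run time under /sys/kernel/debug/tracing/events/btrfs/.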