path: root/fs/btrfs/extent-tree.c
author    Liu Bo <bo.li.liu@oracle.com>  2013-10-14 00:59:45 -0400
committer Chris Mason <clm@fb.com>  2014-01-28 16:19:22 -0500
commit    c46effa601f869f3d20a7386a745d9c002838eb8 (patch)
tree      7aa114c1a78e1834950a34524c8ada82569af50b /fs/btrfs/extent-tree.c
parent    e20d6c5ba38d066c7dc0f7d3b68da14b9ae7fe37 (diff)
Btrfs: introduce a head ref rbtree
We process delayed refs in three steps: 1) gather a batch of head refs, 2) pick one head ref, 3) walk back one node at a time to collect its delayed ref updates. A head ref is linked into the same rbtree as the delayed refs themselves, so in step 1) we have to walk the tree node by node, visiting not only head refs but every delayed ref in between. When a large number of delayed refs is pending, this walk gets expensive. Introduce a dedicated rbtree that holds only head refs, so the scan no longer has to step over the ordinary delayed refs.

Signed-off-by: Liu Bo <bo.li.liu@oracle.com>
Signed-off-by: Josef Bacik <jbacik@fusionio.com>
Signed-off-by: Chris Mason <clm@fb.com>
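To illustrate the idea behind the patch, here is a minimal sketch using the kernel's <linux/rbtree.h> API. The demo_* names and fields are hypothetical stand-ins for this example, not the real btrfs definitions: a head carries two intrusive rb_node fields, so it can sit in the shared tree and in the head-only tree at the same time with no extra allocation, and a walker that only cares about heads can traverse href_root without any filtering.

#include <linux/rbtree.h>
#include <linux/types.h>

struct demo_ref_head {
	u64 bytenr;
	struct rb_node rb_node;		/* linked in the shared tree */
	struct rb_node href_node;	/* linked in the head-only tree */
};

struct demo_ref_root {
	struct rb_root root;		/* heads and plain delayed refs */
	struct rb_root href_root;	/* heads only */
};

/* Insert a head into the head-only tree, keyed by bytenr. */
static void demo_href_insert(struct demo_ref_root *dr,
			     struct demo_ref_head *head)
{
	struct rb_node **p = &dr->href_root.rb_node;
	struct rb_node *parent = NULL;

	while (*p) {
		struct demo_ref_head *entry;

		parent = *p;
		entry = rb_entry(parent, struct demo_ref_head, href_node);
		if (head->bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&head->href_node, parent, p);
	rb_insert_color(&head->href_node, &dr->href_root);
}

/*
 * Walk the head-only tree: every node here is a head, so no
 * is-this-a-head check is needed, unlike a walk of dr->root.
 */
static void demo_walk_heads(struct demo_ref_root *dr)
{
	struct rb_node *node;

	for (node = rb_first(&dr->href_root); node; node = rb_next(node)) {
		struct demo_ref_head *head;

		head = rb_entry(node, struct demo_ref_head, href_node);
		/* process head ... */
	}
}

The cost is that a head must now be erased from both trees, which is exactly what the hunks below add to run_clustered_refs() and check_ref_cleanup().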
Diffstat (limited to 'fs/btrfs/extent-tree.c')
-rw-r--r--  fs/btrfs/extent-tree.c  21
1 file changed, 14 insertions(+), 7 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 9c01509dd8ab..d15b4fc07554 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2438,6 +2438,10 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
 
 		ref->in_tree = 0;
 		rb_erase(&ref->rb_node, &delayed_refs->root);
+		if (btrfs_delayed_ref_is_head(ref)) {
+			rb_erase(&locked_ref->href_node,
+				 &delayed_refs->href_root);
+		}
 		delayed_refs->num_entries--;
 		if (!btrfs_delayed_ref_is_head(ref)) {
 			/*
@@ -2640,7 +2644,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 {
 	struct rb_node *node;
 	struct btrfs_delayed_ref_root *delayed_refs;
-	struct btrfs_delayed_ref_node *ref;
+	struct btrfs_delayed_ref_head *head;
 	struct list_head cluster;
 	int ret;
 	u64 delayed_start;
@@ -2770,18 +2774,18 @@ again:
 		spin_lock(&delayed_refs->lock);
 	}
 
-	node = rb_first(&delayed_refs->root);
+	node = rb_first(&delayed_refs->href_root);
 	if (!node)
 		goto out;
 	count = (unsigned long)-1;
 
 	while (node) {
-		ref = rb_entry(node, struct btrfs_delayed_ref_node,
-			       rb_node);
-		if (btrfs_delayed_ref_is_head(ref)) {
-			struct btrfs_delayed_ref_head *head;
+		head = rb_entry(node, struct btrfs_delayed_ref_head,
+				href_node);
+		if (btrfs_delayed_ref_is_head(&head->node)) {
+			struct btrfs_delayed_ref_node *ref;
 
-			head = btrfs_delayed_node_to_head(ref);
+			ref = &head->node;
 			atomic_inc(&ref->refs);
 
 			spin_unlock(&delayed_refs->lock);
@@ -2795,6 +2799,8 @@ again:
 			btrfs_put_delayed_ref(ref);
 			cond_resched();
 			goto again;
+		} else {
+			WARN_ON(1);
 		}
 		node = rb_next(node);
 	}
@@ -5956,6 +5962,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
 	 */
 	head->node.in_tree = 0;
 	rb_erase(&head->node.rb_node, &delayed_refs->root);
+	rb_erase(&head->href_node, &delayed_refs->href_root);
 
 	delayed_refs->num_entries--;
 