path: root/fs/btrfs/backref.c
author     Josef Bacik <jbacik@fb.com>  2014-01-23 09:21:38 -0500
committer  Chris Mason <clm@fb.com>     2014-01-28 16:20:25 -0500
commit     d7df2c796d7eedd72a334dc89c65e1fec8171431 (patch)
tree       63e3adda6e56db27b13d0df28840a873ceac5855 /fs/btrfs/backref.c
parent     5039eddc19aee8c894191c24f2dde4e645ca1bbb (diff)
Btrfs: attach delayed ref updates to delayed ref heads
Currently we have two rb-trees, one for delayed ref heads and one for all of the delayed refs, including the delayed ref heads. When we process the delayed refs we have to hold the delayed ref lock for all of the selecting and merging and such, which results in quite a bit of lock contention. This was solved by having a waitqueue and only one flusher at a time; however, that hurts if we get a lot of delayed refs queued up.

So instead just have an rb-tree for the delayed ref heads, and then attach the delayed ref updates to an rb-tree that is per delayed ref head. Then we only need to take the delayed ref lock when adding new delayed refs and when selecting a delayed ref head to process; all the rest of the time we deal with a per-delayed-ref-head lock, which will be much less contentious.

The locking rules for this get a little more complicated since we have to lock up to 3 things to properly process delayed refs, but I will address that problem later. For now this passes all of xfstests and my overnight stress tests. Thanks,

Signed-off-by: Josef Bacik <jbacik@fb.com>
Signed-off-by: Chris Mason <clm@fb.com>
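To make the new shape concrete, here is a minimal userspace sketch of the idea the commit message describes: one lock guarding only the set of ref heads, and a per-head lock guarding the refs attached to that head. The names (struct ref, struct ref_head, struct delayed_refs, find_head, add_ref), the plain linked lists, and the pthread mutexes are illustrative stand-ins, not the kernel's actual rb-trees, spinlocks, or btrfs structures.

/*
 * Sketch only: the global lock protects just the list of heads;
 * each head carries its own lock and its own list of refs.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ref {
	unsigned long long seq;		/* sequence number of this update */
	struct ref *next;
};

struct ref_head {
	unsigned long long bytenr;	/* extent this head describes */
	pthread_mutex_t lock;		/* per-head lock */
	struct ref *refs;		/* refs attached to this head */
	struct ref_head *next;
};

struct delayed_refs {
	pthread_mutex_t lock;		/* guards only the list of heads */
	struct ref_head *heads;
};

/* The global lock is held only while locating a head. */
static struct ref_head *find_head(struct delayed_refs *dr,
				  unsigned long long bytenr)
{
	struct ref_head *h;

	pthread_mutex_lock(&dr->lock);
	for (h = dr->heads; h && h->bytenr != bytenr; h = h->next)
		;
	pthread_mutex_unlock(&dr->lock);
	return h;
}

/* Adding (or walking) refs takes only the per-head lock. */
static void add_ref(struct ref_head *h, unsigned long long seq)
{
	struct ref *r = calloc(1, sizeof(*r));

	if (!r)
		return;
	r->seq = seq;
	pthread_mutex_lock(&h->lock);
	r->next = h->refs;
	h->refs = r;
	pthread_mutex_unlock(&h->lock);
}

int main(void)
{
	struct delayed_refs dr = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct ref_head head = { .bytenr = 4096,
				 .lock = PTHREAD_MUTEX_INITIALIZER };
	struct ref *r;

	dr.heads = &head;
	add_ref(find_head(&dr, 4096), 1);
	add_ref(find_head(&dr, 4096), 2);

	pthread_mutex_lock(&head.lock);
	for (r = head.refs; r; r = r->next)
		printf("ref seq %llu on bytenr %llu\n", r->seq, head.bytenr);
	pthread_mutex_unlock(&head.lock);
	return 0;
}

In the kernel patch below, head->lock and head->ref_root play the per-head roles, while delayed_refs->lock is only needed for finding and locking a head.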
Diffstat (limited to 'fs/btrfs/backref.c')
-rw-r--r--	fs/btrfs/backref.c	23
1 file changed, 11 insertions(+), 12 deletions(-)
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 835b6c9a26a8..34a8952de8dd 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -538,14 +538,13 @@ static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
 	if (extent_op && extent_op->update_key)
 		btrfs_disk_key_to_cpu(&op_key, &extent_op->key);
 
-	while ((n = rb_prev(n))) {
+	spin_lock(&head->lock);
+	n = rb_first(&head->ref_root);
+	while (n) {
 		struct btrfs_delayed_ref_node *node;
 		node = rb_entry(n, struct btrfs_delayed_ref_node,
 				rb_node);
-		if (node->bytenr != head->node.bytenr)
-			break;
-		WARN_ON(node->is_head);
-
+		n = rb_next(n);
 		if (node->seq > seq)
 			continue;
 
@@ -612,10 +611,10 @@ static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
 			WARN_ON(1);
 		}
 		if (ret)
-			return ret;
+			break;
 	}
-
-	return 0;
+	spin_unlock(&head->lock);
+	return ret;
 }
 
 /*
@@ -882,15 +881,15 @@ again:
 				btrfs_put_delayed_ref(&head->node);
 				goto again;
 			}
+			spin_unlock(&delayed_refs->lock);
 			ret = __add_delayed_refs(head, time_seq,
 						 &prefs_delayed);
 			mutex_unlock(&head->mutex);
-			if (ret) {
-				spin_unlock(&delayed_refs->lock);
+			if (ret)
 				goto out;
-			}
+		} else {
+			spin_unlock(&delayed_refs->lock);
 		}
-		spin_unlock(&delayed_refs->lock);
 	}
 
 	if (path->slots[0]) {