author		Qu Wenruo <quwenruo@cn.fujitsu.com>	2015-03-30 05:03:00 -0400
committer	Chris Mason <clm@fb.com>		2015-06-10 12:25:03 -0400
commit		c6fc24549960f26910cd0c6e4b5f48f2f306b11d (patch)
tree		374c6af58fb08a000f09fa1e1966e4bd157731b9 /fs/btrfs/delayed-ref.c
parent		00db646d3fb3f5f62c2327abcf3630f4cc1075ba (diff)
btrfs: delayed-ref: Use list to replace the ref_root in ref_head.
This patch replaces the rbtree used in ref_head with a list. This has the following advantage:

1) Easier merge logic.
With the new list implementation, we only need to care about merging the tail ref_node with the new ref_node, and this can be done quite easily at insert time; there is no need for a dedicated merge pass in run_delayed_refs().

Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
Signed-off-by: Chris Mason <clm@fb.com>
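For illustration only, the following is a minimal user-space C sketch of the insert-time tail-merge idea the message describes: a new ref either goes to the tail of the per-head list or is folded into the tail's ref_mod, and a tail whose ref_mod reaches zero is dropped. All names here (struct ref, struct head, head_add_ref, head_del_ref, make_ref) are made up for the example and are not btrfs code; the real helper introduced below, add_delayed_ref_tail_merge(), additionally compares seq, no_quota and the tree/data-specific key fields before merging.

/* tail_merge_sketch.c - illustrative sketch only, not btrfs code. */
#include <stdio.h>
#include <stdlib.h>

struct ref {
	int type;		/* stands in for the btrfs ref type/key fields */
	int action;		/* +1 "add ref", -1 "drop ref" */
	int ref_mod;		/* how many references this node represents */
	struct ref *prev, *next;
};

struct head {
	struct ref *first, *last;	/* doubly linked list, newest at the tail */
};

/* Unlink and free @r (plays the role of drop_delayed_ref() in the patch). */
static void head_del_ref(struct head *h, struct ref *r)
{
	if (r->prev)
		r->prev->next = r->next;
	else
		h->first = r->next;
	if (r->next)
		r->next->prev = r->prev;
	else
		h->last = r->prev;
	free(r);
}

/*
 * Append @r, or fold it into the current tail when the key fields match.
 * Returns 0 for a plain insert, 1 when @r was merged (and freed).
 */
static int head_add_ref(struct head *h, struct ref *r)
{
	struct ref *tail = h->last;

	if (tail && tail->type == r->type) {
		if (tail->action == r->action) {
			tail->ref_mod += r->ref_mod;
		} else if (tail->ref_mod < r->ref_mod) {
			/* the new ref wins, so the tail changes action */
			tail->action = r->action;
			tail->ref_mod = r->ref_mod - tail->ref_mod;
		} else {
			tail->ref_mod -= r->ref_mod;
		}
		free(r);
		if (tail->ref_mod == 0)		/* adds and drops cancelled out */
			head_del_ref(h, tail);
		return 1;
	}
	r->prev = tail;
	r->next = NULL;
	if (tail)
		tail->next = r;
	else
		h->first = r;
	h->last = r;
	return 0;
}

static struct ref *make_ref(int type, int action, int ref_mod)
{
	struct ref *r = calloc(1, sizeof(*r));

	r->type = type;
	r->action = action;
	r->ref_mod = ref_mod;
	return r;
}

int main(void)
{
	struct head h = { NULL, NULL };

	printf("%d\n", head_add_ref(&h, make_ref(1, +1, 1)));	/* 0: inserted */
	printf("%d\n", head_add_ref(&h, make_ref(1, +1, 1)));	/* 1: merged into tail */
	printf("%d\n", head_add_ref(&h, make_ref(2, -1, 1)));	/* 0: different type, inserted */
	return 0;
}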
Diffstat (limited to 'fs/btrfs/delayed-ref.c')
-rw-r--r--	fs/btrfs/delayed-ref.c	156
1 file changed, 82 insertions(+), 74 deletions(-)
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 8f8ed7d20bac..4dbc31636d14 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -268,7 +268,7 @@ static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
 		rb_erase(&head->href_node, &delayed_refs->href_root);
 	} else {
 		assert_spin_locked(&head->lock);
-		rb_erase(&ref->rb_node, &head->ref_root);
+		list_del(&ref->list);
 	}
 	ref->in_tree = 0;
 	btrfs_put_delayed_ref(ref);
@@ -328,48 +328,6 @@ static int merge_ref(struct btrfs_trans_handle *trans,
 	return done;
 }
 
-void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
-			      struct btrfs_fs_info *fs_info,
-			      struct btrfs_delayed_ref_root *delayed_refs,
-			      struct btrfs_delayed_ref_head *head)
-{
-	struct rb_node *node;
-	u64 seq = 0;
-
-	assert_spin_locked(&head->lock);
-	/*
-	 * We don't have too much refs to merge in the case of delayed data
-	 * refs.
-	 */
-	if (head->is_data)
-		return;
-
-	spin_lock(&fs_info->tree_mod_seq_lock);
-	if (!list_empty(&fs_info->tree_mod_seq_list)) {
-		struct seq_list *elem;
-
-		elem = list_first_entry(&fs_info->tree_mod_seq_list,
-					struct seq_list, list);
-		seq = elem->seq;
-	}
-	spin_unlock(&fs_info->tree_mod_seq_lock);
-
-	node = rb_first(&head->ref_root);
-	while (node) {
-		struct btrfs_delayed_ref_node *ref;
-
-		ref = rb_entry(node, struct btrfs_delayed_ref_node,
-			       rb_node);
-		/* We can't merge refs that are outside of our seq count */
-		if (seq && ref->seq >= seq)
-			break;
-		if (merge_ref(trans, delayed_refs, head, ref, seq))
-			node = rb_first(&head->ref_root);
-		else
-			node = rb_next(&ref->rb_node);
-	}
-}
-
 int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
 			    struct btrfs_delayed_ref_root *delayed_refs,
 			    u64 seq)
@@ -485,6 +443,74 @@ update_existing_ref(struct btrfs_trans_handle *trans,
 }
 
 /*
+ * Helper to insert the ref_node to the tail or merge with tail.
+ *
+ * Return 0 for insert.
+ * Return >0 for merge.
+ */
+static int
+add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
+			   struct btrfs_delayed_ref_root *root,
+			   struct btrfs_delayed_ref_head *href,
+			   struct btrfs_delayed_ref_node *ref)
+{
+	struct btrfs_delayed_ref_node *exist;
+	int mod;
+	int ret = 0;
+
+	spin_lock(&href->lock);
+	/* Check whether we can merge the tail node with ref */
+	if (list_empty(&href->ref_list))
+		goto add_tail;
+	exist = list_entry(href->ref_list.prev, struct btrfs_delayed_ref_node,
+			   list);
+	/* No need to compare bytenr nor is_head */
+	if (exist->type != ref->type || exist->no_quota != ref->no_quota ||
+	    exist->seq != ref->seq)
+		goto add_tail;
+
+	if ((exist->type == BTRFS_TREE_BLOCK_REF_KEY ||
+	     exist->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
+	    comp_tree_refs(btrfs_delayed_node_to_tree_ref(exist),
+			   btrfs_delayed_node_to_tree_ref(ref),
+			   ref->type))
+		goto add_tail;
+	if ((exist->type == BTRFS_EXTENT_DATA_REF_KEY ||
+	     exist->type == BTRFS_SHARED_DATA_REF_KEY) &&
+	    comp_data_refs(btrfs_delayed_node_to_data_ref(exist),
+			   btrfs_delayed_node_to_data_ref(ref)))
+		goto add_tail;
+
+	/* Now we are sure we can merge */
+	ret = 1;
+	if (exist->action == ref->action) {
+		mod = ref->ref_mod;
+	} else {
+		/* Need to change action */
+		if (exist->ref_mod < ref->ref_mod) {
+			exist->action = ref->action;
+			mod = -exist->ref_mod;
+			exist->ref_mod = ref->ref_mod;
+		} else
+			mod = -ref->ref_mod;
+	}
+	exist->ref_mod += mod;
+
+	/* remove existing tail if its ref_mod is zero */
+	if (exist->ref_mod == 0)
+		drop_delayed_ref(trans, root, href, exist);
+	spin_unlock(&href->lock);
+	return ret;
+
+add_tail:
+	list_add_tail(&ref->list, &href->ref_list);
+	atomic_inc(&root->num_entries);
+	trans->delayed_ref_updates++;
+	spin_unlock(&href->lock);
+	return ret;
+}
+
+/*
  * helper function to update the accounting in the head ref
  * existing and update must have the same bytenr
  */
@@ -618,7 +644,7 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
 	head_ref = btrfs_delayed_node_to_head(ref);
 	head_ref->must_insert_reserved = must_insert_reserved;
 	head_ref->is_data = is_data;
-	head_ref->ref_root = RB_ROOT;
+	INIT_LIST_HEAD(&head_ref->ref_list);
 	head_ref->processing = 0;
 	head_ref->total_ref_mod = count_mod;
 
@@ -659,10 +685,10 @@ add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
 		     u64 num_bytes, u64 parent, u64 ref_root, int level,
 		     int action, int no_quota)
 {
-	struct btrfs_delayed_ref_node *existing;
 	struct btrfs_delayed_tree_ref *full_ref;
 	struct btrfs_delayed_ref_root *delayed_refs;
 	u64 seq = 0;
+	int ret;
 
 	if (action == BTRFS_ADD_DELAYED_EXTENT)
 		action = BTRFS_ADD_DELAYED_REF;
@@ -693,21 +719,14 @@ add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
 
 	trace_add_delayed_tree_ref(ref, full_ref, action);
 
-	spin_lock(&head_ref->lock);
-	existing = tree_insert(&head_ref->ref_root, &ref->rb_node);
-	if (existing) {
-		update_existing_ref(trans, delayed_refs, head_ref, existing,
-				    ref);
-		/*
-		 * we've updated the existing ref, free the newly
-		 * allocated ref
-		 */
+	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);
+
+	/*
+	 * XXX: memory should be freed at the same level allocated.
+	 * But bad practice is anywhere... Follow it now. Need cleanup.
+	 */
+	if (ret > 0)
 		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
-	} else {
-		atomic_inc(&delayed_refs->num_entries);
-		trans->delayed_ref_updates++;
-	}
-	spin_unlock(&head_ref->lock);
 }
 
 /*
@@ -721,10 +740,10 @@ add_delayed_data_ref(struct btrfs_fs_info *fs_info,
 		     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
 		     u64 offset, int action, int no_quota)
 {
-	struct btrfs_delayed_ref_node *existing;
 	struct btrfs_delayed_data_ref *full_ref;
 	struct btrfs_delayed_ref_root *delayed_refs;
 	u64 seq = 0;
+	int ret;
 
 	if (action == BTRFS_ADD_DELAYED_EXTENT)
 		action = BTRFS_ADD_DELAYED_REF;
@@ -758,21 +777,10 @@ add_delayed_data_ref(struct btrfs_fs_info *fs_info,
 
 	trace_add_delayed_data_ref(ref, full_ref, action);
 
-	spin_lock(&head_ref->lock);
-	existing = tree_insert(&head_ref->ref_root, &ref->rb_node);
-	if (existing) {
-		update_existing_ref(trans, delayed_refs, head_ref, existing,
-				    ref);
-		/*
-		 * we've updated the existing ref, free the newly
-		 * allocated ref
-		 */
+	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);
+
+	if (ret > 0)
 		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
-	} else {
-		atomic_inc(&delayed_refs->num_entries);
-		trans->delayed_ref_updates++;
-	}
-	spin_unlock(&head_ref->lock);
 }
 
 /*