author    Nikolay Borisov <nborisov@suse.com>    2018-08-15 03:39:56 -0400
committer David Sterba <dsterba@suse.com>        2018-10-15 11:23:35 -0400
commit    0110a4c43451533de1ea1bbdc57b5d452f9d8b25 (patch)
tree      1ed4551c8b68165b79091e18260f0db80dc963b0
parent    e726138676f896146a55a98305665e81d34d038c (diff)
btrfs: refactor __btrfs_run_delayed_refs loop

Refactor the delayed refs loop by using the newly introduced
btrfs_run_delayed_refs_for_head function. This greatly simplifies
__btrfs_run_delayed_refs and makes it more obvious what is happening:
we now have one loop which iterates over the existing delayed ref
heads, and each selected ref head is processed by the new helper. All
existing semantics of the code are preserved, so there are no
functional changes.

Signed-off-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
-rw-r--r--  fs/btrfs/extent-tree.c  108
1 file changed, 28 insertions(+), 80 deletions(-)
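In outline, the patch reduces __btrfs_run_delayed_refs() to a single pick-a-head, run-its-refs loop. Before reading the diff itself, the standalone C sketch below models that post-patch shape; obtain_head(), run_delayed_refs(), the three-head pool, and the userspace ERR_PTR()/PTR_ERR()/IS_ERR_OR_NULL() doubles are illustrative stand-ins rather than kernel API, and the sketch deliberately forgets the error pointer before retrying after the simulated -EAGAIN:

#include <errno.h>
#include <stdio.h>

struct ref_head { int id; };

/* Userspace doubles of the kernel's error-pointer helpers. */
static void *ERR_PTR(long err) { return (void *)err; }
static long PTR_ERR(const void *p) { return (long)p; }
static int IS_ERR_OR_NULL(const void *p)
{
        return !p || (unsigned long)p >= (unsigned long)-4095;
}

/* Stand-in for btrfs_obtain_ref_head(): hand out three heads one by
 * one, fail once with -EAGAIN to exercise the retry path, and return
 * NULL once the pool (the delayed refs tree) is empty. */
static struct ref_head pool[] = { {1}, {2}, {3} };
static int next_head;
static int eagain_once = 1;

static struct ref_head *obtain_head(void)
{
        if (next_head == 1 && eagain_once) {
                eagain_once = 0;
                return ERR_PTR(-EAGAIN);
        }
        return next_head < 3 ? &pool[next_head++] : NULL;
}

/* The post-patch loop shape: obtain a head, let a helper run all refs
 * queued on it, repeat until the budget of nr heads is spent. */
static int run_delayed_refs(long nr)
{
        struct ref_head *locked_ref = NULL;
        unsigned long count = 0;

        do {
                if (!locked_ref) {
                        locked_ref = obtain_head();
                        if (IS_ERR_OR_NULL(locked_ref)) {
                                if (PTR_ERR(locked_ref) == -EAGAIN) {
                                        /* contended head: forget the error
                                         * pointer and pick another one */
                                        locked_ref = NULL;
                                        continue;
                                }
                                break; /* NULL: no heads left */
                        }
                        count++;
                }
                printf("running refs for head %d\n", locked_ref->id);
                locked_ref = NULL; /* head done, select the next one */
        } while ((nr != -1 && count < (unsigned long)nr) || locked_ref);

        printf("processed %lu head(s)\n", count);
        return 0;
}

int main(void)
{
        return run_delayed_refs(3); /* budget of three heads */
}

Built with any C compiler, this visits all three heads, retrying once past the simulated contention; in the kernel, the run-everything case is driven from the btrfs_run_delayed_refs() caller, which re-invokes the loop while heads remain.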
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 979702f361cc..8798fa029ebe 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2548,6 +2548,9 @@ static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
 
         delayed_refs = &trans->transaction->delayed_refs;
 
+        lockdep_assert_held(&locked_ref->mutex);
+        lockdep_assert_held(&locked_ref->lock);
+
         while ((ref = select_delayed_ref(locked_ref))) {
                 if (ref->seq &&
                     btrfs_check_delayed_seq(fs_info, ref->seq)) {
@@ -2621,31 +2624,25 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 {
         struct btrfs_fs_info *fs_info = trans->fs_info;
         struct btrfs_delayed_ref_root *delayed_refs;
-        struct btrfs_delayed_ref_node *ref;
         struct btrfs_delayed_ref_head *locked_ref = NULL;
-        struct btrfs_delayed_extent_op *extent_op;
         ktime_t start = ktime_get();
         int ret;
         unsigned long count = 0;
         unsigned long actual_count = 0;
-        int must_insert_reserved = 0;
 
         delayed_refs = &trans->transaction->delayed_refs;
-        while (1) {
+        do {
                 if (!locked_ref) {
-                        if (count >= nr)
-                                break;
-
                         locked_ref = btrfs_obtain_ref_head(trans);
-                        if (!locked_ref)
-                                break;
-                        else if (PTR_ERR(locked_ref) == -EAGAIN) {
-                                locked_ref = NULL;
-                                count++;
-                                continue;
+                        if (IS_ERR_OR_NULL(locked_ref)) {
+                                if (PTR_ERR(locked_ref) == -EAGAIN) {
+                                        continue;
+                                } else {
+                                        break;
+                                }
                         }
+                        count++;
                 }
-
                 /*
                  * We need to try and merge add/drops of the same ref since we
                  * can run into issues with relocate dropping the implicit ref
@@ -2661,23 +2658,19 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
                 spin_lock(&locked_ref->lock);
                 btrfs_merge_delayed_refs(trans, delayed_refs, locked_ref);
 
-                ref = select_delayed_ref(locked_ref);
-
-                if (ref && ref->seq &&
-                    btrfs_check_delayed_seq(fs_info, ref->seq)) {
-                        spin_unlock(&locked_ref->lock);
-                        unselect_delayed_ref_head(delayed_refs, locked_ref);
-                        locked_ref = NULL;
-                        cond_resched();
-                        count++;
-                        continue;
-                }
-
-                /*
-                 * We're done processing refs in this ref_head, clean everything
-                 * up and move on to the next ref_head.
-                 */
-                if (!ref) {
+                ret = btrfs_run_delayed_refs_for_head(trans, locked_ref,
+                                                      &actual_count);
+                if (ret < 0 && ret != -EAGAIN) {
+                        /*
+                         * Error, btrfs_run_delayed_refs_for_head already
+                         * unlocked everything so just bail out
+                         */
+                        return ret;
+                } else if (!ret) {
+                        /*
+                         * Success, perform the usual cleanup of a processed
+                         * head
+                         */
                         ret = cleanup_ref_head(trans, locked_ref);
                         if (ret > 0 ) {
                                 /* We dropped our lock, we need to loop. */
@@ -2686,61 +2679,16 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
                         } else if (ret) {
                                 return ret;
                         }
-                        locked_ref = NULL;
-                        count++;
-                        continue;
                 }
 
-                actual_count++;
-                ref->in_tree = 0;
-                rb_erase_cached(&ref->ref_node, &locked_ref->ref_tree);
-                RB_CLEAR_NODE(&ref->ref_node);
-                if (!list_empty(&ref->add_list))
-                        list_del(&ref->add_list);
                 /*
-                 * When we play the delayed ref, also correct the ref_mod on
-                 * head
+                 * Either success case or btrfs_run_delayed_refs_for_head
+                 * returned -EAGAIN, meaning we need to select another head
                  */
-                switch (ref->action) {
-                case BTRFS_ADD_DELAYED_REF:
-                case BTRFS_ADD_DELAYED_EXTENT:
-                        locked_ref->ref_mod -= ref->ref_mod;
-                        break;
-                case BTRFS_DROP_DELAYED_REF:
-                        locked_ref->ref_mod += ref->ref_mod;
-                        break;
-                default:
-                        WARN_ON(1);
-                }
-                atomic_dec(&delayed_refs->num_entries);
-
-                /*
-                 * Record the must-insert_reserved flag before we drop the spin
-                 * lock.
-                 */
-                must_insert_reserved = locked_ref->must_insert_reserved;
-                locked_ref->must_insert_reserved = 0;
 
-                extent_op = locked_ref->extent_op;
-                locked_ref->extent_op = NULL;
-                spin_unlock(&locked_ref->lock);
-
-                ret = run_one_delayed_ref(trans, ref, extent_op,
-                                          must_insert_reserved);
-
-                btrfs_free_delayed_extent_op(extent_op);
-                if (ret) {
-                        unselect_delayed_ref_head(delayed_refs, locked_ref);
-                        btrfs_put_delayed_ref(ref);
-                        btrfs_debug(fs_info, "run_one_delayed_ref returned %d",
-                                    ret);
-                        return ret;
-                }
-
-                btrfs_put_delayed_ref(ref);
-                count++;
+                locked_ref = NULL;
                 cond_resched();
-        }
+        } while ((nr != -1 && count < nr) || locked_ref);
 
         /*
          * We don't want to include ref heads since we can have empty ref heads
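The comments added in the last two hunks spell out a three-way contract for btrfs_run_delayed_refs_for_head(): a negative return other than -EAGAIN means the helper already unlocked everything and the caller must bail out, zero means the head was fully processed and gets the usual cleanup_ref_head() treatment, and -EAGAIN means this head should be dropped and another one selected. A minimal userspace model of that caller-side dispatch follows (dispatch() and its message strings are invented for illustration; only the three conditions mirror the patch):

#include <errno.h>
#include <stdio.h>

/* Models how __btrfs_run_delayed_refs() reacts to the return value of
 * btrfs_run_delayed_refs_for_head(); the printed actions paraphrase
 * the comments added by the patch. */
static const char *dispatch(int ret)
{
        if (ret < 0 && ret != -EAGAIN)
                return "hard error: helper already unlocked everything, bail out";
        else if (!ret)
                return "success: cleanup_ref_head(), then move to the next head";
        return "-EAGAIN: drop this head and select another one";
}

int main(void)
{
        const int samples[] = { 0, -EAGAIN, -EIO };

        for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("ret = %4d -> %s\n", samples[i], dispatch(samples[i]));
        return 0;
}

The reworked exit condition, } while ((nr != -1 && count < nr) || locked_ref);, is also worth noting: the first half spends the per-call budget of nr ref heads, while the trailing || locked_ref keeps the loop alive as long as a head is still held, e.g. after cleanup_ref_head() returns > 0 to signal that the lock was dropped and the head has to be revisited.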