author    Nikolay Borisov <nborisov@suse.com>  2018-08-15 03:39:55 -0400
committer David Sterba <dsterba@suse.com>      2018-10-15 11:23:35 -0400
commit    e726138676f896146a55a98305665e81d34d038c (patch)
tree      2d6b061c0b7d45ba69527bc68dd874a4f3502646
parent    b1cdbcb53a6edd84d50b72117d49a350575bbe6a (diff)
btrfs: Factor out loop processing all refs of a head
This patch introduces a new helper encompassing the implicit inner loop in __btrfs_run_delayed_refs which processes all the refs for a given head. The code is mostly copied over; the only difference is that when a newer reference is detected, -EAGAIN is returned so that callers can react accordingly. Also, at the end of the loop the head is relocked and btrfs_merge_delayed_refs is run again, to retain the pre-refactoring semantics.

Signed-off-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
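For context, a hedged sketch of how a caller might consume the helper's new -EAGAIN return follows. process_one_head() is a hypothetical wrapper written for illustration only, not part of this patch; it assumes locked_ref has already been obtained and locked elsewhere (e.g. via btrfs_obtain_ref_head()):

/*
 * Hypothetical caller of the new helper (illustration only, not
 * kernel code). -EAGAIN means a newer reference sits behind a
 * delayed-ref sequence barrier; the helper has already unselected
 * the head, so the caller should retry the head on a later pass
 * rather than treating it as a hard error.
 */
static int process_one_head(struct btrfs_trans_handle *trans,
			    struct btrfs_delayed_ref_head *locked_ref,
			    unsigned long *run_refs)
{
	int ret;

	ret = btrfs_run_delayed_refs_for_head(trans, locked_ref, run_refs);
	if (ret == -EAGAIN)
		return 0;	/* head already unselected; retry later */

	return ret;
}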
-rw-r--r--  fs/btrfs/extent-tree.c  77
1 file changed, 77 insertions(+), 0 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index ee027a7fe7f4..979702f361cc 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2535,6 +2535,83 @@ static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head(
 	return head;
 }
 
+static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
+					   struct btrfs_delayed_ref_head *locked_ref,
+					   unsigned long *run_refs)
+{
+	struct btrfs_fs_info *fs_info = trans->fs_info;
+	struct btrfs_delayed_ref_root *delayed_refs;
+	struct btrfs_delayed_extent_op *extent_op;
+	struct btrfs_delayed_ref_node *ref;
+	int must_insert_reserved = 0;
+	int ret;
+
+	delayed_refs = &trans->transaction->delayed_refs;
+
+	while ((ref = select_delayed_ref(locked_ref))) {
+		if (ref->seq &&
+		    btrfs_check_delayed_seq(fs_info, ref->seq)) {
+			spin_unlock(&locked_ref->lock);
+			unselect_delayed_ref_head(delayed_refs, locked_ref);
+			return -EAGAIN;
+		}
+
+		(*run_refs)++;
+		ref->in_tree = 0;
+		rb_erase_cached(&ref->ref_node, &locked_ref->ref_tree);
+		RB_CLEAR_NODE(&ref->ref_node);
+		if (!list_empty(&ref->add_list))
+			list_del(&ref->add_list);
+		/*
+		 * When we play the delayed ref, also correct the ref_mod on
+		 * head
+		 */
+		switch (ref->action) {
+		case BTRFS_ADD_DELAYED_REF:
+		case BTRFS_ADD_DELAYED_EXTENT:
+			locked_ref->ref_mod -= ref->ref_mod;
+			break;
+		case BTRFS_DROP_DELAYED_REF:
+			locked_ref->ref_mod += ref->ref_mod;
+			break;
+		default:
+			WARN_ON(1);
+		}
+		atomic_dec(&delayed_refs->num_entries);
+
+		/*
+		 * Record the must_insert_reserved flag before we drop the
+		 * spin lock.
+		 */
+		must_insert_reserved = locked_ref->must_insert_reserved;
+		locked_ref->must_insert_reserved = 0;
+
+		extent_op = locked_ref->extent_op;
+		locked_ref->extent_op = NULL;
+		spin_unlock(&locked_ref->lock);
+
+		ret = run_one_delayed_ref(trans, ref, extent_op,
+					  must_insert_reserved);
+
+		btrfs_free_delayed_extent_op(extent_op);
+		if (ret) {
+			unselect_delayed_ref_head(delayed_refs, locked_ref);
+			btrfs_put_delayed_ref(ref);
+			btrfs_debug(fs_info, "run_one_delayed_ref returned %d",
+				    ret);
+			return ret;
+		}
+
+		btrfs_put_delayed_ref(ref);
+		cond_resched();
+
+		spin_lock(&locked_ref->lock);
+		btrfs_merge_delayed_refs(trans, delayed_refs, locked_ref);
+	}
+
+	return 0;
+}
+
 /*
  * Returns 0 on success or if called with an already aborted transaction.
  * Returns -ENOMEM or -EIO on failure and will abort the transaction.