author	Chris Mason <clm@fb.com>	2014-05-22 19:18:52 -0400
committer	Chris Mason <clm@fb.com>	2014-06-09 20:20:58 -0400
commit	a79b7d4b3e8118f265dcb4bdf9a572c392f02708 (patch)
tree	a7b4792e01ea5a44467f053e1822d4240e70edc6 /fs/btrfs/extent-tree.c
parent	40f765805f082ed679c55bf6ab60212e55fb6fc1 (diff)
Btrfs: async delayed refs
Delayed extent operations are triggered during transaction commits. The goal is to queue up a healthy batch of changes to the extent allocation tree and run through them in bulk.

This patch farms them off to async helper threads. The goal is to have the bulk of the delayed operations done in the background, which is also important for limiting our stack footprint.

Signed-off-by: Chris Mason <clm@fb.com>
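For context on how the new helper is meant to be used, here is a hedged sketch of a caller (not part of this patch; the caller function and the count value are hypothetical). It relies only on the interface the diff below introduces: btrfs_async_run_delayed_refs(root, count, wait). With wait == 0 the submitter returns immediately and the helper thread frees the async_delayed_refs struct itself; with wait == 1 the submitter blocks on the completion and picks up whatever error the worker recorded.

/*
 * Hypothetical caller sketch, not part of this patch.  It only uses
 * btrfs_async_run_delayed_refs() as added by the diff below.
 */
static int example_kick_delayed_refs(struct btrfs_root *root)
{
	int ret;

	/*
	 * Fire and forget: queue processing of up to 64 delayed refs on
	 * the extent_workers pool and return.  The helper thread frees
	 * the async struct when it is done.
	 */
	ret = btrfs_async_run_delayed_refs(root, 64, 0);
	if (ret)
		return ret;

	/*
	 * Synchronous flavour: block on the completion until the helper
	 * has joined a transaction, run the refs, and ended the
	 * transaction, then return the error it recorded.
	 */
	return btrfs_async_run_delayed_refs(root, 64, 1);
}

Note that the only error the submitter reports directly is a kmalloc failure; every other failure travels back through async->error and is only visible to callers that pass wait == 1.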
Diffstat (limited to 'fs/btrfs/extent-tree.c')
-rw-r--r--	fs/btrfs/extent-tree.c	| 79
1 file changed, 79 insertions(+), 0 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index bb5b3067ddc3..6caddd5970e4 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2674,15 +2674,94 @@ int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
 	u64 num_entries =
 		atomic_read(&trans->transaction->delayed_refs.num_entries);
 	u64 avg_runtime;
+	u64 val;
 
 	smp_mb();
 	avg_runtime = fs_info->avg_delayed_ref_runtime;
+	val = num_entries * avg_runtime;
 	if (num_entries * avg_runtime >= NSEC_PER_SEC)
 		return 1;
+	if (val >= NSEC_PER_SEC / 2)
+		return 2;
 
 	return btrfs_check_space_for_delayed_refs(trans, root);
 }
 
+struct async_delayed_refs {
+	struct btrfs_root *root;
+	int count;
+	int error;
+	int sync;
+	struct completion wait;
+	struct btrfs_work work;
+};
+
+static void delayed_ref_async_start(struct btrfs_work *work)
+{
+	struct async_delayed_refs *async;
+	struct btrfs_trans_handle *trans;
+	int ret;
+
+	async = container_of(work, struct async_delayed_refs, work);
+
+	trans = btrfs_join_transaction(async->root);
+	if (IS_ERR(trans)) {
+		async->error = PTR_ERR(trans);
+		goto done;
+	}
+
+	/*
+	 * trans->sync means that when we call end_transaciton, we won't
+	 * wait on delayed refs
+	 */
+	trans->sync = true;
+	ret = btrfs_run_delayed_refs(trans, async->root, async->count);
+	if (ret)
+		async->error = ret;
+
+	ret = btrfs_end_transaction(trans, async->root);
+	if (ret && !async->error)
+		async->error = ret;
+done:
+	if (async->sync)
+		complete(&async->wait);
+	else
+		kfree(async);
+}
+
+int btrfs_async_run_delayed_refs(struct btrfs_root *root,
+				 unsigned long count, int wait)
+{
+	struct async_delayed_refs *async;
+	int ret;
+
+	async = kmalloc(sizeof(*async), GFP_NOFS);
+	if (!async)
+		return -ENOMEM;
+
+	async->root = root->fs_info->tree_root;
+	async->count = count;
+	async->error = 0;
+	if (wait)
+		async->sync = 1;
+	else
+		async->sync = 0;
+	init_completion(&async->wait);
+
+	btrfs_init_work(&async->work, delayed_ref_async_start,
+			NULL, NULL);
+
+	btrfs_queue_work(root->fs_info->extent_workers, &async->work);
+
+	if (wait) {
+		wait_for_completion(&async->wait);
+		ret = async->error;
+		kfree(async);
+		return ret;
+	}
+	return 0;
+}
+
 /*
  * this starts processing the delayed reference count updates and
  * extent insertions we have queued up so far.  count can be