author     Jan Schmidt <list.btrfs@jan-o-sch.net>  2012-06-21 05:08:04 -0400
committer  Jan Schmidt <list.btrfs@jan-o-sch.net>  2012-07-10 09:14:41 -0400
commit     097b8a7c9e48e2cb50fd0eb9315791921beaf484 (patch)
tree       03588f0e29000e415f7177d31a8f5b4c1689d6ad /fs/btrfs/extent-tree.c
parent     cf5388307a2b4faab4b11d732b61c85741be6169 (diff)
Btrfs: join tree mod log code with the code holding back delayed refs
We've got two mechanisms, both required for reliable backref resolving (tree mod log and holding back delayed refs). You cannot make use of one without the other. So instead of requiring the user of this mechanism to set up both correctly, we join them into a single interface.

Additionally, we stop inserting non-blockers into fs_info->tree_mod_seq_list as we did before, which was of no value.

Signed-off-by: Jan Schmidt <list.btrfs@jan-o-sch.net>
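For context, the joined interface referred to above pairs a get with a put around any backref resolution: the caller registers one element on fs_info->tree_mod_seq_list, which both anchors the tree mod log and holds back delayed refs with higher sequence numbers. A minimal sketch of a caller, assuming the btrfs_get_tree_mod_seq()/btrfs_put_tree_mod_seq() pair and the seq_list element type from this series; do_backref_resolution() is a hypothetical placeholder:

/*
 * Sketch only, not the actual backref.c code. The seq_list element
 * (struct list_head list; u64 seq;) stays on fs_info->tree_mod_seq_list
 * for as long as the resolver needs a consistent view of the extent tree.
 */
static int resolve_with_consistent_view(struct btrfs_fs_info *fs_info)
{
	struct seq_list elem = {};
	int ret;

	btrfs_get_tree_mod_seq(fs_info, &elem);		/* one joined interface */
	ret = do_backref_resolution(fs_info, elem.seq);	/* hypothetical helper */
	btrfs_put_tree_mod_seq(fs_info, &elem);		/* unblocks delayed refs */

	return ret;
}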
Diffstat (limited to 'fs/btrfs/extent-tree.c')
-rw-r--r--  fs/btrfs/extent-tree.c  21
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 6e1d36702ff7..94ce79f76e5f 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2217,6 +2217,7 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
 	struct btrfs_delayed_ref_node *ref;
 	struct btrfs_delayed_ref_head *locked_ref = NULL;
 	struct btrfs_delayed_extent_op *extent_op;
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int ret;
 	int count = 0;
 	int must_insert_reserved = 0;
@@ -2255,7 +2256,7 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
 		ref = select_delayed_ref(locked_ref);
 
 		if (ref && ref->seq &&
-		    btrfs_check_delayed_seq(delayed_refs, ref->seq)) {
+		    btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
 			/*
 			 * there are still refs with lower seq numbers in the
 			 * process of being added. Don't run this ref yet.
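The check above asks whether any registered tree mod log user still holds a sequence number at or below the ref's. A simplified sketch of that test, assuming the seq_list element described earlier (the real btrfs_check_delayed_seq() additionally takes fs_info->tree_mod_seq_lock around the list access):

/*
 * Simplified sketch, not the kernel implementation: return nonzero if a
 * ref with sequence number 'seq' must be held back because an element on
 * the tree mod seq list was registered at or before that sequence.
 */
static int check_delayed_seq_sketch(struct list_head *tree_mod_seq_list, u64 seq)
{
	struct seq_list *elem;

	if (list_empty(tree_mod_seq_list))
		return 0;		/* nobody resolving, safe to run */
	elem = list_first_entry(tree_mod_seq_list, struct seq_list, list);
	return seq >= elem->seq;	/* oldest blocker decides */
}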
@@ -2337,7 +2338,7 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
 		}
 
 next:
-		do_chunk_alloc(trans, root->fs_info->extent_root,
+		do_chunk_alloc(trans, fs_info->extent_root,
 			       2 * 1024 * 1024,
 			       btrfs_get_alloc_profile(root, 0),
 			       CHUNK_ALLOC_NO_FORCE);
@@ -2347,18 +2348,19 @@ next:
 	return count;
 }
 
-static void wait_for_more_refs(struct btrfs_delayed_ref_root *delayed_refs,
+static void wait_for_more_refs(struct btrfs_fs_info *fs_info,
+			       struct btrfs_delayed_ref_root *delayed_refs,
 			       unsigned long num_refs,
 			       struct list_head *first_seq)
 {
 	spin_unlock(&delayed_refs->lock);
 	pr_debug("waiting for more refs (num %ld, first %p)\n",
 		 num_refs, first_seq);
-	wait_event(delayed_refs->seq_wait,
+	wait_event(fs_info->tree_mod_seq_wait,
 		   num_refs != delayed_refs->num_entries ||
-		   delayed_refs->seq_head.next != first_seq);
+		   fs_info->tree_mod_seq_list.next != first_seq);
 	pr_debug("done waiting for more refs (num %ld, first %p)\n",
-		 delayed_refs->num_entries, delayed_refs->seq_head.next);
+		 delayed_refs->num_entries, fs_info->tree_mod_seq_list.next);
 	spin_lock(&delayed_refs->lock);
 }
 
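The helper above follows the standard kernel idiom of dropping the spinlock before sleeping, since wait_event() may block, and retaking it afterwards; the matching wakeup appears in the check_ref_cleanup() hunk further down. A self-contained sketch of the idiom, with hypothetical names:

#include <linux/wait.h>
#include <linux/spinlock.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wait);
static DEFINE_SPINLOCK(demo_lock);
static unsigned long demo_entries;

/*
 * Waiter: never sleep under a spinlock. wait_event() re-evaluates the
 * condition on every wake_up() and on spurious wakeups.
 */
static void demo_wait_for_change(unsigned long seen)
{
	spin_unlock(&demo_lock);
	wait_event(demo_wait, demo_entries != seen);
	spin_lock(&demo_lock);
}

/*
 * Waker: update the state, then wake sleepers. waitqueue_active() only
 * skips the call when nobody is waiting.
 */
static void demo_add_entry(void)
{
	spin_lock(&demo_lock);
	demo_entries++;
	spin_unlock(&demo_lock);
	if (waitqueue_active(&demo_wait))
		wake_up(&demo_wait);
}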
@@ -2403,6 +2405,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 again:
 	consider_waiting = 0;
 	spin_lock(&delayed_refs->lock);
+
 	if (count == 0) {
 		count = delayed_refs->num_entries * 2;
 		run_most = 1;
@@ -2437,7 +2440,7 @@ again:
 			num_refs = delayed_refs->num_entries;
 			first_seq = root->fs_info->tree_mod_seq_list.next;
 		} else {
-			wait_for_more_refs(delayed_refs,
+			wait_for_more_refs(root->fs_info, delayed_refs,
 					   num_refs, first_seq);
 			/*
 			 * after waiting, things have changed. we
@@ -5190,8 +5193,8 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
 	rb_erase(&head->node.rb_node, &delayed_refs->root);
 
 	delayed_refs->num_entries--;
-	if (waitqueue_active(&delayed_refs->seq_wait))
-		wake_up(&delayed_refs->seq_wait);
+	if (waitqueue_active(&root->fs_info->tree_mod_seq_wait))
+		wake_up(&root->fs_info->tree_mod_seq_wait);
 
 	/*
 	 * we don't take a ref on the node because we're removing it from the
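One design note on the hunk above: the wakeup now targets root->fs_info->tree_mod_seq_wait, the same queue wait_for_more_refs() sleeps on after this patch. With the per-delayed_refs seq_wait queue gone, waking the old queue would no longer reach waiters blocked in btrfs_run_delayed_refs().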