about summary refs log tree commit diff stats
path: root/fs
diff options
context:
space:
mode:
authorMiao Xie <miaox@cn.fujitsu.com>2012-12-19 03:10:10 -0500
committerJosef Bacik <jbacik@fusionio.com>2013-02-20 09:36:41 -0500
commit093486c453a55230ccdad4b48863b872fe68c46e (patch)
tree5e66183feabea18cca133d4688d7adbdf0110360 /fs
parent0e8c36a9fd8169a8b96c2ddc8446894bcd07b6b1 (diff)
Btrfs: make delayed ref lock logic more readable
Locking and unlocking delayed ref mutex are in the different functions, and the name of lock functions is not uniform, so the readability is not so good, this patch optimizes the lock logic and makes it more readable.

Signed-off-by: Miao Xie <miaox@cn.fujitsu.com>
Signed-off-by: Josef Bacik <jbacik@fusionio.com>
Diffstat (limited to 'fs')
-rw-r--r--fs/btrfs/delayed-ref.c8
-rw-r--r--fs/btrfs/delayed-ref.h6
-rw-r--r--fs/btrfs/extent-tree.c42
3 files changed, 38 insertions, 18 deletions
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 455894f1ca3b..b7a0641ead77 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -426,6 +426,14 @@ again:
426 return 1; 426 return 1;
427} 427}
428 428
429void btrfs_release_ref_cluster(struct list_head *cluster)
430{
431 struct list_head *pos, *q;
432
433 list_for_each_safe(pos, q, cluster)
434 list_del_init(pos);
435}
436
429/* 437/*
430 * helper function to update an extent delayed ref in the 438 * helper function to update an extent delayed ref in the
431 * rbtree. existing and update must both have the same 439 * rbtree. existing and update must both have the same
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index fe50392cdf76..7939149f8f27 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -211,8 +211,14 @@ struct btrfs_delayed_ref_head *
211btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr); 211btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr);
212int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans, 212int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
213 struct btrfs_delayed_ref_head *head); 213 struct btrfs_delayed_ref_head *head);
214static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
215{
216 mutex_unlock(&head->mutex);
217}
218
214int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans, 219int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
215 struct list_head *cluster, u64 search_start); 220 struct list_head *cluster, u64 search_start);
221void btrfs_release_ref_cluster(struct list_head *cluster);
216 222
217int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, 223int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
218 struct btrfs_delayed_ref_root *delayed_refs, 224 struct btrfs_delayed_ref_root *delayed_refs,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 9bd87f0d73d0..b4cb8186035f 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2143,7 +2143,6 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2143 node->num_bytes); 2143 node->num_bytes);
2144 } 2144 }
2145 } 2145 }
2146 mutex_unlock(&head->mutex);
2147 return ret; 2146 return ret;
2148 } 2147 }
2149 2148
@@ -2258,7 +2257,7 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2258 * process of being added. Don't run this ref yet. 2257 * process of being added. Don't run this ref yet.
2259 */ 2258 */
2260 list_del_init(&locked_ref->cluster); 2259 list_del_init(&locked_ref->cluster);
2261 mutex_unlock(&locked_ref->mutex); 2260 btrfs_delayed_ref_unlock(locked_ref);
2262 locked_ref = NULL; 2261 locked_ref = NULL;
2263 delayed_refs->num_heads_ready++; 2262 delayed_refs->num_heads_ready++;
2264 spin_unlock(&delayed_refs->lock); 2263 spin_unlock(&delayed_refs->lock);
@@ -2297,25 +2296,22 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2297 btrfs_free_delayed_extent_op(extent_op); 2296 btrfs_free_delayed_extent_op(extent_op);
2298 2297
2299 if (ret) { 2298 if (ret) {
2300 list_del_init(&locked_ref->cluster); 2299 printk(KERN_DEBUG
2301 mutex_unlock(&locked_ref->mutex); 2300 "btrfs: run_delayed_extent_op "
2302 2301 "returned %d\n", ret);
2303 printk(KERN_DEBUG "btrfs: run_delayed_extent_op returned %d\n", ret);
2304 spin_lock(&delayed_refs->lock); 2302 spin_lock(&delayed_refs->lock);
2303 btrfs_delayed_ref_unlock(locked_ref);
2305 return ret; 2304 return ret;
2306 } 2305 }
2307 2306
2308 goto next; 2307 goto next;
2309 } 2308 }
2310
2311 list_del_init(&locked_ref->cluster);
2312 locked_ref = NULL;
2313 } 2309 }
2314 2310
2315 ref->in_tree = 0; 2311 ref->in_tree = 0;
2316 rb_erase(&ref->rb_node, &delayed_refs->root); 2312 rb_erase(&ref->rb_node, &delayed_refs->root);
2317 delayed_refs->num_entries--; 2313 delayed_refs->num_entries--;
2318 if (locked_ref) { 2314 if (!btrfs_delayed_ref_is_head(ref)) {
2319 /* 2315 /*
2320 * when we play the delayed ref, also correct the 2316 * when we play the delayed ref, also correct the
2321 * ref_mod on head 2317 * ref_mod on head
@@ -2337,20 +2333,29 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2337 ret = run_one_delayed_ref(trans, root, ref, extent_op, 2333 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2338 must_insert_reserved); 2334 must_insert_reserved);
2339 2335
2340 btrfs_put_delayed_ref(ref);
2341 btrfs_free_delayed_extent_op(extent_op); 2336 btrfs_free_delayed_extent_op(extent_op);
2342 count++;
2343
2344 if (ret) { 2337 if (ret) {
2345 if (locked_ref) { 2338 btrfs_delayed_ref_unlock(locked_ref);
2346 list_del_init(&locked_ref->cluster); 2339 btrfs_put_delayed_ref(ref);
2347 mutex_unlock(&locked_ref->mutex); 2340 printk(KERN_DEBUG
2348 } 2341 "btrfs: run_one_delayed_ref returned %d\n", ret);
2349 printk(KERN_DEBUG "btrfs: run_one_delayed_ref returned %d\n", ret);
2350 spin_lock(&delayed_refs->lock); 2342 spin_lock(&delayed_refs->lock);
2351 return ret; 2343 return ret;
2352 } 2344 }
2353 2345
2346 /*
2347 * If this node is a head, that means all the refs in this head
2348 * have been dealt with, and we will pick the next head to deal
2349 * with, so we must unlock the head and drop it from the cluster
2350 * list before we release it.
2351 */
2352 if (btrfs_delayed_ref_is_head(ref)) {
2353 list_del_init(&locked_ref->cluster);
2354 btrfs_delayed_ref_unlock(locked_ref);
2355 locked_ref = NULL;
2356 }
2357 btrfs_put_delayed_ref(ref);
2358 count++;
2354next: 2359next:
2355 cond_resched(); 2360 cond_resched();
2356 spin_lock(&delayed_refs->lock); 2361 spin_lock(&delayed_refs->lock);
@@ -2500,6 +2505,7 @@ again:
2500 2505
2501 ret = run_clustered_refs(trans, root, &cluster); 2506 ret = run_clustered_refs(trans, root, &cluster);
2502 if (ret < 0) { 2507 if (ret < 0) {
2508 btrfs_release_ref_cluster(&cluster);
2503 spin_unlock(&delayed_refs->lock); 2509 spin_unlock(&delayed_refs->lock);
2504 btrfs_abort_transaction(trans, root, ret); 2510 btrfs_abort_transaction(trans, root, ret);
2505 return ret; 2511 return ret;