Diffstat (limited to 'fs')
-rw-r--r--  fs/btrfs/extent-tree.c  44
1 file changed, 27 insertions(+), 17 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 1eef4ee01d1a..fc0db9887c0e 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3408,17 +3408,14 @@ int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
 	int loops = 0;
 
 	spin_lock(&cur_trans->dirty_bgs_lock);
-	if (!list_empty(&cur_trans->dirty_bgs)) {
-		list_splice_init(&cur_trans->dirty_bgs, &dirty);
+	if (list_empty(&cur_trans->dirty_bgs)) {
+		spin_unlock(&cur_trans->dirty_bgs_lock);
+		return 0;
 	}
+	list_splice_init(&cur_trans->dirty_bgs, &dirty);
 	spin_unlock(&cur_trans->dirty_bgs_lock);
 
 again:
-	if (list_empty(&dirty)) {
-		btrfs_free_path(path);
-		return 0;
-	}
-
 	/*
 	 * make sure all the block groups on our dirty list actually
 	 * exist
@@ -3431,18 +3428,16 @@ again:
 		return -ENOMEM;
 	}
 
+	/*
+	 * cache_write_mutex is here only to save us from balance or automatic
+	 * removal of empty block groups deleting this block group while we are
+	 * writing out the cache
+	 */
+	mutex_lock(&trans->transaction->cache_write_mutex);
 	while (!list_empty(&dirty)) {
 		cache = list_first_entry(&dirty,
 					 struct btrfs_block_group_cache,
 					 dirty_list);
-
-		/*
-		 * cache_write_mutex is here only to save us from balance
-		 * deleting this block group while we are writing out the
-		 * cache
-		 */
-		mutex_lock(&trans->transaction->cache_write_mutex);
-
 		/*
 		 * this can happen if something re-dirties a block
 		 * group that is already under IO. Just wait for it to
@@ -3495,7 +3490,6 @@ again:
 		}
 		if (!ret)
 			ret = write_one_cache_group(trans, root, path, cache);
-		mutex_unlock(&trans->transaction->cache_write_mutex);
 
 		/* if its not on the io list, we need to put the block group */
 		if (should_put)
@@ -3503,7 +3497,16 @@ again:
 
 		if (ret)
 			break;
+
+		/*
+		 * Avoid blocking other tasks for too long. It might even save
+		 * us from writing caches for block groups that are going to be
+		 * removed.
+		 */
+		mutex_unlock(&trans->transaction->cache_write_mutex);
+		mutex_lock(&trans->transaction->cache_write_mutex);
 	}
+	mutex_unlock(&trans->transaction->cache_write_mutex);
 
 	/*
 	 * go through delayed refs for all the stuff we've just kicked off
@@ -3514,8 +3517,15 @@ again:
 		loops++;
 		spin_lock(&cur_trans->dirty_bgs_lock);
 		list_splice_init(&cur_trans->dirty_bgs, &dirty);
+		/*
+		 * dirty_bgs_lock protects us from concurrent block group
+		 * deletes too (not just cache_write_mutex).
+		 */
+		if (!list_empty(&dirty)) {
+			spin_unlock(&cur_trans->dirty_bgs_lock);
+			goto again;
+		}
 		spin_unlock(&cur_trans->dirty_bgs_lock);
-		goto again;
 	}
 
 	btrfs_free_path(path);
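
The hunk that moves cache_write_mutex out of the loop also adds an unlock/relock pair between iterations, so other tasks waiting on the same mutex (such as block group removal) are not stalled for the whole write-out. The sketch below is a minimal userspace illustration of that pattern only, not the kernel code: POSIX threads stand in for the kernel mutex API, and cache_writer, group_remover and remaining_groups are hypothetical names invented for the example.

/* build: cc -pthread unlock_relock_sketch.c */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t cache_write_mutex = PTHREAD_MUTEX_INITIALIZER;
static int remaining_groups = 5;	/* pretend dirty block groups */

/*
 * Writer: holds the mutex while processing one item, then briefly drops
 * and re-acquires it so a waiting task can slip in between iterations.
 * POSIX mutexes give no fairness guarantee, but the short gap is usually
 * enough for a contended waiter to make progress.
 */
static void *cache_writer(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&cache_write_mutex);
	while (remaining_groups > 0) {
		printf("writer: writing cache, %d groups left\n",
		       remaining_groups);
		remaining_groups--;

		/* give other waiters a chance before the next iteration */
		pthread_mutex_unlock(&cache_write_mutex);
		pthread_mutex_lock(&cache_write_mutex);
	}
	pthread_mutex_unlock(&cache_write_mutex);
	return NULL;
}

/* Another task that needs the same mutex for a short critical section. */
static void *group_remover(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&cache_write_mutex);
	printf("remover: got the mutex with %d groups left\n",
	       remaining_groups);
	pthread_mutex_unlock(&cache_write_mutex);
	return NULL;
}

int main(void)
{
	pthread_t writer, remover;

	pthread_create(&writer, NULL, cache_writer, NULL);
	usleep(1000);	/* let the writer start first */
	pthread_create(&remover, NULL, group_remover, NULL);

	pthread_join(writer, NULL);
	pthread_join(remover, NULL);
	return 0;
}

Without the unlock/relock inside the loop the remover could only run after every group had been written, which is the long-hold behaviour the patch is avoiding.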