aboutsummaryrefslogtreecommitdiffstats
path: root/fs/btrfs
diff options
context:
space:
mode:
Diffstat (limited to 'fs/btrfs')
-rw-r--r--  fs/btrfs/extent-tree.c  18
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 8c56f5b38948..cec05e100142 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -429,6 +429,7 @@ err:
 
 static int cache_block_group(struct btrfs_block_group_cache *cache,
 			     struct btrfs_trans_handle *trans,
+			     struct btrfs_root *root,
 			     int load_cache_only)
 {
 	struct btrfs_fs_info *fs_info = cache->fs_info;
@@ -442,9 +443,12 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 
 	/*
 	 * We can't do the read from on-disk cache during a commit since we need
-	 * to have the normal tree locking.
+	 * to have the normal tree locking.  Also if we are currently trying to
+	 * allocate blocks for the tree root we can't do the fast caching since
+	 * we likely hold important locks.
 	 */
-	if (!trans->transaction->in_commit) {
+	if (!trans->transaction->in_commit &&
+	    (root && root != root->fs_info->tree_root)) {
 		spin_lock(&cache->lock);
 		if (cache->cached != BTRFS_CACHE_NO) {
 			spin_unlock(&cache->lock);
@@ -4083,7 +4087,7 @@ static int update_block_group(struct btrfs_trans_handle *trans,
 	 * space back to the block group, otherwise we will leak space.
 	 */
 	if (!alloc && cache->cached == BTRFS_CACHE_NO)
-		cache_block_group(cache, trans, 1);
+		cache_block_group(cache, trans, NULL, 1);
 
 	byte_in_group = bytenr - cache->key.objectid;
 	WARN_ON(byte_in_group > cache->key.offset);
@@ -4937,7 +4941,8 @@ have_block_group:
 		if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
 			u64 free_percent;
 
-			ret = cache_block_group(block_group, trans, 1);
+			ret = cache_block_group(block_group, trans,
+						orig_root, 1);
 			if (block_group->cached == BTRFS_CACHE_FINISHED)
 				goto have_block_group;
 
@@ -4961,7 +4966,8 @@ have_block_group:
 			if (loop > LOOP_CACHING_NOWAIT ||
 			    (loop > LOOP_FIND_IDEAL &&
 			     atomic_read(&space_info->caching_threads) < 2)) {
-				ret = cache_block_group(block_group, trans, 0);
+				ret = cache_block_group(block_group, trans,
+							orig_root, 0);
 				BUG_ON(ret);
 			}
 			found_uncached_bg = true;
@@ -5518,7 +5524,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
 	u64 num_bytes = ins->offset;
 
 	block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
-	cache_block_group(block_group, trans, 0);
+	cache_block_group(block_group, trans, NULL, 0);
 	caching_ctl = get_caching_control(block_group);
 
 	if (!caching_ctl) {