author     Josef Bacik <josef@redhat.com>          2010-08-25 16:54:15 -0400
committer  Chris Mason <chris.mason@oracle.com>    2010-10-29 09:26:35 -0400
commit     9d66e233c7042da27ec699453770f41e567a0442 (patch)
tree       27fd70c6c07cb96a48123bdec07e9c2feed90f13 /fs/btrfs/extent-tree.c
parent     0cb59c9953171e9adf6da8142a5c85ceb77bb60d (diff)
Btrfs: load free space cache if it exists
This patch actually loads the free space cache if it exists.

The only thing that really changes here is that we need to cache the
block group if we're going to remove an extent from it. Previously we
did not do this, since the caching kthread would pick it up. With the
on-disk cache we don't have this luxury, so we need to make sure we
read the on-disk cache in first and then remove the extent; that way,
when the extent is unpinned, the free space is added back to the block
group.

This has been tested with all sorts of things.

Signed-off-by: Josef Bacik <josef@redhat.com>
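To make the new flow concrete, here is a minimal user-space sketch (hypothetical, not code from this patch) of the state machine cache_block_group() now drives: attempt the fast on-disk load first, fall back to BTRFS_CACHE_NO if no cache exists, and let load_cache_only callers stop there rather than starting the slow caching path. load_free_space_cache_stub() and cache_block_group_sketch() are illustrative stand-ins; only the CACHE_* states mirror the kernel's.

#include <stdio.h>

enum cache_state { CACHE_NO, CACHE_STARTED, CACHE_FINISHED };

struct block_group {
        enum cache_state cached;
        long long free_space;
};

/* Stand-in for load_free_space_cache(): returns 1 if an on-disk cache
 * was found and loaded, 0 if the slow caching path must run instead. */
static int load_free_space_cache_stub(struct block_group *bg)
{
        bg->free_space = 1 << 20;       /* pretend this came from disk */
        return 1;
}

static int cache_block_group_sketch(struct block_group *bg, int load_cache_only)
{
        if (bg->cached != CACHE_NO)
                return 0;

        bg->cached = CACHE_STARTED;
        if (load_free_space_cache_stub(bg)) {
                bg->cached = CACHE_FINISHED;    /* fast path succeeded */
                return 0;
        }
        bg->cached = CACHE_NO;                  /* no on-disk cache */

        if (load_cache_only)
                return 0;       /* caller only wanted the fast path */

        /* ... the real code would start the caching kthread here ... */
        return 0;
}

int main(void)
{
        struct block_group bg = { CACHE_NO, 0 };

        cache_block_group_sketch(&bg, 1);
        printf("cached=%d free_space=%lld\n", (int)bg.cached, bg.free_space);
        return 0;
}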
Diffstat (limited to 'fs/btrfs/extent-tree.c')
-rw-r--r--  fs/btrfs/extent-tree.c | 50
1 file changed, 47 insertions(+), 3 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index d5455a2bf60b..9a325e465ad9 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -421,7 +421,9 @@ err:
         return 0;
 }
 
-static int cache_block_group(struct btrfs_block_group_cache *cache)
+static int cache_block_group(struct btrfs_block_group_cache *cache,
+                             struct btrfs_trans_handle *trans,
+                             int load_cache_only)
 {
         struct btrfs_fs_info *fs_info = cache->fs_info;
         struct btrfs_caching_control *caching_ctl;
@@ -432,6 +434,36 @@ static int cache_block_group(struct btrfs_block_group_cache *cache)
         if (cache->cached != BTRFS_CACHE_NO)
                 return 0;
 
+        /*
+         * We can't do the read from on-disk cache during a commit since we need
+         * to have the normal tree locking.
+         */
+        if (!trans->transaction->in_commit) {
+                spin_lock(&cache->lock);
+                if (cache->cached != BTRFS_CACHE_NO) {
+                        spin_unlock(&cache->lock);
+                        return 0;
+                }
+                cache->cached = BTRFS_CACHE_STARTED;
+                spin_unlock(&cache->lock);
+
+                ret = load_free_space_cache(fs_info, cache);
+
+                spin_lock(&cache->lock);
+                if (ret == 1) {
+                        cache->cached = BTRFS_CACHE_FINISHED;
+                        cache->last_byte_to_unpin = (u64)-1;
+                } else {
+                        cache->cached = BTRFS_CACHE_NO;
+                }
+                spin_unlock(&cache->lock);
+                if (ret == 1)
+                        return 0;
+        }
+
+        if (load_cache_only)
+                return 0;
+
         caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_KERNEL);
         BUG_ON(!caching_ctl);
 
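A design point worth noting in the hunk above: cache->lock is not held across load_free_space_cache(), which reads the cache from disk and may sleep. Instead the state is re-checked under the lock, bumped to BTRFS_CACHE_STARTED to claim the load, and the outcome is published under the lock afterwards. A hypothetical pthread analogue of that pattern (the names here are illustrative, not kernel API):

#include <pthread.h>

enum cache_state { CACHE_NO, CACHE_STARTED, CACHE_FINISHED };

struct bg {
        pthread_mutex_t lock;
        enum cache_state cached;
};

/* Stand-in for the sleeping disk read; returns 1 on success. */
static int slow_load(struct bg *b)
{
        (void)b;
        return 1;
}

static int try_load(struct bg *b)
{
        int ret;

        pthread_mutex_lock(&b->lock);
        if (b->cached != CACHE_NO) {            /* lost the race: done */
                pthread_mutex_unlock(&b->lock);
                return 0;
        }
        b->cached = CACHE_STARTED;              /* claim the load */
        pthread_mutex_unlock(&b->lock);

        ret = slow_load(b);                     /* no lock held across I/O */

        pthread_mutex_lock(&b->lock);           /* publish the outcome */
        b->cached = ret ? CACHE_FINISHED : CACHE_NO;
        pthread_mutex_unlock(&b->lock);
        return ret;
}

int main(void)
{
        struct bg b = { PTHREAD_MUTEX_INITIALIZER, CACHE_NO };

        return try_load(&b) == 1 ? 0 : 1;
}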
@@ -3984,6 +4016,14 @@ static int update_block_group(struct btrfs_trans_handle *trans,
                 factor = 2;
         else
                 factor = 1;
+        /*
+         * If this block group has free space cache written out, we
+         * need to make sure to load it if we are removing space.  This
+         * is because we need the unpinning stage to actually add the
+         * space back to the block group, otherwise we will leak space.
+         */
+        if (!alloc && cache->cached == BTRFS_CACHE_NO)
+                cache_block_group(cache, trans, 1);
 
         byte_in_group = bytenr - cache->key.objectid;
         WARN_ON(byte_in_group > cache->key.offset);
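Note the final argument of 1 (load_cache_only) in this hunk: update_block_group() only wants the synchronous on-disk load, and the early return added to cache_block_group() above ensures the full caching path is not started. The !alloc guard restricts this to the removal case the comment describes, where an unloaded cache would otherwise lose the space credited back at unpin time.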
@@ -4828,6 +4868,10 @@ have_block_group:
                 if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
                         u64 free_percent;
 
+                        ret = cache_block_group(block_group, trans, 1);
+                        if (block_group->cached == BTRFS_CACHE_FINISHED)
+                                goto have_block_group;
+
                         free_percent = btrfs_block_group_used(&block_group->item);
                         free_percent *= 100;
                         free_percent = div64_u64(free_percent,
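The allocator change follows the same pattern: for an uncached block group it first tries the cheap load-only call, and only when that does not leave the group in BTRFS_CACHE_FINISHED does it fall through to the existing free_percent heuristics and, in the next hunk, the cache_block_group(..., 0) call that starts a caching thread.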
@@ -4848,7 +4892,7 @@ have_block_group:
                         if (loop > LOOP_CACHING_NOWAIT ||
                             (loop > LOOP_FIND_IDEAL &&
                              atomic_read(&space_info->caching_threads) < 2)) {
-                                ret = cache_block_group(block_group);
+                                ret = cache_block_group(block_group, trans, 0);
                                 BUG_ON(ret);
                         }
                         found_uncached_bg = true;
@@ -5405,7 +5449,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
         u64 num_bytes = ins->offset;
 
         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
-        cache_block_group(block_group);
+        cache_block_group(block_group, trans, 0);
         caching_ctl = get_caching_control(block_group);
 
         if (!caching_ctl) {