author     Mark Brown <broonie@opensource.wolfsonmicro.com>  2011-12-12 11:19:20 -0500
committer  Mark Brown <broonie@opensource.wolfsonmicro.com>  2011-12-12 11:19:20 -0500
commit     68556ca1e03d6a35be3b315eba58df2f8176e3a0 (patch)
tree       36a390d29a0d03a59a90c0f223b0d98a80f0f6c3  /fs/btrfs/extent-tree.c
parent     0604ca48f1689ad06144b81f5c08f297b6edd831 (diff)
parent     8ab30691826fc05efa47c4ffba19b80496bb3a2c (diff)
Merge branch 'mfd/wm8994' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/misc into for-3.3
Diffstat (limited to 'fs/btrfs/extent-tree.c')
-rw-r--r--  fs/btrfs/extent-tree.c  169
1 file changed, 110 insertions(+), 59 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 9879bd474632..930ae8949737 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -467,13 +467,59 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
                              struct btrfs_root *root,
                              int load_cache_only)
 {
+        DEFINE_WAIT(wait);
         struct btrfs_fs_info *fs_info = cache->fs_info;
         struct btrfs_caching_control *caching_ctl;
         int ret = 0;
 
-        smp_mb();
-        if (cache->cached != BTRFS_CACHE_NO)
+        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
+        BUG_ON(!caching_ctl);
+
+        INIT_LIST_HEAD(&caching_ctl->list);
+        mutex_init(&caching_ctl->mutex);
+        init_waitqueue_head(&caching_ctl->wait);
+        caching_ctl->block_group = cache;
+        caching_ctl->progress = cache->key.objectid;
+        atomic_set(&caching_ctl->count, 1);
+        caching_ctl->work.func = caching_thread;
+
+        spin_lock(&cache->lock);
+        /*
+         * This should be a rare occasion, but this could happen I think in the
+         * case where one thread starts to load the space cache info, and then
+         * some other thread starts a transaction commit which tries to do an
+         * allocation while the other thread is still loading the space cache
+         * info. The previous loop should have kept us from choosing this block
+         * group, but if we've moved to the state where we will wait on caching
+         * block groups we need to first check if we're doing a fast load here,
+         * so we can wait for it to finish, otherwise we could end up allocating
+         * from a block group who's cache gets evicted for one reason or
+         * another.
+         */
+        while (cache->cached == BTRFS_CACHE_FAST) {
+                struct btrfs_caching_control *ctl;
+
+                ctl = cache->caching_ctl;
+                atomic_inc(&ctl->count);
+                prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
+                spin_unlock(&cache->lock);
+
+                schedule();
+
+                finish_wait(&ctl->wait, &wait);
+                put_caching_control(ctl);
+                spin_lock(&cache->lock);
+        }
+
+        if (cache->cached != BTRFS_CACHE_NO) {
+                spin_unlock(&cache->lock);
+                kfree(caching_ctl);
                 return 0;
+        }
+        WARN_ON(cache->caching_ctl);
+        cache->caching_ctl = caching_ctl;
+        cache->cached = BTRFS_CACHE_FAST;
+        spin_unlock(&cache->lock);
 
         /*
          * We can't do the read from on-disk cache during a commit since we need
@@ -484,56 +530,51 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
         if (trans && (!trans->transaction->in_commit) &&
             (root && root != root->fs_info->tree_root) &&
             btrfs_test_opt(root, SPACE_CACHE)) {
-                spin_lock(&cache->lock);
-                if (cache->cached != BTRFS_CACHE_NO) {
-                        spin_unlock(&cache->lock);
-                        return 0;
-                }
-                cache->cached = BTRFS_CACHE_STARTED;
-                spin_unlock(&cache->lock);
-
                 ret = load_free_space_cache(fs_info, cache);
 
                 spin_lock(&cache->lock);
                 if (ret == 1) {
+                        cache->caching_ctl = NULL;
                         cache->cached = BTRFS_CACHE_FINISHED;
                         cache->last_byte_to_unpin = (u64)-1;
                 } else {
-                        cache->cached = BTRFS_CACHE_NO;
+                        if (load_cache_only) {
+                                cache->caching_ctl = NULL;
+                                cache->cached = BTRFS_CACHE_NO;
+                        } else {
+                                cache->cached = BTRFS_CACHE_STARTED;
+                        }
                 }
                 spin_unlock(&cache->lock);
+                wake_up(&caching_ctl->wait);
                 if (ret == 1) {
+                        put_caching_control(caching_ctl);
                         free_excluded_extents(fs_info->extent_root, cache);
                         return 0;
                 }
+        } else {
+                /*
+                 * We are not going to do the fast caching, set cached to the
+                 * appropriate value and wakeup any waiters.
+                 */
+                spin_lock(&cache->lock);
+                if (load_cache_only) {
+                        cache->caching_ctl = NULL;
+                        cache->cached = BTRFS_CACHE_NO;
+                } else {
+                        cache->cached = BTRFS_CACHE_STARTED;
+                }
+                spin_unlock(&cache->lock);
+                wake_up(&caching_ctl->wait);
         }
 
-        if (load_cache_only)
-                return 0;
-
-        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
-        BUG_ON(!caching_ctl);
-
-        INIT_LIST_HEAD(&caching_ctl->list);
-        mutex_init(&caching_ctl->mutex);
-        init_waitqueue_head(&caching_ctl->wait);
-        caching_ctl->block_group = cache;
-        caching_ctl->progress = cache->key.objectid;
-        /* one for caching kthread, one for caching block group list */
-        atomic_set(&caching_ctl->count, 2);
-        caching_ctl->work.func = caching_thread;
-
-        spin_lock(&cache->lock);
-        if (cache->cached != BTRFS_CACHE_NO) {
-                spin_unlock(&cache->lock);
-                kfree(caching_ctl);
+        if (load_cache_only) {
+                put_caching_control(caching_ctl);
                 return 0;
         }
-        cache->caching_ctl = caching_ctl;
-        cache->cached = BTRFS_CACHE_STARTED;
-        spin_unlock(&cache->lock);
 
         down_write(&fs_info->extent_commit_sem);
+        atomic_inc(&caching_ctl->count);
         list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
         up_write(&fs_info->extent_commit_sem);
 
@@ -3797,16 +3838,16 @@ void btrfs_free_block_rsv(struct btrfs_root *root,
         kfree(rsv);
 }
 
-int btrfs_block_rsv_add(struct btrfs_root *root,
-                        struct btrfs_block_rsv *block_rsv,
-                        u64 num_bytes)
+static inline int __block_rsv_add(struct btrfs_root *root,
+                                  struct btrfs_block_rsv *block_rsv,
+                                  u64 num_bytes, int flush)
 {
         int ret;
 
         if (num_bytes == 0)
                 return 0;
 
-        ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 1);
+        ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
         if (!ret) {
                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
                 return 0;
@@ -3815,22 +3856,18 @@ int btrfs_block_rsv_add(struct btrfs_root *root,
         return ret;
 }
 
+int btrfs_block_rsv_add(struct btrfs_root *root,
+                        struct btrfs_block_rsv *block_rsv,
+                        u64 num_bytes)
+{
+        return __block_rsv_add(root, block_rsv, num_bytes, 1);
+}
+
 int btrfs_block_rsv_add_noflush(struct btrfs_root *root,
                                 struct btrfs_block_rsv *block_rsv,
                                 u64 num_bytes)
 {
-        int ret;
-
-        if (num_bytes == 0)
-                return 0;
-
-        ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 0);
-        if (!ret) {
-                block_rsv_add_bytes(block_rsv, num_bytes, 1);
-                return 0;
-        }
-
-        return ret;
+        return __block_rsv_add(root, block_rsv, num_bytes, 0);
 }
 
 int btrfs_block_rsv_check(struct btrfs_root *root,
@@ -4064,23 +4101,30 @@ int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
  */
 static unsigned drop_outstanding_extent(struct inode *inode)
 {
+        unsigned drop_inode_space = 0;
         unsigned dropped_extents = 0;
 
         BUG_ON(!BTRFS_I(inode)->outstanding_extents);
         BTRFS_I(inode)->outstanding_extents--;
 
+        if (BTRFS_I(inode)->outstanding_extents == 0 &&
+            BTRFS_I(inode)->delalloc_meta_reserved) {
+                drop_inode_space = 1;
+                BTRFS_I(inode)->delalloc_meta_reserved = 0;
+        }
+
         /*
          * If we have more or the same amount of outsanding extents than we have
          * reserved then we need to leave the reserved extents count alone.
          */
         if (BTRFS_I(inode)->outstanding_extents >=
             BTRFS_I(inode)->reserved_extents)
-                return 0;
+                return drop_inode_space;
 
         dropped_extents = BTRFS_I(inode)->reserved_extents -
                 BTRFS_I(inode)->outstanding_extents;
         BTRFS_I(inode)->reserved_extents -= dropped_extents;
-        return dropped_extents;
+        return dropped_extents + drop_inode_space;
 }
 
 /**
@@ -4166,9 +4210,18 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
                 nr_extents = BTRFS_I(inode)->outstanding_extents -
                         BTRFS_I(inode)->reserved_extents;
                 BTRFS_I(inode)->reserved_extents += nr_extents;
+        }
 
-        to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
+        /*
+         * Add an item to reserve for updating the inode when we complete the
+         * delalloc io.
+         */
+        if (!BTRFS_I(inode)->delalloc_meta_reserved) {
+                nr_extents++;
+                BTRFS_I(inode)->delalloc_meta_reserved = 1;
         }
+
+        to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
         spin_unlock(&BTRFS_I(inode)->lock);
 
@@ -5166,13 +5219,15 @@ search:
         }
 
 have_block_group:
-                if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
+                cached = block_group_cache_done(block_group);
+                if (unlikely(!cached)) {
                         u64 free_percent;
 
+                        found_uncached_bg = true;
                         ret = cache_block_group(block_group, trans,
                                                 orig_root, 1);
                         if (block_group->cached == BTRFS_CACHE_FINISHED)
-                                goto have_block_group;
+                                goto alloc;
 
                         free_percent = btrfs_block_group_used(&block_group->item);
                         free_percent *= 100;
@@ -5194,7 +5249,6 @@ have_block_group:
                                                         orig_root, 0);
                                 BUG_ON(ret);
                         }
-                        found_uncached_bg = true;
 
                         /*
                          * If loop is set for cached only, try the next block
@@ -5204,10 +5258,7 @@ have_block_group:
                                 goto loop;
                 }
 
-                cached = block_group_cache_done(block_group);
-                if (unlikely(!cached))
-                        found_uncached_bg = true;
-
+alloc:
                 if (unlikely(block_group->ro))
                         goto loop;
 
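
The heart of the cache_block_group() change above is an open-coded wait on the new BTRFS_CACHE_FAST state: because the condition is checked under cache->lock, the patch uses the prepare_to_wait()/schedule()/finish_wait() idiom directly rather than wait_event(). A minimal kernel-style sketch of that idiom follows; struct foo, its fields, and the FOO_* states are hypothetical stand-ins (not btrfs code), and the snippet is illustrative rather than a buildable module.

#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/sched.h>

/* Hypothetical object: a spinlock-protected state plus a wait queue. */
struct foo {
        spinlock_t lock;
        int state;                      /* FOO_BUSY or FOO_READY */
        wait_queue_head_t wait;
};

#define FOO_BUSY        0
#define FOO_READY       1

/*
 * Sleep until f->state leaves FOO_BUSY.  Same shape as the new
 * BTRFS_CACHE_FAST loop: re-check the condition under the lock, drop the
 * lock before sleeping, retake it after waking.
 */
static void foo_wait_ready(struct foo *f)
{
        DEFINE_WAIT(wait);

        spin_lock(&f->lock);
        while (f->state == FOO_BUSY) {
                prepare_to_wait(&f->wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&f->lock);

                schedule();             /* woken by foo_set_ready() */

                finish_wait(&f->wait, &wait);
                spin_lock(&f->lock);
        }
        spin_unlock(&f->lock);
}

/*
 * Publisher side: update the state under the lock, then wake waiters,
 * as the patch does with wake_up(&caching_ctl->wait).
 */
static void foo_set_ready(struct foo *f)
{
        spin_lock(&f->lock);
        f->state = FOO_READY;
        spin_unlock(&f->lock);
        wake_up(&f->wait);
}

The btrfs version differs only in that the waiter also pins the caching control with atomic_inc(&ctl->count) before sleeping and releases it with put_caching_control() after waking, so the object cannot disappear while the thread is blocked.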