Diffstat (limited to 'fs/btrfs/extent-tree.c')
 fs/btrfs/extent-tree.c | 401 ++++++++++++++++++++++++++++++-----------------
 1 file changed, 245 insertions(+), 156 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 71cd456fdb6..f5be06a2462 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -320,12 +320,12 @@ static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
 	return total_added;
 }
 
-static int caching_kthread(void *data)
+static noinline void caching_thread(struct btrfs_work *work)
 {
-	struct btrfs_block_group_cache *block_group = data;
-	struct btrfs_fs_info *fs_info = block_group->fs_info;
-	struct btrfs_caching_control *caching_ctl = block_group->caching_ctl;
-	struct btrfs_root *extent_root = fs_info->extent_root;
+	struct btrfs_block_group_cache *block_group;
+	struct btrfs_fs_info *fs_info;
+	struct btrfs_caching_control *caching_ctl;
+	struct btrfs_root *extent_root;
 	struct btrfs_path *path;
 	struct extent_buffer *leaf;
 	struct btrfs_key key;
@@ -334,9 +334,14 @@ static int caching_kthread(void *data)
 	u32 nritems;
 	int ret = 0;
 
+	caching_ctl = container_of(work, struct btrfs_caching_control, work);
+	block_group = caching_ctl->block_group;
+	fs_info = block_group->fs_info;
+	extent_root = fs_info->extent_root;
+
 	path = btrfs_alloc_path();
 	if (!path)
-		return -ENOMEM;
+		goto out;
 
 	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
 
@@ -433,13 +438,11 @@ err:
 	free_excluded_extents(extent_root, block_group);
 
 	mutex_unlock(&caching_ctl->mutex);
+out:
 	wake_up(&caching_ctl->wait);
 
 	put_caching_control(caching_ctl);
-	atomic_dec(&block_group->space_info->caching_threads);
 	btrfs_put_block_group(block_group);
-
-	return 0;
 }
 
 static int cache_block_group(struct btrfs_block_group_cache *cache,
@@ -449,7 +452,6 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 {
 	struct btrfs_fs_info *fs_info = cache->fs_info;
 	struct btrfs_caching_control *caching_ctl;
-	struct task_struct *tsk;
 	int ret = 0;
 
 	smp_mb();
@@ -501,6 +503,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 	caching_ctl->progress = cache->key.objectid;
 	/* one for caching kthread, one for caching block group list */
 	atomic_set(&caching_ctl->count, 2);
+	caching_ctl->work.func = caching_thread;
 
 	spin_lock(&cache->lock);
 	if (cache->cached != BTRFS_CACHE_NO) {
@@ -516,16 +519,9 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
 	up_write(&fs_info->extent_commit_sem);
 
-	atomic_inc(&cache->space_info->caching_threads);
 	btrfs_get_block_group(cache);
 
-	tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n",
-			  cache->key.objectid);
-	if (IS_ERR(tsk)) {
-		ret = PTR_ERR(tsk);
-		printk(KERN_ERR "error running thread %d\n", ret);
-		BUG();
-	}
+	btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);
 
 	return ret;
 }
@@ -667,7 +663,9 @@ int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
 	struct btrfs_path *path;
 
 	path = btrfs_alloc_path();
-	BUG_ON(!path);
+	if (!path)
+		return -ENOMEM;
+
 	key.objectid = start;
 	key.offset = len;
 	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
@@ -1784,6 +1782,9 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
 
 
 		for (i = 0; i < multi->num_stripes; i++, stripe++) {
+			if (!stripe->dev->can_discard)
+				continue;
+
 			ret = btrfs_issue_discard(stripe->dev->bdev,
 						  stripe->physical,
 						  stripe->length);
@@ -1791,11 +1792,16 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
 				discarded_bytes += stripe->length;
 			else if (ret != -EOPNOTSUPP)
 				break;
+
+			/*
+			 * Just in case we get back EOPNOTSUPP for some reason,
+			 * just ignore the return value so we don't screw up
+			 * people calling discard_extent.
+			 */
+			ret = 0;
 		}
 		kfree(multi);
 	}
-	if (discarded_bytes && ret == -EOPNOTSUPP)
-		ret = 0;
 
 	if (actual_bytes)
 		*actual_bytes = discarded_bytes;
@@ -2932,9 +2938,10 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
 	found->full = 0;
 	found->force_alloc = CHUNK_ALLOC_NO_FORCE;
 	found->chunk_alloc = 0;
+	found->flush = 0;
+	init_waitqueue_head(&found->wait);
 	*space_info = found;
 	list_add_rcu(&found->list, &info->space_info);
-	atomic_set(&found->caching_threads, 0);
 	return 0;
 }
 
@@ -3275,6 +3282,9 @@ again:
 	}
 
 	ret = btrfs_alloc_chunk(trans, extent_root, flags);
+	if (ret < 0 && ret != -ENOSPC)
+		goto out;
+
 	spin_lock(&space_info->lock);
 	if (ret)
 		space_info->full = 1;
@@ -3284,6 +3294,7 @@ again:
 	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
 	space_info->chunk_alloc = 0;
 	spin_unlock(&space_info->lock);
+out:
 	mutex_unlock(&extent_root->fs_info->chunk_mutex);
 	return ret;
 }
@@ -3314,6 +3325,14 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
 	if (reserved == 0)
 		return 0;
 
+	smp_mb();
+	if (root->fs_info->delalloc_bytes == 0) {
+		if (trans)
+			return 0;
+		btrfs_wait_ordered_extents(root, 0, 0);
+		return 0;
+	}
+
 	max_reclaim = min(reserved, to_reclaim);
 
 	while (loops < 1024) {
@@ -3356,6 +3375,8 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
 		}
 
 	}
+	if (reclaimed >= to_reclaim && !trans)
+		btrfs_wait_ordered_extents(root, 0, 0);
 	return reclaimed >= to_reclaim;
 }
 
@@ -3380,15 +3401,36 @@ static int reserve_metadata_bytes(struct btrfs_trans_handle *trans,
 	u64 num_bytes = orig_bytes;
 	int retries = 0;
 	int ret = 0;
-	bool reserved = false;
 	bool committed = false;
+	bool flushing = false;
 
 again:
-	ret = -ENOSPC;
-	if (reserved)
-		num_bytes = 0;
-
+	ret = 0;
 	spin_lock(&space_info->lock);
+	/*
+	 * We only want to wait if somebody other than us is flushing and we are
+	 * actually allowed to flush.
+	 */
+	while (flush && !flushing && space_info->flush) {
+		spin_unlock(&space_info->lock);
+		/*
+		 * If we have a trans handle we can't wait because the flusher
+		 * may have to commit the transaction, which would mean we would
+		 * deadlock since we are waiting for the flusher to finish, but
+		 * hold the current transaction open.
+		 */
+		if (trans)
+			return -EAGAIN;
+		ret = wait_event_interruptible(space_info->wait,
+					       !space_info->flush);
+		/* Must have been interrupted, return */
+		if (ret)
+			return -EINTR;
+
+		spin_lock(&space_info->lock);
+	}
+
+	ret = -ENOSPC;
 	unused = space_info->bytes_used + space_info->bytes_reserved +
 		 space_info->bytes_pinned + space_info->bytes_readonly +
 		 space_info->bytes_may_use;
@@ -3403,8 +3445,7 @@ again:
 	if (unused <= space_info->total_bytes) {
 		unused = space_info->total_bytes - unused;
 		if (unused >= num_bytes) {
-			if (!reserved)
-				space_info->bytes_reserved += orig_bytes;
+			space_info->bytes_reserved += orig_bytes;
 			ret = 0;
 		} else {
 			/*
@@ -3429,17 +3470,14 @@ again:
 	 * to reclaim space we can actually use it instead of somebody else
 	 * stealing it from us.
 	 */
-	if (ret && !reserved) {
-		space_info->bytes_reserved += orig_bytes;
-		reserved = true;
+	if (ret && flush) {
+		flushing = true;
+		space_info->flush = 1;
 	}
 
 	spin_unlock(&space_info->lock);
 
-	if (!ret)
-		return 0;
-
-	if (!flush)
+	if (!ret || !flush)
 		goto out;
 
 	/*
@@ -3447,11 +3485,11 @@ again:
 	 * metadata until after the IO is completed.
 	 */
 	ret = shrink_delalloc(trans, root, num_bytes, 1);
-	if (ret > 0)
-		return 0;
-	else if (ret < 0)
+	if (ret < 0)
 		goto out;
 
+	ret = 0;
+
 	/*
 	 * So if we were overcommitted it's possible that somebody else flushed
 	 * out enough space and we simply didn't have enough space to reclaim,
@@ -3462,11 +3500,11 @@ again:
 		goto again;
 	}
 
-	spin_lock(&space_info->lock);
 	/*
 	 * Not enough space to be reclaimed, don't bother committing the
 	 * transaction.
 	 */
+	spin_lock(&space_info->lock);
 	if (space_info->bytes_pinned < orig_bytes)
 		ret = -ENOSPC;
 	spin_unlock(&space_info->lock);
@@ -3474,10 +3512,13 @@ again:
 		goto out;
 
 	ret = -EAGAIN;
-	if (trans || committed)
+	if (trans)
 		goto out;
 
 	ret = -ENOSPC;
+	if (committed)
+		goto out;
+
 	trans = btrfs_join_transaction(root);
 	if (IS_ERR(trans))
 		goto out;
@@ -3489,12 +3530,12 @@ again:
 	}
 
 out:
-	if (reserved) {
+	if (flushing) {
 		spin_lock(&space_info->lock);
-		space_info->bytes_reserved -= orig_bytes;
+		space_info->flush = 0;
+		wake_up_all(&space_info->wait);
 		spin_unlock(&space_info->lock);
 	}
-
 	return ret;
 }
 
@@ -3704,7 +3745,6 @@ int btrfs_block_rsv_check(struct btrfs_trans_handle *trans,
 	if (commit_trans) {
 		if (trans)
 			return -EAGAIN;
-
 		trans = btrfs_join_transaction(root);
 		BUG_ON(IS_ERR(trans));
 		ret = btrfs_commit_transaction(trans, root);
@@ -3874,26 +3914,6 @@ int btrfs_truncate_reserve_metadata(struct btrfs_trans_handle *trans,
 	return 0;
 }
 
-int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans,
-				 struct btrfs_root *root,
-				 int num_items)
-{
-	u64 num_bytes;
-	int ret;
-
-	if (num_items == 0 || root->fs_info->chunk_root == root)
-		return 0;
-
-	num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
-	ret = btrfs_block_rsv_add(trans, root, &root->fs_info->trans_block_rsv,
-				  num_bytes);
-	if (!ret) {
-		trans->bytes_reserved += num_bytes;
-		trans->block_rsv = &root->fs_info->trans_block_rsv;
-	}
-	return ret;
-}
-
 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
 				  struct btrfs_root *root)
 {
@@ -3944,6 +3964,30 @@ int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
 	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
 }
 
+static unsigned drop_outstanding_extent(struct inode *inode)
+{
+	unsigned dropped_extents = 0;
+
+	spin_lock(&BTRFS_I(inode)->lock);
+	BUG_ON(!BTRFS_I(inode)->outstanding_extents);
+	BTRFS_I(inode)->outstanding_extents--;
+
+	/*
+	 * If we have more or the same amount of outstanding extents than we have
+	 * reserved then we need to leave the reserved extents count alone.
+	 */
+	if (BTRFS_I(inode)->outstanding_extents >=
+	    BTRFS_I(inode)->reserved_extents)
+		goto out;
+
+	dropped_extents = BTRFS_I(inode)->reserved_extents -
+		BTRFS_I(inode)->outstanding_extents;
+	BTRFS_I(inode)->reserved_extents -= dropped_extents;
+out:
+	spin_unlock(&BTRFS_I(inode)->lock);
+	return dropped_extents;
+}
+
 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes)
 {
 	return num_bytes >>= 3;
@@ -3953,9 +3997,8 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
 {
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
-	u64 to_reserve;
-	int nr_extents;
-	int reserved_extents;
+	u64 to_reserve = 0;
+	unsigned nr_extents = 0;
 	int ret;
 
 	if (btrfs_transaction_in_commit(root->fs_info))
@@ -3963,66 +4006,49 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
 
 	num_bytes = ALIGN(num_bytes, root->sectorsize);
 
-	nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents) + 1;
-	reserved_extents = atomic_read(&BTRFS_I(inode)->reserved_extents);
+	spin_lock(&BTRFS_I(inode)->lock);
+	BTRFS_I(inode)->outstanding_extents++;
+
+	if (BTRFS_I(inode)->outstanding_extents >
+	    BTRFS_I(inode)->reserved_extents) {
+		nr_extents = BTRFS_I(inode)->outstanding_extents -
+			BTRFS_I(inode)->reserved_extents;
+		BTRFS_I(inode)->reserved_extents += nr_extents;
 
-	if (nr_extents > reserved_extents) {
-		nr_extents -= reserved_extents;
 		to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
-	} else {
-		nr_extents = 0;
-		to_reserve = 0;
 	}
+	spin_unlock(&BTRFS_I(inode)->lock);
 
 	to_reserve += calc_csum_metadata_size(inode, num_bytes);
 	ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1);
-	if (ret)
+	if (ret) {
+		unsigned dropped;
+		/*
+		 * We don't need the return value since our reservation failed,
+		 * we just need to clean up our counter.
+		 */
+		dropped = drop_outstanding_extent(inode);
+		WARN_ON(dropped > 1);
 		return ret;
-
-	atomic_add(nr_extents, &BTRFS_I(inode)->reserved_extents);
-	atomic_inc(&BTRFS_I(inode)->outstanding_extents);
+	}
 
 	block_rsv_add_bytes(block_rsv, to_reserve, 1);
 
-	if (block_rsv->size > 512 * 1024 * 1024)
-		shrink_delalloc(NULL, root, to_reserve, 0);
-
 	return 0;
 }
 
 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
 {
 	struct btrfs_root *root = BTRFS_I(inode)->root;
-	u64 to_free;
-	int nr_extents;
-	int reserved_extents;
+	u64 to_free = 0;
+	unsigned dropped;
 
 	num_bytes = ALIGN(num_bytes, root->sectorsize);
-	atomic_dec(&BTRFS_I(inode)->outstanding_extents);
-	WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents) < 0);
-
-	reserved_extents = atomic_read(&BTRFS_I(inode)->reserved_extents);
-	do {
-		int old, new;
-
-		nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
-		if (nr_extents >= reserved_extents) {
-			nr_extents = 0;
-			break;
-		}
-		old = reserved_extents;
-		nr_extents = reserved_extents - nr_extents;
-		new = reserved_extents - nr_extents;
-		old = atomic_cmpxchg(&BTRFS_I(inode)->reserved_extents,
-				     reserved_extents, new);
-		if (likely(old == reserved_extents))
-			break;
-		reserved_extents = old;
-	} while (1);
+	dropped = drop_outstanding_extent(inode);
 
 	to_free = calc_csum_metadata_size(inode, num_bytes);
-	if (nr_extents > 0)
-		to_free += btrfs_calc_trans_metadata_size(root, nr_extents);
+	if (dropped > 0)
+		to_free += btrfs_calc_trans_metadata_size(root, dropped);
 
 	btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
 				to_free);
@@ -4444,7 +4470,9 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
 			printk(KERN_ERR "umm, got %d back from search"
 			       ", was looking for %llu\n", ret,
 			       (unsigned long long)bytenr);
-			btrfs_print_leaf(extent_root, path->nodes[0]);
+			if (ret > 0)
+				btrfs_print_leaf(extent_root,
+						 path->nodes[0]);
 		}
 		BUG_ON(ret);
 		extent_slot = path->slots[0];
@@ -4990,14 +5018,10 @@ have_block_group:
 		}
 
 		/*
-		 * We only want to start kthread caching if we are at
-		 * the point where we will wait for caching to make
-		 * progress, or if our ideal search is over and we've
-		 * found somebody to start caching.
+		 * The caching workers are limited to 2 threads, so we
+		 * can queue as much work as we care to.
 		 */
-		if (loop > LOOP_CACHING_NOWAIT ||
-		    (loop > LOOP_FIND_IDEAL &&
-		     atomic_read(&space_info->caching_threads) < 2)) {
+		if (loop > LOOP_FIND_IDEAL) {
 			ret = cache_block_group(block_group, trans,
 						orig_root, 0);
 			BUG_ON(ret);
@@ -5065,7 +5089,9 @@ have_block_group:
 			 * group it does point to and try again
 			 */
 			if (!last_ptr_loop && last_ptr->block_group &&
-			    last_ptr->block_group != block_group) {
+			    last_ptr->block_group != block_group &&
+			    index <=
+				get_block_group_index(last_ptr->block_group)) {
 
 				btrfs_put_block_group(block_group);
 				block_group = last_ptr->block_group;
@@ -5219,8 +5245,7 @@ loop:
 	if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
 		found_uncached_bg = false;
 		loop++;
-		if (!ideal_cache_percent &&
-		    atomic_read(&space_info->caching_threads))
+		if (!ideal_cache_percent)
 			goto search;
 
 		/*
@@ -5494,7 +5519,8 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
 	u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
 
 	path = btrfs_alloc_path();
-	BUG_ON(!path);
+	if (!path)
+		return -ENOMEM;
 
 	path->leave_spinning = 1;
 	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
@@ -5623,7 +5649,7 @@ struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
 	if (!buf)
 		return ERR_PTR(-ENOMEM);
 	btrfs_set_header_generation(buf, trans->transid);
-	btrfs_set_buffer_lockdep_class(buf, level);
+	btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
 	btrfs_tree_lock(buf);
 	clean_tree_block(trans, root, buf);
 
@@ -5910,7 +5936,7 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
 		return 1;
 
 	if (path->locks[level] && !wc->keep_locks) {
-		btrfs_tree_unlock(eb);
+		btrfs_tree_unlock_rw(eb, path->locks[level]);
 		path->locks[level] = 0;
 	}
 	return 0;
@@ -5934,7 +5960,7 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
 	 * keep the tree lock
 	 */
 	if (path->locks[level] && level > 0) {
-		btrfs_tree_unlock(eb);
+		btrfs_tree_unlock_rw(eb, path->locks[level]);
 		path->locks[level] = 0;
 	}
 	return 0;
@@ -6047,7 +6073,7 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
 	BUG_ON(level != btrfs_header_level(next));
 	path->nodes[level] = next;
 	path->slots[level] = 0;
-	path->locks[level] = 1;
+	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
 	wc->level = level;
 	if (wc->level == 1)
 		wc->reada_slot = 0;
@@ -6118,7 +6144,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
 		BUG_ON(level == 0);
 		btrfs_tree_lock(eb);
 		btrfs_set_lock_blocking(eb);
-		path->locks[level] = 1;
+		path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
 
 		ret = btrfs_lookup_extent_info(trans, root,
 					       eb->start, eb->len,
@@ -6127,8 +6153,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
 		BUG_ON(ret);
 		BUG_ON(wc->refs[level] == 0);
 		if (wc->refs[level] == 1) {
-			btrfs_tree_unlock(eb);
-			path->locks[level] = 0;
+			btrfs_tree_unlock_rw(eb, path->locks[level]);
 			return 1;
 		}
 	}
@@ -6150,7 +6175,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
 		    btrfs_header_generation(eb) == trans->transid) {
 			btrfs_tree_lock(eb);
 			btrfs_set_lock_blocking(eb);
-			path->locks[level] = 1;
+			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
 		}
 		clean_tree_block(trans, root, eb);
 	}
@@ -6229,7 +6254,8 @@ static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
 			return 0;
 
 		if (path->locks[level]) {
-			btrfs_tree_unlock(path->nodes[level]);
+			btrfs_tree_unlock_rw(path->nodes[level],
+					     path->locks[level]);
 			path->locks[level] = 0;
 		}
 		free_extent_buffer(path->nodes[level]);
@@ -6251,8 +6277,8 @@
  * also make sure backrefs for the shared block and all lower level
  * blocks are properly updated.
  */
-int btrfs_drop_snapshot(struct btrfs_root *root,
-			struct btrfs_block_rsv *block_rsv, int update_ref)
+void btrfs_drop_snapshot(struct btrfs_root *root,
+			 struct btrfs_block_rsv *block_rsv, int update_ref)
 {
 	struct btrfs_path *path;
 	struct btrfs_trans_handle *trans;
@@ -6265,10 +6291,17 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
 	int level;
 
 	path = btrfs_alloc_path();
-	BUG_ON(!path);
+	if (!path) {
+		err = -ENOMEM;
+		goto out;
+	}
 
 	wc = kzalloc(sizeof(*wc), GFP_NOFS);
-	BUG_ON(!wc);
+	if (!wc) {
+		btrfs_free_path(path);
+		err = -ENOMEM;
+		goto out;
+	}
 
 	trans = btrfs_start_transaction(tree_root, 0);
 	BUG_ON(IS_ERR(trans));
@@ -6281,7 +6314,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
 		path->nodes[level] = btrfs_lock_root_node(root);
 		btrfs_set_lock_blocking(path->nodes[level]);
 		path->slots[level] = 0;
-		path->locks[level] = 1;
+		path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
 		memset(&wc->update_progress, 0,
 		       sizeof(wc->update_progress));
 	} else {
@@ -6296,7 +6329,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
 		path->lowest_level = 0;
 		if (ret < 0) {
 			err = ret;
-			goto out;
+			goto out_free;
 		}
 		WARN_ON(ret > 0);
 
@@ -6403,11 +6436,14 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
 		free_extent_buffer(root->commit_root);
 		kfree(root);
 	}
-out:
+out_free:
 	btrfs_end_transaction_throttle(trans, tree_root);
 	kfree(wc);
 	btrfs_free_path(path);
-	return err;
+out:
+	if (err)
+		btrfs_std_error(root->fs_info, err);
+	return;
 }
 
 /*
@@ -6449,7 +6485,7 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
 	level = btrfs_header_level(node);
 	path->nodes[level] = node;
 	path->slots[level] = 0;
-	path->locks[level] = 1;
+	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
 
 	wc->refs[parent_level] = 1;
 	wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
@@ -6524,30 +6560,48 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
 	return flags;
 }
 
-static int set_block_group_ro(struct btrfs_block_group_cache *cache)
+static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
 {
 	struct btrfs_space_info *sinfo = cache->space_info;
 	u64 num_bytes;
+	u64 min_allocable_bytes;
 	int ret = -ENOSPC;
 
-	if (cache->ro)
-		return 0;
+
+	/*
+	 * We need some metadata space and system metadata space for
+	 * allocating chunks in some corner cases until we force to set
+	 * it to be readonly.
+	 */
+	if ((sinfo->flags &
+	     (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
+	    !force)
+		min_allocable_bytes = 1 * 1024 * 1024;
+	else
+		min_allocable_bytes = 0;
 
 	spin_lock(&sinfo->lock);
 	spin_lock(&cache->lock);
+
+	if (cache->ro) {
+		ret = 0;
+		goto out;
+	}
+
 	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
 		    cache->bytes_super - btrfs_block_group_used(&cache->item);
 
 	if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
 	    sinfo->bytes_may_use + sinfo->bytes_readonly +
-	    cache->reserved_pinned + num_bytes <= sinfo->total_bytes) {
+	    cache->reserved_pinned + num_bytes + min_allocable_bytes <=
+	    sinfo->total_bytes) {
 		sinfo->bytes_readonly += num_bytes;
 		sinfo->bytes_reserved += cache->reserved_pinned;
 		cache->reserved_pinned = 0;
 		cache->ro = 1;
 		ret = 0;
 	}
-
+out:
 	spin_unlock(&cache->lock);
 	spin_unlock(&sinfo->lock);
 	return ret;
@@ -6571,7 +6625,7 @@ int btrfs_set_block_group_ro(struct btrfs_root *root,
 	do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
 		       CHUNK_ALLOC_FORCE);
 
-	ret = set_block_group_ro(cache);
+	ret = set_block_group_ro(cache, 0);
 	if (!ret)
 		goto out;
 	alloc_flags = get_alloc_profile(root, cache->space_info->flags);
@@ -6579,7 +6633,7 @@
 			     CHUNK_ALLOC_FORCE);
 	if (ret < 0)
 		goto out;
-	ret = set_block_group_ro(cache);
+	ret = set_block_group_ro(cache, 0);
 out:
 	btrfs_end_transaction(trans, root);
 	return ret;
@@ -6680,6 +6734,10 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
 	struct btrfs_space_info *space_info;
 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
 	struct btrfs_device *device;
+	u64 min_free;
+	u64 dev_min = 1;
+	u64 dev_nr = 0;
+	int index;
 	int full = 0;
 	int ret = 0;
 
@@ -6689,8 +6747,10 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
 	if (!block_group)
 		return -1;
 
+	min_free = btrfs_block_group_used(&block_group->item);
+
 	/* no bytes used, we're good */
-	if (!btrfs_block_group_used(&block_group->item))
+	if (!min_free)
 		goto out;
 
 	space_info = block_group->space_info;
@@ -6706,10 +6766,9 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
 	 * all of the extents from this block group. If we can, we're good
 	 */
 	if ((space_info->total_bytes != block_group->key.offset) &&
-	   (space_info->bytes_used + space_info->bytes_reserved +
-	    space_info->bytes_pinned + space_info->bytes_readonly +
-	    btrfs_block_group_used(&block_group->item) <
-	    space_info->total_bytes)) {
+	    (space_info->bytes_used + space_info->bytes_reserved +
+	     space_info->bytes_pinned + space_info->bytes_readonly +
+	     min_free < space_info->total_bytes)) {
 		spin_unlock(&space_info->lock);
 		goto out;
 	}
@@ -6726,9 +6785,31 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
 	if (full)
 		goto out;
 
+	/*
+	 * index:
+	 *      0: raid10
+	 *      1: raid1
+	 *      2: dup
+	 *      3: raid0
+	 *      4: single
+	 */
+	index = get_block_group_index(block_group);
+	if (index == 0) {
+		dev_min = 4;
+		/* Divide by 2 */
+		min_free >>= 1;
+	} else if (index == 1) {
+		dev_min = 2;
+	} else if (index == 2) {
+		/* Multiply by 2 */
+		min_free <<= 1;
+	} else if (index == 3) {
+		dev_min = fs_devices->rw_devices;
+		do_div(min_free, dev_min);
+	}
+
 	mutex_lock(&root->fs_info->chunk_mutex);
 	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
-		u64 min_free = btrfs_block_group_used(&block_group->item);
 		u64 dev_offset;
 
 		/*
@@ -6739,7 +6820,11 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
 			ret = find_free_dev_extent(NULL, device, min_free,
 						   &dev_offset, NULL);
 			if (!ret)
+				dev_nr++;
+
+			if (dev_nr >= dev_min)
 				break;
+
 			ret = -1;
 		}
 	}
@@ -7016,7 +7101,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 
 		set_avail_alloc_bits(root->fs_info, cache->flags);
 		if (btrfs_chunk_readonly(root, cache->key.objectid))
-			set_block_group_ro(cache);
+			set_block_group_ro(cache, 1);
 	}
 
 	list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
@@ -7030,9 +7115,9 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 		 * mirrored block groups.
 		 */
 		list_for_each_entry(cache, &space_info->block_groups[3], list)
-			set_block_group_ro(cache);
+			set_block_group_ro(cache, 1);
 		list_for_each_entry(cache, &space_info->block_groups[4], list)
-			set_block_group_ro(cache);
+			set_block_group_ro(cache, 1);
 	}
 
 	init_global_block_rsv(info);
@@ -7162,11 +7247,15 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 	spin_unlock(&cluster->refill_lock);
 
 	path = btrfs_alloc_path();
-	BUG_ON(!path);
+	if (!path) {
+		ret = -ENOMEM;
+		goto out;
+	}
 
 	inode = lookup_free_space_inode(root, block_group, path);
 	if (!IS_ERR(inode)) {
-		btrfs_orphan_add(trans, inode);
+		ret = btrfs_orphan_add(trans, inode);
+		BUG_ON(ret);
 		clear_nlink(inode);
 		/* One for the block groups ref */
 		spin_lock(&block_group->lock);