Diffstat (limited to 'fs/btrfs/extent-tree.c')
-rw-r--r--  fs/btrfs/extent-tree.c  354
1 file changed, 275 insertions, 79 deletions

diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 7b3089b5c2df..31f33ba56fe8 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -33,11 +33,28 @@
 #include "locking.h"
 #include "free-space-cache.h"
 
+/* control flags for do_chunk_alloc's force field
+ * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
+ * if we really need one.
+ *
+ * CHUNK_ALLOC_FORCE means it must try to allocate one
+ *
+ * CHUNK_ALLOC_LIMITED means to only try and allocate one
+ * if we have very few chunks already allocated. This is
+ * used as part of the clustering code to help make sure
+ * we have a good pool of storage to cluster in, without
+ * filling the FS with empty chunks
+ *
+ */
+enum {
+        CHUNK_ALLOC_NO_FORCE = 0,
+        CHUNK_ALLOC_FORCE = 1,
+        CHUNK_ALLOC_LIMITED = 2,
+};
+
 static int update_block_group(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root,
                               u64 bytenr, u64 num_bytes, int alloc);
-static int update_reserved_bytes(struct btrfs_block_group_cache *cache,
-                                 u64 num_bytes, int reserve, int sinfo);
 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 bytenr, u64 num_bytes, u64 parent,
@@ -442,7 +459,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
          * allocate blocks for the tree root we can't do the fast caching since
          * we likely hold important locks.
          */
-        if (!trans->transaction->in_commit &&
+        if (trans && (!trans->transaction->in_commit) &&
             (root && root != root->fs_info->tree_root)) {
                 spin_lock(&cache->lock);
                 if (cache->cached != BTRFS_CACHE_NO) {
@@ -471,7 +488,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
         if (load_cache_only)
                 return 0;
 
-        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_KERNEL);
+        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
         BUG_ON(!caching_ctl);
 
         INIT_LIST_HEAD(&caching_ctl->list);
@@ -1740,39 +1757,45 @@ static int remove_extent_backref(struct btrfs_trans_handle *trans,
         return ret;
 }
 
-static void btrfs_issue_discard(struct block_device *bdev,
+static int btrfs_issue_discard(struct block_device *bdev,
                                 u64 start, u64 len)
 {
-        blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL, 0);
+        return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
 }
 
 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
-                                u64 num_bytes)
+                                u64 num_bytes, u64 *actual_bytes)
 {
         int ret;
-        u64 map_length = num_bytes;
+        u64 discarded_bytes = 0;
         struct btrfs_multi_bio *multi = NULL;
 
-        if (!btrfs_test_opt(root, DISCARD))
-                return 0;
 
         /* Tell the block device(s) that the sectors can be discarded */
-        ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
-                              bytenr, &map_length, &multi, 0);
+        ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD,
+                              bytenr, &num_bytes, &multi, 0);
         if (!ret) {
                 struct btrfs_bio_stripe *stripe = multi->stripes;
                 int i;
 
-                if (map_length > num_bytes)
-                        map_length = num_bytes;
 
                 for (i = 0; i < multi->num_stripes; i++, stripe++) {
-                        btrfs_issue_discard(stripe->dev->bdev,
-                                            stripe->physical,
-                                            map_length);
+                        ret = btrfs_issue_discard(stripe->dev->bdev,
+                                                  stripe->physical,
+                                                  stripe->length);
+                        if (!ret)
+                                discarded_bytes += stripe->length;
+                        else if (ret != -EOPNOTSUPP)
+                                break;
                 }
                 kfree(multi);
         }
+        if (discarded_bytes && ret == -EOPNOTSUPP)
+                ret = 0;
+
+        if (actual_bytes)
+                *actual_bytes = discarded_bytes;
+
 
         return ret;
 }
@@ -3015,7 +3038,8 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
         found->bytes_readonly = 0;
         found->bytes_may_use = 0;
         found->full = 0;
-        found->force_alloc = 0;
+        found->force_alloc = CHUNK_ALLOC_NO_FORCE;
+        found->chunk_alloc = 0;
         *space_info = found;
         list_add_rcu(&found->list, &info->space_info);
         atomic_set(&found->caching_threads, 0);
@@ -3146,7 +3170,7 @@ again:
         if (!data_sinfo->full && alloc_chunk) {
                 u64 alloc_target;
 
-                data_sinfo->force_alloc = 1;
+                data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
                 spin_unlock(&data_sinfo->lock);
 alloc:
                 alloc_target = btrfs_get_alloc_profile(root, 1);
@@ -3156,7 +3180,8 @@ alloc:
 
                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
                                      bytes + 2 * 1024 * 1024,
-                                     alloc_target, 0);
+                                     alloc_target,
+                                     CHUNK_ALLOC_NO_FORCE);
                 btrfs_end_transaction(trans, root);
                 if (ret < 0) {
                         if (ret != -ENOSPC)
@@ -3235,31 +3260,56 @@ static void force_metadata_allocation(struct btrfs_fs_info *info)
         rcu_read_lock();
         list_for_each_entry_rcu(found, head, list) {
                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
-                        found->force_alloc = 1;
+                        found->force_alloc = CHUNK_ALLOC_FORCE;
         }
         rcu_read_unlock();
 }
 
 static int should_alloc_chunk(struct btrfs_root *root,
-                              struct btrfs_space_info *sinfo, u64 alloc_bytes)
+                              struct btrfs_space_info *sinfo, u64 alloc_bytes,
+                              int force)
 {
         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
+        u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
         u64 thresh;
 
-        if (sinfo->bytes_used + sinfo->bytes_reserved +
-            alloc_bytes + 256 * 1024 * 1024 < num_bytes)
+        if (force == CHUNK_ALLOC_FORCE)
+                return 1;
+
+        /*
+         * in limited mode, we want to have some free space up to
+         * about 1% of the FS size.
+         */
+        if (force == CHUNK_ALLOC_LIMITED) {
+                thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
+                thresh = max_t(u64, 64 * 1024 * 1024,
+                               div_factor_fine(thresh, 1));
+
+                if (num_bytes - num_allocated < thresh)
+                        return 1;
+        }
+
+        /*
+         * we have two similar checks here, one based on percentage
+         * and once based on a hard number of 256MB.  The idea
+         * is that if we have a good amount of free
+         * room, don't allocate a chunk.  A good mount is
+         * less than 80% utilized of the chunks we have allocated,
+         * or more than 256MB free
+         */
+        if (num_allocated + alloc_bytes + 256 * 1024 * 1024 < num_bytes)
                 return 0;
 
-        if (sinfo->bytes_used + sinfo->bytes_reserved +
-            alloc_bytes < div_factor(num_bytes, 8))
+        if (num_allocated + alloc_bytes < div_factor(num_bytes, 8))
                 return 0;
 
         thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
+
+        /* 256MB or 5% of the FS */
         thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 5));
 
         if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 3))
                 return 0;
-
         return 1;
 }
 
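Editor's note: the thresholds in should_alloc_chunk() above boil down to simple percentages. CHUNK_ALLOC_LIMITED keeps roughly 1% of the filesystem (at least 64MB) unallocated, and the normal path refuses a new chunk while the existing chunks still have more than 256MB free or are under 80% utilized, with a "256MB or 5% of the FS" cutoff for large filesystems. A small standalone sketch of that arithmetic, assuming div_factor_fine(n, f) behaves as n * f / 100 (as the comments in the patch indicate); this is an illustration, not part of the patch:

/* Editor's sketch: evaluate the chunk-allocation thresholds described above
 * for a hypothetical 1 TiB filesystem. */
#include <stdio.h>
#include <stdint.h>

static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }

int main(void)
{
        const uint64_t mib = 1024ULL * 1024;
        uint64_t fs_size = 1ULL << 40;          /* hypothetical 1 TiB filesystem */

        /* CHUNK_ALLOC_LIMITED: keep ~1% of the FS, but at least 64MB, unallocated */
        uint64_t limited_slack = max_u64(64 * mib, fs_size * 1 / 100);

        /* normal path: "256MB or 5% of the FS" large-filesystem cutoff */
        uint64_t large_fs_thresh = max_u64(256 * mib, fs_size * 5 / 100);

        printf("limited-mode slack : %llu MiB\n",
               (unsigned long long)(limited_slack / mib));
        printf("large-FS threshold : %llu MiB\n",
               (unsigned long long)(large_fs_thresh / mib));
        return 0;
}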
@@ -3269,10 +3319,9 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 {
         struct btrfs_space_info *space_info;
         struct btrfs_fs_info *fs_info = extent_root->fs_info;
+        int wait_for_alloc = 0;
         int ret = 0;
 
-        mutex_lock(&fs_info->chunk_mutex);
-
         flags = btrfs_reduce_alloc_profile(extent_root, flags);
 
         space_info = __find_space_info(extent_root->fs_info, flags);
@@ -3283,21 +3332,40 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
         }
         BUG_ON(!space_info);
 
+again:
         spin_lock(&space_info->lock);
         if (space_info->force_alloc)
-                force = 1;
+                force = space_info->force_alloc;
         if (space_info->full) {
                 spin_unlock(&space_info->lock);
-                goto out;
+                return 0;
         }
 
-        if (!force && !should_alloc_chunk(extent_root, space_info,
-                                          alloc_bytes)) {
+        if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) {
                 spin_unlock(&space_info->lock);
-                goto out;
+                return 0;
+        } else if (space_info->chunk_alloc) {
+                wait_for_alloc = 1;
+        } else {
+                space_info->chunk_alloc = 1;
         }
+
         spin_unlock(&space_info->lock);
 
+        mutex_lock(&fs_info->chunk_mutex);
+
+        /*
+         * The chunk_mutex is held throughout the entirety of a chunk
+         * allocation, so once we've acquired the chunk_mutex we know that the
+         * other guy is done and we need to recheck and see if we should
+         * allocate.
+         */
+        if (wait_for_alloc) {
+                mutex_unlock(&fs_info->chunk_mutex);
+                wait_for_alloc = 0;
+                goto again;
+        }
+
         /*
          * If we have mixed data/metadata chunks we want to make sure we keep
          * allocating mixed chunks instead of individual chunks.
@@ -3323,9 +3391,10 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                 space_info->full = 1;
         else
                 ret = 1;
-        space_info->force_alloc = 0;
+
+        space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
+        space_info->chunk_alloc = 0;
         spin_unlock(&space_info->lock);
-out:
         mutex_unlock(&extent_root->fs_info->chunk_mutex);
         return ret;
 }
@@ -3996,6 +4065,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
         u64 to_reserve;
         int nr_extents;
+        int reserved_extents;
         int ret;
 
         if (btrfs_transaction_in_commit(root->fs_info))
@@ -4003,25 +4073,24 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
 
         num_bytes = ALIGN(num_bytes, root->sectorsize);
 
-        spin_lock(&BTRFS_I(inode)->accounting_lock);
         nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents) + 1;
-        if (nr_extents > BTRFS_I(inode)->reserved_extents) {
-                nr_extents -= BTRFS_I(inode)->reserved_extents;
+        reserved_extents = atomic_read(&BTRFS_I(inode)->reserved_extents);
+
+        if (nr_extents > reserved_extents) {
+                nr_extents -= reserved_extents;
                 to_reserve = calc_trans_metadata_size(root, nr_extents);
         } else {
                 nr_extents = 0;
                 to_reserve = 0;
         }
-        spin_unlock(&BTRFS_I(inode)->accounting_lock);
+
         to_reserve += calc_csum_metadata_size(inode, num_bytes);
         ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1);
         if (ret)
                 return ret;
 
-        spin_lock(&BTRFS_I(inode)->accounting_lock);
-        BTRFS_I(inode)->reserved_extents += nr_extents;
+        atomic_add(nr_extents, &BTRFS_I(inode)->reserved_extents);
         atomic_inc(&BTRFS_I(inode)->outstanding_extents);
-        spin_unlock(&BTRFS_I(inode)->accounting_lock);
 
         block_rsv_add_bytes(block_rsv, to_reserve, 1);
 
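Editor's note: this hunk and the next replace the accounting_lock spinlock around reserved_extents with plain atomic operations; the release side, shown in the following hunk, uses a cmpxchg retry loop. A minimal sketch of that retry pattern using C11 atomics on a hypothetical user-space counter (the kernel code uses atomic_read()/atomic_cmpxchg() and re-reads outstanding_extents on every pass); it is an illustration, not part of the patch:

#include <stdatomic.h>

/* Editor's sketch of the cmpxchg retry loop: returns how many extents were
 * released from the reservation, mirroring nr_extents in the patch. */
static unsigned int release_over_reserved(atomic_uint *reserved,
                                          unsigned int outstanding)
{
        unsigned int old = atomic_load(reserved);
        unsigned int freed;

        do {
                if (outstanding >= old)
                        return 0;       /* nothing over-reserved, like the break in the patch */
                freed = old - outstanding;
                /* try to publish old - freed; on failure 'old' is reloaded
                 * with the current value and the loop retries */
        } while (!atomic_compare_exchange_weak(reserved, &old, old - freed));

        return freed;
}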
@@ -4036,20 +4105,30 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
         struct btrfs_root *root = BTRFS_I(inode)->root;
         u64 to_free;
         int nr_extents;
+        int reserved_extents;
 
         num_bytes = ALIGN(num_bytes, root->sectorsize);
         atomic_dec(&BTRFS_I(inode)->outstanding_extents);
         WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents) < 0);
 
-        spin_lock(&BTRFS_I(inode)->accounting_lock);
-        nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
-        if (nr_extents < BTRFS_I(inode)->reserved_extents) {
-                nr_extents = BTRFS_I(inode)->reserved_extents - nr_extents;
-                BTRFS_I(inode)->reserved_extents -= nr_extents;
-        } else {
-                nr_extents = 0;
-        }
-        spin_unlock(&BTRFS_I(inode)->accounting_lock);
+        reserved_extents = atomic_read(&BTRFS_I(inode)->reserved_extents);
+        do {
+                int old, new;
+
+                nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
+                if (nr_extents >= reserved_extents) {
+                        nr_extents = 0;
+                        break;
+                }
+                old = reserved_extents;
+                nr_extents = reserved_extents - nr_extents;
+                new = reserved_extents - nr_extents;
+                old = atomic_cmpxchg(&BTRFS_I(inode)->reserved_extents,
+                                     reserved_extents, new);
+                if (likely(old == reserved_extents))
+                        break;
+                reserved_extents = old;
+        } while (1);
 
         to_free = calc_csum_metadata_size(inode, num_bytes);
         if (nr_extents > 0)
@@ -4223,8 +4302,8 @@ int btrfs_pin_extent(struct btrfs_root *root,
  * update size of reserved extents. this function may return -EAGAIN
  * if 'reserve' is true or 'sinfo' is false.
  */
-static int update_reserved_bytes(struct btrfs_block_group_cache *cache,
-                                 u64 num_bytes, int reserve, int sinfo)
+int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
+                                u64 num_bytes, int reserve, int sinfo)
 {
         int ret = 0;
         if (sinfo) {
@@ -4363,7 +4442,9 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
                 if (ret)
                         break;
 
-                ret = btrfs_discard_extent(root, start, end + 1 - start);
+                if (btrfs_test_opt(root, DISCARD))
+                        ret = btrfs_discard_extent(root, start,
+                                                   end + 1 - start, NULL);
 
                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
                 unpin_extent_range(root, start, end);
@@ -4704,10 +4785,10 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
 
                 btrfs_add_free_space(cache, buf->start, buf->len);
-                ret = update_reserved_bytes(cache, buf->len, 0, 0);
+                ret = btrfs_update_reserved_bytes(cache, buf->len, 0, 0);
                 if (ret == -EAGAIN) {
                         /* block group became read-only */
-                        update_reserved_bytes(cache, buf->len, 0, 1);
+                        btrfs_update_reserved_bytes(cache, buf->len, 0, 1);
                         goto out;
                 }
 
@@ -4744,6 +4825,11 @@ pin:
                 }
         }
 out:
+        /*
+         * Deleting the buffer, clear the corrupt flag since it doesn't matter
+         * anymore.
+         */
+        clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
         btrfs_put_block_group(cache);
 }
 
@@ -5191,7 +5277,7 @@ checks:
                                              search_start - offset);
                 BUG_ON(offset > search_start);
 
-                ret = update_reserved_bytes(block_group, num_bytes, 1,
+                ret = btrfs_update_reserved_bytes(block_group, num_bytes, 1,
                                             (data & BTRFS_BLOCK_GROUP_DATA));
                 if (ret == -EAGAIN) {
                         btrfs_add_free_space(block_group, offset, num_bytes);
@@ -5282,11 +5368,13 @@ loop:
 
                 if (allowed_chunk_alloc) {
                         ret = do_chunk_alloc(trans, root, num_bytes +
-                                             2 * 1024 * 1024, data, 1);
+                                             2 * 1024 * 1024, data,
+                                             CHUNK_ALLOC_LIMITED);
                         allowed_chunk_alloc = 0;
                         done_chunk_alloc = 1;
-                } else if (!done_chunk_alloc) {
-                        space_info->force_alloc = 1;
+                } else if (!done_chunk_alloc &&
+                           space_info->force_alloc == CHUNK_ALLOC_NO_FORCE) {
+                        space_info->force_alloc = CHUNK_ALLOC_LIMITED;
                 }
 
                 if (loop < LOOP_NO_EMPTY_SIZE) {
@@ -5372,7 +5460,8 @@ again:
          */
         if (empty_size || root->ref_cows)
                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
-                                     num_bytes + 2 * 1024 * 1024, data, 0);
+                                     num_bytes + 2 * 1024 * 1024, data,
+                                     CHUNK_ALLOC_NO_FORCE);
 
         WARN_ON(num_bytes < root->sectorsize);
         ret = find_free_extent(trans, root, num_bytes, empty_size,
@@ -5384,7 +5473,7 @@ again:
                 num_bytes = num_bytes & ~(root->sectorsize - 1);
                 num_bytes = max(num_bytes, min_alloc_size);
                 do_chunk_alloc(trans, root->fs_info->extent_root,
-                               num_bytes, data, 1);
+                               num_bytes, data, CHUNK_ALLOC_FORCE);
                 goto again;
         }
         if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) {
@@ -5397,6 +5486,8 @@ again:
                 dump_space_info(sinfo, num_bytes, 1);
         }
 
+        trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
+
         return ret;
 }
 
@@ -5412,12 +5503,15 @@ int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
                 return -ENOSPC;
         }
 
-        ret = btrfs_discard_extent(root, start, len);
+        if (btrfs_test_opt(root, DISCARD))
+                ret = btrfs_discard_extent(root, start, len, NULL);
 
         btrfs_add_free_space(cache, start, len);
-        update_reserved_bytes(cache, len, 0, 1);
+        btrfs_update_reserved_bytes(cache, len, 0, 1);
         btrfs_put_block_group(cache);
 
+        trace_btrfs_reserved_extent_free(root, start, len);
+
         return ret;
 }
 
@@ -5444,7 +5538,8 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
 
         path = btrfs_alloc_path();
-        BUG_ON(!path);
+        if (!path)
+                return -ENOMEM;
 
         path->leave_spinning = 1;
         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
@@ -5614,7 +5709,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
                 put_caching_control(caching_ctl);
         }
 
-        ret = update_reserved_bytes(block_group, ins->offset, 1, 1);
+        ret = btrfs_update_reserved_bytes(block_group, ins->offset, 1, 1);
         BUG_ON(ret);
         btrfs_put_block_group(block_group);
         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
@@ -6047,6 +6142,8 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
                 if (reada && level == 1)
                         reada_walk_down(trans, root, wc, path);
                 next = read_tree_block(root, bytenr, blocksize, generation);
+                if (!next)
+                        return -EIO;
                 btrfs_tree_lock(next);
                 btrfs_set_lock_blocking(next);
         }
@@ -6438,10 +6535,14 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
 
         path = btrfs_alloc_path();
-        BUG_ON(!path);
+        if (!path)
+                return -ENOMEM;
 
         wc = kzalloc(sizeof(*wc), GFP_NOFS);
-        BUG_ON(!wc);
+        if (!wc) {
+                btrfs_free_path(path);
+                return -ENOMEM;
+        }
 
         btrfs_assert_tree_locked(parent);
         parent_level = btrfs_header_level(parent);
@@ -6899,7 +7000,11 @@ static noinline int get_new_locations(struct inode *reloc_inode,
         }
 
         path = btrfs_alloc_path();
-        BUG_ON(!path);
+        if (!path) {
+                if (exts != *extents)
+                        kfree(exts);
+                return -ENOMEM;
+        }
 
         cur_pos = extent_key->objectid - offset;
         last_byte = extent_key->objectid + extent_key->offset;
@@ -6941,6 +7046,10 @@ static noinline int get_new_locations(struct inode *reloc_inode,
                         struct disk_extent *old = exts;
                         max *= 2;
                         exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
+                        if (!exts) {
+                                ret = -ENOMEM;
+                                goto out;
+                        }
                         memcpy(exts, old, sizeof(*exts) * nr);
                         if (old != *extents)
                                 kfree(old);
@@ -7423,7 +7532,8 @@ static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
         int ret;
 
         new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
-        BUG_ON(!new_extent);
+        if (!new_extent)
+                return -ENOMEM;
 
         ref = btrfs_lookup_leaf_ref(root, leaf->start);
         BUG_ON(!ref);
@@ -7609,7 +7719,8 @@ int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
 
         reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
         BUG_ON(!reloc_root);
-        btrfs_orphan_cleanup(reloc_root);
+        ret = btrfs_orphan_cleanup(reloc_root);
+        BUG_ON(ret);
         return 0;
 }
 
@@ -7627,7 +7738,8 @@ static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
                 return 0;
 
         root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
-        BUG_ON(!root_item);
+        if (!root_item)
+                return -ENOMEM;
 
         ret = btrfs_copy_root(trans, root, root->commit_root,
                               &eb, BTRFS_TREE_RELOC_OBJECTID);
@@ -7653,7 +7765,7 @@ static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
 
         reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
                                                  &root_key);
-        BUG_ON(!reloc_root);
+        BUG_ON(IS_ERR(reloc_root));
         reloc_root->last_trans = trans->transid;
         reloc_root->commit_root = NULL;
         reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;
@@ -7906,6 +8018,10 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root,
 
                         eb = read_tree_block(found_root, block_start,
                                              block_size, 0);
+                        if (!eb) {
+                                ret = -EIO;
+                                goto out;
+                        }
                         btrfs_tree_lock(eb);
                         BUG_ON(level != btrfs_header_level(eb));
 
@@ -8061,13 +8177,15 @@ int btrfs_set_block_group_ro(struct btrfs_root *root,
 
         alloc_flags = update_block_group_flags(root, cache->flags);
         if (alloc_flags != cache->flags)
-                do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
+                do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
+                               CHUNK_ALLOC_FORCE);
 
         ret = set_block_group_ro(cache);
         if (!ret)
                 goto out;
         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
-        ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
+        ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
+                             CHUNK_ALLOC_FORCE);
         if (ret < 0)
                 goto out;
         ret = set_block_group_ro(cache);
@@ -8080,7 +8198,8 @@ int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 type)
 {
         u64 alloc_flags = get_alloc_profile(root, type);
-        return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
+        return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
+                              CHUNK_ALLOC_FORCE);
 }
 
 /*
@@ -8621,6 +8740,12 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
         BUG_ON(!block_group);
         BUG_ON(!block_group->ro);
 
+        /*
+         * Free the reserved super bytes from this block group before
+         * remove it.
+         */
+        free_excluded_extents(root, block_group);
+
         memcpy(&key, &block_group->key, sizeof(key));
         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
                                   BTRFS_BLOCK_GROUP_RAID1 |
@@ -8724,13 +8849,84 @@ out:
         return ret;
 }
 
+int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
+{
+        struct btrfs_space_info *space_info;
+        int ret;
+
+        ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM, 0, 0,
+                                &space_info);
+        if (ret)
+                return ret;
+
+        ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA, 0, 0,
+                                &space_info);
+        if (ret)
+                return ret;
+
+        ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA, 0, 0,
+                                &space_info);
+        if (ret)
+                return ret;
+
+        return ret;
+}
+
 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
 {
         return unpin_extent_range(root, start, end);
 }
 
 int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
-                               u64 num_bytes)
+                               u64 num_bytes, u64 *actual_bytes)
+{
+        return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
+}
+
+int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
 {
-        return btrfs_discard_extent(root, bytenr, num_bytes);
+        struct btrfs_fs_info *fs_info = root->fs_info;
+        struct btrfs_block_group_cache *cache = NULL;
+        u64 group_trimmed;
+        u64 start;
+        u64 end;
+        u64 trimmed = 0;
+        int ret = 0;
+
+        cache = btrfs_lookup_block_group(fs_info, range->start);
+
+        while (cache) {
+                if (cache->key.objectid >= (range->start + range->len)) {
+                        btrfs_put_block_group(cache);
+                        break;
+                }
+
+                start = max(range->start, cache->key.objectid);
+                end = min(range->start + range->len,
+                          cache->key.objectid + cache->key.offset);
+
+                if (end - start >= range->minlen) {
+                        if (!block_group_cache_done(cache)) {
+                                ret = cache_block_group(cache, NULL, root, 0);
+                                if (!ret)
+                                        wait_block_group_cache_done(cache);
+                        }
+                        ret = btrfs_trim_block_group(cache,
+                                                     &group_trimmed,
+                                                     start,
+                                                     end,
+                                                     range->minlen);
+
+                        trimmed += group_trimmed;
+                        if (ret) {
+                                btrfs_put_block_group(cache);
+                                break;
+                        }
+                }
+
+                cache = next_block_group(fs_info->tree_root, cache);
+        }
+
+        range->len = trimmed;
+        return ret;
 }
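Editor's note: the new btrfs_trim_fs() above consumes a struct fstrim_range and reports the number of bytes actually trimmed back through range->len. For context, this is the usual shape of a userspace caller driving it through the generic FITRIM ioctl; the ioctl wiring that calls btrfs_trim_fs() lives outside this file, and the mount point path below is only an example.

#include <fcntl.h>
#include <limits.h>
#include <linux/fs.h>           /* FITRIM, struct fstrim_range */
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        /* trim the whole filesystem, accepting any extent length */
        struct fstrim_range range = {
                .start = 0,
                .len = ULLONG_MAX,
                .minlen = 0,
        };
        int fd = open("/mnt/btrfs", O_RDONLY);  /* example mount point */

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (ioctl(fd, FITRIM, &range) < 0) {
                perror("FITRIM");
                close(fd);
                return 1;
        }
        /* the kernel writes the trimmed byte count back into range.len */
        printf("trimmed %llu bytes\n", (unsigned long long)range.len);
        close(fd);
        return 0;
}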