Diffstat (limited to 'fs/btrfs/extent-tree.c')
 fs/btrfs/extent-tree.c | 112
 1 file changed, 70 insertions(+), 42 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 94627c4cc193..559f72489b3b 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -83,6 +83,17 @@ static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
 	return (cache->flags & bits) == bits;
 }
 
+void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
+{
+	atomic_inc(&cache->count);
+}
+
+void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
+{
+	if (atomic_dec_and_test(&cache->count))
+		kfree(cache);
+}
+
 /*
  * this adds the block group to the fs_info rb tree for the block group
  * cache
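These two helpers introduce the standard get/put refcounting pattern for block group caches: every holder takes a reference, and the object is freed on the last put. A minimal userspace sketch of the same pattern, assuming C11 atomics in place of the kernel's atomic_t (all names below are illustrative, not btrfs API):

/*
 * Illustrative userspace sketch of the get/put refcount pattern the
 * patch factors into btrfs_get_block_group()/btrfs_put_block_group().
 * C11 atomics stand in for the kernel's atomic_t; all names here are
 * hypothetical, not part of the btrfs API.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct cache_obj {
	atomic_int count;	/* starts at 1 for the creator's reference */
};

static void obj_get(struct cache_obj *obj)
{
	atomic_fetch_add(&obj->count, 1);
}

static void obj_put(struct cache_obj *obj)
{
	/* free on the 1 -> 0 transition, like atomic_dec_and_test() */
	if (atomic_fetch_sub(&obj->count, 1) == 1) {
		printf("last reference dropped, freeing\n");
		free(obj);
	}
}

int main(void)
{
	struct cache_obj *obj = malloc(sizeof(*obj));

	atomic_init(&obj->count, 1);
	obj_get(obj);		/* hand a reference to another user */
	obj_put(obj);		/* that user is done */
	obj_put(obj);		/* creator drops the last reference */
	return 0;
}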
@@ -156,7 +167,7 @@ block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
 		}
 	}
 	if (ret)
-		atomic_inc(&ret->count);
+		btrfs_get_block_group(ret);
 	spin_unlock(&info->block_group_cache_lock);
 
 	return ret;
@@ -195,6 +206,14 @@ static int exclude_super_stripes(struct btrfs_root *root,
 	int stripe_len;
 	int i, nr, ret;
 
+	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
+		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
+		cache->bytes_super += stripe_len;
+		ret = add_excluded_extent(root, cache->key.objectid,
+					  stripe_len);
+		BUG_ON(ret);
+	}
+
 	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
 		bytenr = btrfs_sb_offset(i);
 		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
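This new guard handles block groups that begin below BTRFS_SUPER_INFO_OFFSET: the bytes from the group's start up to that offset hold the primary superblock and must be excluded from free-space accounting. A small sketch of the arithmetic, assuming the 64K value of BTRFS_SUPER_INFO_OFFSET (the names and layout here are illustrative):

/*
 * Sketch of the new guard in exclude_super_stripes(): a block group
 * starting below the superblock offset must carve out everything up
 * to that offset. SUPER_INFO_OFFSET mirrors BTRFS_SUPER_INFO_OFFSET;
 * the rest is hypothetical userspace code.
 */
#include <stdint.h>
#include <stdio.h>

#define SUPER_INFO_OFFSET	(64 * 1024)	/* 64K primary superblock */

int main(void)
{
	uint64_t objectid = 0;	/* a block group starting at byte 0 */

	if (objectid < SUPER_INFO_OFFSET) {
		uint64_t stripe_len = SUPER_INFO_OFFSET - objectid;

		printf("exclude [%llu, %llu): %llu bytes kept for the superblock\n",
		       (unsigned long long)objectid,
		       (unsigned long long)SUPER_INFO_OFFSET,
		       (unsigned long long)stripe_len);
	}
	return 0;
}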
@@ -255,7 +274,7 @@ static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
 		if (ret)
 			break;
 
-		if (extent_start == start) {
+		if (extent_start <= start) {
 			start = extent_end + 1;
 		} else if (extent_start > start && extent_start < end) {
 			size = extent_start - start;
@@ -399,6 +418,8 @@ err:
 
 	put_caching_control(caching_ctl);
 	atomic_dec(&block_group->space_info->caching_threads);
+	btrfs_put_block_group(block_group);
+
 	return 0;
 }
 
@@ -439,6 +460,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache)
 	up_write(&fs_info->extent_commit_sem);
 
 	atomic_inc(&cache->space_info->caching_threads);
+	btrfs_get_block_group(cache);
 
 	tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n",
 			  cache->key.objectid);
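Taking a reference here pairs with the btrfs_put_block_group() added at the end of caching_kthread() above: the caching thread can outlive the caller, so the block group must stay pinned for the thread's whole lifetime. A hedged userspace sketch of that handoff, with pthreads standing in for kthread_run() (names are illustrative):

/*
 * Sketch of pinning an object across a thread handoff: the creator
 * takes an extra reference on the worker's behalf before starting it,
 * and the worker drops that reference as its last act. Userspace
 * pthreads stand in for kthread_run(); all names are hypothetical.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct pinned { atomic_int count; };

static void pin_put(struct pinned *p)
{
	if (atomic_fetch_sub(&p->count, 1) == 1)
		free(p);
}

static void *worker(void *arg)
{
	struct pinned *p = arg;

	/* ... long-running work against *p ... */
	pin_put(p);		/* drop the reference taken for us */
	return NULL;
}

int main(void)
{
	struct pinned *p = malloc(sizeof(*p));
	pthread_t t;

	atomic_init(&p->count, 1);
	atomic_fetch_add(&p->count, 1);	/* reference owned by the worker */
	pthread_create(&t, NULL, worker, p);
	pin_put(p);			/* caller may drop its own ref now */
	pthread_join(t, NULL);
	return 0;
}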
@@ -478,12 +500,6 @@ struct btrfs_block_group_cache *btrfs_lookup_block_group(
 	return cache;
 }
 
-void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
-{
-	if (atomic_dec_and_test(&cache->count))
-		kfree(cache);
-}
-
 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
 						  u64 flags)
 {
@@ -2574,7 +2590,7 @@ next_block_group(struct btrfs_root *root,
 	if (node) {
 		cache = rb_entry(node, struct btrfs_block_group_cache,
 				 cache_node);
-		atomic_inc(&cache->count);
+		btrfs_get_block_group(cache);
 	} else
 		cache = NULL;
 	spin_unlock(&root->fs_info->block_group_cache_lock);
@@ -2880,9 +2896,9 @@ static noinline void flush_delalloc_async(struct btrfs_work *work)
 	root = async->root;
 	info = async->info;
 
-	btrfs_start_delalloc_inodes(root);
+	btrfs_start_delalloc_inodes(root, 0);
 	wake_up(&info->flush_wait);
-	btrfs_wait_ordered_extents(root, 0);
+	btrfs_wait_ordered_extents(root, 0, 0);
 
 	spin_lock(&info->lock);
 	info->flushing = 0;
@@ -2956,8 +2972,8 @@ static void flush_delalloc(struct btrfs_root *root,
 		return;
 
 flush:
-	btrfs_start_delalloc_inodes(root);
-	btrfs_wait_ordered_extents(root, 0);
+	btrfs_start_delalloc_inodes(root, 0);
+	btrfs_wait_ordered_extents(root, 0, 0);
 
 	spin_lock(&info->lock);
 	info->flushing = 0;
@@ -3454,14 +3470,6 @@ static int update_block_group(struct btrfs_trans_handle *trans,
 	else
 		old_val -= num_bytes;
 	btrfs_set_super_bytes_used(&info->super_copy, old_val);
-
-	/* block accounting for root item */
-	old_val = btrfs_root_used(&root->root_item);
-	if (alloc)
-		old_val += num_bytes;
-	else
-		old_val -= num_bytes;
-	btrfs_set_root_used(&root->root_item, old_val);
 	spin_unlock(&info->delalloc_lock);
 
 	while (total) {
@@ -4049,6 +4057,21 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans,
 	return ret;
 }
 
+int btrfs_free_tree_block(struct btrfs_trans_handle *trans,
+			  struct btrfs_root *root,
+			  u64 bytenr, u32 blocksize,
+			  u64 parent, u64 root_objectid, int level)
+{
+	u64 used;
+	spin_lock(&root->node_lock);
+	used = btrfs_root_used(&root->root_item) - blocksize;
+	btrfs_set_root_used(&root->root_item, used);
+	spin_unlock(&root->node_lock);
+
+	return btrfs_free_extent(trans, root, bytenr, blocksize,
+				 parent, root_objectid, level, 0);
+}
+
 static u64 stripe_align(struct btrfs_root *root, u64 val)
 {
 	u64 mask = ((u64)root->stripesize - 1);
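btrfs_free_tree_block() takes over the root-item byte accounting that the earlier hunk removed from update_block_group(), as a locked read-modify-write under root->node_lock. A minimal sketch of the same pattern, assuming a pthread mutex in place of the kernel spinlock (names are illustrative):

/*
 * Sketch of the locked read-modify-write in btrfs_free_tree_block():
 * take the lock, adjust the counter, release the lock. A pthread
 * mutex stands in for root->node_lock; names are hypothetical.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct root_item { uint64_t bytes_used; };

static pthread_mutex_t node_lock = PTHREAD_MUTEX_INITIALIZER;

static void account_free(struct root_item *item, uint32_t blocksize)
{
	pthread_mutex_lock(&node_lock);
	item->bytes_used -= blocksize;
	pthread_mutex_unlock(&node_lock);
}

int main(void)
{
	struct root_item item = { .bytes_used = 16384 };

	account_free(&item, 4096);	/* free one 4K tree block */
	printf("bytes_used now %llu\n", (unsigned long long)item.bytes_used);
	return 0;
}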
@@ -4212,7 +4235,7 @@ search:
 		u64 offset;
 		int cached;
 
-		atomic_inc(&block_group->count);
+		btrfs_get_block_group(block_group);
 		search_start = block_group->key.objectid;
 
 have_block_group:
@@ -4300,7 +4323,7 @@ have_block_group:
 
 			btrfs_put_block_group(block_group);
 			block_group = last_ptr->block_group;
-			atomic_inc(&block_group->count);
+			btrfs_get_block_group(block_group);
 			spin_unlock(&last_ptr->lock);
 			spin_unlock(&last_ptr->refill_lock);
 
@@ -4578,7 +4601,6 @@ int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
 {
 	int ret;
 	u64 search_start = 0;
-	struct btrfs_fs_info *info = root->fs_info;
 
 	data = btrfs_get_alloc_profile(root, data);
 again:
@@ -4586,17 +4608,9 @@ again:
 	 * the only place that sets empty_size is btrfs_realloc_node, which
 	 * is not called recursively on allocations
 	 */
-	if (empty_size || root->ref_cows) {
-		if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
-			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
-					     2 * 1024 * 1024,
-					     BTRFS_BLOCK_GROUP_METADATA |
-					     (info->metadata_alloc_profile &
-					      info->avail_metadata_alloc_bits), 0);
-		}
+	if (empty_size || root->ref_cows)
 		ret = do_chunk_alloc(trans, root->fs_info->extent_root,
 				     num_bytes + 2 * 1024 * 1024, data, 0);
-	}
 
 	WARN_ON(num_bytes < root->sectorsize);
 	ret = find_free_extent(trans, root, num_bytes, empty_size,
@@ -4897,6 +4911,14 @@ static int alloc_tree_block(struct btrfs_trans_handle *trans,
 					extent_op);
 		BUG_ON(ret);
 	}
+
+	if (root_objectid == root->root_key.objectid) {
+		u64 used;
+		spin_lock(&root->node_lock);
+		used = btrfs_root_used(&root->root_item) + num_bytes;
+		btrfs_set_root_used(&root->root_item, used);
+		spin_unlock(&root->node_lock);
+	}
 	return ret;
 }
 
@@ -4919,8 +4941,16 @@ struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
 	btrfs_set_buffer_uptodate(buf);
 
 	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
-		set_extent_dirty(&root->dirty_log_pages, buf->start,
-				 buf->start + buf->len - 1, GFP_NOFS);
+		/*
+		 * we allow two log transactions at a time, use different
+		 * EXTENT bit to differentiate dirty pages.
+		 */
+		if (root->log_transid % 2 == 0)
+			set_extent_dirty(&root->dirty_log_pages, buf->start,
+					 buf->start + buf->len - 1, GFP_NOFS);
+		else
+			set_extent_new(&root->dirty_log_pages, buf->start,
+				       buf->start + buf->len - 1, GFP_NOFS);
 	} else {
 		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
 				 buf->start + buf->len - 1, GFP_NOFS);
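With two log transactions allowed in flight, each marks its dirty pages with a different extent bit, keyed off the parity of log_transid, so a log commit writes out and clears only its own pages. A small sketch of the parity-based selection, with illustrative constants standing in for EXTENT_DIRTY/EXTENT_NEW:

/*
 * Sketch of the two-bit scheme in btrfs_init_new_buffer(): an even
 * log transid marks its pages with one bit, an odd transid with
 * another, so overlapping log commits never flush each other's pages.
 * Constants and names below are hypothetical, not the kernel's.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_BIT_DIRTY	(1u << 0)	/* stands in for EXTENT_DIRTY */
#define PAGE_BIT_NEW	(1u << 1)	/* stands in for EXTENT_NEW */

static unsigned int log_mark_for(uint64_t log_transid)
{
	return (log_transid % 2 == 0) ? PAGE_BIT_DIRTY : PAGE_BIT_NEW;
}

int main(void)
{
	for (uint64_t transid = 10; transid < 14; transid++)
		printf("transid %llu -> bit %#x\n",
		       (unsigned long long)transid, log_mark_for(transid));
	return 0;
}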
@@ -5372,10 +5402,6 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
 	int ret;
 
 	while (level >= 0) {
-		if (path->slots[level] >=
-		    btrfs_header_nritems(path->nodes[level]))
-			break;
-
 		ret = walk_down_proc(trans, root, path, wc, lookup_info);
 		if (ret > 0)
 			break;
@@ -5383,6 +5409,10 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
 		if (level == 0)
 			break;
 
+		if (path->slots[level] >=
+		    btrfs_header_nritems(path->nodes[level]))
+			break;
+
 		ret = do_walk_down(trans, root, path, wc, &lookup_info);
 		if (ret > 0) {
 			path->slots[level]++;
@@ -7373,9 +7403,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
 		wait_block_group_cache_done(block_group);
 
 		btrfs_remove_free_space_cache(block_group);
-
-		WARN_ON(atomic_read(&block_group->count) != 1);
-		kfree(block_group);
+		btrfs_put_block_group(block_group);
 
 		spin_lock(&info->block_group_cache_lock);
 	}