path: root/fs/btrfs/extent-tree.c
Diffstat (limited to 'fs/btrfs/extent-tree.c')
-rw-r--r--  fs/btrfs/extent-tree.c | 167
1 file changed, 107 insertions(+), 60 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 94627c4cc193..b34d32fdaaec 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -22,6 +22,7 @@
 #include <linux/sort.h>
 #include <linux/rcupdate.h>
 #include <linux/kthread.h>
+#include <linux/slab.h>
 #include "compat.h"
 #include "hash.h"
 #include "ctree.h"
@@ -83,6 +84,17 @@ static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
 	return (cache->flags & bits) == bits;
 }
 
+void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
+{
+	atomic_inc(&cache->count);
+}
+
+void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
+{
+	if (atomic_dec_and_test(&cache->count))
+		kfree(cache);
+}
+
 /*
  * this adds the block group to the fs_info rb tree for the block group
  * cache
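
The two helpers added above centralize block group reference counting; later
hunks convert the open-coded atomic_inc(&cache->count) callers over to them.
A rough userspace sketch of the same get/put lifetime pattern, with
illustrative names and C11 atomics standing in for the kernel's atomic_t:

	#include <stdatomic.h>
	#include <stdlib.h>

	struct block_group {
		atomic_int count;	/* holds 1 for the creator */
		/* ... cached per-group state ... */
	};

	static void block_group_get(struct block_group *bg)
	{
		atomic_fetch_add(&bg->count, 1);
	}

	static void block_group_put(struct block_group *bg)
	{
		/* the last put frees, as btrfs_put_block_group() does */
		if (atomic_fetch_sub(&bg->count, 1) == 1)
			free(bg);
	}
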
@@ -156,7 +168,7 @@ block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
 		}
 	}
 	if (ret)
-		atomic_inc(&ret->count);
+		btrfs_get_block_group(ret);
 	spin_unlock(&info->block_group_cache_lock);
 
 	return ret;
@@ -195,6 +207,14 @@ static int exclude_super_stripes(struct btrfs_root *root,
 	int stripe_len;
 	int i, nr, ret;
 
+	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
+		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
+		cache->bytes_super += stripe_len;
+		ret = add_excluded_extent(root, cache->key.objectid,
+					  stripe_len);
+		BUG_ON(ret);
+	}
+
 	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
 		bytenr = btrfs_sb_offset(i);
 		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
@@ -255,7 +275,7 @@ static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
 		if (ret)
 			break;
 
-		if (extent_start == start) {
+		if (extent_start <= start) {
 			start = extent_end + 1;
 		} else if (extent_start > start && extent_start < end) {
 			size = extent_start - start;
@@ -399,6 +419,8 @@ err:
 
 	put_caching_control(caching_ctl);
 	atomic_dec(&block_group->space_info->caching_threads);
+	btrfs_put_block_group(block_group);
+
 	return 0;
 }
 
@@ -439,6 +461,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache)
 	up_write(&fs_info->extent_commit_sem);
 
 	atomic_inc(&cache->space_info->caching_threads);
+	btrfs_get_block_group(cache);
 
 	tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n",
 			  cache->key.objectid);
@@ -478,12 +501,6 @@ struct btrfs_block_group_cache *btrfs_lookup_block_group(
 	return cache;
 }
 
-void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
-{
-	if (atomic_dec_and_test(&cache->count))
-		kfree(cache);
-}
-
 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
 						  u64 flags)
 {
@@ -2574,7 +2591,7 @@ next_block_group(struct btrfs_root *root,
 	if (node) {
 		cache = rb_entry(node, struct btrfs_block_group_cache,
 				 cache_node);
-		atomic_inc(&cache->count);
+		btrfs_get_block_group(cache);
 	} else
 		cache = NULL;
 	spin_unlock(&root->fs_info->block_group_cache_lock);
@@ -2660,6 +2677,8 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
 
 	INIT_LIST_HEAD(&found->block_groups);
 	init_rwsem(&found->groups_sem);
+	init_waitqueue_head(&found->flush_wait);
+	init_waitqueue_head(&found->allocate_wait);
 	spin_lock_init(&found->lock);
 	found->flags = flags;
 	found->total_bytes = total_bytes;
@@ -2830,7 +2849,7 @@ int btrfs_unreserve_metadata_for_delalloc(struct btrfs_root *root,
 	}
 	spin_unlock(&BTRFS_I(inode)->accounting_lock);
 
-	BTRFS_I(inode)->reserved_extents--;
+	BTRFS_I(inode)->reserved_extents -= num_items;
 	BUG_ON(BTRFS_I(inode)->reserved_extents < 0);
 
 	if (meta_sinfo->bytes_delalloc < num_bytes) {
@@ -2880,9 +2899,9 @@ static noinline void flush_delalloc_async(struct btrfs_work *work)
 	root = async->root;
 	info = async->info;
 
-	btrfs_start_delalloc_inodes(root);
+	btrfs_start_delalloc_inodes(root, 0);
 	wake_up(&info->flush_wait);
-	btrfs_wait_ordered_extents(root, 0);
+	btrfs_wait_ordered_extents(root, 0, 0);
 
 	spin_lock(&info->lock);
 	info->flushing = 0;
@@ -2928,12 +2947,10 @@ static void flush_delalloc(struct btrfs_root *root,
 
 	spin_lock(&info->lock);
 
-	if (!info->flushing) {
+	if (!info->flushing)
 		info->flushing = 1;
-		init_waitqueue_head(&info->flush_wait);
-	} else {
+	else
 		wait = true;
-	}
 
 	spin_unlock(&info->lock);
 
@@ -2956,8 +2973,8 @@ static void flush_delalloc(struct btrfs_root *root,
 		return;
 
 flush:
-	btrfs_start_delalloc_inodes(root);
-	btrfs_wait_ordered_extents(root, 0);
+	btrfs_start_delalloc_inodes(root, 0);
+	btrfs_wait_ordered_extents(root, 0, 0);
 
 	spin_lock(&info->lock);
 	info->flushing = 0;
@@ -2995,7 +3012,6 @@ static int maybe_allocate_chunk(struct btrfs_root *root,
 	if (!info->allocating_chunk) {
 		info->force_alloc = 1;
 		info->allocating_chunk = 1;
-		init_waitqueue_head(&info->allocate_wait);
 	} else {
 		wait = true;
 	}
@@ -3095,7 +3111,7 @@ again:
 		return -ENOSPC;
 	}
 
-	BTRFS_I(inode)->reserved_extents++;
+	BTRFS_I(inode)->reserved_extents += num_items;
 	check_force_delalloc(meta_sinfo);
 	spin_unlock(&meta_sinfo->lock);
 
@@ -3219,7 +3235,8 @@ int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode,
 			      u64 bytes)
 {
 	struct btrfs_space_info *data_sinfo;
-	int ret = 0, committed = 0;
+	u64 used;
+	int ret = 0, committed = 0, flushed = 0;
 
 	/* make sure bytes are sectorsize aligned */
 	bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
@@ -3231,12 +3248,21 @@ int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode,
 again:
 	/* make sure we have enough space to handle the data first */
 	spin_lock(&data_sinfo->lock);
-	if (data_sinfo->total_bytes - data_sinfo->bytes_used -
-	    data_sinfo->bytes_delalloc - data_sinfo->bytes_reserved -
-	    data_sinfo->bytes_pinned - data_sinfo->bytes_readonly -
-	    data_sinfo->bytes_may_use - data_sinfo->bytes_super < bytes) {
+	used = data_sinfo->bytes_used + data_sinfo->bytes_delalloc +
+		data_sinfo->bytes_reserved + data_sinfo->bytes_pinned +
+		data_sinfo->bytes_readonly + data_sinfo->bytes_may_use +
+		data_sinfo->bytes_super;
+
+	if (used + bytes > data_sinfo->total_bytes) {
 		struct btrfs_trans_handle *trans;
 
+		if (!flushed) {
+			spin_unlock(&data_sinfo->lock);
+			flush_delalloc(root, data_sinfo);
+			flushed = 1;
+			goto again;
+		}
+
 		/*
 		 * if we don't have enough free bytes in this space then we need
 		 * to alloc a new chunk.
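
The hunk above turns the data space check into flush-once-then-retry: on the
first shortfall it drops the lock, flushes delalloc (which can release
reserved bytes), and re-evaluates before falling back to chunk allocation or
a transaction commit. A compilable sketch of that shape, with a pthread mutex
and a stub flush standing in for the kernel primitives:

	#include <errno.h>
	#include <pthread.h>
	#include <stdint.h>

	typedef uint64_t u64;

	struct space_info {
		pthread_mutex_t lock;
		u64 total_bytes, bytes_used, bytes_delalloc, bytes_reserved,
		    bytes_pinned, bytes_readonly, bytes_may_use, bytes_super;
	};

	void flush_delalloc(struct space_info *info);	/* stub */

	int reserve_data_space(struct space_info *info, u64 bytes)
	{
		int flushed = 0;
		u64 used;
	again:
		pthread_mutex_lock(&info->lock);
		used = info->bytes_used + info->bytes_delalloc +
		       info->bytes_reserved + info->bytes_pinned +
		       info->bytes_readonly + info->bytes_may_use +
		       info->bytes_super;
		if (used + bytes > info->total_bytes) {
			pthread_mutex_unlock(&info->lock);
			if (!flushed) {
				flush_delalloc(info);
				flushed = 1;
				goto again;
			}
			return -ENOSPC;	/* next: chunk alloc or commit */
		}
		info->bytes_may_use += bytes;
		pthread_mutex_unlock(&info->lock);
		return 0;
	}
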
@@ -3454,14 +3480,6 @@ static int update_block_group(struct btrfs_trans_handle *trans,
 	else
 		old_val -= num_bytes;
 	btrfs_set_super_bytes_used(&info->super_copy, old_val);
-
-	/* block accounting for root item */
-	old_val = btrfs_root_used(&root->root_item);
-	if (alloc)
-		old_val += num_bytes;
-	else
-		old_val -= num_bytes;
-	btrfs_set_root_used(&root->root_item, old_val);
 	spin_unlock(&info->delalloc_lock);
 
 	while (total) {
@@ -4049,6 +4067,21 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans,
 	return ret;
 }
 
+int btrfs_free_tree_block(struct btrfs_trans_handle *trans,
+			  struct btrfs_root *root,
+			  u64 bytenr, u32 blocksize,
+			  u64 parent, u64 root_objectid, int level)
+{
+	u64 used;
+	spin_lock(&root->node_lock);
+	used = btrfs_root_used(&root->root_item) - blocksize;
+	btrfs_set_root_used(&root->root_item, used);
+	spin_unlock(&root->node_lock);
+
+	return btrfs_free_extent(trans, root, bytenr, blocksize,
+				 parent, root_objectid, level, 0);
+}
+
 static u64 stripe_align(struct btrfs_root *root, u64 val)
 {
 	u64 mask = ((u64)root->stripesize - 1);
@@ -4147,6 +4180,10 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 	ins->offset = 0;
 
 	space_info = __find_space_info(root->fs_info, data);
+	if (!space_info) {
+		printk(KERN_ERR "No space info for %d\n", data);
+		return -ENOSPC;
+	}
 
 	if (orig_root->ref_cows || empty_size)
 		allowed_chunk_alloc = 1;
@@ -4212,7 +4249,7 @@ search:
 		u64 offset;
 		int cached;
 
-		atomic_inc(&block_group->count);
+		btrfs_get_block_group(block_group);
 		search_start = block_group->key.objectid;
 
 have_block_group:
@@ -4300,7 +4337,7 @@ have_block_group:
 
 			btrfs_put_block_group(block_group);
 			block_group = last_ptr->block_group;
-			atomic_inc(&block_group->count);
+			btrfs_get_block_group(block_group);
 			spin_unlock(&last_ptr->lock);
 			spin_unlock(&last_ptr->refill_lock);
 
@@ -4578,7 +4615,6 @@ int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
 {
 	int ret;
 	u64 search_start = 0;
-	struct btrfs_fs_info *info = root->fs_info;
 
 	data = btrfs_get_alloc_profile(root, data);
 again:
@@ -4586,17 +4622,9 @@ again:
 	 * the only place that sets empty_size is btrfs_realloc_node, which
 	 * is not called recursively on allocations
 	 */
-	if (empty_size || root->ref_cows) {
-		if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
-			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
-				     2 * 1024 * 1024,
-				     BTRFS_BLOCK_GROUP_METADATA |
-				     (info->metadata_alloc_profile &
-				      info->avail_metadata_alloc_bits), 0);
-		}
+	if (empty_size || root->ref_cows)
 		ret = do_chunk_alloc(trans, root->fs_info->extent_root,
 				     num_bytes + 2 * 1024 * 1024, data, 0);
-	}
 
 	WARN_ON(num_bytes < root->sectorsize);
 	ret = find_free_extent(trans, root, num_bytes, empty_size,
@@ -4897,6 +4925,14 @@ static int alloc_tree_block(struct btrfs_trans_handle *trans,
 					extent_op);
 		BUG_ON(ret);
 	}
+
+	if (root_objectid == root->root_key.objectid) {
+		u64 used;
+		spin_lock(&root->node_lock);
+		used = btrfs_root_used(&root->root_item) + num_bytes;
+		btrfs_set_root_used(&root->root_item, used);
+		spin_unlock(&root->node_lock);
+	}
 	return ret;
 }
 
@@ -4919,8 +4955,16 @@ struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
 	btrfs_set_buffer_uptodate(buf);
 
 	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
-		set_extent_dirty(&root->dirty_log_pages, buf->start,
-			 buf->start + buf->len - 1, GFP_NOFS);
+		/*
+		 * we allow two log transactions at a time, use different
+		 * EXTENT bit to differentiate dirty pages.
+		 */
+		if (root->log_transid % 2 == 0)
+			set_extent_dirty(&root->dirty_log_pages, buf->start,
+					buf->start + buf->len - 1, GFP_NOFS);
+		else
+			set_extent_new(&root->dirty_log_pages, buf->start,
+					buf->start + buf->len - 1, GFP_NOFS);
 	} else {
 		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
 			 buf->start + buf->len - 1, GFP_NOFS);
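
The btrfs_init_new_buffer() hunk above lets two log transactions be in
flight at once by tagging each one's dirty pages with a different extent
bit, chosen by log_transid parity, so a log commit only touches its own
pages. The dispatch reduces to something like this (illustrative constants,
not the kernel's EXTENT_* flags):

	#include <stdint.h>

	enum { MARK_DIRTY = 1 << 0, MARK_NEW = 1 << 1 };

	/* even log transids use the dirty bit, odd ones the new bit */
	static int log_mark_for(uint64_t log_transid)
	{
		return (log_transid % 2 == 0) ? MARK_DIRTY : MARK_NEW;
	}
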
@@ -5175,6 +5219,8 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
 	next = btrfs_find_tree_block(root, bytenr, blocksize);
 	if (!next) {
 		next = btrfs_find_create_tree_block(root, bytenr, blocksize);
+		if (!next)
+			return -ENOMEM;
 		reada = 1;
 	}
 	btrfs_tree_lock(next);
@@ -5372,10 +5418,6 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
 	int ret;
 
 	while (level >= 0) {
-		if (path->slots[level] >=
-		    btrfs_header_nritems(path->nodes[level]))
-			break;
-
 		ret = walk_down_proc(trans, root, path, wc, lookup_info);
 		if (ret > 0)
 			break;
@@ -5383,11 +5425,16 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
 		if (level == 0)
 			break;
 
+		if (path->slots[level] >=
+		    btrfs_header_nritems(path->nodes[level]))
+			break;
+
 		ret = do_walk_down(trans, root, path, wc, &lookup_info);
 		if (ret > 0) {
 			path->slots[level]++;
 			continue;
-		}
+		} else if (ret < 0)
+			return ret;
 		level = wc->level;
 	}
 	return 0;
@@ -6531,6 +6578,7 @@ static noinline int invalidate_extent_cache(struct btrfs_root *root,
 	struct btrfs_key key;
 	struct inode *inode = NULL;
 	struct btrfs_file_extent_item *fi;
+	struct extent_state *cached_state = NULL;
 	u64 num_bytes;
 	u64 skip_objectid = 0;
 	u32 nritems;
@@ -6559,12 +6607,14 @@ static noinline int invalidate_extent_cache(struct btrfs_root *root,
 		}
 		num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
 
-		lock_extent(&BTRFS_I(inode)->io_tree, key.offset,
-			    key.offset + num_bytes - 1, GFP_NOFS);
+		lock_extent_bits(&BTRFS_I(inode)->io_tree, key.offset,
+				 key.offset + num_bytes - 1, 0, &cached_state,
+				 GFP_NOFS);
 		btrfs_drop_extent_cache(inode, key.offset,
 					key.offset + num_bytes - 1, 1);
-		unlock_extent(&BTRFS_I(inode)->io_tree, key.offset,
-			      key.offset + num_bytes - 1, GFP_NOFS);
+		unlock_extent_cached(&BTRFS_I(inode)->io_tree, key.offset,
+				     key.offset + num_bytes - 1, &cached_state,
+				     GFP_NOFS);
 		cond_resched();
 	}
 	iput(inode);
@@ -7336,7 +7386,6 @@ static int find_first_block_group(struct btrfs_root *root,
 		}
 		path->slots[0]++;
 	}
-	ret = -ENOENT;
 out:
 	return ret;
 }
@@ -7373,9 +7422,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
 		wait_block_group_cache_done(block_group);
 
 		btrfs_remove_free_space_cache(block_group);
-
-		WARN_ON(atomic_read(&block_group->count) != 1);
-		kfree(block_group);
+		btrfs_put_block_group(block_group);
 
 		spin_lock(&info->block_group_cache_lock);
 	}