Diffstat (limited to 'fs/btrfs/extent-tree.c')
-rw-r--r--	fs/btrfs/extent-tree.c	95
1 file changed, 60 insertions(+), 35 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 56e50137d0e6..b34d32fdaaec 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -22,6 +22,7 @@
 #include <linux/sort.h>
 #include <linux/rcupdate.h>
 #include <linux/kthread.h>
+#include <linux/slab.h>
 #include "compat.h"
 #include "hash.h"
 #include "ctree.h"
@@ -83,6 +84,17 @@ static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
 	return (cache->flags & bits) == bits;
 }
 
+void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
+{
+	atomic_inc(&cache->count);
+}
+
+void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
+{
+	if (atomic_dec_and_test(&cache->count))
+		kfree(cache);
+}
+
 /*
  * this adds the block group to the fs_info rb tree for the block group
  * cache
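
These helpers give the block-group refcount a named get to pair with btrfs_put_block_group(), so the open-coded atomic_inc(&cache->count) call sites below can all be converted. A minimal userspace sketch of the same get/put lifetime pattern, with simplified stand-in names rather than the kernel types:

	#include <stdatomic.h>
	#include <stdlib.h>

	struct cache {
		atomic_int count;	/* holds 1 for the creator's reference */
	};

	static void cache_get(struct cache *c)
	{
		atomic_fetch_add(&c->count, 1);
	}

	static void cache_put(struct cache *c)
	{
		/* free on the 1 -> 0 transition, as atomic_dec_and_test()
		 * does in the kernel helper */
		if (atomic_fetch_sub(&c->count, 1) == 1)
			free(c);
	}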
@@ -156,7 +168,7 @@ block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
 		}
 	}
 	if (ret)
-		atomic_inc(&ret->count);
+		btrfs_get_block_group(ret);
 	spin_unlock(&info->block_group_cache_lock);
 
 	return ret;
@@ -407,6 +419,8 @@ err:
 
 	put_caching_control(caching_ctl);
 	atomic_dec(&block_group->space_info->caching_threads);
+	btrfs_put_block_group(block_group);
+
 	return 0;
 }
 
@@ -447,6 +461,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache)
 	up_write(&fs_info->extent_commit_sem);
 
 	atomic_inc(&cache->space_info->caching_threads);
+	btrfs_get_block_group(cache);
 
 	tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n",
 			  cache->key.objectid);
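
Paired with the btrfs_put_block_group() added at the end of caching_kthread() above, this reference pins the block group for the whole lifetime of the caching thread, so an unmount cannot free it while the kthread is still running. A hedged pthreads analogue of the hand-off, reusing the cache_get()/cache_put() sketch above (illustrative names only, not the kernel API):

	#include <pthread.h>

	static void *cache_worker(void *arg)
	{
		struct cache *c = arg;
		/* ... long-running caching work ... */
		cache_put(c);		/* drop the starter's reference on exit */
		return NULL;
	}

	static int start_cache_worker(struct cache *c)
	{
		pthread_t tid;

		cache_get(c);		/* pin before handing c to the thread */
		if (pthread_create(&tid, NULL, cache_worker, c) != 0) {
			cache_put(c);	/* thread never ran: undo the pin */
			return -1;
		}
		return pthread_detach(tid);
	}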
@@ -486,12 +501,6 @@ struct btrfs_block_group_cache *btrfs_lookup_block_group(
 	return cache;
 }
 
-void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
-{
-	if (atomic_dec_and_test(&cache->count))
-		kfree(cache);
-}
-
 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
 						  u64 flags)
 {
@@ -2582,7 +2591,7 @@ next_block_group(struct btrfs_root *root,
 	if (node) {
 		cache = rb_entry(node, struct btrfs_block_group_cache,
 				 cache_node);
-		atomic_inc(&cache->count);
+		btrfs_get_block_group(cache);
 	} else
 		cache = NULL;
 	spin_unlock(&root->fs_info->block_group_cache_lock);
@@ -2668,6 +2677,8 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
 
 	INIT_LIST_HEAD(&found->block_groups);
 	init_rwsem(&found->groups_sem);
+	init_waitqueue_head(&found->flush_wait);
+	init_waitqueue_head(&found->allocate_wait);
 	spin_lock_init(&found->lock);
 	found->flags = flags;
 	found->total_bytes = total_bytes;
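
Initializing both waitqueues once, when the space_info is created, replaces the lazy init_waitqueue_head() calls deleted from flush_delalloc() and maybe_allocate_chunk() below; re-initializing a waitqueue head on every flush round risks clobbering a wait list that still has waiters queued from an earlier round. A userspace analogue of the init-once-at-creation rule, with pthread names standing in for the kernel primitives:

	#include <pthread.h>
	#include <stdlib.h>

	struct space_info {
		pthread_mutex_t lock;
		pthread_cond_t flush_wait;	/* waitqueue stand-in */
		int flushing;
	};

	static struct space_info *space_info_new(void)
	{
		struct space_info *info = calloc(1, sizeof(*info));

		if (!info)
			return NULL;
		/* init exactly once here; waiters can never observe a
		 * half-initialized or re-initialized wait object */
		pthread_mutex_init(&info->lock, NULL);
		pthread_cond_init(&info->flush_wait, NULL);
		return info;
	}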
@@ -2838,7 +2849,7 @@ int btrfs_unreserve_metadata_for_delalloc(struct btrfs_root *root,
 	}
 	spin_unlock(&BTRFS_I(inode)->accounting_lock);
 
-	BTRFS_I(inode)->reserved_extents--;
+	BTRFS_I(inode)->reserved_extents -= num_items;
 	BUG_ON(BTRFS_I(inode)->reserved_extents < 0);
 
 	if (meta_sinfo->bytes_delalloc < num_bytes) {
@@ -2936,12 +2947,10 @@ static void flush_delalloc(struct btrfs_root *root,
 
 	spin_lock(&info->lock);
 
-	if (!info->flushing) {
+	if (!info->flushing)
 		info->flushing = 1;
-		init_waitqueue_head(&info->flush_wait);
-	} else {
+	else
 		wait = true;
-	}
 
 	spin_unlock(&info->lock);
 
@@ -3003,7 +3012,6 @@ static int maybe_allocate_chunk(struct btrfs_root *root,
 	if (!info->allocating_chunk) {
 		info->force_alloc = 1;
 		info->allocating_chunk = 1;
-		init_waitqueue_head(&info->allocate_wait);
 	} else {
 		wait = true;
 	}
@@ -3103,7 +3111,7 @@ again:
 		return -ENOSPC;
 	}
 
-	BTRFS_I(inode)->reserved_extents++;
+	BTRFS_I(inode)->reserved_extents += num_items;
 	check_force_delalloc(meta_sinfo);
 	spin_unlock(&meta_sinfo->lock);
 
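
Together with the matching -= num_items in btrfs_unreserve_metadata_for_delalloc() above, the counter now moves symmetrically: a reservation for num_items is released as num_items, where previously a multi-item reserve was paired with a single-item decrement and reserved_extents drifted upward. A toy sketch of the invariant (illustrative names, single-threaded for brevity):

	#include <assert.h>

	static long reserved_extents;

	static void reserve(int num_items)   { reserved_extents += num_items; }

	static void unreserve(int num_items)
	{
		reserved_extents -= num_items;
		assert(reserved_extents >= 0);	/* mirrors the BUG_ON() */
	}

	int main(void)
	{
		reserve(3);
		unreserve(3);			/* symmetric release */
		assert(reserved_extents == 0);
		return 0;
	}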
@@ -3227,7 +3235,8 @@ int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode,
 			       u64 bytes)
 {
 	struct btrfs_space_info *data_sinfo;
-	int ret = 0, committed = 0;
+	u64 used;
+	int ret = 0, committed = 0, flushed = 0;
 
 	/* make sure bytes are sectorsize aligned */
 	bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
@@ -3239,12 +3248,21 @@ int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode,
 again:
 	/* make sure we have enough space to handle the data first */
 	spin_lock(&data_sinfo->lock);
-	if (data_sinfo->total_bytes - data_sinfo->bytes_used -
-	    data_sinfo->bytes_delalloc - data_sinfo->bytes_reserved -
-	    data_sinfo->bytes_pinned - data_sinfo->bytes_readonly -
-	    data_sinfo->bytes_may_use - data_sinfo->bytes_super < bytes) {
+	used = data_sinfo->bytes_used + data_sinfo->bytes_delalloc +
+		data_sinfo->bytes_reserved + data_sinfo->bytes_pinned +
+		data_sinfo->bytes_readonly + data_sinfo->bytes_may_use +
+		data_sinfo->bytes_super;
+
+	if (used + bytes > data_sinfo->total_bytes) {
 		struct btrfs_trans_handle *trans;
 
+		if (!flushed) {
+			spin_unlock(&data_sinfo->lock);
+			flush_delalloc(root, data_sinfo);
+			flushed = 1;
+			goto again;
+		}
+
 		/*
 		 * if we don't have enough free bytes in this space then we need
 		 * to alloc a new chunk.
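
Two things change here: summing the consumers into used turns the test into the overflow-safe used + bytes > total_bytes form, and a one-shot flush of delalloc gets a chance to release space before the function falls through to chunk allocation or -ENOSPC. A condensed sketch of the retry shape, assuming hypothetical lock and flush helpers:

	#include <errno.h>
	#include <pthread.h>

	struct space {
		pthread_mutex_t lock;
		unsigned long long used, total;
	};

	static void flush_dirty_data(struct space *s) { /* hypothetical writeback */ }

	static int check_free_space(struct space *s, unsigned long long bytes)
	{
		int flushed = 0;
	again:
		pthread_mutex_lock(&s->lock);
		if (s->used + bytes > s->total) {	/* overflow-safe compare */
			pthread_mutex_unlock(&s->lock);
			if (!flushed) {
				flush_dirty_data(s);	/* may release space */
				flushed = 1;
				goto again;		/* retry exactly once */
			}
			return -ENOSPC;
		}
		s->used += bytes;
		pthread_mutex_unlock(&s->lock);
		return 0;
	}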
@@ -4162,6 +4180,10 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 	ins->offset = 0;
 
 	space_info = __find_space_info(root->fs_info, data);
+	if (!space_info) {
+		printk(KERN_ERR "No space info for %d\n", data);
+		return -ENOSPC;
+	}
 
 	if (orig_root->ref_cows || empty_size)
 		allowed_chunk_alloc = 1;
@@ -4227,7 +4249,7 @@ search:
 		u64 offset;
 		int cached;
 
-		atomic_inc(&block_group->count);
+		btrfs_get_block_group(block_group);
 		search_start = block_group->key.objectid;
 
 have_block_group:
@@ -4315,7 +4337,7 @@ have_block_group:
 
 				btrfs_put_block_group(block_group);
 				block_group = last_ptr->block_group;
-				atomic_inc(&block_group->count);
+				btrfs_get_block_group(block_group);
 				spin_unlock(&last_ptr->lock);
 				spin_unlock(&last_ptr->refill_lock);
 
@@ -5197,6 +5219,8 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
 	next = btrfs_find_tree_block(root, bytenr, blocksize);
 	if (!next) {
 		next = btrfs_find_create_tree_block(root, bytenr, blocksize);
+		if (!next)
+			return -ENOMEM;
 		reada = 1;
 	}
 	btrfs_tree_lock(next);
@@ -5394,10 +5418,6 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
 	int ret;
 
 	while (level >= 0) {
-		if (path->slots[level] >=
-		    btrfs_header_nritems(path->nodes[level]))
-			break;
-
 		ret = walk_down_proc(trans, root, path, wc, lookup_info);
 		if (ret > 0)
 			break;
@@ -5405,11 +5425,16 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
 		if (level == 0)
 			break;
 
+		if (path->slots[level] >=
+		    btrfs_header_nritems(path->nodes[level]))
+			break;
+
 		ret = do_walk_down(trans, root, path, wc, &lookup_info);
 		if (ret > 0) {
 			path->slots[level]++;
 			continue;
-		}
+		} else if (ret < 0)
+			return ret;
 		level = wc->level;
 	}
 	return 0;
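
The slot bounds check moves below walk_down_proc() so the current node is always processed before the loop decides whether a child slot is left to descend into, and negative returns from do_walk_down() (such as the new -ENOMEM above) now unwind instead of being silently treated as success. The return-code convention the loop enforces, as a hedged sketch with hypothetical helper names:

	struct walk_ctx { int level; };

	/* hypothetical: processes one child; adjusts wc->level on descent,
	 * following the do_walk_down() convention:
	 *   > 0  skip this child and advance to the next slot
	 *  == 0  descended a level, keep walking
	 *   < 0  hard error, propagate immediately */
	static int step_down(struct walk_ctx *wc);

	static int walk(struct walk_ctx *wc)
	{
		int ret;

		while (wc->level >= 0) {
			ret = step_down(wc);
			if (ret > 0)
				continue;	/* skip child, try next slot */
			if (ret < 0)
				return ret;	/* propagate e.g. -ENOMEM */
			/* ret == 0: descended; step_down updated wc->level */
		}
		return 0;
	}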
@@ -6553,6 +6578,7 @@ static noinline int invalidate_extent_cache(struct btrfs_root *root,
 	struct btrfs_key key;
 	struct inode *inode = NULL;
 	struct btrfs_file_extent_item *fi;
+	struct extent_state *cached_state = NULL;
 	u64 num_bytes;
 	u64 skip_objectid = 0;
 	u32 nritems;
@@ -6581,12 +6607,14 @@ static noinline int invalidate_extent_cache(struct btrfs_root *root,
 		}
 		num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
 
-		lock_extent(&BTRFS_I(inode)->io_tree, key.offset,
-			    key.offset + num_bytes - 1, GFP_NOFS);
+		lock_extent_bits(&BTRFS_I(inode)->io_tree, key.offset,
+				 key.offset + num_bytes - 1, 0, &cached_state,
+				 GFP_NOFS);
 		btrfs_drop_extent_cache(inode, key.offset,
 					key.offset + num_bytes - 1, 1);
-		unlock_extent(&BTRFS_I(inode)->io_tree, key.offset,
-			      key.offset + num_bytes - 1, GFP_NOFS);
+		unlock_extent_cached(&BTRFS_I(inode)->io_tree, key.offset,
+				     key.offset + num_bytes - 1, &cached_state,
+				     GFP_NOFS);
 		cond_resched();
 	}
 	iput(inode);
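
lock_extent_bits() hands back the extent_state it locked through cached_state, and unlock_extent_cached() consumes it, so the unlock side can avoid searching the io_tree a second time for a range it just visited. The general remember-the-node shape of that optimization, as an illustrative sketch (not the btrfs API):

	struct node;	/* whatever the range lookup returns */

	/* hypothetical, and assumed expensive */
	struct node *range_lookup(long start, long end);

	static struct node *range_lock(long start, long end, struct node **cached)
	{
		struct node *n = range_lookup(start, end);

		*cached = n;		/* remember for the unlock side */
		/* ... set the lock bit on n ... */
		return n;
	}

	static void range_unlock(long start, long end, struct node **cached)
	{
		/* fast path reuses the remembered node; slow path re-searches */
		struct node *n = *cached ? *cached : range_lookup(start, end);

		*cached = NULL;
		/* ... clear the lock bit on n ... */
	}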
@@ -7358,7 +7386,6 @@ static int find_first_block_group(struct btrfs_root *root,
 		}
 		path->slots[0]++;
 	}
-	ret = -ENOENT;
 out:
 	return ret;
 }
@@ -7395,9 +7422,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
 		wait_block_group_cache_done(block_group);
 
 		btrfs_remove_free_space_cache(block_group);
-
-		WARN_ON(atomic_read(&block_group->count) != 1);
-		kfree(block_group);
+		btrfs_put_block_group(block_group);
 
 		spin_lock(&info->block_group_cache_lock);
 	}
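
The unconditional kfree() assumed the refcount was exactly 1 at unmount, but a caching kthread that has not exited yet still holds the reference taken in cache_block_group(), and the removed WARN_ON() could trip in exactly that window. Dropping through btrfs_put_block_group() lets whichever side runs last perform the free. In terms of the earlier cache_get()/cache_put() sketch:

	/* teardown drops its own reference instead of freeing outright;
	 * a still-running worker's cache_put() performs the final free */
	static void teardown(struct cache *c)
	{
		cache_put(c);
	}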