author		Josef Bacik <josef@redhat.com>		2009-11-13 15:12:59 -0500
committer	Chris Mason <chris.mason@oracle.com>	2010-01-17 20:40:30 -0500
commit		11dfe35a0108097f2df1f042c485fa7f758c2cdf
tree		83d5af6992db15ee61bc0c960626c378a2f5b436
parent		a9cc71a60c29a09174bee2fcef8f924c529fd4b7
Btrfs: fix possible panic on unmount
We can race between the unmount of an fs and the stopping of a caching kthread,
and end up freeing the block group before we are done using it. This happens
because we do not hold a reference on the block group while it is caching; the
allocator drops its reference once it exits or moves on to the next block
group. Fix the problem by taking a reference on the block group before we start
caching and dropping it when we're done, so that all accesses to the block
group are safe. Thanks,
Signed-off-by: Josef Bacik <josef@redhat.com>
Signed-off-by: Chris Mason <chris.mason@oracle.com>
-rw-r--r--	fs/btrfs/extent-tree.c	32
1 file changed, 19 insertions(+), 13 deletions(-)
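The idea of the fix, in miniature: whoever hands an object to an asynchronous worker takes a reference first, and the worker drops that reference when it finishes, so a concurrent teardown path can never free the object out from under the worker. Below is a minimal, self-contained userspace sketch of that pattern, assuming pthreads and C11 atomics in place of kthreads and the kernel's atomic_t; the names (block_group, get_block_group, put_block_group, caching_worker) are hypothetical stand-ins, not the btrfs symbols touched by this patch.

/*
 * Illustration only: pthreads + C11 atomics stand in for kthreads and
 * the kernel's atomic_t.  The starter pins the object before launching
 * the worker; the worker unpins it when it exits, so whichever side
 * drops the last reference frees the memory.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct block_group {		/* hypothetical stand-in for btrfs_block_group_cache */
	atomic_int count;
	unsigned long long objectid;
};

static void get_block_group(struct block_group *bg)
{
	atomic_fetch_add(&bg->count, 1);
}

static void put_block_group(struct block_group *bg)
{
	/* free on the last reference, as btrfs_put_block_group() does */
	if (atomic_fetch_sub(&bg->count, 1) == 1)
		free(bg);
}

static void *caching_worker(void *arg)
{
	struct block_group *bg = arg;

	printf("caching block group %llu\n", bg->objectid);
	put_block_group(bg);	/* drop the reference taken for us */
	return NULL;
}

int main(void)
{
	struct block_group *bg = calloc(1, sizeof(*bg));
	pthread_t tid;

	atomic_store(&bg->count, 1);	/* reference held by the "filesystem" */
	bg->objectid = 12345;

	get_block_group(bg);		/* pin the object for the worker */
	pthread_create(&tid, NULL, caching_worker, bg);

	put_block_group(bg);		/* "unmount": safe even if the worker still runs */
	pthread_join(tid, NULL);
	return 0;
}

Whichever of the worker and the teardown path drops the last reference does the free, which is the guarantee this patch adds around the caching kthread.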
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 56e50137d0e6..432a2da4641e 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -83,6 +83,17 @@ static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
 	return (cache->flags & bits) == bits;
 }
 
+void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
+{
+	atomic_inc(&cache->count);
+}
+
+void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
+{
+	if (atomic_dec_and_test(&cache->count))
+		kfree(cache);
+}
+
 /*
  * this adds the block group to the fs_info rb tree for the block group
  * cache
@@ -156,7 +167,7 @@ block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr, | |||
156 | } | 167 | } |
157 | } | 168 | } |
158 | if (ret) | 169 | if (ret) |
159 | atomic_inc(&ret->count); | 170 | btrfs_get_block_group(ret); |
160 | spin_unlock(&info->block_group_cache_lock); | 171 | spin_unlock(&info->block_group_cache_lock); |
161 | 172 | ||
162 | return ret; | 173 | return ret; |
@@ -407,6 +418,8 @@ err:
 
 	put_caching_control(caching_ctl);
 	atomic_dec(&block_group->space_info->caching_threads);
+	btrfs_put_block_group(block_group);
+
 	return 0;
 }
 
@@ -447,6 +460,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache)
 	up_write(&fs_info->extent_commit_sem);
 
 	atomic_inc(&cache->space_info->caching_threads);
+	btrfs_get_block_group(cache);
 
 	tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n",
 			  cache->key.objectid);
@@ -486,12 +500,6 @@ struct btrfs_block_group_cache *btrfs_lookup_block_group(
 	return cache;
 }
 
-void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
-{
-	if (atomic_dec_and_test(&cache->count))
-		kfree(cache);
-}
-
 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
 						  u64 flags)
 {
@@ -2582,7 +2590,7 @@ next_block_group(struct btrfs_root *root,
 	if (node) {
 		cache = rb_entry(node, struct btrfs_block_group_cache,
 				 cache_node);
-		atomic_inc(&cache->count);
+		btrfs_get_block_group(cache);
 	} else
 		cache = NULL;
 	spin_unlock(&root->fs_info->block_group_cache_lock);
@@ -4227,7 +4235,7 @@ search:
 		u64 offset;
 		int cached;
 
-		atomic_inc(&block_group->count);
+		btrfs_get_block_group(block_group);
 		search_start = block_group->key.objectid;
 
 have_block_group:
@@ -4315,7 +4323,7 @@ have_block_group:
 
 			btrfs_put_block_group(block_group);
 			block_group = last_ptr->block_group;
-			atomic_inc(&block_group->count);
+			btrfs_get_block_group(block_group);
 			spin_unlock(&last_ptr->lock);
 			spin_unlock(&last_ptr->refill_lock);
 
@@ -7395,9 +7403,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
 			wait_block_group_cache_done(block_group);
 
 		btrfs_remove_free_space_cache(block_group);
-
-		WARN_ON(atomic_read(&block_group->count) != 1);
-		kfree(block_group);
+		btrfs_put_block_group(block_group);
 
 		spin_lock(&info->block_group_cache_lock);
 	}