author		Josef Bacik <josef@redhat.com>		2011-06-20 14:53:48 -0400
committer	Josef Bacik <josef@redhat.com>		2011-07-11 10:00:48 -0400
commit		df98b6e2c52f65665eaf0fc23e647fb64335b289 (patch)
tree		0f7e235f8f51d87928266129392e630524847be5 /fs/btrfs
parent		2f356126c589d562f98e2287f9c7b983388dc62f (diff)
Btrfs: fix how we merge extent states and deal with cached states
First, we can sometimes free the state we're merging, which means anybody who calls merge_state() may have the state it passed in freed out from under it. This is problematic because we could end up caching that state, which makes caching useless since the state will no longer be part of the tree. So instead of freeing the state passed into merge_state(), set its end to the other state's end and free the other state. This way we are sure to cache the correct state.

Also, because we can merge states together, instead of only using the cached state when its start == the start we are looking for, go ahead and use it whenever the start we are looking for falls within the range of the cached state. Thanks,

Signed-off-by: Josef Bacik <josef@redhat.com>
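The behavioural change is easiest to see outside the kernel: when two adjacent ranges merge, the surviving object is the one the caller (and the cache) already holds, extended to cover the neighbour, and a cached state is reusable for any lookup whose start falls inside its range, not only one that begins exactly at its start. The sketch below is a minimal userspace model of those two rules, assuming a stripped-down extent_state with only start, end, and state fields; it is not the kernel code and omits the rb-tree, locking, and refcounting.

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

/* Simplified model of an extent state: just a byte range and a bit mask. */
struct extent_state {
	unsigned long long start;
	unsigned long long end;		/* inclusive */
	unsigned long state;
};

/*
 * Merge 'other' (the next adjacent range) into 'state', mirroring the
 * direction the patch adopts: 'state' survives with a larger end, and
 * 'other' is the object that gets freed.  A cached pointer to 'state'
 * therefore stays valid after the merge.
 */
static void merge_into(struct extent_state *state, struct extent_state *other)
{
	if (other->start == state->end + 1 && other->state == state->state) {
		state->end = other->end;
		free(other);
	}
}

/*
 * A cached state is usable when the offset we are looking for lies
 * anywhere inside its range, not only when it equals cached->start.
 */
static bool cached_covers(const struct extent_state *cached,
			  unsigned long long start)
{
	return cached && cached->start <= start && cached->end > start;
}

int main(void)
{
	struct extent_state *a = malloc(sizeof(*a));
	struct extent_state *b = malloc(sizeof(*b));

	*a = (struct extent_state){ .start = 0,    .end = 4095, .state = 1 };
	*b = (struct extent_state){ .start = 4096, .end = 8191, .state = 1 };

	merge_into(a, b);	/* a now covers [0, 8191]; b has been freed */

	/* After the merge, a lookup at 4096 can still use the cached 'a'. */
	printf("cached covers 4096: %s\n", cached_covers(a, 4096) ? "yes" : "no");
	printf("cached covers 8192: %s\n", cached_covers(a, 8192) ? "yes" : "no");

	free(a);
	return 0;
}

The `cached->end > start` test copies the comparison the patch uses; since end is inclusive, a lookup at exactly end still falls back to a tree search, which is the conservative behaviour the patch keeps.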
Diffstat (limited to 'fs/btrfs')
-rw-r--r--	fs/btrfs/extent_io.c	23
1 file changed, 11 insertions(+), 12 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index b181a94a7170..abb922daf1b6 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -279,11 +279,10 @@ static int merge_state(struct extent_io_tree *tree,
 		if (other->start == state->end + 1 &&
 		    other->state == state->state) {
 			merge_cb(tree, state, other);
-			other->start = state->start;
-			state->tree = NULL;
-			rb_erase(&state->rb_node, &tree->state);
-			free_extent_state(state);
-			state = NULL;
+			state->end = other->end;
+			other->tree = NULL;
+			rb_erase(&other->rb_node, &tree->state);
+			free_extent_state(other);
 		}
 	}
 
@@ -349,7 +348,6 @@ static int insert_state(struct extent_io_tree *tree,
 		       "%llu %llu\n", (unsigned long long)found->start,
 		       (unsigned long long)found->end,
 		       (unsigned long long)start, (unsigned long long)end);
-		free_extent_state(state);
 		return -EEXIST;
 	}
 	state->tree = tree;
@@ -498,7 +496,8 @@ again:
 			cached_state = NULL;
 		}
 
-		if (cached && cached->tree && cached->start == start) {
+		if (cached && cached->tree && cached->start <= start &&
+		    cached->end > start) {
 			if (clear)
 				atomic_dec(&cached->refs);
 			state = cached;
@@ -740,7 +739,8 @@ again:
 	spin_lock(&tree->lock);
 	if (cached_state && *cached_state) {
 		state = *cached_state;
-		if (state->start == start && state->tree) {
+		if (state->start <= start && state->end > start &&
+		    state->tree) {
 			node = &state->rb_node;
 			goto hit_next;
 		}
@@ -781,13 +781,13 @@ hit_next:
 		if (err)
 			goto out;
 
-		next_node = rb_next(node);
 		cache_state(state, cached_state);
 		merge_state(tree, state);
 		if (last_end == (u64)-1)
 			goto out;
 
 		start = last_end + 1;
+		next_node = rb_next(&state->rb_node);
 		if (next_node && start < end && prealloc && !need_resched()) {
 			state = rb_entry(next_node, struct extent_state,
					 rb_node);
@@ -860,7 +860,6 @@ hit_next:
 		 * Avoid to free 'prealloc' if it can be merged with
 		 * the later extent.
 		 */
-		atomic_inc(&prealloc->refs);
 		err = insert_state(tree, prealloc, start, this_end,
				   &bits);
 		BUG_ON(err == -EEXIST);
@@ -870,7 +869,6 @@ hit_next:
 			goto out;
 		}
 		cache_state(prealloc, cached_state);
-		free_extent_state(prealloc);
 		prealloc = NULL;
 		start = this_end + 1;
 		goto search_again;
@@ -1562,7 +1560,8 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	int bitset = 0;
 
 	spin_lock(&tree->lock);
-	if (cached && cached->tree && cached->start == start)
+	if (cached && cached->tree && cached->start <= start &&
+	    cached->end > start)
 		node = &cached->rb_node;
 	else
 		node = tree_search(tree, start);