aboutsummaryrefslogtreecommitdiffstats
path: root/fs/btrfs/extent_io.c
diff options
context:
space:
mode:
authorXiao Guangrong <xiaoguangrong@cn.fujitsu.com>2011-04-20 02:45:49 -0400
committerChris Mason <chris.mason@oracle.com>2011-05-23 13:24:41 -0400
commitc7f895a2b2d1a002810d52e7b6653c9dc2fd0b0b (patch)
tree1b2a05c2efe1f169983bff7a497d812aa86c3000 /fs/btrfs/extent_io.c
parent8233767a227ac5843f1023b88c7272a7b5058f5f (diff)
Btrfs: fix unsafe usage of merge_state
merge_state can free the current state if it can be merged with the next node, but in set_extent_bit(), after merge_state, we still use the current extent to get the next node and cache it into cached_state. Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com> Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--fs/btrfs/extent_io.c22
1 file changed, 14 insertions, 8 deletions
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 9ccea86dd015..ebfff5b44752 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -780,20 +780,18 @@ hit_next:
780 if (err) 780 if (err)
781 goto out; 781 goto out;
782 782
783 next_node = rb_next(node);
783 cache_state(state, cached_state); 784 cache_state(state, cached_state);
784 merge_state(tree, state); 785 merge_state(tree, state);
785 if (last_end == (u64)-1) 786 if (last_end == (u64)-1)
786 goto out; 787 goto out;
787 788
788 start = last_end + 1; 789 start = last_end + 1;
789 if (start < end && prealloc && !need_resched()) { 790 if (next_node && start < end && prealloc && !need_resched()) {
790 next_node = rb_next(node); 791 state = rb_entry(next_node, struct extent_state,
791 if (next_node) { 792 rb_node);
792 state = rb_entry(next_node, struct extent_state, 793 if (state->start == start)
793 rb_node); 794 goto hit_next;
794 if (state->start == start)
795 goto hit_next;
796 }
797 } 795 }
798 goto search_again; 796 goto search_again;
799 } 797 }
@@ -856,14 +854,22 @@ hit_next:
856 854
857 prealloc = alloc_extent_state_atomic(prealloc); 855 prealloc = alloc_extent_state_atomic(prealloc);
858 BUG_ON(!prealloc); 856 BUG_ON(!prealloc);
857
858 /*
859 * Avoid to free 'prealloc' if it can be merged with
860 * the later extent.
861 */
862 atomic_inc(&prealloc->refs);
859 err = insert_state(tree, prealloc, start, this_end, 863 err = insert_state(tree, prealloc, start, this_end,
860 &bits); 864 &bits);
861 BUG_ON(err == -EEXIST); 865 BUG_ON(err == -EEXIST);
862 if (err) { 866 if (err) {
867 free_extent_state(prealloc);
863 prealloc = NULL; 868 prealloc = NULL;
864 goto out; 869 goto out;
865 } 870 }
866 cache_state(prealloc, cached_state); 871 cache_state(prealloc, cached_state);
872 free_extent_state(prealloc);
867 prealloc = NULL; 873 prealloc = NULL;
868 start = this_end + 1; 874 start = this_end + 1;
869 goto search_again; 875 goto search_again;