author     Naohiro Aota <naohiro.aota@wdc.com>    2019-07-26 03:47:05 -0400
committer  David Sterba <dsterba@suse.com>        2019-07-26 06:21:22 -0400
commit     a3b46b86ca76d7f9d487e6a0b594fd1984e0796e
tree       efab271e267efffe207b474b9959af63152f683d
parent     6e7ca09b583de4be6c27d9d4b06e8c5dd46a58fa
btrfs: fix extent_state leak in btrfs_lock_and_flush_ordered_range
btrfs_lock_and_flush_ordered_range() loads the given "*cached_state" into
cachedp, which, in general, is NULL. lock_extent_bits() then updates
"cachedp", but the update is never propagated back to the caller. Thus the
caller still sees its "cached_state" as NULL and never frees the
extent_state allocated under btrfs_lock_and_flush_ordered_range(). As a
result, we see a massive extent_state leak with e.g. fstests btrfs/005.
Fix this by handling the pointers properly: pass through a pointer to the
caller's cached_state (falling back to a local one when the caller passes
NULL), so lock_extent_bits() updates the caller's pointer directly.
Fixes: bd80d94efb83 ("btrfs: Always use a cached extent_state in btrfs_lock_and_flush_ordered_range")
Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
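To make the pointer-aliasing problem concrete, the following is a minimal
standalone C sketch, not the btrfs code itself: struct fake_state, fake_lock()
and the flush_range_*() helpers are invented stand-ins for extent_state,
lock_extent_bits() and btrfs_lock_and_flush_ordered_range(). It shows why
copying *cached_state into a local pointer loses the allocation, and how the
pointer-to-pointer used in the fix propagates it back to the caller.

/* Standalone illustration of the bug and the fix; names are hypothetical. */
#include <stdio.h>
#include <stdlib.h>

struct fake_state {
	int refs;
};

/* Stands in for lock_extent_bits(): allocates a state and caches it. */
static void fake_lock(struct fake_state **cached)
{
	if (!*cached)
		*cached = calloc(1, sizeof(**cached));
}

/* Buggy pattern: copies the caller's pointer value, so the allocation
 * made by fake_lock() lands only in the local variable. */
static void flush_range_buggy(struct fake_state **cached_state)
{
	struct fake_state *cachedp = NULL;

	if (cached_state)
		cachedp = *cached_state;
	fake_lock(&cachedp);	/* caller's *cached_state stays NULL: leak */
}

/* Fixed pattern from the diff below: keep a pointer-to-pointer that
 * aliases either the caller's slot or a local fallback. */
static void flush_range_fixed(struct fake_state **cached_state)
{
	struct fake_state *cache = NULL;
	struct fake_state **cachedp = &cache;

	if (cached_state)
		cachedp = cached_state;
	fake_lock(cachedp);	/* writes through to the caller's pointer */
	if (!cached_state)
		free(cache);	/* nobody outside can release the fallback */
}

int main(void)
{
	struct fake_state *state = NULL;

	flush_range_buggy(&state);
	printf("buggy: caller sees %p (allocation leaked)\n", (void *)state);

	flush_range_fixed(&state);
	printf("fixed: caller sees %p\n", (void *)state);
	free(state);
	return 0;
}

Running the sketch, the buggy variant leaves the caller's pointer NULL while
the allocation stays live, which mirrors the leak the commit message
describes; the fixed variant hands the state back so the caller can free it.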
 fs/btrfs/ordered-data.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 1744ba8b2754..ae7f64a8facb 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -985,13 +985,14 @@ void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
 					struct extent_state **cached_state)
 {
 	struct btrfs_ordered_extent *ordered;
-	struct extent_state *cachedp = NULL;
+	struct extent_state *cache = NULL;
+	struct extent_state **cachedp = &cache;
 
 	if (cached_state)
-		cachedp = *cached_state;
+		cachedp = cached_state;
 
 	while (1) {
-		lock_extent_bits(tree, start, end, &cachedp);
+		lock_extent_bits(tree, start, end, cachedp);
 		ordered = btrfs_lookup_ordered_range(inode, start,
 						     end - start + 1);
 		if (!ordered) {
@@ -1001,10 +1002,10 @@ void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
 			 * aren't exposing it outside of this function
 			 */
 			if (!cached_state)
-				refcount_dec(&cachedp->refs);
+				refcount_dec(&cache->refs);
 			break;
 		}
-		unlock_extent_cached(tree, start, end, &cachedp);
+		unlock_extent_cached(tree, start, end, cachedp);
 		btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1);
 		btrfs_put_ordered_extent(ordered);
 	}