diff options
author | Liu Bo <bo.li.liu@oracle.com> | 2016-07-11 13:39:07 -0400 |
---|---|---|
committer | David Sterba <dsterba@suse.com> | 2016-07-26 07:52:25 -0400 |
commit | baf863b9c29617cc9eaf24e039f58846e700db48 (patch) | |
tree | 916fe6a7c3967160de5d6d991dd6e1088953add8 /fs/btrfs | |
parent | f49070957ffed84feb7944550f7edd53672b5201 (diff) |
Btrfs: fix eb memory leak due to readpage failure
eb->io_pages is set in read_extent_buffer_pages().
In case of readpage failure, for pages that have been added to bio,
it calls bio_endio and later readpage_io_failed_hook() does the work.
When one of this eb's pages (which cannot be the 1st page) fails to add itself to the bio
due to a failure in merge_bio(), bio_endio is never called for it, so eb->io_pages is
never decremented, and this eventually results in a memory leak.
This change makes __do_readpage propagate errors to its callers and adds the
missing 'atomic_dec(&eb->io_pages)' on the failure path.
Signed-off-by: Liu Bo <bo.li.liu@oracle.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Diffstat (limited to 'fs/btrfs')
-rw-r--r-- | fs/btrfs/extent_io.c | 25 |
1 file changed, 22 insertions, 3 deletions
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index ea12d0ee684f..dae2f8470e04 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c | |||
@@ -2878,6 +2878,7 @@ __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset, | |||
2878 | * into the tree that are removed when the IO is done (by the end_io | 2878 | * into the tree that are removed when the IO is done (by the end_io |
2879 | * handlers) | 2879 | * handlers) |
2880 | * XXX JDM: This needs looking at to ensure proper page locking | 2880 | * XXX JDM: This needs looking at to ensure proper page locking |
2881 | * return 0 on success, otherwise return error | ||
2881 | */ | 2882 | */ |
2882 | static int __do_readpage(struct extent_io_tree *tree, | 2883 | static int __do_readpage(struct extent_io_tree *tree, |
2883 | struct page *page, | 2884 | struct page *page, |
@@ -2899,7 +2900,7 @@ static int __do_readpage(struct extent_io_tree *tree, | |||
2899 | sector_t sector; | 2900 | sector_t sector; |
2900 | struct extent_map *em; | 2901 | struct extent_map *em; |
2901 | struct block_device *bdev; | 2902 | struct block_device *bdev; |
2902 | int ret; | 2903 | int ret = 0; |
2903 | int nr = 0; | 2904 | int nr = 0; |
2904 | size_t pg_offset = 0; | 2905 | size_t pg_offset = 0; |
2905 | size_t iosize; | 2906 | size_t iosize; |
@@ -3080,6 +3081,7 @@ static int __do_readpage(struct extent_io_tree *tree, | |||
3080 | } else { | 3081 | } else { |
3081 | SetPageError(page); | 3082 | SetPageError(page); |
3082 | unlock_extent(tree, cur, cur + iosize - 1); | 3083 | unlock_extent(tree, cur, cur + iosize - 1); |
3084 | goto out; | ||
3083 | } | 3085 | } |
3084 | cur = cur + iosize; | 3086 | cur = cur + iosize; |
3085 | pg_offset += iosize; | 3087 | pg_offset += iosize; |
@@ -3090,7 +3092,7 @@ out: | |||
3090 | SetPageUptodate(page); | 3092 | SetPageUptodate(page); |
3091 | unlock_page(page); | 3093 | unlock_page(page); |
3092 | } | 3094 | } |
3093 | return 0; | 3095 | return ret; |
3094 | } | 3096 | } |
3095 | 3097 | ||
3096 | static inline void __do_contiguous_readpages(struct extent_io_tree *tree, | 3098 | static inline void __do_contiguous_readpages(struct extent_io_tree *tree, |
@@ -5230,14 +5232,31 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, | |||
5230 | atomic_set(&eb->io_pages, num_reads); | 5232 | atomic_set(&eb->io_pages, num_reads); |
5231 | for (i = start_i; i < num_pages; i++) { | 5233 | for (i = start_i; i < num_pages; i++) { |
5232 | page = eb->pages[i]; | 5234 | page = eb->pages[i]; |
5235 | |||
5233 | if (!PageUptodate(page)) { | 5236 | if (!PageUptodate(page)) { |
5237 | if (ret) { | ||
5238 | atomic_dec(&eb->io_pages); | ||
5239 | unlock_page(page); | ||
5240 | continue; | ||
5241 | } | ||
5242 | |||
5234 | ClearPageError(page); | 5243 | ClearPageError(page); |
5235 | err = __extent_read_full_page(tree, page, | 5244 | err = __extent_read_full_page(tree, page, |
5236 | get_extent, &bio, | 5245 | get_extent, &bio, |
5237 | mirror_num, &bio_flags, | 5246 | mirror_num, &bio_flags, |
5238 | READ | REQ_META); | 5247 | READ | REQ_META); |
5239 | if (err) | 5248 | if (err) { |
5240 | ret = err; | 5249 | ret = err; |
5250 | /* | ||
5251 | * We use &bio in above __extent_read_full_page, | ||
5252 | * so we ensure that if it returns error, the | ||
5253 | * current page fails to add itself to bio and | ||
5254 | * it's been unlocked. | ||
5255 | * | ||
5256 | * We must dec io_pages by ourselves. | ||
5257 | */ | ||
5258 | atomic_dec(&eb->io_pages); | ||
5259 | } | ||
5241 | } else { | 5260 | } else { |
5242 | unlock_page(page); | 5261 | unlock_page(page); |
5243 | } | 5262 | } |