author:    Filipe Manana <fdmanana@suse.com>    2015-09-28 04:56:26 -0400
committer: Chris Mason <clm@fb.com>             2015-10-05 19:56:27 -0400
commit:    808f80b46790f27e145c72112189d6a3be2bc884
tree:      d4fed13bcd51297693ec319a9901dacc53dd44c2  /fs/btrfs/extent_io.c
parent:    b786f16ac3c5d4f7a5fd136656b6a1301b29b73b

Btrfs: update fix for read corruption of compressed and shared extents
My previous fix in commit 005efedf2c7d ("Btrfs: fix read corruption of
compressed and shared extents") was effective only if the compressed
extents cover a file range with a length that is not a multiple of 16
pages. That's because the detection of when we reached a different range
of the file that shares the same compressed extent as the previously
processed range was done at extent_io.c:__do_contiguous_readpages(),
which covers subranges with a length up to 16 pages, because
extent_readpages() groups the pages in clusters no larger than 16 pages.
So fix this by tracking the start of the previously processed file
range's extent map at extent_readpages().
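
To make this concrete, the following standalone C toy (illustrative names only, not the kernel code) sketches the state-hoisting pattern the fix applies: if the "previous extent map start" tracker is owned by the per-cluster helper it gets reset every 16 pages, so the transition into the cloned range at page 16 goes unnoticed; owned by the top-level loop, as in this patch, it survives across clusters and the transition is detected.

/*
 * Toy model (not kernel code): 32 pages, where pages 0-15 belong to file
 * range 0-64K and pages 16-31 to the cloned range 64K-128K.  Both ranges
 * share one compressed extent, but their extent maps start at different
 * file offsets, which is what the tracker is meant to notice.
 */
#include <stdint.h>
#include <stdio.h>

#define CLUSTER_PAGES 16

typedef uint64_t u64;

/* Stand-in for __do_readpage(): compares against the caller-owned tracker. */
static void toy_do_readpage(unsigned int page_index, u64 em_start,
                            u64 *prev_em_start)
{
        if (*prev_em_start != (u64)-1 && *prev_em_start != em_start)
                printf("page %2u: new file range of a shared extent detected\n",
                       page_index);
        *prev_em_start = em_start;
}

/* Stand-in for __do_contiguous_readpages(): before the fix, the tracker was
 * a local variable here and was re-initialised for every 16-page cluster. */
static void toy_read_cluster(unsigned int first, unsigned int nr,
                             u64 *prev_em_start)
{
        unsigned int i;

        for (i = 0; i < nr; i++) {
                u64 em_start = (first + i < 16) ? 0 : 64 * 1024;

                toy_do_readpage(first + i, em_start, prev_em_start);
        }
}

int main(void)
{
        /* After the fix the tracker lives at the extent_readpages() level,
         * so the boundary at page 16 is still visible to the second cluster. */
        u64 prev_em_start = (u64)-1;
        unsigned int off;

        for (off = 0; off < 32; off += CLUSTER_PAGES)
                toy_read_cluster(off, CLUSTER_PAGES, &prev_em_start);
        return 0;
}
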
The following test case for fstests reproduces the issue:
seq=`basename $0`
seqres=$RESULT_DIR/$seq
echo "QA output created by $seq"
tmp=/tmp/$$
status=1 # failure is the default!
trap "_cleanup; exit \$status" 0 1 2 3 15
_cleanup()
{
	rm -f $tmp.*
}
# get standard environment, filters and checks
. ./common/rc
. ./common/filter
# real QA test starts here
_need_to_be_root
_supported_fs btrfs
_supported_os Linux
_require_scratch
_require_cloner
rm -f $seqres.full
test_clone_and_read_compressed_extent()
{
	local mount_opts=$1

	_scratch_mkfs >>$seqres.full 2>&1
	_scratch_mount $mount_opts

	# Create our test file with a single extent of 64Kb that is going to
	# be compressed no matter which compression algo is used (zlib/lzo).
	$XFS_IO_PROG -f -c "pwrite -S 0xaa 0K 64K" \
		$SCRATCH_MNT/foo | _filter_xfs_io

	# Now clone the compressed extent into an adjacent file offset.
	$CLONER_PROG -s 0 -d $((64 * 1024)) -l $((64 * 1024)) \
		$SCRATCH_MNT/foo $SCRATCH_MNT/foo

	echo "File digest before unmount:"
	md5sum $SCRATCH_MNT/foo | _filter_scratch

	# Remount the fs or clear the page cache to trigger the bug in
	# btrfs. Because the extent has an uncompressed length that is a
	# multiple of 16 pages, all the pages belonging to the second range
	# of the file (64K to 128K), which points to the same extent as the
	# first range (0K to 64K), had their contents full of zeroes instead
	# of the byte 0xaa. This was a bug exclusively in the read path of
	# compressed extents, the correct data was stored on disk, btrfs
	# just failed to fill in the pages correctly.
	_scratch_remount

	echo "File digest after remount:"
	# Must match the digest we got before.
	md5sum $SCRATCH_MNT/foo | _filter_scratch
}
echo -e "\nTesting with zlib compression..."
test_clone_and_read_compressed_extent "-o compress=zlib"
_scratch_unmount
echo -e "\nTesting with lzo compression..."
test_clone_and_read_compressed_extent "-o compress=lzo"
status=0
exit
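
For reference, the corruption can also be reproduced by hand outside the fstests harness. The sketch below is an approximation of the test above, not part of the original report: it assumes a scratch device at /dev/sdX, a mount point at /mnt, and an xfs_io recent enough for its "reflink" command to work on btrfs (the fstests version uses the suite's cloner helper instead).

# Sketch only: /dev/sdX, /mnt and /mnt/foo are placeholders.
mkfs.btrfs -f /dev/sdX
mount -o compress=zlib /dev/sdX /mnt
xfs_io -f -c "pwrite -S 0xaa 0 64K" /mnt/foo
xfs_io -c "reflink /mnt/foo 0 64K 64K" /mnt/foo
md5sum /mnt/foo        # digest while the pages are still in the page cache
umount /mnt && mount -o compress=zlib /dev/sdX /mnt
md5sum /mnt/foo        # must match; on unpatched kernels the second 64K reads as zeroes
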
Cc: stable@vger.kernel.org
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Tested-by: Timofey Titovets <nefelim4ag@gmail.com>
Diffstat (limited to 'fs/btrfs/extent_io.c'):
 fs/btrfs/extent_io.c | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 11aa8f743b90..363726b08a51 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3144,12 +3144,12 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
                                              get_extent_t *get_extent,
                                              struct extent_map **em_cached,
                                              struct bio **bio, int mirror_num,
-                                             unsigned long *bio_flags, int rw)
+                                             unsigned long *bio_flags, int rw,
+                                             u64 *prev_em_start)
 {
         struct inode *inode;
         struct btrfs_ordered_extent *ordered;
         int index;
-        u64 prev_em_start = (u64)-1;
 
         inode = pages[0]->mapping->host;
         while (1) {
@@ -3165,7 +3165,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
 
         for (index = 0; index < nr_pages; index++) {
                 __do_readpage(tree, pages[index], get_extent, em_cached, bio,
-                              mirror_num, bio_flags, rw, &prev_em_start);
+                              mirror_num, bio_flags, rw, prev_em_start);
                 page_cache_release(pages[index]);
         }
 }
@@ -3175,7 +3175,8 @@ static void __extent_readpages(struct extent_io_tree *tree,
                                int nr_pages, get_extent_t *get_extent,
                                struct extent_map **em_cached,
                                struct bio **bio, int mirror_num,
-                               unsigned long *bio_flags, int rw)
+                               unsigned long *bio_flags, int rw,
+                               u64 *prev_em_start)
 {
         u64 start = 0;
         u64 end = 0;
@@ -3196,7 +3197,7 @@ static void __extent_readpages(struct extent_io_tree *tree,
                                                   index - first_index, start,
                                                   end, get_extent, em_cached,
                                                   bio, mirror_num, bio_flags,
-                                                  rw);
+                                                  rw, prev_em_start);
                         start = page_start;
                         end = start + PAGE_CACHE_SIZE - 1;
                         first_index = index;
@@ -3207,7 +3208,8 @@ static void __extent_readpages(struct extent_io_tree *tree,
                 __do_contiguous_readpages(tree, &pages[first_index],
                                           index - first_index, start,
                                           end, get_extent, em_cached, bio,
-                                          mirror_num, bio_flags, rw);
+                                          mirror_num, bio_flags, rw,
+                                          prev_em_start);
 }
 
 static int __extent_read_full_page(struct extent_io_tree *tree,
@@ -4218,6 +4220,7 @@ int extent_readpages(struct extent_io_tree *tree,
         struct page *page;
         struct extent_map *em_cached = NULL;
         int nr = 0;
+        u64 prev_em_start = (u64)-1;
 
         for (page_idx = 0; page_idx < nr_pages; page_idx++) {
                 page = list_entry(pages->prev, struct page, lru);
@@ -4234,12 +4237,12 @@ int extent_readpages(struct extent_io_tree *tree,
                 if (nr < ARRAY_SIZE(pagepool))
                         continue;
                 __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
-                                   &bio, 0, &bio_flags, READ);
+                                   &bio, 0, &bio_flags, READ, &prev_em_start);
                 nr = 0;
         }
         if (nr)
                 __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
-                                   &bio, 0, &bio_flags, READ);
+                                   &bio, 0, &bio_flags, READ, &prev_em_start);
 
         if (em_cached)
                 free_extent_map(em_cached);