author     Chris Mason <chris.mason@oracle.com>  2008-12-15 11:44:56 -0500
committer  Chris Mason <chris.mason@oracle.com>  2008-12-15 11:44:56 -0500
commit     42dc7babdcc99feadb04d461592ce5898a362550
tree       9497d6dddba1a046b4a40a51a49cc41a13922394 /fs/btrfs/inode.c
parent     17d217fe970d34720f4f1633dca73a6aa2f3d9d1
Btrfs: Fix compressed writes on truncated pages
The compression code was using isize to limit the amount of data it sent
through zlib, but it wasn't properly limiting the looping to just the pages
inside i_size. The end result was trying to compress too many pages,
including ones that had not been set up and properly locked down. This made
the compression code oops while calling find_get_page on a page that didn't
exist.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
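
For a concrete sense of the failure mode, here is a toy userspace sketch
(plain C, not the btrfs code; it assumes 4K pages and made-up sizes). It
compares the last page index that i_size still backs with the last page
index a walk driven purely by the dirty range's end would touch; on a
truncated file the second can sit far past the first, and those are exactly
the page cache slots where find_get_page comes up empty.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12	/* assume 4K pages for the example */

/*
 * Toy illustration, not kernel code: after a truncate the page cache only
 * holds pages up to the one containing the last byte of the file, so a walk
 * driven purely by the dirty range's "end" can ask for page indexes that no
 * longer exist.
 */
int main(void)
{
	uint64_t isize = 6000;			/* file truncated to 6000 bytes */
	uint64_t end = 128 * 1024 - 1;		/* dirty range still covers 128K */

	uint64_t last_backed_page = (isize - 1) >> PAGE_SHIFT;
	uint64_t last_walked_page = end >> PAGE_SHIFT;

	printf("last page backed by i_size:        %llu\n",
	       (unsigned long long)last_backed_page);
	printf("last page the unclamped walk hits: %llu\n",
	       (unsigned long long)last_walked_page);
	return 0;
}

With isize at 6000 bytes and a 128K dirty range, the clamped bound stops at
page 1 while the unclamped walk runs out to page 31.
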
Diffstat (limited to 'fs/btrfs/inode.c')
 fs/btrfs/inode.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index e64a4fe19a60..5313a13a998d 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -332,6 +332,7 @@ static noinline int compress_file_range(struct inode *inode,
 	u64 disk_num_bytes;
 	u64 blocksize = root->sectorsize;
 	u64 actual_end;
+	u64 isize = i_size_read(inode);
 	int ret = 0;
 	struct page **pages = NULL;
 	unsigned long nr_pages;
@@ -345,12 +346,12 @@ static noinline int compress_file_range(struct inode *inode,
 
 	orig_start = start;
 
+	actual_end = min_t(u64, isize, end + 1);
 again:
 	will_compress = 0;
 	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
 	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
 
-	actual_end = min_t(u64, i_size_read(inode), end + 1);
 	total_compressed = actual_end - start;
 
 	/* we want to make sure that amount of ram required to uncompress
@@ -488,7 +489,7 @@ again:
 		add_async_extent(async_cow, start, num_bytes,
 				 total_compressed, pages, nr_pages_ret);
 
-		if (start + num_bytes < end) {
+		if (start + num_bytes < end && start + num_bytes < actual_end) {
 			start += num_bytes;
 			pages = NULL;
 			cond_resched();
@@ -696,6 +697,7 @@ static noinline int cow_file_range(struct inode *inode,
 	u64 cur_alloc_size;
 	u64 blocksize = root->sectorsize;
 	u64 actual_end;
+	u64 isize = i_size_read(inode);
 	struct btrfs_key ins;
 	struct extent_map *em;
 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
@@ -705,7 +707,7 @@ static noinline int cow_file_range(struct inode *inode,
 	BUG_ON(!trans);
 	btrfs_set_trans_block_group(trans, inode);
 
-	actual_end = min_t(u64, i_size_read(inode), end + 1);
+	actual_end = min_t(u64, isize, end + 1);
 
 	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
 	num_bytes = max(blocksize, num_bytes);
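
To see what the extra "start + num_bytes < actual_end" test buys, here is a
minimal standalone model of the outer compression loop (again plain C, not
the btrfs code; the walk_range helper, the fixed 128K chunk size, and the
byte values are made up for illustration). Without the check the loop keeps
handing out chunks past EOF; with it the walk stops once the next chunk
would begin beyond i_size.

#include <stdio.h>
#include <stdint.h>

/*
 * Minimal model of the outer compression loop, not the btrfs code itself.
 * "end" is the last byte of the dirty range handed to the worker and
 * "isize" is the (smaller, post-truncate) file size.  Each pass pretends to
 * compress one fixed 128K chunk.  Without the extra
 * "start + num_bytes < actual_end" test the loop keeps issuing chunks
 * beyond EOF, i.e. over pages that were never set up or locked.
 */
static void walk_range(uint64_t start, uint64_t end, uint64_t isize,
		       int check_actual_end)
{
	uint64_t actual_end = isize < end + 1 ? isize : end + 1;
	uint64_t num_bytes = 128 * 1024ULL;	/* one chunk per pass */

	for (;;) {
		printf("  compress [%llu, %llu)\n",
		       (unsigned long long)start,
		       (unsigned long long)(start + num_bytes));

		if (start + num_bytes < end &&
		    (!check_actual_end || start + num_bytes < actual_end)) {
			start += num_bytes;
			continue;
		}
		break;
	}
}

int main(void)
{
	/* 1M of dirty pages on a file truncated down to 200000 bytes */
	uint64_t start = 0, end = 1024 * 1024 - 1, isize = 200000;

	printf("without the actual_end check:\n");
	walk_range(start, end, isize, 0);

	printf("with the actual_end check (the fix):\n");
	walk_range(start, end, isize, 1);
	return 0;
}

Reading i_size once into isize and computing actual_end before the again:
label has the same flavor as the loop check: the limit comes from a single
snapshot of the inode size rather than being re-derived on every retry.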