path: root/fs/btrfs/compression.c
author     Linus Torvalds <torvalds@linux-foundation.org>    2012-03-30 15:44:29 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2012-03-30 15:44:29 -0400
commit     9613bebb223dea3179c265dc31e1bb41ae39f321 (patch)
tree       39bf883573d23775a53be3172323c0237fef5630    /fs/btrfs/compression.c
parent     40380f1c7841a5dcbf0b20f0b6da11969211ef77 (diff)
parent     bc3f116fec194f1d7329b160c266fe16b9266a1e (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs
Pull btrfs fixes and features from Chris Mason:
 "We've merged in the error handling patches from SuSE.  These are already
  shipping in the sles kernel, and they give btrfs the ability to abort
  transactions and go readonly on errors.  It involves a lot of churn as
  they clarify BUG_ONs, and remove the ones we now properly deal with.

  Josef reworked the way our metadata interacts with the page cache.
  page->private now points to the btrfs extent_buffer object, which makes
  everything faster.  He changed it so we write a whole extent buffer at a
  time instead of allowing individual pages to go down, which will be
  important for the raid5/6 code (for the 3.5 merge window ;)

  Josef also made us more aggressive about dropping pages for metadata
  blocks that were freed due to COW.  Overall, our metadata caching is
  much faster now.

  We've integrated my patch for metadata bigger than the page size.  This
  allows metadata blocks up to 64KB in size.  In practice 16K and 32K seem
  to work best.  For workloads with lots of metadata, this cuts down the
  size of the extent allocation tree dramatically and fragments much less.

  Scrub was updated to support the larger block sizes, which ended up
  being a fairly large change (thanks Stefan Behrens).

  We also have an assortment of fixes and updates, especially to the
  balancing code (Ilya Dryomov), the back ref walker (Jan Schmidt) and the
  defragging code (Liu Bo)."

Fixed up trivial conflicts in fs/btrfs/scrub.c that were just due to
removal of the second argument to k[un]map_atomic() in commit 7ac687d9e047.

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs: (75 commits)
  Btrfs: update the checks for mixed block groups with big metadata blocks
  Btrfs: update to the right index of defragment
  Btrfs: do not bother to defrag an extent if it is a big real extent
  Btrfs: add a check to decide if we should defrag the range
  Btrfs: fix recursive defragment with autodefrag option
  Btrfs: fix the mismatch of page->mapping
  Btrfs: fix race between direct io and autodefrag
  Btrfs: fix deadlock during allocating chunks
  Btrfs: show useful info in space reservation tracepoint
  Btrfs: don't use crc items bigger than 4KB
  Btrfs: flush out and clean up any block device pages during mount
  btrfs: disallow unequal data/metadata blocksize for mixed block groups
  Btrfs: enhance superblock sanity checks
  Btrfs: change scrub to support big blocks
  Btrfs: minor cleanup in scrub
  Btrfs: introduce common define for max number of mirrors
  Btrfs: fix infinite loop in btrfs_shrink_device()
  Btrfs: fix memory leak in resolver code
  Btrfs: allow dup for data chunks in mixed mode
  Btrfs: validate target profiles only if we are going to use them
  ...
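The error handling work described above replaces many BUG_ON() calls with code that propagates the error and flips the filesystem read-only instead of crashing the machine. As a rough, hypothetical userspace analogue of that pattern (none of these names come from the btrfs code), the shift looks like this:

    #include <stdio.h>
    #include <errno.h>
    #include <stdbool.h>

    /* Hypothetical stand-in for a filesystem that can be forced read-only. */
    struct fs_state {
            bool readonly;
    };

    /* Old style: any failure is treated as a programming error and aborts.
     * New style: the failure is reported to the caller, which aborts the
     * "transaction" and marks the filesystem read-only instead of panicking. */
    static int do_metadata_update(struct fs_state *fs, int simulated_err)
    {
            if (simulated_err) {
                    fs->readonly = true;      /* abort: go readonly on error */
                    return simulated_err;     /* propagate instead of BUG_ON() */
            }
            return 0;
    }

    int main(void)
    {
            struct fs_state fs = { .readonly = false };
            int ret = do_metadata_update(&fs, -ENOMEM);

            if (ret)
                    fprintf(stderr, "update failed (%d), filesystem now %s\n",
                            ret, fs.readonly ? "readonly" : "read-write");
            return 0;
    }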
Diffstat (limited to 'fs/btrfs/compression.c')
-rw-r--r--  fs/btrfs/compression.c  38
1 file changed, 18 insertions(+), 20 deletions(-)
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index b805afb37fa8..d286b40a5671 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -226,8 +226,8 @@ out:
  * Clear the writeback bits on all of the file
  * pages for a compressed write
  */
-static noinline int end_compressed_writeback(struct inode *inode, u64 start,
+static noinline void end_compressed_writeback(struct inode *inode, u64 start,
 					      unsigned long ram_size)
 {
 	unsigned long index = start >> PAGE_CACHE_SHIFT;
 	unsigned long end_index = (start + ram_size - 1) >> PAGE_CACHE_SHIFT;
@@ -253,7 +253,6 @@ static noinline int end_compressed_writeback(struct inode *inode, u64 start,
 		index += ret;
 	}
 	/* the inode may be gone now */
-	return 0;
 }
 
 /*
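The two hunks above convert end_compressed_writeback() from returning int to void: the function could only ever return 0, so the return value carried no information and callers had nothing meaningful to check. A minimal standalone sketch of the same transformation (hypothetical names, not the btrfs code):

    #include <stdio.h>

    /* Before: always returned 0, so callers had nothing useful to check. */
    /* static int finish_writeback(int nr_pages) { ...; return 0; } */

    /* After: the pointless return value is dropped entirely. */
    static void finish_writeback(int nr_pages)
    {
            printf("cleared writeback on %d pages\n", nr_pages);
    }

    int main(void)
    {
            finish_writeback(4);  /* call site no longer assigns or checks a result */
            return 0;
    }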
@@ -392,16 +391,16 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 	 */
 	atomic_inc(&cb->pending_bios);
 	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
-	BUG_ON(ret);
+	BUG_ON(ret); /* -ENOMEM */
 
 	if (!skip_sum) {
 		ret = btrfs_csum_one_bio(root, inode, bio,
 					 start, 1);
-		BUG_ON(ret);
+		BUG_ON(ret); /* -ENOMEM */
 	}
 
 	ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
-	BUG_ON(ret);
+	BUG_ON(ret); /* -ENOMEM */
 
 	bio_put(bio);
 
@@ -421,15 +420,15 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 	bio_get(bio);
 
 	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
-	BUG_ON(ret);
+	BUG_ON(ret); /* -ENOMEM */
 
 	if (!skip_sum) {
 		ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
-		BUG_ON(ret);
+		BUG_ON(ret); /* -ENOMEM */
 	}
 
 	ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
-	BUG_ON(ret);
+	BUG_ON(ret); /* -ENOMEM */
 
 	bio_put(bio);
 	return 0;
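In the unchanged context lines above, each bio is wrapped in bio_get()/bio_put() around submission: the extra reference keeps the bio alive until this function is done with it, even if the I/O completes (and the completion path drops its reference) before btrfs_map_bio() returns. A tiny, hypothetical refcount sketch of the same idea in plain C (not kernel code):

    #include <stdio.h>

    struct fake_bio {
            int refcount;
    };

    static void fake_bio_put(struct fake_bio *b)
    {
            if (--b->refcount == 0)
                    printf("bio freed\n");
    }

    /* Submission hands the bio to a completion path that drops one reference. */
    static void fake_submit(struct fake_bio *b)
    {
            fake_bio_put(b);  /* pretend the I/O already completed */
    }

    int main(void)
    {
            struct fake_bio bio = { .refcount = 1 };

            bio.refcount++;         /* bio_get(): hold it across submission */
            fake_submit(&bio);      /* completion may drop its ref immediately */
            printf("still safe to touch bio, refcount=%d\n", bio.refcount);
            fake_bio_put(&bio);     /* bio_put(): release our reference */
            return 0;
    }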
@@ -497,7 +496,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 		 * sure they map to this compressed extent on disk.
 		 */
 		set_page_extent_mapped(page);
-		lock_extent(tree, last_offset, end, GFP_NOFS);
+		lock_extent(tree, last_offset, end);
 		read_lock(&em_tree->lock);
 		em = lookup_extent_mapping(em_tree, last_offset,
 					   PAGE_CACHE_SIZE);
@@ -507,7 +506,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 		    (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
 		    (em->block_start >> 9) != cb->orig_bio->bi_sector) {
 			free_extent_map(em);
-			unlock_extent(tree, last_offset, end, GFP_NOFS);
+			unlock_extent(tree, last_offset, end);
 			unlock_page(page);
 			page_cache_release(page);
 			break;
@@ -535,7 +534,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 			nr_pages++;
 			page_cache_release(page);
 		} else {
-			unlock_extent(tree, last_offset, end, GFP_NOFS);
+			unlock_extent(tree, last_offset, end);
 			unlock_page(page);
 			page_cache_release(page);
 			break;
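The three hunks above are mechanical fallout of an interface change elsewhere in the series: lock_extent() and unlock_extent() no longer take a gfp_t argument, so the GFP_NOFS that every caller passed identically is simply dropped at each call site. As a generic, hypothetical illustration of removing a parameter that all callers passed the same way (names invented, not btrfs code):

    #include <stdio.h>

    /* Before: every caller passed the same allocation-mask argument. */
    /* static void lock_range(long start, long end, unsigned int gfp_mask); */

    /* After: the mask is fixed inside the implementation, so the parameter
     * disappears and every call site shrinks by one argument. */
    static void lock_range(long start, long end)
    {
            printf("locking [%ld, %ld]\n", start, end);
    }

    int main(void)
    {
            lock_range(0, 4095);  /* was: lock_range(0, 4095, GFP_NOFS) */
            return 0;
    }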
@@ -662,7 +661,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 			bio_get(comp_bio);
 
 			ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
-			BUG_ON(ret);
+			BUG_ON(ret); /* -ENOMEM */
 
 			/*
 			 * inc the count before we submit the bio so
@@ -675,14 +674,14 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
 				ret = btrfs_lookup_bio_sums(root, inode,
 							    comp_bio, sums);
-				BUG_ON(ret);
+				BUG_ON(ret); /* -ENOMEM */
 			}
 			sums += (comp_bio->bi_size + root->sectorsize - 1) /
 				 root->sectorsize;
 
 			ret = btrfs_map_bio(root, READ, comp_bio,
 					    mirror_num, 0);
-			BUG_ON(ret);
+			BUG_ON(ret); /* -ENOMEM */
 
 			bio_put(comp_bio);
 
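One context line in the hunk above is worth calling out: after looking up checksums, the code advances the sums pointer by (comp_bio->bi_size + root->sectorsize - 1) / root->sectorsize, i.e. the number of checksummed sectors covered by the bio, rounded up. A quick standalone check of that rounding-up division (the values below are made up):

    #include <stdio.h>

    int main(void)
    {
            unsigned int bi_size = 12288 + 512;   /* hypothetical bio size in bytes */
            unsigned int sectorsize = 4096;       /* hypothetical sector size */

            /* ceil(bi_size / sectorsize): a 12800-byte bio spans 4 sectors,
             * so the sums pointer must advance by 4 checksum entries. */
            unsigned int nsectors = (bi_size + sectorsize - 1) / sectorsize;

            printf("%u bytes -> %u checksum entries\n", bi_size, nsectors);
            return 0;
    }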
@@ -698,15 +697,15 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	bio_get(comp_bio);
 
 	ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
-	BUG_ON(ret);
+	BUG_ON(ret); /* -ENOMEM */
 
 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
 		ret = btrfs_lookup_bio_sums(root, inode, comp_bio, sums);
-		BUG_ON(ret);
+		BUG_ON(ret); /* -ENOMEM */
 	}
 
 	ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0);
-	BUG_ON(ret);
+	BUG_ON(ret); /* -ENOMEM */
 
 	bio_put(comp_bio);
 	return 0;
@@ -734,7 +733,7 @@ struct btrfs_compress_op *btrfs_compress_op[] = {
 	&btrfs_lzo_compress,
 };
 
-int __init btrfs_init_compress(void)
+void __init btrfs_init_compress(void)
 {
 	int i;
 
@@ -744,7 +743,6 @@ int __init btrfs_init_compress(void)
 		atomic_set(&comp_alloc_workspace[i], 0);
 		init_waitqueue_head(&comp_workspace_wait[i]);
 	}
-	return 0;
 }
 
 /*