Diffstat (limited to 'fs/btrfs/compression.c')

-rw-r--r--  fs/btrfs/compression.c | 419 +++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 377 insertions(+), 42 deletions(-)
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 396039b3a8a2..bfe42b03eaf9 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -62,6 +62,9 @@ struct compressed_bio {
 	/* number of bytes on disk */
 	unsigned long compressed_len;
 
+	/* the compression algorithm for this bio */
+	int compress_type;
+
 	/* number of compressed pages in the array */
 	unsigned long nr_pages;
 
@@ -91,23 +94,10 @@ static inline int compressed_bio_size(struct btrfs_root *root,
 static struct bio *compressed_bio_alloc(struct block_device *bdev,
 					u64 first_byte, gfp_t gfp_flags)
 {
-	struct bio *bio;
 	int nr_vecs;
 
 	nr_vecs = bio_get_nr_vecs(bdev);
-	bio = bio_alloc(gfp_flags, nr_vecs);
-
-	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
-		while (!bio && (nr_vecs /= 2))
-			bio = bio_alloc(gfp_flags, nr_vecs);
-	}
-
-	if (bio) {
-		bio->bi_size = 0;
-		bio->bi_bdev = bdev;
-		bio->bi_sector = first_byte >> 9;
-	}
-	return bio;
+	return btrfs_bio_alloc(bdev, first_byte >> 9, nr_vecs, gfp_flags);
 }
 
 static int check_compressed_csum(struct inode *inode,
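The open-coded fallback above moved behind btrfs_bio_alloc(). As a reading
aid, here is a minimal sketch of a helper consistent with the deleted lines;
the real shared helper lives elsewhere in btrfs and may differ, so treat the
body as an assumption reconstructed from the removed code:

/* sketch only: the same retry-under-memory-pressure logic the hunk removes */
static struct bio *btrfs_bio_alloc_sketch(struct block_device *bdev,
					  u64 first_sector, int nr_vecs,
					  gfp_t gfp_flags)
{
	struct bio *bio = bio_alloc(gfp_flags, nr_vecs);

	/* memalloc tasks retry with progressively fewer bio_vecs */
	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp_flags, nr_vecs);
	}

	if (bio) {
		bio->bi_size = 0;
		bio->bi_bdev = bdev;
		bio->bi_sector = first_sector;
	}
	return bio;
}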
@@ -135,9 +125,10 @@ static int check_compressed_csum(struct inode *inode,
 	kunmap_atomic(kaddr, KM_USER0);
 
 	if (csum != *cb_sum) {
-		printk(KERN_INFO "btrfs csum failed ino %lu "
+		printk(KERN_INFO "btrfs csum failed ino %llu "
 		       "extent %llu csum %u "
-		       "wanted %u mirror %d\n", inode->i_ino,
+		       "wanted %u mirror %d\n",
+		       (unsigned long long)btrfs_ino(inode),
 		       (unsigned long long)disk_start,
 		       csum, *cb_sum, cb->mirror_num);
 		ret = -EIO;
@@ -163,7 +154,6 @@ fail:
  */
 static void end_compressed_bio_read(struct bio *bio, int err)
 {
-	struct extent_io_tree *tree;
 	struct compressed_bio *cb = bio->bi_private;
 	struct inode *inode;
 	struct page *page;
@@ -187,12 +177,12 @@ static void end_compressed_bio_read(struct bio *bio, int err)
 	/* ok, we're the last bio for this extent, lets start
 	 * the decompression.
 	 */
-	tree = &BTRFS_I(inode)->io_tree;
-	ret = btrfs_zlib_decompress_biovec(cb->compressed_pages,
-					   cb->start,
-					   cb->orig_bio->bi_io_vec,
-					   cb->orig_bio->bi_vcnt,
-					   cb->compressed_len);
+	ret = btrfs_decompress_biovec(cb->compress_type,
+				      cb->compressed_pages,
+				      cb->start,
+				      cb->orig_bio->bi_io_vec,
+				      cb->orig_bio->bi_vcnt,
+				      cb->compressed_len);
 csum_failed:
 	if (ret)
 		cb->errors = 1;
@@ -343,7 +333,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 	struct compressed_bio *cb;
 	unsigned long bytes_left;
 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
-	int page_index = 0;
+	int pg_index = 0;
 	struct page *page;
 	u64 first_byte = disk_start;
 	struct block_device *bdev;
@@ -351,6 +341,8 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 
 	WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1));
 	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
+	if (!cb)
+		return -ENOMEM;
 	atomic_set(&cb->pending_bios, 0);
 	cb->errors = 0;
 	cb->inode = inode;
@@ -365,14 +357,18 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
 
 	bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
+	if (!bio) {
+		kfree(cb);
+		return -ENOMEM;
+	}
 	bio->bi_private = cb;
 	bio->bi_end_io = end_compressed_bio_write;
 	atomic_inc(&cb->pending_bios);
 
 	/* create and submit bios for the compressed pages */
 	bytes_left = compressed_len;
-	for (page_index = 0; page_index < cb->nr_pages; page_index++) {
-		page = compressed_pages[page_index];
+	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
+		page = compressed_pages[pg_index];
 		page->mapping = inode->i_mapping;
 		if (bio->bi_size)
 			ret = io_tree->ops->merge_bio_hook(page, 0,
@@ -437,7 +433,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 				     struct compressed_bio *cb)
 {
 	unsigned long end_index;
-	unsigned long page_index;
+	unsigned long pg_index;
 	u64 last_offset;
 	u64 isize = i_size_read(inode);
 	int ret;
@@ -461,13 +457,13 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 	end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
 
 	while (last_offset < compressed_end) {
-		page_index = last_offset >> PAGE_CACHE_SHIFT;
+		pg_index = last_offset >> PAGE_CACHE_SHIFT;
 
-		if (page_index > end_index)
+		if (pg_index > end_index)
 			break;
 
 		rcu_read_lock();
-		page = radix_tree_lookup(&mapping->page_tree, page_index);
+		page = radix_tree_lookup(&mapping->page_tree, pg_index);
 		rcu_read_unlock();
 		if (page) {
 			misses++;
@@ -481,7 +477,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 		if (!page)
 			break;
 
-		if (add_to_page_cache_lru(page, mapping, page_index,
+		if (add_to_page_cache_lru(page, mapping, pg_index,
 					  GFP_NOFS)) {
 			page_cache_release(page);
 			goto next;
@@ -565,7 +561,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	unsigned long uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
 	unsigned long compressed_len;
 	unsigned long nr_pages;
-	unsigned long page_index;
+	unsigned long pg_index;
 	struct page *page;
 	struct block_device *bdev;
 	struct bio *comp_bio;
@@ -573,7 +569,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	u64 em_len;
 	u64 em_start;
 	struct extent_map *em;
-	int ret;
+	int ret = -ENOMEM;
 	u32 *sums;
 
 	tree = &BTRFS_I(inode)->io_tree;
@@ -588,6 +584,9 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 
 	compressed_len = em->block_len;
 	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
+	if (!cb)
+		goto out;
+
 	atomic_set(&cb->pending_bios, 0);
 	cb->errors = 0;
 	cb->inode = inode;
@@ -603,17 +602,23 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 
 	cb->len = uncompressed_len;
 	cb->compressed_len = compressed_len;
+	cb->compress_type = extent_compress_type(bio_flags);
 	cb->orig_bio = bio;
 
 	nr_pages = (compressed_len + PAGE_CACHE_SIZE - 1) /
 				 PAGE_CACHE_SIZE;
-	cb->compressed_pages = kmalloc(sizeof(struct page *) * nr_pages,
+	cb->compressed_pages = kzalloc(sizeof(struct page *) * nr_pages,
 				       GFP_NOFS);
+	if (!cb->compressed_pages)
+		goto fail1;
+
 	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
 
-	for (page_index = 0; page_index < nr_pages; page_index++) {
-		cb->compressed_pages[page_index] = alloc_page(GFP_NOFS |
+	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
+		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
 							      __GFP_HIGHMEM);
+		if (!cb->compressed_pages[pg_index])
+			goto fail2;
 	}
 	cb->nr_pages = nr_pages;
 
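cb->compress_type is recovered from the bio flags the read path carried down
from the extent map. The exact encoding lives in extent_io.h; a plausible
shape, stated here as an assumption rather than quoted source, is that the
type rides in the high bits of the flags word:

/* assumption: illustrative packing of the compress type into bio_flags */
#define EXTENT_BIO_FLAG_SHIFT	16

static inline int extent_compress_type(unsigned long bio_flags)
{
	return bio_flags >> EXTENT_BIO_FLAG_SHIFT;
}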
@@ -624,12 +629,14 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	cb->len = uncompressed_len;
 
 	comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
+	if (!comp_bio)
+		goto fail2;
 	comp_bio->bi_private = cb;
 	comp_bio->bi_end_io = end_compressed_bio_read;
 	atomic_inc(&cb->pending_bios);
 
-	for (page_index = 0; page_index < nr_pages; page_index++) {
-		page = cb->compressed_pages[page_index];
+	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
+		page = cb->compressed_pages[pg_index];
 		page->mapping = inode->i_mapping;
 		page->index = em_start >> PAGE_CACHE_SHIFT;
 
@@ -657,8 +664,9 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 			atomic_inc(&cb->pending_bios);
 
 			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
-				btrfs_lookup_bio_sums(root, inode, comp_bio,
-						      sums);
+				ret = btrfs_lookup_bio_sums(root, inode,
+							    comp_bio, sums);
+				BUG_ON(ret);
 			}
 			sums += (comp_bio->bi_size + root->sectorsize - 1) /
 				root->sectorsize;
@@ -683,12 +691,339 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
 	BUG_ON(ret);
 
-	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM))
-		btrfs_lookup_bio_sums(root, inode, comp_bio, sums);
+	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
+		ret = btrfs_lookup_bio_sums(root, inode, comp_bio, sums);
+		BUG_ON(ret);
+	}
 
 	ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0);
 	BUG_ON(ret);
 
 	bio_put(comp_bio);
 	return 0;
+
+fail2:
+	for (pg_index = 0; pg_index < nr_pages; pg_index++)
+		free_page((unsigned long)cb->compressed_pages[pg_index]);
+
+	kfree(cb->compressed_pages);
+fail1:
+	kfree(cb);
+out:
+	free_extent_map(em);
+	return ret;
+}
+
+static struct list_head comp_idle_workspace[BTRFS_COMPRESS_TYPES];
+static spinlock_t comp_workspace_lock[BTRFS_COMPRESS_TYPES];
+static int comp_num_workspace[BTRFS_COMPRESS_TYPES];
+static atomic_t comp_alloc_workspace[BTRFS_COMPRESS_TYPES];
+static wait_queue_head_t comp_workspace_wait[BTRFS_COMPRESS_TYPES];
+
+struct btrfs_compress_op *btrfs_compress_op[] = {
+	&btrfs_zlib_compress,
+	&btrfs_lzo_compress,
+};
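The btrfs_compress_op table is the heart of this patch: every call site below
dispatches through it by type instead of hard-wiring zlib. The per-algorithm
interface is declared in compression.h; the following is a sketch
reconstructed from the five calls made in this file, not a verbatim quote of
that header:

/* reconstructed from usage below; field order and spelling are assumptions */
struct btrfs_compress_op {
	struct list_head *(*alloc_workspace)(void);
	void (*free_workspace)(struct list_head *workspace);
	int (*compress_pages)(struct list_head *workspace,
			      struct address_space *mapping,
			      u64 start, unsigned long len,
			      struct page **pages,
			      unsigned long nr_dest_pages,
			      unsigned long *out_pages,
			      unsigned long *total_in,
			      unsigned long *total_out,
			      unsigned long max_out);
	int (*decompress_biovec)(struct list_head *workspace,
				 struct page **pages_in,
				 u64 disk_start,
				 struct bio_vec *bvec,
				 int vcnt, size_t srclen);
	int (*decompress)(struct list_head *workspace,
			  unsigned char *data_in,
			  struct page *dest_page,
			  unsigned long start_byte,
			  size_t srclen, size_t destlen);
};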
+
+int __init btrfs_init_compress(void)
+{
+	int i;
+
+	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
+		INIT_LIST_HEAD(&comp_idle_workspace[i]);
+		spin_lock_init(&comp_workspace_lock[i]);
+		atomic_set(&comp_alloc_workspace[i], 0);
+		init_waitqueue_head(&comp_workspace_wait[i]);
+	}
+	return 0;
+}
+
+/*
+ * this finds an available workspace or allocates a new one
+ * ERR_PTR is returned if things go bad.
+ */
+static struct list_head *find_workspace(int type)
+{
+	struct list_head *workspace;
+	int cpus = num_online_cpus();
+	int idx = type - 1;
+
+	struct list_head *idle_workspace = &comp_idle_workspace[idx];
+	spinlock_t *workspace_lock = &comp_workspace_lock[idx];
+	atomic_t *alloc_workspace = &comp_alloc_workspace[idx];
+	wait_queue_head_t *workspace_wait = &comp_workspace_wait[idx];
+	int *num_workspace = &comp_num_workspace[idx];
+again:
+	spin_lock(workspace_lock);
+	if (!list_empty(idle_workspace)) {
+		workspace = idle_workspace->next;
+		list_del(workspace);
+		(*num_workspace)--;
+		spin_unlock(workspace_lock);
+		return workspace;
+	}
+	if (atomic_read(alloc_workspace) > cpus) {
+		DEFINE_WAIT(wait);
+
+		spin_unlock(workspace_lock);
+		prepare_to_wait(workspace_wait, &wait, TASK_UNINTERRUPTIBLE);
+		if (atomic_read(alloc_workspace) > cpus && !*num_workspace)
+			schedule();
+		finish_wait(workspace_wait, &wait);
+		goto again;
+	}
+	atomic_inc(alloc_workspace);
+	spin_unlock(workspace_lock);
+
+	workspace = btrfs_compress_op[idx]->alloc_workspace();
+	if (IS_ERR(workspace)) {
+		atomic_dec(alloc_workspace);
+		wake_up(workspace_wait);
+	}
+	return workspace;
+}
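find_workspace() caps the number of live workspaces per algorithm at
num_online_cpus() and puts further callers to sleep until one is returned.
Every user in this file follows the same get/work/put shape; a sketch of the
pattern (do_compression_work() is a hypothetical placeholder, not a btrfs
function):

static int workspace_usage_sketch(int type)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);	/* may sleep */
	if (IS_ERR(workspace))
		return PTR_ERR(workspace);

	ret = do_compression_work(workspace);	/* hypothetical */

	free_workspace(type, workspace);	/* recycle or free, then wake */
	return ret;
}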
+
+/*
+ * put a workspace struct back on the list or free it if we have enough
+ * idle ones sitting around
+ */
+static void free_workspace(int type, struct list_head *workspace)
+{
+	int idx = type - 1;
+	struct list_head *idle_workspace = &comp_idle_workspace[idx];
+	spinlock_t *workspace_lock = &comp_workspace_lock[idx];
+	atomic_t *alloc_workspace = &comp_alloc_workspace[idx];
+	wait_queue_head_t *workspace_wait = &comp_workspace_wait[idx];
+	int *num_workspace = &comp_num_workspace[idx];
+
+	spin_lock(workspace_lock);
+	if (*num_workspace < num_online_cpus()) {
+		list_add_tail(workspace, idle_workspace);
+		(*num_workspace)++;
+		spin_unlock(workspace_lock);
+		goto wake;
+	}
+	spin_unlock(workspace_lock);
+
+	btrfs_compress_op[idx]->free_workspace(workspace);
+	atomic_dec(alloc_workspace);
+wake:
+	if (waitqueue_active(workspace_wait))
+		wake_up(workspace_wait);
+}
+
+/*
+ * cleanup function for module exit
+ */
+static void free_workspaces(void)
+{
+	struct list_head *workspace;
+	int i;
+
+	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
+		while (!list_empty(&comp_idle_workspace[i])) {
+			workspace = comp_idle_workspace[i].next;
+			list_del(workspace);
+			btrfs_compress_op[i]->free_workspace(workspace);
+			atomic_dec(&comp_alloc_workspace[i]);
+		}
+	}
+}
+
+/*
+ * given an address space and start/len, compress the bytes.
+ *
+ * pages are allocated to hold the compressed result and stored
+ * in 'pages'
+ *
+ * out_pages is used to return the number of pages allocated.  There
+ * may be pages allocated even if we return an error
+ *
+ * total_in is used to return the number of bytes actually read.  It
+ * may be smaller than len if we had to exit early because we ran out
+ * of room in the pages array or because we crossed the max_out
+ * threshold.
+ *
+ * total_out is used to return the total number of compressed bytes
+ *
+ * max_out tells us the max number of bytes that we're allowed to
+ * stuff into pages
+ */
+int btrfs_compress_pages(int type, struct address_space *mapping,
+			 u64 start, unsigned long len,
+			 struct page **pages,
+			 unsigned long nr_dest_pages,
+			 unsigned long *out_pages,
+			 unsigned long *total_in,
+			 unsigned long *total_out,
+			 unsigned long max_out)
+{
+	struct list_head *workspace;
+	int ret;
+
+	workspace = find_workspace(type);
+	if (IS_ERR(workspace))
+		return -1;
+
+	ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping,
+							start, len, pages,
+							nr_dest_pages,
+							out_pages, total_in,
+							total_out, max_out);
+	free_workspace(type, workspace);
+	return ret;
+}
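A hedged caller sketch for btrfs_compress_pages(), along the lines of what
the delalloc write path does with a 128K chunk (the constants and names here
are illustrative, not the real call site in inode.c):

static int compress_chunk_sketch(struct address_space *mapping, u64 start)
{
	struct page *pages[32];			/* up to 128K of output */
	unsigned long out_pages = 0;
	unsigned long total_in = 0;
	unsigned long total_out = 0;
	int ret;

	ret = btrfs_compress_pages(BTRFS_COMPRESS_ZLIB, mapping,
				   start, 128 * 1024,
				   pages, 32, &out_pages,
				   &total_in, &total_out,
				   128 * 1024);

	/* even on error, out_pages entries may still need to be freed */
	return ret;
}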
+
+/*
+ * pages_in is an array of pages with compressed data.
+ *
+ * disk_start is the starting logical offset of this array in the file
+ *
+ * bvec is a bio_vec of pages from the file that we want to decompress into
+ *
+ * vcnt is the count of pages in the biovec
+ *
+ * srclen is the number of bytes in pages_in
+ *
+ * The basic idea is that we have a bio that was created by readpages.
+ * The pages in the bio are for the uncompressed data, and they may not
+ * be contiguous.  They all correspond to the range of bytes covered by
+ * the compressed extent.
+ */
+int btrfs_decompress_biovec(int type, struct page **pages_in, u64 disk_start,
+			    struct bio_vec *bvec, int vcnt, size_t srclen)
+{
+	struct list_head *workspace;
+	int ret;
+
+	workspace = find_workspace(type);
+	if (IS_ERR(workspace))
+		return -ENOMEM;
+
+	ret = btrfs_compress_op[type-1]->decompress_biovec(workspace, pages_in,
+							   disk_start,
+							   bvec, vcnt, srclen);
+	free_workspace(type, workspace);
+	return ret;
+}
+
+/*
+ * a less complex decompression routine.  Our compressed data fits in a
+ * single page, and we want to read a single page out of it.
+ * start_byte tells us the offset into the uncompressed data we're
+ * interested in
+ */
+int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
+		     unsigned long start_byte, size_t srclen, size_t destlen)
+{
+	struct list_head *workspace;
+	int ret;
+
+	workspace = find_workspace(type);
+	if (IS_ERR(workspace))
+		return -ENOMEM;
+
+	ret = btrfs_compress_op[type-1]->decompress(workspace, data_in,
+						    dest_page, start_byte,
+						    srclen, destlen);
+
+	free_workspace(type, workspace);
+	return ret;
+}
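btrfs_decompress() is the path used for inline extents, where the whole
compressed blob is already in memory and only one page of output is wanted.
A sketch of such a call (the names are illustrative):

static int read_inline_sketch(unsigned char *inline_buf, size_t inline_len,
			      struct page *dest, unsigned long offset)
{
	/* decompress at most one page, starting 'offset' bytes into
	 * the uncompressed data */
	return btrfs_decompress(BTRFS_COMPRESS_ZLIB, inline_buf, dest,
				offset, inline_len, PAGE_CACHE_SIZE);
}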
+
+void btrfs_exit_compress(void)
+{
+	free_workspaces();
+}
+
+/*
+ * Copy uncompressed data from the working buffer out to the bio's pages.
+ *
+ * buf_start is the byte offset of the start of the working buffer,
+ * relative to the start of the uncompressed data.
+ *
+ * total_out is the offset just past the last byte produced so far, so
+ * the working buffer covers [buf_start, total_out) of the uncompressed
+ * data.
+ */
+int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
+			      unsigned long total_out, u64 disk_start,
+			      struct bio_vec *bvec, int vcnt,
+			      unsigned long *pg_index,
+			      unsigned long *pg_offset)
+{
+	unsigned long buf_offset;
+	unsigned long current_buf_start;
+	unsigned long start_byte;
+	unsigned long working_bytes = total_out - buf_start;
+	unsigned long bytes;
+	char *kaddr;
+	struct page *page_out = bvec[*pg_index].bv_page;
+
+	/*
+	 * start_byte is the first byte of the page we're currently
+	 * copying into, relative to the start of the uncompressed data.
+	 */
+	start_byte = page_offset(page_out) - disk_start;
+
+	/* we haven't yet hit data corresponding to this page */
+	if (total_out <= start_byte)
+		return 1;
+
+	/*
+	 * the start of the data we care about is offset into
+	 * the middle of our working buffer
+	 */
+	if (total_out > start_byte && buf_start < start_byte) {
+		buf_offset = start_byte - buf_start;
+		working_bytes -= buf_offset;
+	} else {
+		buf_offset = 0;
+	}
+	current_buf_start = buf_start;
+
+	/* copy bytes from the working buffer into the pages */
+	while (working_bytes > 0) {
+		bytes = min(PAGE_CACHE_SIZE - *pg_offset,
+			    PAGE_CACHE_SIZE - buf_offset);
+		bytes = min(bytes, working_bytes);
+		kaddr = kmap_atomic(page_out, KM_USER0);
+		memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
+		kunmap_atomic(kaddr, KM_USER0);
+		flush_dcache_page(page_out);
+
+		*pg_offset += bytes;
+		buf_offset += bytes;
+		working_bytes -= bytes;
+		current_buf_start += bytes;
+
+		/* check if we need to pick another page */
+		if (*pg_offset == PAGE_CACHE_SIZE) {
+			(*pg_index)++;
+			if (*pg_index >= vcnt)
+				return 0;
+
+			page_out = bvec[*pg_index].bv_page;
+			*pg_offset = 0;
+			start_byte = page_offset(page_out) - disk_start;
+
+			/*
+			 * make sure our new page is covered by this
+			 * working buffer
+			 */
+			if (total_out <= start_byte)
+				return 1;
+
+			/*
+			 * the next page in the biovec might not be adjacent
+			 * to the last page, but it might still be found
+			 * inside this working buffer.  bump our offset
+			 * pointer accordingly.
+			 */
+			if (total_out > start_byte &&
+			    current_buf_start < start_byte) {
+				buf_offset = start_byte - buf_start;
+				working_bytes = total_out - start_byte;
+				current_buf_start = buf_start + buf_offset;
+			}
+		}
+	}
+
+	return 1;
 }
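btrfs_decompress_buf2page() is the shared fan-out step both decompressors
use: each pass of the algorithm fills a working buffer, and this helper
spreads those bytes across the (possibly non-contiguous) pages of the
original bio. A hedged sketch of the driving loop, loosely modeled on the
zlib case; the stream handling here is illustrative, not quoted from zlib.c:

static int inflate_loop_sketch(z_stream *strm, char *workbuf, u64 disk_start,
			       struct bio_vec *bvec, int vcnt)
{
	unsigned long buf_start = 0;	/* bytes produced before this pass */
	unsigned long total_out = 0;	/* bytes produced so far */
	unsigned long pg_index = 0;
	unsigned long pg_offset = 0;
	int zret;

	do {
		strm->next_out = workbuf;
		strm->avail_out = PAGE_CACHE_SIZE;

		zret = zlib_inflate(strm, Z_NO_FLUSH);
		buf_start = total_out;
		total_out = strm->total_out;

		/* returns 0 once every page in the biovec is full */
		if (btrfs_decompress_buf2page(workbuf, buf_start, total_out,
					      disk_start, bvec, vcnt,
					      &pg_index, &pg_offset) == 0)
			return 0;
	} while (zret == Z_OK);

	return (zret == Z_STREAM_END) ? 0 : -EIO;
}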