-rw-r--r--  fs/btrfs/btrfs_inode.h  |   2
-rw-r--r--  fs/btrfs/compression.c  | 236
-rw-r--r--  fs/btrfs/compression.h  |  66
-rw-r--r--  fs/btrfs/ctree.h        |  10
-rw-r--r--  fs/btrfs/extent_io.c    |   5
-rw-r--r--  fs/btrfs/extent_io.h    |  17
-rw-r--r--  fs/btrfs/extent_map.c   |   2
-rw-r--r--  fs/btrfs/extent_map.h   |   3
-rw-r--r--  fs/btrfs/file.c         |   2
-rw-r--r--  fs/btrfs/inode.c        |  82
-rw-r--r--  fs/btrfs/ioctl.c        |   4
-rw-r--r--  fs/btrfs/ordered-data.c |  18
-rw-r--r--  fs/btrfs/ordered-data.h |   8
-rw-r--r--  fs/btrfs/super.c        |  47
-rw-r--r--  fs/btrfs/zlib.c         | 253
15 files changed, 473 insertions(+), 282 deletions(-)
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 6ad63f17eca0..ccc991c542df 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -157,7 +157,7 @@ struct btrfs_inode { | |||
157 | /* | 157 | /* |
158 | * always compress this one file | 158 | * always compress this one file |
159 | */ | 159 | */ |
160 | unsigned force_compress:1; | 160 | unsigned force_compress:4; |
161 | 161 | ||
162 | struct inode vfs_inode; | 162 | struct inode vfs_inode; |
163 | }; | 163 | }; |
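Widening force_compress from one bit to four lets it carry a btrfs_compression_type value rather than a plain boolean: four bits cover type ids 1-15, with 0 (BTRFS_COMPRESS_NONE) meaning "do not force compression". The ioctl.c hunk below stores BTRFS_COMPRESS_ZLIB here for compress-on-defrag and resets the field to BTRFS_COMPRESS_NONE once defrag finishes.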
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index b50bc4bd5c56..6638c9877720 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -62,6 +62,9 @@ struct compressed_bio { | |||
62 | /* number of bytes on disk */ | 62 | /* number of bytes on disk */ |
63 | unsigned long compressed_len; | 63 | unsigned long compressed_len; |
64 | 64 | ||
65 | /* the compression algorithm for this bio */ | ||
66 | int compress_type; | ||
67 | |||
65 | /* number of compressed pages in the array */ | 68 | /* number of compressed pages in the array */ |
66 | unsigned long nr_pages; | 69 | unsigned long nr_pages; |
67 | 70 | ||
@@ -173,11 +176,12 @@ static void end_compressed_bio_read(struct bio *bio, int err) | |||
173 | /* ok, we're the last bio for this extent, lets start | 176 | /* ok, we're the last bio for this extent, lets start |
174 | * the decompression. | 177 | * the decompression. |
175 | */ | 178 | */ |
176 | ret = btrfs_zlib_decompress_biovec(cb->compressed_pages, | 179 | ret = btrfs_decompress_biovec(cb->compress_type, |
177 | cb->start, | 180 | cb->compressed_pages, |
178 | cb->orig_bio->bi_io_vec, | 181 | cb->start, |
179 | cb->orig_bio->bi_vcnt, | 182 | cb->orig_bio->bi_io_vec, |
180 | cb->compressed_len); | 183 | cb->orig_bio->bi_vcnt, |
184 | cb->compressed_len); | ||
181 | csum_failed: | 185 | csum_failed: |
182 | if (ret) | 186 | if (ret) |
183 | cb->errors = 1; | 187 | cb->errors = 1; |
@@ -588,6 +592,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, | |||
588 | 592 | ||
589 | cb->len = uncompressed_len; | 593 | cb->len = uncompressed_len; |
590 | cb->compressed_len = compressed_len; | 594 | cb->compressed_len = compressed_len; |
595 | cb->compress_type = extent_compress_type(bio_flags); | ||
591 | cb->orig_bio = bio; | 596 | cb->orig_bio = bio; |
592 | 597 | ||
593 | nr_pages = (compressed_len + PAGE_CACHE_SIZE - 1) / | 598 | nr_pages = (compressed_len + PAGE_CACHE_SIZE - 1) / |
@@ -677,3 +682,224 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, | |||
677 | bio_put(comp_bio); | 682 | bio_put(comp_bio); |
678 | return 0; | 683 | return 0; |
679 | } | 684 | } |
685 | |||
686 | static struct list_head comp_idle_workspace[BTRFS_COMPRESS_TYPES]; | ||
687 | static spinlock_t comp_workspace_lock[BTRFS_COMPRESS_TYPES]; | ||
688 | static int comp_num_workspace[BTRFS_COMPRESS_TYPES]; | ||
689 | static atomic_t comp_alloc_workspace[BTRFS_COMPRESS_TYPES]; | ||
690 | static wait_queue_head_t comp_workspace_wait[BTRFS_COMPRESS_TYPES]; | ||
691 | |||
692 | struct btrfs_compress_op *btrfs_compress_op[] = { | ||
693 | &btrfs_zlib_compress, | ||
694 | }; | ||
695 | |||
696 | int __init btrfs_init_compress(void) | ||
697 | { | ||
698 | int i; | ||
699 | |||
700 | for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) { | ||
701 | INIT_LIST_HEAD(&comp_idle_workspace[i]); | ||
702 | spin_lock_init(&comp_workspace_lock[i]); | ||
703 | atomic_set(&comp_alloc_workspace[i], 0); | ||
704 | init_waitqueue_head(&comp_workspace_wait[i]); | ||
705 | } | ||
706 | return 0; | ||
707 | } | ||
708 | |||
709 | /* | ||
710 | * this finds an available workspace or allocates a new one | ||
711 | * ERR_PTR is returned if things go bad. | ||
712 | */ | ||
713 | static struct list_head *find_workspace(int type) | ||
714 | { | ||
715 | struct list_head *workspace; | ||
716 | int cpus = num_online_cpus(); | ||
717 | int idx = type - 1; | ||
718 | |||
719 | struct list_head *idle_workspace = &comp_idle_workspace[idx]; | ||
720 | spinlock_t *workspace_lock = &comp_workspace_lock[idx]; | ||
721 | atomic_t *alloc_workspace = &comp_alloc_workspace[idx]; | ||
722 | wait_queue_head_t *workspace_wait = &comp_workspace_wait[idx]; | ||
723 | int *num_workspace = &comp_num_workspace[idx]; | ||
724 | again: | ||
725 | spin_lock(workspace_lock); | ||
726 | if (!list_empty(idle_workspace)) { | ||
727 | workspace = idle_workspace->next; | ||
728 | list_del(workspace); | ||
729 | (*num_workspace)--; | ||
730 | spin_unlock(workspace_lock); | ||
731 | return workspace; | ||
732 | |||
733 | } | ||
734 | if (atomic_read(alloc_workspace) > cpus) { | ||
735 | DEFINE_WAIT(wait); | ||
736 | |||
737 | spin_unlock(workspace_lock); | ||
738 | prepare_to_wait(workspace_wait, &wait, TASK_UNINTERRUPTIBLE); | ||
739 | if (atomic_read(alloc_workspace) > cpus && !*num_workspace) | ||
740 | schedule(); | ||
741 | finish_wait(workspace_wait, &wait); | ||
742 | goto again; | ||
743 | } | ||
744 | atomic_inc(alloc_workspace); | ||
745 | spin_unlock(workspace_lock); | ||
746 | |||
747 | workspace = btrfs_compress_op[idx]->alloc_workspace(); | ||
748 | if (IS_ERR(workspace)) { | ||
749 | atomic_dec(alloc_workspace); | ||
750 | wake_up(workspace_wait); | ||
751 | } | ||
752 | return workspace; | ||
753 | } | ||
754 | |||
755 | /* | ||
756 | * put a workspace struct back on the list or free it if we have enough | ||
757 | * idle ones sitting around | ||
758 | */ | ||
759 | static void free_workspace(int type, struct list_head *workspace) | ||
760 | { | ||
761 | int idx = type - 1; | ||
762 | struct list_head *idle_workspace = &comp_idle_workspace[idx]; | ||
763 | spinlock_t *workspace_lock = &comp_workspace_lock[idx]; | ||
764 | atomic_t *alloc_workspace = &comp_alloc_workspace[idx]; | ||
765 | wait_queue_head_t *workspace_wait = &comp_workspace_wait[idx]; | ||
766 | int *num_workspace = &comp_num_workspace[idx]; | ||
767 | |||
768 | spin_lock(workspace_lock); | ||
769 | if (*num_workspace < num_online_cpus()) { | ||
770 | list_add_tail(workspace, idle_workspace); | ||
771 | (*num_workspace)++; | ||
772 | spin_unlock(workspace_lock); | ||
773 | goto wake; | ||
774 | } | ||
775 | spin_unlock(workspace_lock); | ||
776 | |||
777 | btrfs_compress_op[idx]->free_workspace(workspace); | ||
778 | atomic_dec(alloc_workspace); | ||
779 | wake: | ||
780 | if (waitqueue_active(workspace_wait)) | ||
781 | wake_up(workspace_wait); | ||
782 | } | ||
783 | |||
784 | /* | ||
785 | * cleanup function for module exit | ||
786 | */ | ||
787 | static void free_workspaces(void) | ||
788 | { | ||
789 | struct list_head *workspace; | ||
790 | int i; | ||
791 | |||
792 | for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) { | ||
793 | while (!list_empty(&comp_idle_workspace[i])) { | ||
794 | workspace = comp_idle_workspace[i].next; | ||
795 | list_del(workspace); | ||
796 | btrfs_compress_op[i]->free_workspace(workspace); | ||
797 | atomic_dec(&comp_alloc_workspace[i]); | ||
798 | } | ||
799 | } | ||
800 | } | ||
801 | |||
802 | /* | ||
803 | * given an address space and start/len, compress the bytes. | ||
804 | * | ||
805 | * pages are allocated to hold the compressed result and stored | ||
806 | * in 'pages' | ||
807 | * | ||
808 | * out_pages is used to return the number of pages allocated. There | ||
809 | * may be pages allocated even if we return an error | ||
810 | * | ||
811 | * total_in is used to return the number of bytes actually read. It | ||
812 | * may be smaller then len if we had to exit early because we | ||
813 | * ran out of room in the pages array or because we cross the | ||
814 | * max_out threshold. | ||
815 | * | ||
816 | * total_out is used to return the total number of compressed bytes | ||
817 | * | ||
818 | * max_out tells us the max number of bytes that we're allowed to | ||
819 | * stuff into pages | ||
820 | */ | ||
821 | int btrfs_compress_pages(int type, struct address_space *mapping, | ||
822 | u64 start, unsigned long len, | ||
823 | struct page **pages, | ||
824 | unsigned long nr_dest_pages, | ||
825 | unsigned long *out_pages, | ||
826 | unsigned long *total_in, | ||
827 | unsigned long *total_out, | ||
828 | unsigned long max_out) | ||
829 | { | ||
830 | struct list_head *workspace; | ||
831 | int ret; | ||
832 | |||
833 | workspace = find_workspace(type); | ||
834 | if (IS_ERR(workspace)) | ||
835 | return -1; | ||
836 | |||
837 | ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping, | ||
838 | start, len, pages, | ||
839 | nr_dest_pages, out_pages, | ||
840 | total_in, total_out, | ||
841 | max_out); | ||
842 | free_workspace(type, workspace); | ||
843 | return ret; | ||
844 | } | ||
845 | |||
846 | /* | ||
847 | * pages_in is an array of pages with compressed data. | ||
848 | * | ||
849 | * disk_start is the starting logical offset of this array in the file | ||
850 | * | ||
851 | * bvec is a bio_vec of pages from the file that we want to decompress into | ||
852 | * | ||
853 | * vcnt is the count of pages in the biovec | ||
854 | * | ||
855 | * srclen is the number of bytes in pages_in | ||
856 | * | ||
857 | * The basic idea is that we have a bio that was created by readpages. | ||
858 | * The pages in the bio are for the uncompressed data, and they may not | ||
859 | * be contiguous. They all correspond to the range of bytes covered by | ||
860 | * the compressed extent. | ||
861 | */ | ||
862 | int btrfs_decompress_biovec(int type, struct page **pages_in, u64 disk_start, | ||
863 | struct bio_vec *bvec, int vcnt, size_t srclen) | ||
864 | { | ||
865 | struct list_head *workspace; | ||
866 | int ret; | ||
867 | |||
868 | workspace = find_workspace(type); | ||
869 | if (IS_ERR(workspace)) | ||
870 | return -ENOMEM; | ||
871 | |||
872 | ret = btrfs_compress_op[type-1]->decompress_biovec(workspace, pages_in, | ||
873 | disk_start, | ||
874 | bvec, vcnt, srclen); | ||
875 | free_workspace(type, workspace); | ||
876 | return ret; | ||
877 | } | ||
878 | |||
879 | /* | ||
880 | * a less complex decompression routine. Our compressed data fits in a | ||
881 | * single page, and we want to read a single page out of it. | ||
882 | * start_byte tells us the offset into the compressed data we're interested in | ||
883 | */ | ||
884 | int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page, | ||
885 | unsigned long start_byte, size_t srclen, size_t destlen) | ||
886 | { | ||
887 | struct list_head *workspace; | ||
888 | int ret; | ||
889 | |||
890 | workspace = find_workspace(type); | ||
891 | if (IS_ERR(workspace)) | ||
892 | return -ENOMEM; | ||
893 | |||
894 | ret = btrfs_compress_op[type-1]->decompress(workspace, data_in, | ||
895 | dest_page, start_byte, | ||
896 | srclen, destlen); | ||
897 | |||
898 | free_workspace(type, workspace); | ||
899 | return ret; | ||
900 | } | ||
901 | |||
902 | void __exit btrfs_exit_compress(void) | ||
903 | { | ||
904 | free_workspaces(); | ||
905 | } | ||
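The new generic entry points mirror the old zlib-only helpers but take the algorithm as their first argument and keep the per-type workspace pools internal. As a minimal sketch (illustrative only, not part of the patch; it mirrors the call made from compress_file_range() in inode.c and assumes a 128K input window), a caller would drive btrfs_compress_pages() like this:

/* Illustration only: compress up to 128K of an inode's data with zlib. */
static int example_compress_range(struct inode *inode, u64 start)
{
	unsigned long nr_pages = (128 * 1024) / PAGE_CACHE_SIZE;
	unsigned long nr_pages_ret = 0;
	unsigned long total_in = 0;
	unsigned long total_out = 128 * 1024;
	struct page **pages;
	unsigned long i;
	int ret;

	pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
	if (!pages)
		return -ENOMEM;

	ret = btrfs_compress_pages(BTRFS_COMPRESS_ZLIB, inode->i_mapping,
				   start, 128 * 1024, pages, nr_pages,
				   &nr_pages_ret, &total_in, &total_out,
				   128 * 1024);

	/* A real caller would hand the filled pages to
	 * btrfs_submit_compressed_write(); here we just release them.
	 * Pages may have been allocated even when ret is non-zero. */
	for (i = 0; i < nr_pages_ret; i++)
		page_cache_release(pages[i]);
	kfree(pages);
	return ret;
}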
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index 421f5b4aa715..9b5f2f365b79 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -19,24 +19,22 @@ | |||
19 | #ifndef __BTRFS_COMPRESSION_ | 19 | #ifndef __BTRFS_COMPRESSION_ |
20 | #define __BTRFS_COMPRESSION_ | 20 | #define __BTRFS_COMPRESSION_ |
21 | 21 | ||
22 | int btrfs_zlib_decompress(unsigned char *data_in, | 22 | int btrfs_init_compress(void); |
23 | struct page *dest_page, | 23 | void btrfs_exit_compress(void); |
24 | unsigned long start_byte, | 24 | |
25 | size_t srclen, size_t destlen); | 25 | int btrfs_compress_pages(int type, struct address_space *mapping, |
26 | int btrfs_zlib_compress_pages(struct address_space *mapping, | 26 | u64 start, unsigned long len, |
27 | u64 start, unsigned long len, | 27 | struct page **pages, |
28 | struct page **pages, | 28 | unsigned long nr_dest_pages, |
29 | unsigned long nr_dest_pages, | 29 | unsigned long *out_pages, |
30 | unsigned long *out_pages, | 30 | unsigned long *total_in, |
31 | unsigned long *total_in, | 31 | unsigned long *total_out, |
32 | unsigned long *total_out, | 32 | unsigned long max_out); |
33 | unsigned long max_out); | 33 | int btrfs_decompress_biovec(int type, struct page **pages_in, u64 disk_start, |
34 | int btrfs_zlib_decompress_biovec(struct page **pages_in, | 34 | struct bio_vec *bvec, int vcnt, size_t srclen); |
35 | u64 disk_start, | 35 | int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page, |
36 | struct bio_vec *bvec, | 36 | unsigned long start_byte, size_t srclen, size_t destlen); |
37 | int vcnt, | 37 | |
38 | size_t srclen); | ||
39 | void btrfs_zlib_exit(void); | ||
40 | int btrfs_submit_compressed_write(struct inode *inode, u64 start, | 38 | int btrfs_submit_compressed_write(struct inode *inode, u64 start, |
41 | unsigned long len, u64 disk_start, | 39 | unsigned long len, u64 disk_start, |
42 | unsigned long compressed_len, | 40 | unsigned long compressed_len, |
@@ -44,4 +42,36 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, | |||
44 | unsigned long nr_pages); | 42 | unsigned long nr_pages); |
45 | int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, | 43 | int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, |
46 | int mirror_num, unsigned long bio_flags); | 44 | int mirror_num, unsigned long bio_flags); |
45 | |||
46 | struct btrfs_compress_op { | ||
47 | struct list_head *(*alloc_workspace)(void); | ||
48 | |||
49 | void (*free_workspace)(struct list_head *workspace); | ||
50 | |||
51 | int (*compress_pages)(struct list_head *workspace, | ||
52 | struct address_space *mapping, | ||
53 | u64 start, unsigned long len, | ||
54 | struct page **pages, | ||
55 | unsigned long nr_dest_pages, | ||
56 | unsigned long *out_pages, | ||
57 | unsigned long *total_in, | ||
58 | unsigned long *total_out, | ||
59 | unsigned long max_out); | ||
60 | |||
61 | int (*decompress_biovec)(struct list_head *workspace, | ||
62 | struct page **pages_in, | ||
63 | u64 disk_start, | ||
64 | struct bio_vec *bvec, | ||
65 | int vcnt, | ||
66 | size_t srclen); | ||
67 | |||
68 | int (*decompress)(struct list_head *workspace, | ||
69 | unsigned char *data_in, | ||
70 | struct page *dest_page, | ||
71 | unsigned long start_byte, | ||
72 | size_t srclen, size_t destlen); | ||
73 | }; | ||
74 | |||
75 | extern struct btrfs_compress_op btrfs_zlib_compress; | ||
76 | |||
47 | #endif | 77 | #endif |
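The five callbacks in struct btrfs_compress_op, plus the btrfs_compress_op[] array in compression.c, are the entire plug-in surface. Purely as an illustration of the intended extension path (a made-up "foo" algorithm, not something this patch adds), a second compressor would be registered roughly like this:

/* Hypothetical example only -- where a new algorithm would hook in. */

/* ctree.h: give the algorithm an on-disk id and bump the type count. */
enum btrfs_compression_type {
	BTRFS_COMPRESS_NONE  = 0,
	BTRFS_COMPRESS_ZLIB  = 1,
	BTRFS_COMPRESS_FOO   = 2,	/* new */
	BTRFS_COMPRESS_TYPES = 2,	/* was 1 */
	BTRFS_COMPRESS_LAST  = 3,
};

/* foo.c: implement the five callbacks and export the ops table. */
struct btrfs_compress_op btrfs_foo_compress = {
	.alloc_workspace	= foo_alloc_workspace,
	.free_workspace		= foo_free_workspace,
	.compress_pages		= foo_compress_pages,
	.decompress_biovec	= foo_decompress_biovec,
	.decompress		= foo_decompress,
};

/* compression.c: indexed by compress_type - 1 in the generic helpers. */
struct btrfs_compress_op *btrfs_compress_op[] = {
	&btrfs_zlib_compress,
	&btrfs_foo_compress,
};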
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index af52f6d7a4d8..e06534438592 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -551,9 +551,10 @@ struct btrfs_timespec { | |||
551 | } __attribute__ ((__packed__)); | 551 | } __attribute__ ((__packed__)); |
552 | 552 | ||
553 | enum btrfs_compression_type { | 553 | enum btrfs_compression_type { |
554 | BTRFS_COMPRESS_NONE = 0, | 554 | BTRFS_COMPRESS_NONE = 0, |
555 | BTRFS_COMPRESS_ZLIB = 1, | 555 | BTRFS_COMPRESS_ZLIB = 1, |
556 | BTRFS_COMPRESS_LAST = 2, | 556 | BTRFS_COMPRESS_TYPES = 1, |
557 | BTRFS_COMPRESS_LAST = 2, | ||
557 | }; | 558 | }; |
558 | 559 | ||
559 | struct btrfs_inode_item { | 560 | struct btrfs_inode_item { |
@@ -895,7 +896,8 @@ struct btrfs_fs_info { | |||
895 | */ | 896 | */ |
896 | u64 last_trans_log_full_commit; | 897 | u64 last_trans_log_full_commit; |
897 | u64 open_ioctl_trans; | 898 | u64 open_ioctl_trans; |
898 | unsigned long mount_opt; | 899 | unsigned long mount_opt:20; |
900 | unsigned long compress_type:4; | ||
899 | u64 max_inline; | 901 | u64 max_inline; |
900 | u64 alloc_start; | 902 | u64 alloc_start; |
901 | struct btrfs_transaction *running_transaction; | 903 | struct btrfs_transaction *running_transaction; |
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 5e7a94d7da89..f1d198128959 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2028,8 +2028,11 @@ static int __extent_read_full_page(struct extent_io_tree *tree, | |||
2028 | BUG_ON(extent_map_end(em) <= cur); | 2028 | BUG_ON(extent_map_end(em) <= cur); |
2029 | BUG_ON(end < cur); | 2029 | BUG_ON(end < cur); |
2030 | 2030 | ||
2031 | if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) | 2031 | if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { |
2032 | this_bio_flag = EXTENT_BIO_COMPRESSED; | 2032 | this_bio_flag = EXTENT_BIO_COMPRESSED; |
2033 | extent_set_compress_type(&this_bio_flag, | ||
2034 | em->compress_type); | ||
2035 | } | ||
2033 | 2036 | ||
2034 | iosize = min(extent_map_end(em) - cur, end - cur + 1); | 2037 | iosize = min(extent_map_end(em) - cur, end - cur + 1); |
2035 | cur_end = min(extent_map_end(em) - 1, end); | 2038 | cur_end = min(extent_map_end(em) - 1, end); |
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 4183c8178f01..7083cfafd061 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -20,8 +20,12 @@ | |||
20 | #define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK) | 20 | #define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK) |
21 | #define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING | EXTENT_FIRST_DELALLOC) | 21 | #define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING | EXTENT_FIRST_DELALLOC) |
22 | 22 | ||
23 | /* flags for bio submission */ | 23 | /* |
24 | * flags for bio submission. The high bits indicate the compression | ||
25 | * type for this bio | ||
26 | */ | ||
24 | #define EXTENT_BIO_COMPRESSED 1 | 27 | #define EXTENT_BIO_COMPRESSED 1 |
28 | #define EXTENT_BIO_FLAG_SHIFT 16 | ||
25 | 29 | ||
26 | /* these are bit numbers for test/set bit */ | 30 | /* these are bit numbers for test/set bit */ |
27 | #define EXTENT_BUFFER_UPTODATE 0 | 31 | #define EXTENT_BUFFER_UPTODATE 0 |
@@ -135,6 +139,17 @@ struct extent_buffer { | |||
135 | wait_queue_head_t lock_wq; | 139 | wait_queue_head_t lock_wq; |
136 | }; | 140 | }; |
137 | 141 | ||
142 | static inline void extent_set_compress_type(unsigned long *bio_flags, | ||
143 | int compress_type) | ||
144 | { | ||
145 | *bio_flags |= compress_type << EXTENT_BIO_FLAG_SHIFT; | ||
146 | } | ||
147 | |||
148 | static inline int extent_compress_type(unsigned long bio_flags) | ||
149 | { | ||
150 | return bio_flags >> EXTENT_BIO_FLAG_SHIFT; | ||
151 | } | ||
152 | |||
138 | struct extent_map_tree; | 153 | struct extent_map_tree; |
139 | 154 | ||
140 | static inline struct extent_state *extent_state_next(struct extent_state *state) | 155 | static inline struct extent_state *extent_state_next(struct extent_state *state) |
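With EXTENT_BIO_FLAG_SHIFT set to 16, a bio's flags word keeps the submission bits in the low half and the compression type in the high half. A small worked example (illustrative values only, not part of the patch):

/* Illustration only: packing and reading back a compressed bio's flags. */
static void example_bio_flag_packing(void)
{
	unsigned long bio_flags = EXTENT_BIO_COMPRESSED;	/* 0x1 */

	extent_set_compress_type(&bio_flags, BTRFS_COMPRESS_ZLIB);
	/* bio_flags is now 0x10001: bit 0 says "compressed",
	 * bits 16+ carry the algorithm id (1 == zlib). */

	if (extent_compress_type(bio_flags) == BTRFS_COMPRESS_ZLIB) {
		/* end_compressed_bio_read() uses this value to pick the
		 * matching entry in btrfs_compress_op[]. */
	}
}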
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 23cb8da3ff66..b0e1fce12530 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -3,6 +3,7 @@ | |||
3 | #include <linux/module.h> | 3 | #include <linux/module.h> |
4 | #include <linux/spinlock.h> | 4 | #include <linux/spinlock.h> |
5 | #include <linux/hardirq.h> | 5 | #include <linux/hardirq.h> |
6 | #include "ctree.h" | ||
6 | #include "extent_map.h" | 7 | #include "extent_map.h" |
7 | 8 | ||
8 | 9 | ||
@@ -54,6 +55,7 @@ struct extent_map *alloc_extent_map(gfp_t mask) | |||
54 | return em; | 55 | return em; |
55 | em->in_tree = 0; | 56 | em->in_tree = 0; |
56 | em->flags = 0; | 57 | em->flags = 0; |
58 | em->compress_type = BTRFS_COMPRESS_NONE; | ||
57 | atomic_set(&em->refs, 1); | 59 | atomic_set(&em->refs, 1); |
58 | return em; | 60 | return em; |
59 | } | 61 | } |
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index ab6d74b6e647..28b44dbd1e35 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -26,7 +26,8 @@ struct extent_map { | |||
26 | unsigned long flags; | 26 | unsigned long flags; |
27 | struct block_device *bdev; | 27 | struct block_device *bdev; |
28 | atomic_t refs; | 28 | atomic_t refs; |
29 | int in_tree; | 29 | unsigned int in_tree:1; |
30 | unsigned int compress_type:4; | ||
30 | }; | 31 | }; |
31 | 32 | ||
32 | struct extent_map_tree { | 33 | struct extent_map_tree { |
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 66836d85763b..05df688c96f4 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -224,6 +224,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, | |||
224 | 224 | ||
225 | split->bdev = em->bdev; | 225 | split->bdev = em->bdev; |
226 | split->flags = flags; | 226 | split->flags = flags; |
227 | split->compress_type = em->compress_type; | ||
227 | ret = add_extent_mapping(em_tree, split); | 228 | ret = add_extent_mapping(em_tree, split); |
228 | BUG_ON(ret); | 229 | BUG_ON(ret); |
229 | free_extent_map(split); | 230 | free_extent_map(split); |
@@ -238,6 +239,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, | |||
238 | split->len = em->start + em->len - (start + len); | 239 | split->len = em->start + em->len - (start + len); |
239 | split->bdev = em->bdev; | 240 | split->bdev = em->bdev; |
240 | split->flags = flags; | 241 | split->flags = flags; |
242 | split->compress_type = em->compress_type; | ||
241 | 243 | ||
242 | if (compressed) { | 244 | if (compressed) { |
243 | split->block_len = em->block_len; | 245 | split->block_len = em->block_len; |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 5f9194438f7c..ba563b2a5d6c 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -122,10 +122,10 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans, | |||
122 | size_t cur_size = size; | 122 | size_t cur_size = size; |
123 | size_t datasize; | 123 | size_t datasize; |
124 | unsigned long offset; | 124 | unsigned long offset; |
125 | int use_compress = 0; | 125 | int compress_type = BTRFS_COMPRESS_NONE; |
126 | 126 | ||
127 | if (compressed_size && compressed_pages) { | 127 | if (compressed_size && compressed_pages) { |
128 | use_compress = 1; | 128 | compress_type = root->fs_info->compress_type; |
129 | cur_size = compressed_size; | 129 | cur_size = compressed_size; |
130 | } | 130 | } |
131 | 131 | ||
@@ -159,7 +159,7 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans, | |||
159 | btrfs_set_file_extent_ram_bytes(leaf, ei, size); | 159 | btrfs_set_file_extent_ram_bytes(leaf, ei, size); |
160 | ptr = btrfs_file_extent_inline_start(ei); | 160 | ptr = btrfs_file_extent_inline_start(ei); |
161 | 161 | ||
162 | if (use_compress) { | 162 | if (compress_type != BTRFS_COMPRESS_NONE) { |
163 | struct page *cpage; | 163 | struct page *cpage; |
164 | int i = 0; | 164 | int i = 0; |
165 | while (compressed_size > 0) { | 165 | while (compressed_size > 0) { |
@@ -176,7 +176,7 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans, | |||
176 | compressed_size -= cur_size; | 176 | compressed_size -= cur_size; |
177 | } | 177 | } |
178 | btrfs_set_file_extent_compression(leaf, ei, | 178 | btrfs_set_file_extent_compression(leaf, ei, |
179 | BTRFS_COMPRESS_ZLIB); | 179 | compress_type); |
180 | } else { | 180 | } else { |
181 | page = find_get_page(inode->i_mapping, | 181 | page = find_get_page(inode->i_mapping, |
182 | start >> PAGE_CACHE_SHIFT); | 182 | start >> PAGE_CACHE_SHIFT); |
@@ -263,6 +263,7 @@ struct async_extent { | |||
263 | u64 compressed_size; | 263 | u64 compressed_size; |
264 | struct page **pages; | 264 | struct page **pages; |
265 | unsigned long nr_pages; | 265 | unsigned long nr_pages; |
266 | int compress_type; | ||
266 | struct list_head list; | 267 | struct list_head list; |
267 | }; | 268 | }; |
268 | 269 | ||
@@ -280,7 +281,8 @@ static noinline int add_async_extent(struct async_cow *cow, | |||
280 | u64 start, u64 ram_size, | 281 | u64 start, u64 ram_size, |
281 | u64 compressed_size, | 282 | u64 compressed_size, |
282 | struct page **pages, | 283 | struct page **pages, |
283 | unsigned long nr_pages) | 284 | unsigned long nr_pages, |
285 | int compress_type) | ||
284 | { | 286 | { |
285 | struct async_extent *async_extent; | 287 | struct async_extent *async_extent; |
286 | 288 | ||
@@ -290,6 +292,7 @@ static noinline int add_async_extent(struct async_cow *cow, | |||
290 | async_extent->compressed_size = compressed_size; | 292 | async_extent->compressed_size = compressed_size; |
291 | async_extent->pages = pages; | 293 | async_extent->pages = pages; |
292 | async_extent->nr_pages = nr_pages; | 294 | async_extent->nr_pages = nr_pages; |
295 | async_extent->compress_type = compress_type; | ||
293 | list_add_tail(&async_extent->list, &cow->extents); | 296 | list_add_tail(&async_extent->list, &cow->extents); |
294 | return 0; | 297 | return 0; |
295 | } | 298 | } |
@@ -332,6 +335,7 @@ static noinline int compress_file_range(struct inode *inode, | |||
332 | unsigned long max_uncompressed = 128 * 1024; | 335 | unsigned long max_uncompressed = 128 * 1024; |
333 | int i; | 336 | int i; |
334 | int will_compress; | 337 | int will_compress; |
338 | int compress_type = root->fs_info->compress_type; | ||
335 | 339 | ||
336 | actual_end = min_t(u64, isize, end + 1); | 340 | actual_end = min_t(u64, isize, end + 1); |
337 | again: | 341 | again: |
@@ -381,12 +385,16 @@ again: | |||
381 | WARN_ON(pages); | 385 | WARN_ON(pages); |
382 | pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS); | 386 | pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS); |
383 | 387 | ||
384 | ret = btrfs_zlib_compress_pages(inode->i_mapping, start, | 388 | if (BTRFS_I(inode)->force_compress) |
385 | total_compressed, pages, | 389 | compress_type = BTRFS_I(inode)->force_compress; |
386 | nr_pages, &nr_pages_ret, | 390 | |
387 | &total_in, | 391 | ret = btrfs_compress_pages(compress_type, |
388 | &total_compressed, | 392 | inode->i_mapping, start, |
389 | max_compressed); | 393 | total_compressed, pages, |
394 | nr_pages, &nr_pages_ret, | ||
395 | &total_in, | ||
396 | &total_compressed, | ||
397 | max_compressed); | ||
390 | 398 | ||
391 | if (!ret) { | 399 | if (!ret) { |
392 | unsigned long offset = total_compressed & | 400 | unsigned long offset = total_compressed & |
@@ -493,7 +501,8 @@ again: | |||
493 | * and will submit them to the elevator. | 501 | * and will submit them to the elevator. |
494 | */ | 502 | */ |
495 | add_async_extent(async_cow, start, num_bytes, | 503 | add_async_extent(async_cow, start, num_bytes, |
496 | total_compressed, pages, nr_pages_ret); | 504 | total_compressed, pages, nr_pages_ret, |
505 | compress_type); | ||
497 | 506 | ||
498 | if (start + num_bytes < end) { | 507 | if (start + num_bytes < end) { |
499 | start += num_bytes; | 508 | start += num_bytes; |
@@ -515,7 +524,8 @@ cleanup_and_bail_uncompressed: | |||
515 | __set_page_dirty_nobuffers(locked_page); | 524 | __set_page_dirty_nobuffers(locked_page); |
516 | /* unlocked later on in the async handlers */ | 525 | /* unlocked later on in the async handlers */ |
517 | } | 526 | } |
518 | add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0); | 527 | add_async_extent(async_cow, start, end - start + 1, |
528 | 0, NULL, 0, BTRFS_COMPRESS_NONE); | ||
519 | *num_added += 1; | 529 | *num_added += 1; |
520 | } | 530 | } |
521 | 531 | ||
@@ -640,6 +650,7 @@ retry: | |||
640 | em->block_start = ins.objectid; | 650 | em->block_start = ins.objectid; |
641 | em->block_len = ins.offset; | 651 | em->block_len = ins.offset; |
642 | em->bdev = root->fs_info->fs_devices->latest_bdev; | 652 | em->bdev = root->fs_info->fs_devices->latest_bdev; |
653 | em->compress_type = async_extent->compress_type; | ||
643 | set_bit(EXTENT_FLAG_PINNED, &em->flags); | 654 | set_bit(EXTENT_FLAG_PINNED, &em->flags); |
644 | set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); | 655 | set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); |
645 | 656 | ||
@@ -656,11 +667,13 @@ retry: | |||
656 | async_extent->ram_size - 1, 0); | 667 | async_extent->ram_size - 1, 0); |
657 | } | 668 | } |
658 | 669 | ||
659 | ret = btrfs_add_ordered_extent(inode, async_extent->start, | 670 | ret = btrfs_add_ordered_extent_compress(inode, |
660 | ins.objectid, | 671 | async_extent->start, |
661 | async_extent->ram_size, | 672 | ins.objectid, |
662 | ins.offset, | 673 | async_extent->ram_size, |
663 | BTRFS_ORDERED_COMPRESSED); | 674 | ins.offset, |
675 | BTRFS_ORDERED_COMPRESSED, | ||
676 | async_extent->compress_type); | ||
664 | BUG_ON(ret); | 677 | BUG_ON(ret); |
665 | 678 | ||
666 | /* | 679 | /* |
@@ -1670,7 +1683,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) | |||
1670 | struct btrfs_ordered_extent *ordered_extent = NULL; | 1683 | struct btrfs_ordered_extent *ordered_extent = NULL; |
1671 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; | 1684 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; |
1672 | struct extent_state *cached_state = NULL; | 1685 | struct extent_state *cached_state = NULL; |
1673 | int compressed = 0; | 1686 | int compress_type = 0; |
1674 | int ret; | 1687 | int ret; |
1675 | bool nolock = false; | 1688 | bool nolock = false; |
1676 | 1689 | ||
@@ -1711,9 +1724,9 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) | |||
1711 | trans->block_rsv = &root->fs_info->delalloc_block_rsv; | 1724 | trans->block_rsv = &root->fs_info->delalloc_block_rsv; |
1712 | 1725 | ||
1713 | if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) | 1726 | if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) |
1714 | compressed = 1; | 1727 | compress_type = ordered_extent->compress_type; |
1715 | if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { | 1728 | if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { |
1716 | BUG_ON(compressed); | 1729 | BUG_ON(compress_type); |
1717 | ret = btrfs_mark_extent_written(trans, inode, | 1730 | ret = btrfs_mark_extent_written(trans, inode, |
1718 | ordered_extent->file_offset, | 1731 | ordered_extent->file_offset, |
1719 | ordered_extent->file_offset + | 1732 | ordered_extent->file_offset + |
@@ -1727,7 +1740,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) | |||
1727 | ordered_extent->disk_len, | 1740 | ordered_extent->disk_len, |
1728 | ordered_extent->len, | 1741 | ordered_extent->len, |
1729 | ordered_extent->len, | 1742 | ordered_extent->len, |
1730 | compressed, 0, 0, | 1743 | compress_type, 0, 0, |
1731 | BTRFS_FILE_EXTENT_REG); | 1744 | BTRFS_FILE_EXTENT_REG); |
1732 | unpin_extent_cache(&BTRFS_I(inode)->extent_tree, | 1745 | unpin_extent_cache(&BTRFS_I(inode)->extent_tree, |
1733 | ordered_extent->file_offset, | 1746 | ordered_extent->file_offset, |
@@ -1829,6 +1842,8 @@ static int btrfs_io_failed_hook(struct bio *failed_bio, | |||
1829 | if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { | 1842 | if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { |
1830 | logical = em->block_start; | 1843 | logical = em->block_start; |
1831 | failrec->bio_flags = EXTENT_BIO_COMPRESSED; | 1844 | failrec->bio_flags = EXTENT_BIO_COMPRESSED; |
1845 | extent_set_compress_type(&failrec->bio_flags, | ||
1846 | em->compress_type); | ||
1832 | } | 1847 | } |
1833 | failrec->logical = logical; | 1848 | failrec->logical = logical; |
1834 | free_extent_map(em); | 1849 | free_extent_map(em); |
@@ -4930,8 +4945,10 @@ static noinline int uncompress_inline(struct btrfs_path *path, | |||
4930 | size_t max_size; | 4945 | size_t max_size; |
4931 | unsigned long inline_size; | 4946 | unsigned long inline_size; |
4932 | unsigned long ptr; | 4947 | unsigned long ptr; |
4948 | int compress_type; | ||
4933 | 4949 | ||
4934 | WARN_ON(pg_offset != 0); | 4950 | WARN_ON(pg_offset != 0); |
4951 | compress_type = btrfs_file_extent_compression(leaf, item); | ||
4935 | max_size = btrfs_file_extent_ram_bytes(leaf, item); | 4952 | max_size = btrfs_file_extent_ram_bytes(leaf, item); |
4936 | inline_size = btrfs_file_extent_inline_item_len(leaf, | 4953 | inline_size = btrfs_file_extent_inline_item_len(leaf, |
4937 | btrfs_item_nr(leaf, path->slots[0])); | 4954 | btrfs_item_nr(leaf, path->slots[0])); |
@@ -4941,8 +4958,8 @@ static noinline int uncompress_inline(struct btrfs_path *path, | |||
4941 | read_extent_buffer(leaf, tmp, ptr, inline_size); | 4958 | read_extent_buffer(leaf, tmp, ptr, inline_size); |
4942 | 4959 | ||
4943 | max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size); | 4960 | max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size); |
4944 | ret = btrfs_zlib_decompress(tmp, page, extent_offset, | 4961 | ret = btrfs_decompress(compress_type, tmp, page, |
4945 | inline_size, max_size); | 4962 | extent_offset, inline_size, max_size); |
4946 | if (ret) { | 4963 | if (ret) { |
4947 | char *kaddr = kmap_atomic(page, KM_USER0); | 4964 | char *kaddr = kmap_atomic(page, KM_USER0); |
4948 | unsigned long copy_size = min_t(u64, | 4965 | unsigned long copy_size = min_t(u64, |
@@ -4984,7 +5001,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, | |||
4984 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; | 5001 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; |
4985 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; | 5002 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; |
4986 | struct btrfs_trans_handle *trans = NULL; | 5003 | struct btrfs_trans_handle *trans = NULL; |
4987 | int compressed; | 5004 | int compress_type; |
4988 | 5005 | ||
4989 | again: | 5006 | again: |
4990 | read_lock(&em_tree->lock); | 5007 | read_lock(&em_tree->lock); |
@@ -5043,7 +5060,7 @@ again: | |||
5043 | 5060 | ||
5044 | found_type = btrfs_file_extent_type(leaf, item); | 5061 | found_type = btrfs_file_extent_type(leaf, item); |
5045 | extent_start = found_key.offset; | 5062 | extent_start = found_key.offset; |
5046 | compressed = btrfs_file_extent_compression(leaf, item); | 5063 | compress_type = btrfs_file_extent_compression(leaf, item); |
5047 | if (found_type == BTRFS_FILE_EXTENT_REG || | 5064 | if (found_type == BTRFS_FILE_EXTENT_REG || |
5048 | found_type == BTRFS_FILE_EXTENT_PREALLOC) { | 5065 | found_type == BTRFS_FILE_EXTENT_PREALLOC) { |
5049 | extent_end = extent_start + | 5066 | extent_end = extent_start + |
@@ -5089,8 +5106,9 @@ again: | |||
5089 | em->block_start = EXTENT_MAP_HOLE; | 5106 | em->block_start = EXTENT_MAP_HOLE; |
5090 | goto insert; | 5107 | goto insert; |
5091 | } | 5108 | } |
5092 | if (compressed) { | 5109 | if (compress_type != BTRFS_COMPRESS_NONE) { |
5093 | set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); | 5110 | set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); |
5111 | em->compress_type = compress_type; | ||
5094 | em->block_start = bytenr; | 5112 | em->block_start = bytenr; |
5095 | em->block_len = btrfs_file_extent_disk_num_bytes(leaf, | 5113 | em->block_len = btrfs_file_extent_disk_num_bytes(leaf, |
5096 | item); | 5114 | item); |
@@ -5124,12 +5142,14 @@ again: | |||
5124 | em->len = (copy_size + root->sectorsize - 1) & | 5142 | em->len = (copy_size + root->sectorsize - 1) & |
5125 | ~((u64)root->sectorsize - 1); | 5143 | ~((u64)root->sectorsize - 1); |
5126 | em->orig_start = EXTENT_MAP_INLINE; | 5144 | em->orig_start = EXTENT_MAP_INLINE; |
5127 | if (compressed) | 5145 | if (compress_type) { |
5128 | set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); | 5146 | set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); |
5147 | em->compress_type = compress_type; | ||
5148 | } | ||
5129 | ptr = btrfs_file_extent_inline_start(item) + extent_offset; | 5149 | ptr = btrfs_file_extent_inline_start(item) + extent_offset; |
5130 | if (create == 0 && !PageUptodate(page)) { | 5150 | if (create == 0 && !PageUptodate(page)) { |
5131 | if (btrfs_file_extent_compression(leaf, item) == | 5151 | if (btrfs_file_extent_compression(leaf, item) != |
5132 | BTRFS_COMPRESS_ZLIB) { | 5152 | BTRFS_COMPRESS_NONE) { |
5133 | ret = uncompress_inline(path, inode, page, | 5153 | ret = uncompress_inline(path, inode, page, |
5134 | pg_offset, | 5154 | pg_offset, |
5135 | extent_offset, item); | 5155 | extent_offset, item); |
@@ -6479,7 +6499,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb) | |||
6479 | ei->ordered_data_close = 0; | 6499 | ei->ordered_data_close = 0; |
6480 | ei->orphan_meta_reserved = 0; | 6500 | ei->orphan_meta_reserved = 0; |
6481 | ei->dummy_inode = 0; | 6501 | ei->dummy_inode = 0; |
6482 | ei->force_compress = 0; | 6502 | ei->force_compress = BTRFS_COMPRESS_NONE; |
6483 | 6503 | ||
6484 | inode = &ei->vfs_inode; | 6504 | inode = &ei->vfs_inode; |
6485 | extent_map_tree_init(&ei->extent_tree, GFP_NOFS); | 6505 | extent_map_tree_init(&ei->extent_tree, GFP_NOFS); |
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index f87552a1d7ea..8cb86d4d763c 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -683,7 +683,7 @@ static int btrfs_defrag_file(struct file *file, | |||
683 | total_read++; | 683 | total_read++; |
684 | mutex_lock(&inode->i_mutex); | 684 | mutex_lock(&inode->i_mutex); |
685 | if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) | 685 | if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) |
686 | BTRFS_I(inode)->force_compress = 1; | 686 | BTRFS_I(inode)->force_compress = BTRFS_COMPRESS_ZLIB; |
687 | 687 | ||
688 | ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); | 688 | ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); |
689 | if (ret) | 689 | if (ret) |
@@ -781,7 +781,7 @@ loop_unlock: | |||
781 | atomic_dec(&root->fs_info->async_submit_draining); | 781 | atomic_dec(&root->fs_info->async_submit_draining); |
782 | 782 | ||
783 | mutex_lock(&inode->i_mutex); | 783 | mutex_lock(&inode->i_mutex); |
784 | BTRFS_I(inode)->force_compress = 0; | 784 | BTRFS_I(inode)->force_compress = BTRFS_COMPRESS_NONE; |
785 | mutex_unlock(&inode->i_mutex); | 785 | mutex_unlock(&inode->i_mutex); |
786 | } | 786 | } |
787 | 787 | ||
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index ae7737e352c9..2b61e1ddcd99 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -172,7 +172,7 @@ static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree, | |||
172 | */ | 172 | */ |
173 | static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, | 173 | static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, |
174 | u64 start, u64 len, u64 disk_len, | 174 | u64 start, u64 len, u64 disk_len, |
175 | int type, int dio) | 175 | int type, int dio, int compress_type) |
176 | { | 176 | { |
177 | struct btrfs_ordered_inode_tree *tree; | 177 | struct btrfs_ordered_inode_tree *tree; |
178 | struct rb_node *node; | 178 | struct rb_node *node; |
@@ -189,6 +189,7 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, | |||
189 | entry->disk_len = disk_len; | 189 | entry->disk_len = disk_len; |
190 | entry->bytes_left = len; | 190 | entry->bytes_left = len; |
191 | entry->inode = inode; | 191 | entry->inode = inode; |
192 | entry->compress_type = compress_type; | ||
192 | if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE) | 193 | if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE) |
193 | set_bit(type, &entry->flags); | 194 | set_bit(type, &entry->flags); |
194 | 195 | ||
@@ -220,14 +221,25 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, | |||
220 | u64 start, u64 len, u64 disk_len, int type) | 221 | u64 start, u64 len, u64 disk_len, int type) |
221 | { | 222 | { |
222 | return __btrfs_add_ordered_extent(inode, file_offset, start, len, | 223 | return __btrfs_add_ordered_extent(inode, file_offset, start, len, |
223 | disk_len, type, 0); | 224 | disk_len, type, 0, |
225 | BTRFS_COMPRESS_NONE); | ||
224 | } | 226 | } |
225 | 227 | ||
226 | int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset, | 228 | int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset, |
227 | u64 start, u64 len, u64 disk_len, int type) | 229 | u64 start, u64 len, u64 disk_len, int type) |
228 | { | 230 | { |
229 | return __btrfs_add_ordered_extent(inode, file_offset, start, len, | 231 | return __btrfs_add_ordered_extent(inode, file_offset, start, len, |
230 | disk_len, type, 1); | 232 | disk_len, type, 1, |
233 | BTRFS_COMPRESS_NONE); | ||
234 | } | ||
235 | |||
236 | int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset, | ||
237 | u64 start, u64 len, u64 disk_len, | ||
238 | int type, int compress_type) | ||
239 | { | ||
240 | return __btrfs_add_ordered_extent(inode, file_offset, start, len, | ||
241 | disk_len, type, 0, | ||
242 | compress_type); | ||
231 | } | 243 | } |
232 | 244 | ||
233 | /* | 245 | /* |
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 61dca83119dd..ff1f69aa1883 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -68,7 +68,7 @@ struct btrfs_ordered_sum { | |||
68 | 68 | ||
69 | #define BTRFS_ORDERED_NOCOW 2 /* set when we want to write in place */ | 69 | #define BTRFS_ORDERED_NOCOW 2 /* set when we want to write in place */ |
70 | 70 | ||
71 | #define BTRFS_ORDERED_COMPRESSED 3 /* writing a compressed extent */ | 71 | #define BTRFS_ORDERED_COMPRESSED 3 /* writing a zlib compressed extent */ |
72 | 72 | ||
73 | #define BTRFS_ORDERED_PREALLOC 4 /* set when writing to prealloced extent */ | 73 | #define BTRFS_ORDERED_PREALLOC 4 /* set when writing to prealloced extent */ |
74 | 74 | ||
@@ -93,6 +93,9 @@ struct btrfs_ordered_extent { | |||
93 | /* flags (described above) */ | 93 | /* flags (described above) */ |
94 | unsigned long flags; | 94 | unsigned long flags; |
95 | 95 | ||
96 | /* compression algorithm */ | ||
97 | int compress_type; | ||
98 | |||
96 | /* reference count */ | 99 | /* reference count */ |
97 | atomic_t refs; | 100 | atomic_t refs; |
98 | 101 | ||
@@ -148,6 +151,9 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, | |||
148 | u64 start, u64 len, u64 disk_len, int type); | 151 | u64 start, u64 len, u64 disk_len, int type); |
149 | int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset, | 152 | int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset, |
150 | u64 start, u64 len, u64 disk_len, int type); | 153 | u64 start, u64 len, u64 disk_len, int type); |
154 | int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset, | ||
155 | u64 start, u64 len, u64 disk_len, | ||
156 | int type, int compress_type); | ||
151 | int btrfs_add_ordered_sum(struct inode *inode, | 157 | int btrfs_add_ordered_sum(struct inode *inode, |
152 | struct btrfs_ordered_extent *entry, | 158 | struct btrfs_ordered_extent *entry, |
153 | struct btrfs_ordered_sum *sum); | 159 | struct btrfs_ordered_sum *sum); |
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 61bd79abb805..f348f2b93164 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -69,9 +69,9 @@ enum { | |||
69 | Opt_degraded, Opt_subvol, Opt_subvolid, Opt_device, Opt_nodatasum, | 69 | Opt_degraded, Opt_subvol, Opt_subvolid, Opt_device, Opt_nodatasum, |
70 | Opt_nodatacow, Opt_max_inline, Opt_alloc_start, Opt_nobarrier, Opt_ssd, | 70 | Opt_nodatacow, Opt_max_inline, Opt_alloc_start, Opt_nobarrier, Opt_ssd, |
71 | Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, Opt_compress, | 71 | Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, Opt_compress, |
72 | Opt_compress_force, Opt_notreelog, Opt_ratio, Opt_flushoncommit, | 72 | Opt_compress_type, Opt_compress_force, Opt_compress_force_type, |
73 | Opt_discard, Opt_space_cache, Opt_clear_cache, Opt_err, | 73 | Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, |
74 | Opt_user_subvol_rm_allowed, | 74 | Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, Opt_err, |
75 | }; | 75 | }; |
76 | 76 | ||
77 | static match_table_t tokens = { | 77 | static match_table_t tokens = { |
@@ -86,7 +86,9 @@ static match_table_t tokens = { | |||
86 | {Opt_alloc_start, "alloc_start=%s"}, | 86 | {Opt_alloc_start, "alloc_start=%s"}, |
87 | {Opt_thread_pool, "thread_pool=%d"}, | 87 | {Opt_thread_pool, "thread_pool=%d"}, |
88 | {Opt_compress, "compress"}, | 88 | {Opt_compress, "compress"}, |
89 | {Opt_compress_type, "compress=%s"}, | ||
89 | {Opt_compress_force, "compress-force"}, | 90 | {Opt_compress_force, "compress-force"}, |
91 | {Opt_compress_force_type, "compress-force=%s"}, | ||
90 | {Opt_ssd, "ssd"}, | 92 | {Opt_ssd, "ssd"}, |
91 | {Opt_ssd_spread, "ssd_spread"}, | 93 | {Opt_ssd_spread, "ssd_spread"}, |
92 | {Opt_nossd, "nossd"}, | 94 | {Opt_nossd, "nossd"}, |
@@ -112,6 +114,8 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) | |||
112 | char *p, *num, *orig; | 114 | char *p, *num, *orig; |
113 | int intarg; | 115 | int intarg; |
114 | int ret = 0; | 116 | int ret = 0; |
117 | char *compress_type; | ||
118 | bool compress_force = false; | ||
115 | 119 | ||
116 | if (!options) | 120 | if (!options) |
117 | return 0; | 121 | return 0; |
@@ -154,14 +158,29 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) | |||
154 | btrfs_set_opt(info->mount_opt, NODATACOW); | 158 | btrfs_set_opt(info->mount_opt, NODATACOW); |
155 | btrfs_set_opt(info->mount_opt, NODATASUM); | 159 | btrfs_set_opt(info->mount_opt, NODATASUM); |
156 | break; | 160 | break; |
157 | case Opt_compress: | ||
158 | printk(KERN_INFO "btrfs: use compression\n"); | ||
159 | btrfs_set_opt(info->mount_opt, COMPRESS); | ||
160 | break; | ||
161 | case Opt_compress_force: | 161 | case Opt_compress_force: |
162 | printk(KERN_INFO "btrfs: forcing compression\n"); | 162 | case Opt_compress_force_type: |
163 | btrfs_set_opt(info->mount_opt, FORCE_COMPRESS); | 163 | compress_force = true; |
164 | case Opt_compress: | ||
165 | case Opt_compress_type: | ||
166 | if (token == Opt_compress || | ||
167 | token == Opt_compress_force || | ||
168 | strcmp(args[0].from, "zlib") == 0) { | ||
169 | compress_type = "zlib"; | ||
170 | info->compress_type = BTRFS_COMPRESS_ZLIB; | ||
171 | } else { | ||
172 | ret = -EINVAL; | ||
173 | goto out; | ||
174 | } | ||
175 | |||
164 | btrfs_set_opt(info->mount_opt, COMPRESS); | 176 | btrfs_set_opt(info->mount_opt, COMPRESS); |
177 | if (compress_force) { | ||
178 | btrfs_set_opt(info->mount_opt, FORCE_COMPRESS); | ||
179 | pr_info("btrfs: force %s compression\n", | ||
180 | compress_type); | ||
181 | } else | ||
182 | pr_info("btrfs: use %s compression\n", | ||
183 | compress_type); | ||
165 | break; | 184 | break; |
166 | case Opt_ssd: | 185 | case Opt_ssd: |
167 | printk(KERN_INFO "btrfs: use ssd allocation scheme\n"); | 186 | printk(KERN_INFO "btrfs: use ssd allocation scheme\n"); |
@@ -898,10 +917,14 @@ static int __init init_btrfs_fs(void) | |||
898 | if (err) | 917 | if (err) |
899 | return err; | 918 | return err; |
900 | 919 | ||
901 | err = btrfs_init_cachep(); | 920 | err = btrfs_init_compress(); |
902 | if (err) | 921 | if (err) |
903 | goto free_sysfs; | 922 | goto free_sysfs; |
904 | 923 | ||
924 | err = btrfs_init_cachep(); | ||
925 | if (err) | ||
926 | goto free_compress; | ||
927 | |||
905 | err = extent_io_init(); | 928 | err = extent_io_init(); |
906 | if (err) | 929 | if (err) |
907 | goto free_cachep; | 930 | goto free_cachep; |
@@ -929,6 +952,8 @@ free_extent_io: | |||
929 | extent_io_exit(); | 952 | extent_io_exit(); |
930 | free_cachep: | 953 | free_cachep: |
931 | btrfs_destroy_cachep(); | 954 | btrfs_destroy_cachep(); |
955 | free_compress: | ||
956 | btrfs_exit_compress(); | ||
932 | free_sysfs: | 957 | free_sysfs: |
933 | btrfs_exit_sysfs(); | 958 | btrfs_exit_sysfs(); |
934 | return err; | 959 | return err; |
@@ -943,7 +968,7 @@ static void __exit exit_btrfs_fs(void) | |||
943 | unregister_filesystem(&btrfs_fs_type); | 968 | unregister_filesystem(&btrfs_fs_type); |
944 | btrfs_exit_sysfs(); | 969 | btrfs_exit_sysfs(); |
945 | btrfs_cleanup_fs_uuids(); | 970 | btrfs_cleanup_fs_uuids(); |
946 | btrfs_zlib_exit(); | 971 | btrfs_exit_compress(); |
947 | } | 972 | } |
948 | 973 | ||
949 | module_init(init_btrfs_fs) | 974 | module_init(init_btrfs_fs) |
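In terms of usage, plain compress and compress-force keep their old meaning (and now report which algorithm they selected), while compress=zlib and compress-force=zlib name the algorithm explicitly; any other algorithm string fails option parsing with -EINVAL. The new btrfs_init_compress()/btrfs_exit_compress() pair also replaces btrfs_zlib_exit() in module init/exit, so the per-type workspace bookkeeping is initialised before any mount can trigger compression.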
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index b01558661e3b..9a3e693917f2 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -32,15 +32,6 @@ | |||
32 | #include <linux/bio.h> | 32 | #include <linux/bio.h> |
33 | #include "compression.h" | 33 | #include "compression.h" |
34 | 34 | ||
35 | /* Plan: call deflate() with avail_in == *sourcelen, | ||
36 | avail_out = *dstlen - 12 and flush == Z_FINISH. | ||
37 | If it doesn't manage to finish, call it again with | ||
38 | avail_in == 0 and avail_out set to the remaining 12 | ||
39 | bytes for it to clean up. | ||
40 | Q: Is 12 bytes sufficient? | ||
41 | */ | ||
42 | #define STREAM_END_SPACE 12 | ||
43 | |||
44 | struct workspace { | 35 | struct workspace { |
45 | z_stream inf_strm; | 36 | z_stream inf_strm; |
46 | z_stream def_strm; | 37 | z_stream def_strm; |
@@ -48,155 +39,51 @@ struct workspace { | |||
48 | struct list_head list; | 39 | struct list_head list; |
49 | }; | 40 | }; |
50 | 41 | ||
51 | static LIST_HEAD(idle_workspace); | 42 | static void zlib_free_workspace(struct list_head *ws) |
52 | static DEFINE_SPINLOCK(workspace_lock); | 43 | { |
53 | static unsigned long num_workspace; | 44 | struct workspace *workspace = list_entry(ws, struct workspace, list); |
54 | static atomic_t alloc_workspace = ATOMIC_INIT(0); | ||
55 | static DECLARE_WAIT_QUEUE_HEAD(workspace_wait); | ||
56 | 45 | ||
57 | /* | 46 | vfree(workspace->def_strm.workspace); |
58 | * this finds an available zlib workspace or allocates a new one | 47 | vfree(workspace->inf_strm.workspace); |
59 | * NULL or an ERR_PTR is returned if things go bad. | 48 | kfree(workspace->buf); |
60 | */ | 49 | kfree(workspace); |
61 | static struct workspace *find_zlib_workspace(void) | 50 | } |
51 | |||
52 | static struct list_head *zlib_alloc_workspace(void) | ||
62 | { | 53 | { |
63 | struct workspace *workspace; | 54 | struct workspace *workspace; |
64 | int ret; | ||
65 | int cpus = num_online_cpus(); | ||
66 | |||
67 | again: | ||
68 | spin_lock(&workspace_lock); | ||
69 | if (!list_empty(&idle_workspace)) { | ||
70 | workspace = list_entry(idle_workspace.next, struct workspace, | ||
71 | list); | ||
72 | list_del(&workspace->list); | ||
73 | num_workspace--; | ||
74 | spin_unlock(&workspace_lock); | ||
75 | return workspace; | ||
76 | |||
77 | } | ||
78 | if (atomic_read(&alloc_workspace) > cpus) { | ||
79 | DEFINE_WAIT(wait); | ||
80 | |||
81 | spin_unlock(&workspace_lock); | ||
82 | prepare_to_wait(&workspace_wait, &wait, TASK_UNINTERRUPTIBLE); | ||
83 | if (atomic_read(&alloc_workspace) > cpus && !num_workspace) | ||
84 | schedule(); | ||
85 | finish_wait(&workspace_wait, &wait); | ||
86 | goto again; | ||
87 | } | ||
88 | atomic_inc(&alloc_workspace); | ||
89 | spin_unlock(&workspace_lock); | ||
90 | 55 | ||
91 | workspace = kzalloc(sizeof(*workspace), GFP_NOFS); | 56 | workspace = kzalloc(sizeof(*workspace), GFP_NOFS); |
92 | if (!workspace) { | 57 | if (!workspace) |
93 | ret = -ENOMEM; | 58 | return ERR_PTR(-ENOMEM); |
94 | goto fail; | ||
95 | } | ||
96 | 59 | ||
97 | workspace->def_strm.workspace = vmalloc(zlib_deflate_workspacesize()); | 60 | workspace->def_strm.workspace = vmalloc(zlib_deflate_workspacesize()); |
98 | if (!workspace->def_strm.workspace) { | ||
99 | ret = -ENOMEM; | ||
100 | goto fail; | ||
101 | } | ||
102 | workspace->inf_strm.workspace = vmalloc(zlib_inflate_workspacesize()); | 61 | workspace->inf_strm.workspace = vmalloc(zlib_inflate_workspacesize()); |
103 | if (!workspace->inf_strm.workspace) { | ||
104 | ret = -ENOMEM; | ||
105 | goto fail_inflate; | ||
106 | } | ||
107 | workspace->buf = kmalloc(PAGE_CACHE_SIZE, GFP_NOFS); | 62 | workspace->buf = kmalloc(PAGE_CACHE_SIZE, GFP_NOFS); |
108 | if (!workspace->buf) { | 63 | if (!workspace->def_strm.workspace || |
109 | ret = -ENOMEM; | 64 | !workspace->inf_strm.workspace || !workspace->buf) |
110 | goto fail_kmalloc; | 65 | goto fail; |
111 | } | ||
112 | return workspace; | ||
113 | |||
114 | fail_kmalloc: | ||
115 | vfree(workspace->inf_strm.workspace); | ||
116 | fail_inflate: | ||
117 | vfree(workspace->def_strm.workspace); | ||
118 | fail: | ||
119 | kfree(workspace); | ||
120 | atomic_dec(&alloc_workspace); | ||
121 | wake_up(&workspace_wait); | ||
122 | return ERR_PTR(ret); | ||
123 | } | ||
124 | |||
125 | /* | ||
126 | * put a workspace struct back on the list or free it if we have enough | ||
127 | * idle ones sitting around | ||
128 | */ | ||
129 | static int free_workspace(struct workspace *workspace) | ||
130 | { | ||
131 | spin_lock(&workspace_lock); | ||
132 | if (num_workspace < num_online_cpus()) { | ||
133 | list_add_tail(&workspace->list, &idle_workspace); | ||
134 | num_workspace++; | ||
135 | spin_unlock(&workspace_lock); | ||
136 | if (waitqueue_active(&workspace_wait)) | ||
137 | wake_up(&workspace_wait); | ||
138 | return 0; | ||
139 | } | ||
140 | spin_unlock(&workspace_lock); | ||
141 | vfree(workspace->def_strm.workspace); | ||
142 | vfree(workspace->inf_strm.workspace); | ||
143 | kfree(workspace->buf); | ||
144 | kfree(workspace); | ||
145 | 66 | ||
146 | atomic_dec(&alloc_workspace); | 67 | INIT_LIST_HEAD(&workspace->list); |
147 | if (waitqueue_active(&workspace_wait)) | ||
148 | wake_up(&workspace_wait); | ||
149 | return 0; | ||
150 | } | ||
151 | 68 | ||
152 | /* | 69 | return &workspace->list; |
153 | * cleanup function for module exit | 70 | fail: |
154 | */ | 71 | zlib_free_workspace(&workspace->list); |
155 | static void free_workspaces(void) | 72 | return ERR_PTR(-ENOMEM); |
156 | { | ||
157 | struct workspace *workspace; | ||
158 | while (!list_empty(&idle_workspace)) { | ||
159 | workspace = list_entry(idle_workspace.next, struct workspace, | ||
160 | list); | ||
161 | list_del(&workspace->list); | ||
162 | vfree(workspace->def_strm.workspace); | ||
163 | vfree(workspace->inf_strm.workspace); | ||
164 | kfree(workspace->buf); | ||
165 | kfree(workspace); | ||
166 | atomic_dec(&alloc_workspace); | ||
167 | } | ||
168 | } | 73 | } |
169 | 74 | ||
170 | /* | 75 | static int zlib_compress_pages(struct list_head *ws, |
171 | * given an address space and start/len, compress the bytes. | 76 | struct address_space *mapping, |
172 | * | 77 | u64 start, unsigned long len, |
173 | * pages are allocated to hold the compressed result and stored | 78 | struct page **pages, |
174 | * in 'pages' | 79 | unsigned long nr_dest_pages, |
175 | * | 80 | unsigned long *out_pages, |
176 | * out_pages is used to return the number of pages allocated. There | 81 | unsigned long *total_in, |
177 | * may be pages allocated even if we return an error | 82 | unsigned long *total_out, |
178 | * | 83 | unsigned long max_out) |
179 | * total_in is used to return the number of bytes actually read. It | ||
180 | * may be smaller then len if we had to exit early because we | ||
181 | * ran out of room in the pages array or because we cross the | ||
182 | * max_out threshold. | ||
183 | * | ||
184 | * total_out is used to return the total number of compressed bytes | ||
185 | * | ||
186 | * max_out tells us the max number of bytes that we're allowed to | ||
187 | * stuff into pages | ||
188 | */ | ||
189 | int btrfs_zlib_compress_pages(struct address_space *mapping, | ||
190 | u64 start, unsigned long len, | ||
191 | struct page **pages, | ||
192 | unsigned long nr_dest_pages, | ||
193 | unsigned long *out_pages, | ||
194 | unsigned long *total_in, | ||
195 | unsigned long *total_out, | ||
196 | unsigned long max_out) | ||
197 | { | 84 | { |
85 | struct workspace *workspace = list_entry(ws, struct workspace, list); | ||
198 | int ret; | 86 | int ret; |
199 | struct workspace *workspace; | ||
200 | char *data_in; | 87 | char *data_in; |
201 | char *cpage_out; | 88 | char *cpage_out; |
202 | int nr_pages = 0; | 89 | int nr_pages = 0; |
@@ -208,10 +95,6 @@ int btrfs_zlib_compress_pages(struct address_space *mapping, | |||
208 | *total_out = 0; | 95 | *total_out = 0; |
209 | *total_in = 0; | 96 | *total_in = 0; |
210 | 97 | ||
211 | workspace = find_zlib_workspace(); | ||
212 | if (IS_ERR(workspace)) | ||
213 | return -1; | ||
214 | |||
215 | if (Z_OK != zlib_deflateInit(&workspace->def_strm, 3)) { | 98 | if (Z_OK != zlib_deflateInit(&workspace->def_strm, 3)) { |
216 | printk(KERN_WARNING "deflateInit failed\n"); | 99 | printk(KERN_WARNING "deflateInit failed\n"); |
217 | ret = -1; | 100 | ret = -1; |
@@ -325,35 +208,18 @@ out: | |||
325 | kunmap(in_page); | 208 | kunmap(in_page); |
326 | page_cache_release(in_page); | 209 | page_cache_release(in_page); |
327 | } | 210 | } |
328 | free_workspace(workspace); | ||
329 | return ret; | 211 | return ret; |
330 | } | 212 | } |
331 | 213 | ||
332 | /* | 214 | static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in, |
333 | * pages_in is an array of pages with compressed data. | 215 | u64 disk_start, |
334 | * | 216 | struct bio_vec *bvec, |
335 | * disk_start is the starting logical offset of this array in the file | 217 | int vcnt, |
336 | * | 218 | size_t srclen) |
337 | * bvec is a bio_vec of pages from the file that we want to decompress into | ||
338 | * | ||
339 | * vcnt is the count of pages in the biovec | ||
340 | * | ||
341 | * srclen is the number of bytes in pages_in | ||
342 | * | ||
343 | * The basic idea is that we have a bio that was created by readpages. | ||
344 | * The pages in the bio are for the uncompressed data, and they may not | ||
345 | * be contiguous. They all correspond to the range of bytes covered by | ||
346 | * the compressed extent. | ||
347 | */ | ||
348 | int btrfs_zlib_decompress_biovec(struct page **pages_in, | ||
349 | u64 disk_start, | ||
350 | struct bio_vec *bvec, | ||
351 | int vcnt, | ||
352 | size_t srclen) | ||
353 | { | 219 | { |
220 | struct workspace *workspace = list_entry(ws, struct workspace, list); | ||
354 | int ret = 0; | 221 | int ret = 0; |
355 | int wbits = MAX_WBITS; | 222 | int wbits = MAX_WBITS; |
356 | struct workspace *workspace; | ||
357 | char *data_in; | 223 | char *data_in; |
358 | size_t total_out = 0; | 224 | size_t total_out = 0; |
359 | unsigned long page_bytes_left; | 225 | unsigned long page_bytes_left; |
@@ -371,10 +237,6 @@ int btrfs_zlib_decompress_biovec(struct page **pages_in, | |||
371 | unsigned long current_buf_start; | 237 | unsigned long current_buf_start; |
372 | char *kaddr; | 238 | char *kaddr; |
373 | 239 | ||
374 | workspace = find_zlib_workspace(); | ||
375 | if (IS_ERR(workspace)) | ||
376 | return -ENOMEM; | ||
377 | |||
378 | data_in = kmap(pages_in[page_in_index]); | 240 | data_in = kmap(pages_in[page_in_index]); |
379 | workspace->inf_strm.next_in = data_in; | 241 | workspace->inf_strm.next_in = data_in; |
380 | workspace->inf_strm.avail_in = min_t(size_t, srclen, PAGE_CACHE_SIZE); | 242 | workspace->inf_strm.avail_in = min_t(size_t, srclen, PAGE_CACHE_SIZE); |
@@ -400,8 +262,7 @@ int btrfs_zlib_decompress_biovec(struct page **pages_in, | |||
400 | 262 | ||
401 | if (Z_OK != zlib_inflateInit2(&workspace->inf_strm, wbits)) { | 263 | if (Z_OK != zlib_inflateInit2(&workspace->inf_strm, wbits)) { |
402 | printk(KERN_WARNING "inflateInit failed\n"); | 264 | printk(KERN_WARNING "inflateInit failed\n"); |
403 | ret = -1; | 265 | return -1; |
404 | goto out; | ||
405 | } | 266 | } |
406 | while (workspace->inf_strm.total_in < srclen) { | 267 | while (workspace->inf_strm.total_in < srclen) { |
407 | ret = zlib_inflate(&workspace->inf_strm, Z_NO_FLUSH); | 268 | ret = zlib_inflate(&workspace->inf_strm, Z_NO_FLUSH); |
@@ -527,35 +388,21 @@ done: | |||
527 | zlib_inflateEnd(&workspace->inf_strm); | 388 | zlib_inflateEnd(&workspace->inf_strm); |
528 | if (data_in) | 389 | if (data_in) |
529 | kunmap(pages_in[page_in_index]); | 390 | kunmap(pages_in[page_in_index]); |
530 | out: | ||
531 | free_workspace(workspace); | ||
532 | return ret; | 391 | return ret; |
533 | } | 392 | } |
534 | 393 | ||
535 | /* | 394 | static int zlib_decompress(struct list_head *ws, unsigned char *data_in, |
536 | * a less complex decompression routine. Our compressed data fits in a | 395 | struct page *dest_page, |
537 | * single page, and we want to read a single page out of it. | 396 | unsigned long start_byte, |
538 | * start_byte tells us the offset into the compressed data we're interested in | 397 | size_t srclen, size_t destlen) |
539 | */ | ||
540 | int btrfs_zlib_decompress(unsigned char *data_in, | ||
541 | struct page *dest_page, | ||
542 | unsigned long start_byte, | ||
543 | size_t srclen, size_t destlen) | ||
544 | { | 398 | { |
399 | struct workspace *workspace = list_entry(ws, struct workspace, list); | ||
545 | int ret = 0; | 400 | int ret = 0; |
546 | int wbits = MAX_WBITS; | 401 | int wbits = MAX_WBITS; |
547 | struct workspace *workspace; | ||
548 | unsigned long bytes_left = destlen; | 402 | unsigned long bytes_left = destlen; |
549 | unsigned long total_out = 0; | 403 | unsigned long total_out = 0; |
550 | char *kaddr; | 404 | char *kaddr; |
551 | 405 | ||
552 | if (destlen > PAGE_CACHE_SIZE) | ||
553 | return -ENOMEM; | ||
554 | |||
555 | workspace = find_zlib_workspace(); | ||
556 | if (IS_ERR(workspace)) | ||
557 | return -ENOMEM; | ||
558 | |||
559 | workspace->inf_strm.next_in = data_in; | 406 | workspace->inf_strm.next_in = data_in; |
560 | workspace->inf_strm.avail_in = srclen; | 407 | workspace->inf_strm.avail_in = srclen; |
561 | workspace->inf_strm.total_in = 0; | 408 | workspace->inf_strm.total_in = 0; |
@@ -576,8 +423,7 @@ int btrfs_zlib_decompress(unsigned char *data_in, | |||
576 | 423 | ||
577 | if (Z_OK != zlib_inflateInit2(&workspace->inf_strm, wbits)) { | 424 | if (Z_OK != zlib_inflateInit2(&workspace->inf_strm, wbits)) { |
578 | printk(KERN_WARNING "inflateInit failed\n"); | 425 | printk(KERN_WARNING "inflateInit failed\n"); |
579 | ret = -1; | 426 | return -1; |
580 | goto out; | ||
581 | } | 427 | } |
582 | 428 | ||
583 | while (bytes_left > 0) { | 429 | while (bytes_left > 0) { |
@@ -627,12 +473,13 @@ next: | |||
627 | ret = 0; | 473 | ret = 0; |
628 | 474 | ||
629 | zlib_inflateEnd(&workspace->inf_strm); | 475 | zlib_inflateEnd(&workspace->inf_strm); |
630 | out: | ||
631 | free_workspace(workspace); | ||
632 | return ret; | 476 | return ret; |
633 | } | 477 | } |
634 | 478 | ||
635 | void btrfs_zlib_exit(void) | 479 | struct btrfs_compress_op btrfs_zlib_compress = { |
636 | { | 480 | .alloc_workspace = zlib_alloc_workspace, |
637 | free_workspaces(); | 481 | .free_workspace = zlib_free_workspace, |
638 | } | 482 | .compress_pages = zlib_compress_pages, |
483 | .decompress_biovec = zlib_decompress_biovec, | ||
484 | .decompress = zlib_decompress, | ||
485 | }; | ||