author     Li Zefan <lizf@cn.fujitsu.com>  2010-12-17 01:21:50 -0500
committer  Li Zefan <lizf@cn.fujitsu.com>  2010-12-22 10:15:45 -0500
commit     261507a02ccba9afda919852263b6bc1581ce1ef (patch)
tree       c16bc657ff4e29a87042ceb379487f24dff01035 /fs/btrfs/compression.c
parent     4b72029dc3fd6ba7dc45ccd1cf0aa0ebfa209bd3 (diff)
btrfs: Allow to add new compression algorithm
Make the code aware of the compression type, instead of always assuming zlib compression. Also make the zlib workspace functions common code for all compression types.

Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
Diffstat (limited to 'fs/btrfs/compression.c')
-rw-r--r--  fs/btrfs/compression.c  236
1 file changed, 231 insertions(+), 5 deletions(-)
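
The commit routes compression through a small per-algorithm hook table instead of calling the zlib helpers directly. As a rough sketch only: the struct below is inferred from the call sites added in this diff (alloc_workspace, free_workspace, compress_pages, decompress_biovec, decompress); the authoritative definition lives in fs/btrfs/compression.h, which is outside this diffstat, so the exact field order and types here are assumptions. Each compression type indexes the table with (type - 1), so zlib, as the first registered type, occupies slot 0.

/*
 * Approximate shape of the per-algorithm hooks, reconstructed from the
 * call sites in this diff; compression.h (not shown here) holds the
 * real definition.
 */
struct btrfs_compress_op {
	struct list_head *(*alloc_workspace)(void);

	void (*free_workspace)(struct list_head *workspace);

	int (*compress_pages)(struct list_head *workspace,
			      struct address_space *mapping,
			      u64 start, unsigned long len,
			      struct page **pages,
			      unsigned long nr_dest_pages,
			      unsigned long *out_pages,
			      unsigned long *total_in,
			      unsigned long *total_out,
			      unsigned long max_out);

	int (*decompress_biovec)(struct list_head *workspace,
				 struct page **pages_in,
				 u64 disk_start,
				 struct bio_vec *bvec,
				 int vcnt, size_t srclen);

	int (*decompress)(struct list_head *workspace,
			  unsigned char *data_in,
			  struct page *dest_page,
			  unsigned long start_byte,
			  size_t srclen, size_t destlen);
};
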
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index b50bc4bd5c56..6638c9877720 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -62,6 +62,9 @@ struct compressed_bio {
 	/* number of bytes on disk */
 	unsigned long compressed_len;
 
+	/* the compression algorithm for this bio */
+	int compress_type;
+
 	/* number of compressed pages in the array */
 	unsigned long nr_pages;
 
@@ -173,11 +176,12 @@ static void end_compressed_bio_read(struct bio *bio, int err)
 	/* ok, we're the last bio for this extent, lets start
 	 * the decompression.
 	 */
-	ret = btrfs_zlib_decompress_biovec(cb->compressed_pages,
-					cb->start,
-					cb->orig_bio->bi_io_vec,
-					cb->orig_bio->bi_vcnt,
-					cb->compressed_len);
+	ret = btrfs_decompress_biovec(cb->compress_type,
+				      cb->compressed_pages,
+				      cb->start,
+				      cb->orig_bio->bi_io_vec,
+				      cb->orig_bio->bi_vcnt,
+				      cb->compressed_len);
 csum_failed:
 	if (ret)
 		cb->errors = 1;
@@ -588,6 +592,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 
 	cb->len = uncompressed_len;
 	cb->compressed_len = compressed_len;
+	cb->compress_type = extent_compress_type(bio_flags);
 	cb->orig_bio = bio;
 
 	nr_pages = (compressed_len + PAGE_CACHE_SIZE - 1) /
@@ -677,3 +682,224 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	bio_put(comp_bio);
 	return 0;
 }
+
+static struct list_head comp_idle_workspace[BTRFS_COMPRESS_TYPES];
+static spinlock_t comp_workspace_lock[BTRFS_COMPRESS_TYPES];
+static int comp_num_workspace[BTRFS_COMPRESS_TYPES];
+static atomic_t comp_alloc_workspace[BTRFS_COMPRESS_TYPES];
+static wait_queue_head_t comp_workspace_wait[BTRFS_COMPRESS_TYPES];
+
+struct btrfs_compress_op *btrfs_compress_op[] = {
+	&btrfs_zlib_compress,
+};
+
+int __init btrfs_init_compress(void)
+{
+	int i;
+
+	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
+		INIT_LIST_HEAD(&comp_idle_workspace[i]);
+		spin_lock_init(&comp_workspace_lock[i]);
+		atomic_set(&comp_alloc_workspace[i], 0);
+		init_waitqueue_head(&comp_workspace_wait[i]);
+	}
+	return 0;
+}
+
+/*
+ * this finds an available workspace or allocates a new one
+ * ERR_PTR is returned if things go bad.
+ */
+static struct list_head *find_workspace(int type)
+{
+	struct list_head *workspace;
+	int cpus = num_online_cpus();
+	int idx = type - 1;
+
+	struct list_head *idle_workspace = &comp_idle_workspace[idx];
+	spinlock_t *workspace_lock = &comp_workspace_lock[idx];
+	atomic_t *alloc_workspace = &comp_alloc_workspace[idx];
+	wait_queue_head_t *workspace_wait = &comp_workspace_wait[idx];
+	int *num_workspace = &comp_num_workspace[idx];
+again:
+	spin_lock(workspace_lock);
+	if (!list_empty(idle_workspace)) {
+		workspace = idle_workspace->next;
+		list_del(workspace);
+		(*num_workspace)--;
+		spin_unlock(workspace_lock);
+		return workspace;
+
+	}
+	if (atomic_read(alloc_workspace) > cpus) {
+		DEFINE_WAIT(wait);
+
+		spin_unlock(workspace_lock);
+		prepare_to_wait(workspace_wait, &wait, TASK_UNINTERRUPTIBLE);
+		if (atomic_read(alloc_workspace) > cpus && !*num_workspace)
+			schedule();
+		finish_wait(workspace_wait, &wait);
+		goto again;
+	}
+	atomic_inc(alloc_workspace);
+	spin_unlock(workspace_lock);
+
+	workspace = btrfs_compress_op[idx]->alloc_workspace();
+	if (IS_ERR(workspace)) {
+		atomic_dec(alloc_workspace);
+		wake_up(workspace_wait);
+	}
+	return workspace;
+}
+
+/*
+ * put a workspace struct back on the list or free it if we have enough
+ * idle ones sitting around
+ */
+static void free_workspace(int type, struct list_head *workspace)
+{
+	int idx = type - 1;
+	struct list_head *idle_workspace = &comp_idle_workspace[idx];
+	spinlock_t *workspace_lock = &comp_workspace_lock[idx];
+	atomic_t *alloc_workspace = &comp_alloc_workspace[idx];
+	wait_queue_head_t *workspace_wait = &comp_workspace_wait[idx];
+	int *num_workspace = &comp_num_workspace[idx];
+
+	spin_lock(workspace_lock);
+	if (*num_workspace < num_online_cpus()) {
+		list_add_tail(workspace, idle_workspace);
+		(*num_workspace)++;
+		spin_unlock(workspace_lock);
+		goto wake;
+	}
+	spin_unlock(workspace_lock);
+
+	btrfs_compress_op[idx]->free_workspace(workspace);
+	atomic_dec(alloc_workspace);
+wake:
+	if (waitqueue_active(workspace_wait))
+		wake_up(workspace_wait);
+}
+
+/*
+ * cleanup function for module exit
+ */
+static void free_workspaces(void)
+{
+	struct list_head *workspace;
+	int i;
+
+	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
+		while (!list_empty(&comp_idle_workspace[i])) {
+			workspace = comp_idle_workspace[i].next;
+			list_del(workspace);
+			btrfs_compress_op[i]->free_workspace(workspace);
+			atomic_dec(&comp_alloc_workspace[i]);
+		}
+	}
+}
+
+/*
+ * given an address space and start/len, compress the bytes.
+ *
+ * pages are allocated to hold the compressed result and stored
+ * in 'pages'
+ *
+ * out_pages is used to return the number of pages allocated. There
+ * may be pages allocated even if we return an error
+ *
+ * total_in is used to return the number of bytes actually read. It
+ * may be smaller then len if we had to exit early because we
+ * ran out of room in the pages array or because we cross the
+ * max_out threshold.
+ *
+ * total_out is used to return the total number of compressed bytes
+ *
+ * max_out tells us the max number of bytes that we're allowed to
+ * stuff into pages
+ */
+int btrfs_compress_pages(int type, struct address_space *mapping,
+			 u64 start, unsigned long len,
+			 struct page **pages,
+			 unsigned long nr_dest_pages,
+			 unsigned long *out_pages,
+			 unsigned long *total_in,
+			 unsigned long *total_out,
+			 unsigned long max_out)
+{
+	struct list_head *workspace;
+	int ret;
+
+	workspace = find_workspace(type);
+	if (IS_ERR(workspace))
+		return -1;
+
+	ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping,
+							start, len, pages,
+							nr_dest_pages, out_pages,
+							total_in, total_out,
+							max_out);
+	free_workspace(type, workspace);
+	return ret;
+}
+
+/*
+ * pages_in is an array of pages with compressed data.
+ *
+ * disk_start is the starting logical offset of this array in the file
+ *
+ * bvec is a bio_vec of pages from the file that we want to decompress into
+ *
+ * vcnt is the count of pages in the biovec
+ *
+ * srclen is the number of bytes in pages_in
+ *
+ * The basic idea is that we have a bio that was created by readpages.
+ * The pages in the bio are for the uncompressed data, and they may not
+ * be contiguous. They all correspond to the range of bytes covered by
+ * the compressed extent.
+ */
+int btrfs_decompress_biovec(int type, struct page **pages_in, u64 disk_start,
+			    struct bio_vec *bvec, int vcnt, size_t srclen)
+{
+	struct list_head *workspace;
+	int ret;
+
+	workspace = find_workspace(type);
+	if (IS_ERR(workspace))
+		return -ENOMEM;
+
+	ret = btrfs_compress_op[type-1]->decompress_biovec(workspace, pages_in,
+							   disk_start,
+							   bvec, vcnt, srclen);
+	free_workspace(type, workspace);
+	return ret;
+}
+
+/*
+ * a less complex decompression routine. Our compressed data fits in a
+ * single page, and we want to read a single page out of it.
+ * start_byte tells us the offset into the compressed data we're interested in
+ */
+int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
+		     unsigned long start_byte, size_t srclen, size_t destlen)
+{
+	struct list_head *workspace;
+	int ret;
+
+	workspace = find_workspace(type);
+	if (IS_ERR(workspace))
+		return -ENOMEM;
+
+	ret = btrfs_compress_op[type-1]->decompress(workspace, data_in,
+						    dest_page, start_byte,
+						    srclen, destlen);
+
+	free_workspace(type, workspace);
+	return ret;
+}
+
+void __exit btrfs_exit_compress(void)
+{
+	free_workspaces();
+}
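
Since the workspace pools and the dispatch are now indexed by compression type, adding another algorithm largely reduces to supplying one more ops entry. The sketch below is hypothetical and not part of this commit: btrfs_lzo_compress is a placeholder name, BTRFS_COMPRESS_TYPES in compression.h would have to grow to match, and the new on-disk type value must equal its table index plus one, mirroring the (type - 1) lookup used above.

/* Hypothetical follow-up, for illustration only */
extern struct btrfs_compress_op btrfs_lzo_compress;	/* placeholder ops */

struct btrfs_compress_op *btrfs_compress_op[] = {
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,	/* new type N dispatches via index N - 1 */
};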