author		Li Zefan <lizf@cn.fujitsu.com>	2010-12-17 01:21:50 -0500
committer	Li Zefan <lizf@cn.fujitsu.com>	2010-12-22 10:15:45 -0500
commit		261507a02ccba9afda919852263b6bc1581ce1ef (patch)
tree		c16bc657ff4e29a87042ceb379487f24dff01035 /fs/btrfs/zlib.c
parent		4b72029dc3fd6ba7dc45ccd1cf0aa0ebfa209bd3 (diff)
btrfs: allow adding new compression algorithms
Make the code aware of the compression type, instead of always assuming
zlib compression.

Also turn the zlib workspace handling into common code, so that all
compression types can share it.
Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
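The refactor replaces the exported btrfs_zlib_* entry points with a table of per-algorithm callbacks: zlib.c now fills in a struct btrfs_compress_op (alloc_workspace, free_workspace, compress_pages, decompress_biovec, decompress) and the generic compression code dispatches through that table. The following is only a rough, userspace-only sketch of the ops-table pattern involved; the names compress_backend, backends[] and the *_stub functions are invented for illustration, and the real kernel interface (in fs/btrfs/compression.h) differs in detail.

/*
 * Simplified illustration of dispatching compression work through a table
 * of per-algorithm operations instead of hard-coding one algorithm.
 * All names here are hypothetical stand-ins for the btrfs interfaces.
 */
#include <stdio.h>
#include <stddef.h>

enum compress_type { COMPRESS_NONE = 0, COMPRESS_ZLIB = 1 };

struct compress_backend {
	const char *name;
	/* trimmed-down version of the callbacks btrfs_zlib_compress provides */
	void *(*alloc_workspace)(void);
	void  (*free_workspace)(void *ws);
	int   (*compress_pages)(void *ws, const char *in, size_t len);
};

static void *zlib_alloc_workspace_stub(void)
{
	static char ws[16];	/* stand-in for the real z_stream workspaces */
	return ws;
}

static void zlib_free_workspace_stub(void *ws) { (void)ws; }

static int zlib_compress_pages_stub(void *ws, const char *in, size_t len)
{
	(void)ws;
	printf("zlib backend asked to compress %zu bytes of \"%s\"\n", len, in);
	return 0;
}

/* one table entry per algorithm; adding an algorithm means adding a row */
static const struct compress_backend backends[] = {
	[COMPRESS_ZLIB] = {
		.name            = "zlib",
		.alloc_workspace = zlib_alloc_workspace_stub,
		.free_workspace  = zlib_free_workspace_stub,
		.compress_pages  = zlib_compress_pages_stub,
	},
};

/* generic code only knows the type index, never a specific backend */
static int compress(enum compress_type type, const char *in, size_t len)
{
	const struct compress_backend *ops = &backends[type];
	void *ws = ops->alloc_workspace();
	int ret = ops->compress_pages(ws, in, len);

	ops->free_workspace(ws);
	return ret;
}

int main(void)
{
	return compress(COMPRESS_ZLIB, "hello", 5);
}

In this shape, supporting another compression algorithm amounts to filling in one more row of the table, which is what the commit title is getting at.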
Diffstat (limited to 'fs/btrfs/zlib.c')
-rw-r--r--	fs/btrfs/zlib.c	253
1 file changed, 50 insertions, 203 deletions
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index b01558661e3b..9a3e693917f2 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -32,15 +32,6 @@
 #include <linux/bio.h>
 #include "compression.h"
 
-/* Plan: call deflate() with avail_in == *sourcelen,
-	avail_out = *dstlen - 12 and flush == Z_FINISH.
-	If it doesn't manage to finish, call it again with
-	avail_in == 0 and avail_out set to the remaining 12
-	bytes for it to clean up.
-	Q: Is 12 bytes sufficient?
-*/
-#define STREAM_END_SPACE 12
-
 struct workspace {
 	z_stream inf_strm;
 	z_stream def_strm;
@@ -48,155 +39,51 @@ struct workspace {
 	struct list_head list;
 };
 
-static LIST_HEAD(idle_workspace);
-static DEFINE_SPINLOCK(workspace_lock);
-static unsigned long num_workspace;
-static atomic_t alloc_workspace = ATOMIC_INIT(0);
-static DECLARE_WAIT_QUEUE_HEAD(workspace_wait);
+static void zlib_free_workspace(struct list_head *ws)
+{
+	struct workspace *workspace = list_entry(ws, struct workspace, list);
 
-/*
- * this finds an available zlib workspace or allocates a new one
- * NULL or an ERR_PTR is returned if things go bad.
- */
-static struct workspace *find_zlib_workspace(void)
+	vfree(workspace->def_strm.workspace);
+	vfree(workspace->inf_strm.workspace);
+	kfree(workspace->buf);
+	kfree(workspace);
+}
+
+static struct list_head *zlib_alloc_workspace(void)
 {
 	struct workspace *workspace;
-	int ret;
-	int cpus = num_online_cpus();
-
-again:
-	spin_lock(&workspace_lock);
-	if (!list_empty(&idle_workspace)) {
-		workspace = list_entry(idle_workspace.next, struct workspace,
-				       list);
-		list_del(&workspace->list);
-		num_workspace--;
-		spin_unlock(&workspace_lock);
-		return workspace;
-
-	}
-	if (atomic_read(&alloc_workspace) > cpus) {
-		DEFINE_WAIT(wait);
-
-		spin_unlock(&workspace_lock);
-		prepare_to_wait(&workspace_wait, &wait, TASK_UNINTERRUPTIBLE);
-		if (atomic_read(&alloc_workspace) > cpus && !num_workspace)
-			schedule();
-		finish_wait(&workspace_wait, &wait);
-		goto again;
-	}
-	atomic_inc(&alloc_workspace);
-	spin_unlock(&workspace_lock);
 
 	workspace = kzalloc(sizeof(*workspace), GFP_NOFS);
-	if (!workspace) {
-		ret = -ENOMEM;
-		goto fail;
-	}
+	if (!workspace)
+		return ERR_PTR(-ENOMEM);
 
 	workspace->def_strm.workspace = vmalloc(zlib_deflate_workspacesize());
-	if (!workspace->def_strm.workspace) {
-		ret = -ENOMEM;
-		goto fail;
-	}
 	workspace->inf_strm.workspace = vmalloc(zlib_inflate_workspacesize());
-	if (!workspace->inf_strm.workspace) {
-		ret = -ENOMEM;
-		goto fail_inflate;
-	}
 	workspace->buf = kmalloc(PAGE_CACHE_SIZE, GFP_NOFS);
-	if (!workspace->buf) {
-		ret = -ENOMEM;
-		goto fail_kmalloc;
-	}
-	return workspace;
-
-fail_kmalloc:
-	vfree(workspace->inf_strm.workspace);
-fail_inflate:
-	vfree(workspace->def_strm.workspace);
-fail:
-	kfree(workspace);
-	atomic_dec(&alloc_workspace);
-	wake_up(&workspace_wait);
-	return ERR_PTR(ret);
-}
-
-/*
- * put a workspace struct back on the list or free it if we have enough
- * idle ones sitting around
- */
-static int free_workspace(struct workspace *workspace)
-{
-	spin_lock(&workspace_lock);
-	if (num_workspace < num_online_cpus()) {
-		list_add_tail(&workspace->list, &idle_workspace);
-		num_workspace++;
-		spin_unlock(&workspace_lock);
-		if (waitqueue_active(&workspace_wait))
-			wake_up(&workspace_wait);
-		return 0;
-	}
-	spin_unlock(&workspace_lock);
-	vfree(workspace->def_strm.workspace);
-	vfree(workspace->inf_strm.workspace);
-	kfree(workspace->buf);
-	kfree(workspace);
+	if (!workspace->def_strm.workspace ||
+	    !workspace->inf_strm.workspace || !workspace->buf)
+		goto fail;
 
-	atomic_dec(&alloc_workspace);
-	if (waitqueue_active(&workspace_wait))
-		wake_up(&workspace_wait);
-	return 0;
-}
+	INIT_LIST_HEAD(&workspace->list);
 
-/*
- * cleanup function for module exit
- */
-static void free_workspaces(void)
-{
-	struct workspace *workspace;
-	while (!list_empty(&idle_workspace)) {
-		workspace = list_entry(idle_workspace.next, struct workspace,
-				       list);
-		list_del(&workspace->list);
-		vfree(workspace->def_strm.workspace);
-		vfree(workspace->inf_strm.workspace);
-		kfree(workspace->buf);
-		kfree(workspace);
-		atomic_dec(&alloc_workspace);
-	}
+	return &workspace->list;
+fail:
+	zlib_free_workspace(&workspace->list);
+	return ERR_PTR(-ENOMEM);
 }
 
-/*
- * given an address space and start/len, compress the bytes.
- *
- * pages are allocated to hold the compressed result and stored
- * in 'pages'
- *
- * out_pages is used to return the number of pages allocated. There
- * may be pages allocated even if we return an error
- *
- * total_in is used to return the number of bytes actually read. It
- * may be smaller then len if we had to exit early because we
- * ran out of room in the pages array or because we cross the
- * max_out threshold.
- *
- * total_out is used to return the total number of compressed bytes
- *
- * max_out tells us the max number of bytes that we're allowed to
- * stuff into pages
- */
-int btrfs_zlib_compress_pages(struct address_space *mapping,
-			      u64 start, unsigned long len,
-			      struct page **pages,
-			      unsigned long nr_dest_pages,
-			      unsigned long *out_pages,
-			      unsigned long *total_in,
-			      unsigned long *total_out,
-			      unsigned long max_out)
+static int zlib_compress_pages(struct list_head *ws,
+			       struct address_space *mapping,
+			       u64 start, unsigned long len,
+			       struct page **pages,
+			       unsigned long nr_dest_pages,
+			       unsigned long *out_pages,
+			       unsigned long *total_in,
+			       unsigned long *total_out,
+			       unsigned long max_out)
 {
+	struct workspace *workspace = list_entry(ws, struct workspace, list);
 	int ret;
-	struct workspace *workspace;
 	char *data_in;
 	char *cpage_out;
 	int nr_pages = 0;
@@ -208,10 +95,6 @@ int btrfs_zlib_compress_pages(struct address_space *mapping,
 	*total_out = 0;
 	*total_in = 0;
 
-	workspace = find_zlib_workspace();
-	if (IS_ERR(workspace))
-		return -1;
-
 	if (Z_OK != zlib_deflateInit(&workspace->def_strm, 3)) {
 		printk(KERN_WARNING "deflateInit failed\n");
 		ret = -1;
@@ -325,35 +208,18 @@ out:
 		kunmap(in_page);
 		page_cache_release(in_page);
 	}
-	free_workspace(workspace);
 	return ret;
 }
 
-/*
- * pages_in is an array of pages with compressed data.
- *
- * disk_start is the starting logical offset of this array in the file
- *
- * bvec is a bio_vec of pages from the file that we want to decompress into
- *
- * vcnt is the count of pages in the biovec
- *
- * srclen is the number of bytes in pages_in
- *
- * The basic idea is that we have a bio that was created by readpages.
- * The pages in the bio are for the uncompressed data, and they may not
- * be contiguous. They all correspond to the range of bytes covered by
- * the compressed extent.
- */
-int btrfs_zlib_decompress_biovec(struct page **pages_in,
-			      u64 disk_start,
-			      struct bio_vec *bvec,
-			      int vcnt,
-			      size_t srclen)
+static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
+				  u64 disk_start,
+				  struct bio_vec *bvec,
+				  int vcnt,
+				  size_t srclen)
 {
+	struct workspace *workspace = list_entry(ws, struct workspace, list);
 	int ret = 0;
 	int wbits = MAX_WBITS;
-	struct workspace *workspace;
 	char *data_in;
 	size_t total_out = 0;
 	unsigned long page_bytes_left;
@@ -371,10 +237,6 @@ int btrfs_zlib_decompress_biovec(struct page **pages_in,
 	unsigned long current_buf_start;
 	char *kaddr;
 
-	workspace = find_zlib_workspace();
-	if (IS_ERR(workspace))
-		return -ENOMEM;
-
 	data_in = kmap(pages_in[page_in_index]);
 	workspace->inf_strm.next_in = data_in;
 	workspace->inf_strm.avail_in = min_t(size_t, srclen, PAGE_CACHE_SIZE);
@@ -400,8 +262,7 @@ int btrfs_zlib_decompress_biovec(struct page **pages_in,
 
 	if (Z_OK != zlib_inflateInit2(&workspace->inf_strm, wbits)) {
 		printk(KERN_WARNING "inflateInit failed\n");
-		ret = -1;
-		goto out;
+		return -1;
 	}
 	while (workspace->inf_strm.total_in < srclen) {
 		ret = zlib_inflate(&workspace->inf_strm, Z_NO_FLUSH);
@@ -527,35 +388,21 @@ done:
 	zlib_inflateEnd(&workspace->inf_strm);
 	if (data_in)
 		kunmap(pages_in[page_in_index]);
-out:
-	free_workspace(workspace);
 	return ret;
 }
 
-/*
- * a less complex decompression routine. Our compressed data fits in a
- * single page, and we want to read a single page out of it.
- * start_byte tells us the offset into the compressed data we're interested in
- */
-int btrfs_zlib_decompress(unsigned char *data_in,
-			  struct page *dest_page,
-			  unsigned long start_byte,
-			  size_t srclen, size_t destlen)
+static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
+			   struct page *dest_page,
+			   unsigned long start_byte,
+			   size_t srclen, size_t destlen)
 {
+	struct workspace *workspace = list_entry(ws, struct workspace, list);
 	int ret = 0;
 	int wbits = MAX_WBITS;
-	struct workspace *workspace;
 	unsigned long bytes_left = destlen;
 	unsigned long total_out = 0;
 	char *kaddr;
 
-	if (destlen > PAGE_CACHE_SIZE)
-		return -ENOMEM;
-
-	workspace = find_zlib_workspace();
-	if (IS_ERR(workspace))
-		return -ENOMEM;
-
 	workspace->inf_strm.next_in = data_in;
 	workspace->inf_strm.avail_in = srclen;
 	workspace->inf_strm.total_in = 0;
@@ -576,8 +423,7 @@ int btrfs_zlib_decompress(unsigned char *data_in,
 
 	if (Z_OK != zlib_inflateInit2(&workspace->inf_strm, wbits)) {
 		printk(KERN_WARNING "inflateInit failed\n");
-		ret = -1;
-		goto out;
+		return -1;
 	}
 
 	while (bytes_left > 0) {
@@ -627,12 +473,13 @@ next:
 	ret = 0;
 
 	zlib_inflateEnd(&workspace->inf_strm);
-out:
-	free_workspace(workspace);
 	return ret;
 }
 
-void btrfs_zlib_exit(void)
-{
-	free_workspaces();
-}
+struct btrfs_compress_op btrfs_zlib_compress = {
+	.alloc_workspace	= zlib_alloc_workspace,
+	.free_workspace		= zlib_free_workspace,
+	.compress_pages		= zlib_compress_pages,
+	.decompress_biovec	= zlib_decompress_biovec,
+	.decompress		= zlib_decompress,
+};
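Note that each new callback receives the workspace as a bare struct list_head *ws and immediately recovers its private struct workspace with list_entry(), so the common code can keep per-algorithm workspaces on one idle list without knowing their layout. Below is a minimal, self-contained userspace sketch of that container-recovery idiom, assuming a hand-rolled offsetof-based macro rather than the kernel's list_entry(); the types list_node and my_workspace are invented for illustration.

/*
 * Userspace sketch of the list_entry() container-recovery idiom used by
 * zlib_free_workspace() and the other handlers above.  LIST_ENTRY,
 * list_node and my_workspace are illustrative, not the kernel's versions.
 */
#include <stdio.h>
#include <stddef.h>

struct list_node {
	struct list_node *next;
};

/* recover the containing structure from a pointer to its embedded node */
#define LIST_ENTRY(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct my_workspace {
	char buf[64];		/* private per-algorithm state */
	struct list_node list;	/* what the generic code passes around */
};

/* generic code hands us only the embedded list node... */
static void use_workspace(struct list_node *ws)
{
	/* ...and we recover our own type, as the new zlib handlers do */
	struct my_workspace *workspace =
		LIST_ENTRY(ws, struct my_workspace, list);

	printf("workspace buffer says: %s\n", workspace->buf);
}

int main(void)
{
	struct my_workspace w = { .buf = "hello from the workspace" };

	use_workspace(&w.list);
	return 0;
}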