author     Sergey Senozhatsky <sergey.senozhatsky@gmail.com>   2014-04-07 18:38:12 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>      2014-04-07 19:36:01 -0400
commit     b7ca232ee7e85ed3b18e39eb20a7f458ee1d6047
tree       6bb05970e4ae792d7c071d1e1b479159acc02716 /drivers/block
parent     e7e1ef439d18f9a21521116ea9f2b976d7230e54
zram: use zcomp compressing backends
Do not perform direct LZO compress/decompress calls, initialise
and use zcomp LZO backend (single compression stream) instead.
[akpm@linux-foundation.org: resolve conflicts with zram-delete-zram_init_device-fix.patch]
Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Cc: Jerome Marchand <jmarchan@redhat.com>
Cc: Nitin Gupta <ngupta@vflare.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
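In short, the patch replaces the per-device LZO work buffers with a compression stream obtained from the zcomp layer. A rough sketch of the call sequence, condensed from the hunks below (only zcomp calls that actually appear in this diff are used; error handling, locking and the zsmalloc copy are elided, so this is illustrative rather than standalone, compilable kernel code):

        /* device init (disksize_store): create the backend once per device */
        zram->comp = zcomp_create(default_compressor);  /* "lzo" */
        if (!zram->comp)
                return -EINVAL;                 /* after freeing meta, as in the patch */

        /* write path (zram_bvec_write): a zcomp stream replaces meta->buffer_lock
         * and the old compress_workmem/compress_buffer fields */
        zstrm = zcomp_strm_find(zram->comp);
        ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
        src = zstrm->buffer;                    /* compressed data lives in the stream buffer */
        /* ... memcpy src into the zsmalloc-mapped object ... */
        zcomp_strm_release(zram->comp, zstrm);

        /* read path (zram_decompress_page) */
        ret = zcomp_decompress(zram->comp, cmem, size, mem);

        /* teardown (zram_reset_device) */
        zcomp_destroy(zram->comp);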
Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/zram/Makefile   |  2
-rw-r--r--  drivers/block/zram/zram_drv.c | 69
-rw-r--r--  drivers/block/zram/zram_drv.h |  8
3 files changed, 36 insertions, 43 deletions
diff --git a/drivers/block/zram/Makefile b/drivers/block/zram/Makefile
index cb0f9ced6a93..757c6a5cadff 100644
--- a/drivers/block/zram/Makefile
+++ b/drivers/block/zram/Makefile
@@ -1,3 +1,3 @@
-zram-y := zram_drv.o
+zram-y := zcomp_lzo.o zcomp.o zram_drv.o
 
 obj-$(CONFIG_ZRAM) += zram.o
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 76ba67673a90..98823f9ca8b1 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -29,7 +29,6 @@
 #include <linux/genhd.h>
 #include <linux/highmem.h>
 #include <linux/slab.h>
-#include <linux/lzo.h>
 #include <linux/string.h>
 #include <linux/vmalloc.h>
 
@@ -38,6 +37,7 @@
 /* Globals */
 static int zram_major;
 static struct zram *zram_devices;
+static const char *default_compressor = "lzo";
 
 /* Module params (documentation at end) */
 static unsigned int num_devices = 1;
@@ -160,8 +160,6 @@ static inline int valid_io_request(struct zram *zram, struct bio *bio)
 static void zram_meta_free(struct zram_meta *meta)
 {
         zs_destroy_pool(meta->mem_pool);
-        kfree(meta->compress_workmem);
-        free_pages((unsigned long)meta->compress_buffer, 1);
         vfree(meta->table);
         kfree(meta);
 }
@@ -173,22 +171,11 @@ static struct zram_meta *zram_meta_alloc(u64 disksize)
         if (!meta)
                 goto out;
 
-        meta->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
-        if (!meta->compress_workmem)
-                goto free_meta;
-
-        meta->compress_buffer =
-                (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
-        if (!meta->compress_buffer) {
-                pr_err("Error allocating compressor buffer space\n");
-                goto free_workmem;
-        }
-
         num_pages = disksize >> PAGE_SHIFT;
         meta->table = vzalloc(num_pages * sizeof(*meta->table));
         if (!meta->table) {
                 pr_err("Error allocating zram address table\n");
-                goto free_buffer;
+                goto free_meta;
         }
 
         meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
@@ -198,15 +185,10 @@ static struct zram_meta *zram_meta_alloc(u64 disksize)
         }
 
         rwlock_init(&meta->tb_lock);
-        mutex_init(&meta->buffer_lock);
         return meta;
 
 free_table:
         vfree(meta->table);
-free_buffer:
-        free_pages((unsigned long)meta->compress_buffer, 1);
-free_workmem:
-        kfree(meta->compress_workmem);
 free_meta:
         kfree(meta);
         meta = NULL;
@@ -280,8 +262,7 @@ static void zram_free_page(struct zram *zram, size_t index)
 
 static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 {
-        int ret = LZO_E_OK;
-        size_t clen = PAGE_SIZE;
+        int ret = 0;
         unsigned char *cmem;
         struct zram_meta *meta = zram->meta;
         unsigned long handle;
@@ -301,12 +282,12 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
         if (size == PAGE_SIZE)
                 copy_page(mem, cmem);
         else
-                ret = lzo1x_decompress_safe(cmem, size, mem, &clen);
+                ret = zcomp_decompress(zram->comp, cmem, size, mem);
         zs_unmap_object(meta->mem_pool, handle);
         read_unlock(&meta->tb_lock);
 
         /* Should NEVER happen. Return bio error if it does. */
-        if (unlikely(ret != LZO_E_OK)) {
+        if (unlikely(ret)) {
                 pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
                 atomic64_inc(&zram->stats.failed_reads);
                 return ret;
@@ -349,7 +330,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 
         ret = zram_decompress_page(zram, uncmem, index);
         /* Should NEVER happen. Return bio error if it does. */
-        if (unlikely(ret != LZO_E_OK))
+        if (unlikely(ret))
                 goto out_cleanup;
 
         if (is_partial_io(bvec))
@@ -374,11 +355,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
         struct page *page;
         unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
         struct zram_meta *meta = zram->meta;
+        struct zcomp_strm *zstrm;
         bool locked = false;
 
         page = bvec->bv_page;
-        src = meta->compress_buffer;
-
         if (is_partial_io(bvec)) {
                 /*
                  * This is a partial IO. We need to read the full page
@@ -394,7 +374,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                 goto out;
         }
 
-        mutex_lock(&meta->buffer_lock);
+        zstrm = zcomp_strm_find(zram->comp);
         locked = true;
         user_mem = kmap_atomic(page);
 
@@ -420,22 +400,20 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                 goto out;
         }
 
-        ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
-                               meta->compress_workmem);
+        ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
         if (!is_partial_io(bvec)) {
                 kunmap_atomic(user_mem);
                 user_mem = NULL;
                 uncmem = NULL;
         }
 
-        if (unlikely(ret != LZO_E_OK)) {
+        if (unlikely(ret)) {
                 pr_err("Compression failed! err=%d\n", ret);
                 goto out;
         }
-
+        src = zstrm->buffer;
         if (unlikely(clen > max_zpage_size)) {
                 clen = PAGE_SIZE;
-                src = NULL;
                 if (is_partial_io(bvec))
                         src = uncmem;
         }
@@ -457,6 +435,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                 memcpy(cmem, src, clen);
         }
 
+        zcomp_strm_release(zram->comp, zstrm);
+        locked = false;
         zs_unmap_object(meta->mem_pool, handle);
 
         /*
@@ -475,10 +455,9 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
         atomic64_inc(&zram->stats.pages_stored);
 out:
         if (locked)
-                mutex_unlock(&meta->buffer_lock);
+                zcomp_strm_release(zram->comp, zstrm);
         if (is_partial_io(bvec))
                 kfree(uncmem);
-
         if (ret)
                 atomic64_inc(&zram->stats.failed_writes);
         return ret;
@@ -522,6 +501,7 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity)
                 zs_free(meta->mem_pool, handle);
         }
 
+        zcomp_destroy(zram->comp);
         zram_meta_free(zram->meta);
         zram->meta = NULL;
         /* Reset stats */
@@ -539,6 +519,7 @@ static ssize_t disksize_store(struct device *dev,
         u64 disksize;
         struct zram_meta *meta;
         struct zram *zram = dev_to_zram(dev);
+        int err;
 
         disksize = memparse(buf, NULL);
         if (!disksize)
@@ -551,10 +532,17 @@ static ssize_t disksize_store(struct device *dev,
 
         down_write(&zram->init_lock);
         if (init_done(zram)) {
-                zram_meta_free(meta);
-                up_write(&zram->init_lock);
                 pr_info("Cannot change disksize for initialized device\n");
-                return -EBUSY;
+                err = -EBUSY;
+                goto out_free_meta;
+        }
+
+        zram->comp = zcomp_create(default_compressor);
+        if (!zram->comp) {
+                pr_info("Cannot initialise %s compressing backend\n",
+                                default_compressor);
+                err = -EINVAL;
+                goto out_free_meta;
         }
 
         zram->meta = meta;
@@ -563,6 +551,11 @@ static ssize_t disksize_store(struct device *dev,
         up_write(&zram->init_lock);
 
         return len;
+
+out_free_meta:
+        up_write(&zram->init_lock);
+        zram_meta_free(meta);
+        return err;
 }
 
 static ssize_t reset_store(struct device *dev,
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 1d5b1f5786a8..45e04f7b713f 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -16,9 +16,10 @@
 #define _ZRAM_DRV_H_
 
 #include <linux/spinlock.h>
-#include <linux/mutex.h>
 #include <linux/zsmalloc.h>
 
+#include "zcomp.h"
+
 /*
  * Some arbitrary value. This is just to catch
  * invalid value for num_devices module parameter.
@@ -81,17 +82,16 @@ struct zram_stats {
 
 struct zram_meta {
         rwlock_t tb_lock;       /* protect table */
-        void *compress_workmem;
-        void *compress_buffer;
         struct table *table;
         struct zs_pool *mem_pool;
-        struct mutex buffer_lock; /* protect compress buffers */
 };
 
 struct zram {
         struct zram_meta *meta;
         struct request_queue *queue;
         struct gendisk *disk;
+        struct zcomp *comp;
+
         /* Prevent concurrent execution of device init, reset and R/W request */
         struct rw_semaphore init_lock;
         /*