author     Sergey Senozhatsky <sergey.senozhatsky@gmail.com>    2013-06-21 20:21:18 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>      2013-06-24 18:59:03 -0400
commit     9b3bb7abcdf2df0f1b2657e6cbc9d06bc2b3b36f
tree       ed10f3a6d7e26d16bc480416ac5869e3b6ed1237
parent     95cd1860b8cddb69db91908b548294b567937551
zram: remove zram_sysfs file (v2)
Move the zram sysfs code into zram_drv.c and remove the zram_sysfs.c
file. This lets us make static a number of previously exported zram
functions that were only used by the sysfs code, e.g. the internal
zram_meta_alloc()/zram_meta_free(). It also lets us drop the zram_drv
wrapper functions that existed only for zram_sysfs, e.g. the
zram_reset_device()/__zram_reset_device() pair.

v2: as suggested by Greg K-H, move the MODULE description macros to
the bottom of the file.
Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
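
Background for the hunks below: the attribute group that becomes static in
zram_drv.c still has to be attached to the gendisk's device kobject; that
registration presumably sits in create_device()/destroy_device() in
zram_drv.c, outside the hunks shown here, which is why the extern declaration
in zram_drv.h can be dropped. A minimal sketch of such a registration, using
the standard sysfs_create_group()/sysfs_remove_group() kernel APIs; the helper
names zram_register_sysfs()/zram_unregister_sysfs() are hypothetical and only
illustrate the pattern, they are not part of this patch:

/*
 * Sketch only, not part of this patch: attaching the now-static
 * zram_disk_attr_group to the block device's kobject so the files
 * appear under /sys/block/zram<id>/.
 */
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/sysfs.h>

#include "zram_drv.h"

static int zram_register_sysfs(struct zram *zram)	/* hypothetical helper */
{
	int ret;

	/* Exposes disksize, reset, num_reads, mem_used_total, ... */
	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				 &zram_disk_attr_group);
	if (ret < 0)
		pr_warn("Error creating sysfs group for zram%d\n",
			zram->disk->first_minor);
	return ret;
}

static void zram_unregister_sysfs(struct zram *zram)	/* hypothetical helper */
{
	/* Teardown counterpart on device destruction */
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			   &zram_disk_attr_group);
}

With the group static, such a call can reference it directly instead of going
through the extern declaration that this patch removes from zram_drv.h.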
 drivers/staging/zram/Makefile     |   2
 drivers/staging/zram/zram_drv.c   | 516
 drivers/staging/zram/zram_drv.h   |  10
 drivers/staging/zram/zram_sysfs.c | 209
 4 files changed, 350 insertions(+), 387 deletions(-)
diff --git a/drivers/staging/zram/Makefile b/drivers/staging/zram/Makefile
index 7f4a3019e9c4..cb0f9ced6a93 100644
--- a/drivers/staging/zram/Makefile
+++ b/drivers/staging/zram/Makefile
@@ -1,3 +1,3 @@
-zram-y := zram_drv.o zram_sysfs.o
+zram-y := zram_drv.o
 
 obj-$(CONFIG_ZRAM) += zram.o
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index ec2b2b5a122e..753877431b5f 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -42,6 +42,104 @@ static struct zram *zram_devices;
 /* Module params (documentation at end) */
 static unsigned int num_devices = 1;
 
+static inline struct zram *dev_to_zram(struct device *dev)
+{
+        return (struct zram *)dev_to_disk(dev)->private_data;
+}
+
+static ssize_t disksize_show(struct device *dev,
+                struct device_attribute *attr, char *buf)
+{
+        struct zram *zram = dev_to_zram(dev);
+
+        return sprintf(buf, "%llu\n", zram->disksize);
+}
+
+static ssize_t initstate_show(struct device *dev,
+                struct device_attribute *attr, char *buf)
+{
+        struct zram *zram = dev_to_zram(dev);
+
+        return sprintf(buf, "%u\n", zram->init_done);
+}
+
+static ssize_t num_reads_show(struct device *dev,
+                struct device_attribute *attr, char *buf)
+{
+        struct zram *zram = dev_to_zram(dev);
+
+        return sprintf(buf, "%llu\n",
+                (u64)atomic64_read(&zram->stats.num_reads));
+}
+
+static ssize_t num_writes_show(struct device *dev,
+                struct device_attribute *attr, char *buf)
+{
+        struct zram *zram = dev_to_zram(dev);
+
+        return sprintf(buf, "%llu\n",
+                (u64)atomic64_read(&zram->stats.num_writes));
+}
+
+static ssize_t invalid_io_show(struct device *dev,
+                struct device_attribute *attr, char *buf)
+{
+        struct zram *zram = dev_to_zram(dev);
+
+        return sprintf(buf, "%llu\n",
+                (u64)atomic64_read(&zram->stats.invalid_io));
+}
+
+static ssize_t notify_free_show(struct device *dev,
+                struct device_attribute *attr, char *buf)
+{
+        struct zram *zram = dev_to_zram(dev);
+
+        return sprintf(buf, "%llu\n",
+                (u64)atomic64_read(&zram->stats.notify_free));
+}
+
+static ssize_t zero_pages_show(struct device *dev,
+                struct device_attribute *attr, char *buf)
+{
+        struct zram *zram = dev_to_zram(dev);
+
+        return sprintf(buf, "%u\n", zram->stats.pages_zero);
+}
+
+static ssize_t orig_data_size_show(struct device *dev,
+                struct device_attribute *attr, char *buf)
+{
+        struct zram *zram = dev_to_zram(dev);
+
+        return sprintf(buf, "%llu\n",
+                (u64)(zram->stats.pages_stored) << PAGE_SHIFT);
+}
+
+static ssize_t compr_data_size_show(struct device *dev,
+                struct device_attribute *attr, char *buf)
+{
+        struct zram *zram = dev_to_zram(dev);
+
+        return sprintf(buf, "%llu\n",
+                (u64)atomic64_read(&zram->stats.compr_size));
+}
+
+static ssize_t mem_used_total_show(struct device *dev,
+                struct device_attribute *attr, char *buf)
+{
+        u64 val = 0;
+        struct zram *zram = dev_to_zram(dev);
+        struct zram_meta *meta = zram->meta;
+
+        down_read(&zram->init_lock);
+        if (zram->init_done)
+                val = zs_get_total_size_bytes(meta->mem_pool);
+        up_read(&zram->init_lock);
+
+        return sprintf(buf, "%llu\n", val);
+}
+
 static int zram_test_flag(struct zram_meta *meta, u32 index,
                 enum zram_pageflags flag)
 {
@@ -60,6 +158,97 @@ static void zram_clear_flag(struct zram_meta *meta, u32 index,
         meta->table[index].flags &= ~BIT(flag);
 }
 
+static inline int is_partial_io(struct bio_vec *bvec)
+{
+        return bvec->bv_len != PAGE_SIZE;
+}
+
+/*
+ * Check if request is within bounds and aligned on zram logical blocks.
+ */
+static inline int valid_io_request(struct zram *zram, struct bio *bio)
+{
+        u64 start, end, bound;
+
+        /* unaligned request */
+        if (unlikely(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
+                return 0;
+        if (unlikely(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
+                return 0;
+
+        start = bio->bi_sector;
+        end = start + (bio->bi_size >> SECTOR_SHIFT);
+        bound = zram->disksize >> SECTOR_SHIFT;
+        /* out of range range */
+        if (unlikely(start >= bound || end >= bound || start > end))
+                return 0;
+
+        /* I/O request is valid */
+        return 1;
+}
+
+static void zram_meta_free(struct zram_meta *meta)
+{
+        zs_destroy_pool(meta->mem_pool);
+        kfree(meta->compress_workmem);
+        free_pages((unsigned long)meta->compress_buffer, 1);
+        vfree(meta->table);
+        kfree(meta);
+}
+
+static struct zram_meta *zram_meta_alloc(u64 disksize)
+{
+        size_t num_pages;
+        struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
+        if (!meta)
+                goto out;
+
+        meta->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
+        if (!meta->compress_workmem)
+                goto free_meta;
+
+        meta->compress_buffer =
+                (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
+        if (!meta->compress_buffer) {
+                pr_err("Error allocating compressor buffer space\n");
+                goto free_workmem;
+        }
+
+        num_pages = disksize >> PAGE_SHIFT;
+        meta->table = vzalloc(num_pages * sizeof(*meta->table));
+        if (!meta->table) {
+                pr_err("Error allocating zram address table\n");
+                goto free_buffer;
+        }
+
+        meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
+        if (!meta->mem_pool) {
+                pr_err("Error creating memory pool\n");
+                goto free_table;
+        }
+
+        return meta;
+
+free_table:
+        vfree(meta->table);
+free_buffer:
+        free_pages((unsigned long)meta->compress_buffer, 1);
+free_workmem:
+        kfree(meta->compress_workmem);
+free_meta:
+        kfree(meta);
+        meta = NULL;
+out:
+        return meta;
+}
+
+static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
+{
+        if (*offset + bvec->bv_len >= PAGE_SIZE)
+                (*index)++;
+        *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
+}
+
 static int page_zero_filled(void *ptr)
 {
         unsigned int pos;
@@ -75,6 +264,21 @@ static int page_zero_filled(void *ptr)
         return 1;
 }
 
+static void handle_zero_page(struct bio_vec *bvec)
+{
+        struct page *page = bvec->bv_page;
+        void *user_mem;
+
+        user_mem = kmap_atomic(page);
+        if (is_partial_io(bvec))
+                memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
+        else
+                clear_page(user_mem);
+        kunmap_atomic(user_mem);
+
+        flush_dcache_page(page);
+}
+
 static void zram_free_page(struct zram *zram, size_t index)
 {
         struct zram_meta *meta = zram->meta;
@@ -108,26 +312,6 @@ static void zram_free_page(struct zram *zram, size_t index)
         meta->table[index].size = 0;
 }
 
-static inline int is_partial_io(struct bio_vec *bvec)
-{
-        return bvec->bv_len != PAGE_SIZE;
-}
-
-static void handle_zero_page(struct bio_vec *bvec)
-{
-        struct page *page = bvec->bv_page;
-        void *user_mem;
-
-        user_mem = kmap_atomic(page);
-        if (is_partial_io(bvec))
-                memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
-        else
-                clear_page(user_mem);
-        kunmap_atomic(user_mem);
-
-        flush_dcache_page(page);
-}
-
 static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 {
         int ret = LZO_E_OK;
@@ -338,11 +522,117 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
         return ret;
 }
 
-static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
-{
-        if (*offset + bvec->bv_len >= PAGE_SIZE)
-                (*index)++;
-        *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
+static void zram_reset_device(struct zram *zram)
+{
+        size_t index;
+        struct zram_meta *meta;
+
+        if (!zram->init_done)
+                return;
+
+        meta = zram->meta;
+        zram->init_done = 0;
+
+        /* Free all pages that are still in this zram device */
+        for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
+                unsigned long handle = meta->table[index].handle;
+                if (!handle)
+                        continue;
+
+                zs_free(meta->mem_pool, handle);
+        }
+
+        zram_meta_free(zram->meta);
+        zram->meta = NULL;
+        /* Reset stats */
+        memset(&zram->stats, 0, sizeof(zram->stats));
+
+        zram->disksize = 0;
+        set_capacity(zram->disk, 0);
+}
+
+static void zram_init_device(struct zram *zram, struct zram_meta *meta)
+{
+        if (zram->disksize > 2 * (totalram_pages << PAGE_SHIFT)) {
+                pr_info(
+                "There is little point creating a zram of greater than "
+                "twice the size of memory since we expect a 2:1 compression "
+                "ratio. Note that zram uses about 0.1%% of the size of "
+                "the disk when not in use so a huge zram is "
+                "wasteful.\n"
+                "\tMemory Size: %lu kB\n"
+                "\tSize you selected: %llu kB\n"
+                "Continuing anyway ...\n",
+                (totalram_pages << PAGE_SHIFT) >> 10, zram->disksize >> 10
+                );
+        }
+
+        /* zram devices sort of resembles non-rotational disks */
+        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
+
+        zram->meta = meta;
+        zram->init_done = 1;
+
+        pr_debug("Initialization done!\n");
+}
+
+static ssize_t disksize_store(struct device *dev,
+                struct device_attribute *attr, const char *buf, size_t len)
+{
+        u64 disksize;
+        struct zram_meta *meta;
+        struct zram *zram = dev_to_zram(dev);
+
+        disksize = memparse(buf, NULL);
+        if (!disksize)
+                return -EINVAL;
+
+        disksize = PAGE_ALIGN(disksize);
+        meta = zram_meta_alloc(disksize);
+        down_write(&zram->init_lock);
+        if (zram->init_done) {
+                up_write(&zram->init_lock);
+                zram_meta_free(meta);
+                pr_info("Cannot change disksize for initialized device\n");
+                return -EBUSY;
+        }
+
+        zram->disksize = disksize;
+        set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
+        zram_init_device(zram, meta);
+        up_write(&zram->init_lock);
+
+        return len;
+}
+
+static ssize_t reset_store(struct device *dev,
+                struct device_attribute *attr, const char *buf, size_t len)
+{
+        int ret;
+        unsigned short do_reset;
+        struct zram *zram;
+        struct block_device *bdev;
+
+        zram = dev_to_zram(dev);
+        bdev = bdget_disk(zram->disk, 0);
+
+        /* Do not reset an active device! */
+        if (bdev->bd_holders)
+                return -EBUSY;
+
+        ret = kstrtou16(buf, 10, &do_reset);
+        if (ret)
+                return ret;
+
+        if (!do_reset)
+                return -EINVAL;
+
+        /* Make sure all pending I/O is finished */
+        if (bdev)
+                fsync_bdev(bdev);
+
+        zram_reset_device(zram);
+        return len;
 }
 
 static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
@@ -401,30 +691,6 @@ out:
 }
 
 /*
- * Check if request is within bounds and aligned on zram logical blocks.
- */
-static inline int valid_io_request(struct zram *zram, struct bio *bio)
-{
-        u64 start, end, bound;
-
-        /* unaligned request */
-        if (unlikely(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
-                return 0;
-        if (unlikely(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
-                return 0;
-
-        start = bio->bi_sector;
-        end = start + (bio->bi_size >> SECTOR_SHIFT);
-        bound = zram->disksize >> SECTOR_SHIFT;
-        /* out of range range */
-        if (unlikely(start >= bound || end >= bound || start > end))
-                return 0;
-
-        /* I/O request is valid */
-        return 1;
-}
-
-/*
  * Handler function for all zram I/O requests.
  */
 static void zram_make_request(struct request_queue *queue, struct bio *bio)
@@ -450,122 +716,6 @@ error:
         bio_io_error(bio);
 }
 
-static void __zram_reset_device(struct zram *zram)
-{
-        size_t index;
-        struct zram_meta *meta;
-
-        if (!zram->init_done)
-                return;
-
-        meta = zram->meta;
-        zram->init_done = 0;
-
-        /* Free all pages that are still in this zram device */
-        for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
-                unsigned long handle = meta->table[index].handle;
-                if (!handle)
-                        continue;
-
-                zs_free(meta->mem_pool, handle);
-        }
-
-        zram_meta_free(zram->meta);
-        zram->meta = NULL;
-        /* Reset stats */
-        memset(&zram->stats, 0, sizeof(zram->stats));
-
-        zram->disksize = 0;
-        set_capacity(zram->disk, 0);
-}
-
-void zram_reset_device(struct zram *zram)
-{
-        down_write(&zram->init_lock);
-        __zram_reset_device(zram);
-        up_write(&zram->init_lock);
-}
-
-void zram_meta_free(struct zram_meta *meta)
-{
-        zs_destroy_pool(meta->mem_pool);
-        kfree(meta->compress_workmem);
-        free_pages((unsigned long)meta->compress_buffer, 1);
-        vfree(meta->table);
-        kfree(meta);
-}
-
-struct zram_meta *zram_meta_alloc(u64 disksize)
-{
-        size_t num_pages;
-        struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
-        if (!meta)
-                goto out;
-
-        meta->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
-        if (!meta->compress_workmem)
-                goto free_meta;
-
-        meta->compress_buffer =
-                (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
-        if (!meta->compress_buffer) {
-                pr_err("Error allocating compressor buffer space\n");
-                goto free_workmem;
-        }
-
-        num_pages = disksize >> PAGE_SHIFT;
-        meta->table = vzalloc(num_pages * sizeof(*meta->table));
-        if (!meta->table) {
-                pr_err("Error allocating zram address table\n");
-                goto free_buffer;
-        }
-
-        meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
-        if (!meta->mem_pool) {
-                pr_err("Error creating memory pool\n");
-                goto free_table;
-        }
-
-        return meta;
-
-free_table:
-        vfree(meta->table);
-free_buffer:
-        free_pages((unsigned long)meta->compress_buffer, 1);
-free_workmem:
-        kfree(meta->compress_workmem);
-free_meta:
-        kfree(meta);
-        meta = NULL;
-out:
-        return meta;
-}
-
-void zram_init_device(struct zram *zram, struct zram_meta *meta)
-{
-        if (zram->disksize > 2 * (totalram_pages << PAGE_SHIFT)) {
-                pr_info(
-                "There is little point creating a zram of greater than "
-                "twice the size of memory since we expect a 2:1 compression "
-                "ratio. Note that zram uses about 0.1%% of the size of "
-                "the disk when not in use so a huge zram is "
-                "wasteful.\n"
-                "\tMemory Size: %lu kB\n"
-                "\tSize you selected: %llu kB\n"
-                "Continuing anyway ...\n",
-                (totalram_pages << PAGE_SHIFT) >> 10, zram->disksize >> 10
-                );
-        }
-
-        /* zram devices sort of resembles non-rotational disks */
-        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
-
-        zram->meta = meta;
-        zram->init_done = 1;
-
-        pr_debug("Initialization done!\n");
-}
-
 static void zram_slot_free_notify(struct block_device *bdev,
                 unsigned long index)
 {
@@ -583,6 +733,38 @@ static const struct block_device_operations zram_devops = {
         .owner = THIS_MODULE
 };
 
+static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
+                disksize_show, disksize_store);
+static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
+static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
+static DEVICE_ATTR(num_reads, S_IRUGO, num_reads_show, NULL);
+static DEVICE_ATTR(num_writes, S_IRUGO, num_writes_show, NULL);
+static DEVICE_ATTR(invalid_io, S_IRUGO, invalid_io_show, NULL);
+static DEVICE_ATTR(notify_free, S_IRUGO, notify_free_show, NULL);
+static DEVICE_ATTR(zero_pages, S_IRUGO, zero_pages_show, NULL);
+static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
+static DEVICE_ATTR(compr_data_size, S_IRUGO, compr_data_size_show, NULL);
+static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);
+
+static struct attribute *zram_disk_attrs[] = {
+        &dev_attr_disksize.attr,
+        &dev_attr_initstate.attr,
+        &dev_attr_reset.attr,
+        &dev_attr_num_reads.attr,
+        &dev_attr_num_writes.attr,
+        &dev_attr_invalid_io.attr,
+        &dev_attr_notify_free.attr,
+        &dev_attr_zero_pages.attr,
+        &dev_attr_orig_data_size.attr,
+        &dev_attr_compr_data_size.attr,
+        &dev_attr_mem_used_total.attr,
+        NULL,
+};
+
+static struct attribute_group zram_disk_attr_group = {
+        .attrs = zram_disk_attrs,
+};
+
 static int create_device(struct zram *zram, int device_id)
 {
         int ret = -ENOMEM;
@@ -728,12 +910,12 @@ static void __exit zram_exit(void)
         pr_debug("Cleanup done!\n");
 }
 
-module_param(num_devices, uint, 0);
-MODULE_PARM_DESC(num_devices, "Number of zram devices");
-
 module_init(zram_init);
 module_exit(zram_exit);
 
+module_param(num_devices, uint, 0);
+MODULE_PARM_DESC(num_devices, "Number of zram devices");
+
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
 MODULE_DESCRIPTION("Compressed RAM Block Device");
diff --git a/drivers/staging/zram/zram_drv.h b/drivers/staging/zram/zram_drv.h
index 11b09fc25953..9e57bfb29b4f 100644
--- a/drivers/staging/zram/zram_drv.h
+++ b/drivers/staging/zram/zram_drv.h
@@ -112,14 +112,4 @@ struct zram {
 
         struct zram_stats stats;
 };
-
-#ifdef CONFIG_SYSFS
-extern struct attribute_group zram_disk_attr_group;
-#endif
-
-extern void zram_reset_device(struct zram *zram);
-extern struct zram_meta *zram_meta_alloc(u64 disksize);
-extern void zram_meta_free(struct zram_meta *meta);
-extern void zram_init_device(struct zram *zram, struct zram_meta *meta);
-
 #endif
diff --git a/drivers/staging/zram/zram_sysfs.c b/drivers/staging/zram/zram_sysfs.c
deleted file mode 100644
index 93a2f9cafd7c..000000000000
--- a/drivers/staging/zram/zram_sysfs.c
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * Compressed RAM block device
- *
- * Copyright (C) 2008, 2009, 2010 Nitin Gupta
- *
- * This code is released using a dual license strategy: BSD/GPL
- * You can choose the licence that better fits your requirements.
- *
- * Released under the terms of 3-clause BSD License
- * Released under the terms of GNU General Public License Version 2.0
- *
- * Project home: http://compcache.googlecode.com/
- */
-
-#include <linux/device.h>
-#include <linux/genhd.h>
-#include <linux/mm.h>
-#include <linux/kernel.h>
-
-#include "zram_drv.h"
-
-static inline struct zram *dev_to_zram(struct device *dev)
-{
-        return (struct zram *)dev_to_disk(dev)->private_data;
-}
-
-static ssize_t disksize_show(struct device *dev,
-                struct device_attribute *attr, char *buf)
-{
-        struct zram *zram = dev_to_zram(dev);
-
-        return sprintf(buf, "%llu\n", zram->disksize);
-}
-
-static ssize_t disksize_store(struct device *dev,
-                struct device_attribute *attr, const char *buf, size_t len)
-{
-        u64 disksize;
-        struct zram_meta *meta;
-        struct zram *zram = dev_to_zram(dev);
-
-        disksize = memparse(buf, NULL);
-        if (!disksize)
-                return -EINVAL;
-
-        disksize = PAGE_ALIGN(disksize);
-        meta = zram_meta_alloc(disksize);
-        down_write(&zram->init_lock);
-        if (zram->init_done) {
-                up_write(&zram->init_lock);
-                zram_meta_free(meta);
-                pr_info("Cannot change disksize for initialized device\n");
-                return -EBUSY;
-        }
-
-        zram->disksize = disksize;
-        set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
-        zram_init_device(zram, meta);
-        up_write(&zram->init_lock);
-
-        return len;
-}
-
-static ssize_t initstate_show(struct device *dev,
-                struct device_attribute *attr, char *buf)
-{
-        struct zram *zram = dev_to_zram(dev);
-
-        return sprintf(buf, "%u\n", zram->init_done);
-}
-
-static ssize_t reset_store(struct device *dev,
-                struct device_attribute *attr, const char *buf, size_t len)
-{
-        int ret;
-        unsigned short do_reset;
-        struct zram *zram;
-        struct block_device *bdev;
-
-        zram = dev_to_zram(dev);
-        bdev = bdget_disk(zram->disk, 0);
-
-        /* Do not reset an active device! */
-        if (bdev->bd_holders)
-                return -EBUSY;
-
-        ret = kstrtou16(buf, 10, &do_reset);
-        if (ret)
-                return ret;
-
-        if (!do_reset)
-                return -EINVAL;
-
-        /* Make sure all pending I/O is finished */
-        if (bdev)
-                fsync_bdev(bdev);
-
-        zram_reset_device(zram);
-        return len;
-}
-
-static ssize_t num_reads_show(struct device *dev,
-                struct device_attribute *attr, char *buf)
-{
-        struct zram *zram = dev_to_zram(dev);
-
-        return sprintf(buf, "%llu\n",
-                (u64)atomic64_read(&zram->stats.num_reads));
-}
-
-static ssize_t num_writes_show(struct device *dev,
-                struct device_attribute *attr, char *buf)
-{
-        struct zram *zram = dev_to_zram(dev);
-
-        return sprintf(buf, "%llu\n",
-                (u64)atomic64_read(&zram->stats.num_writes));
-}
-
-static ssize_t invalid_io_show(struct device *dev,
-                struct device_attribute *attr, char *buf)
-{
-        struct zram *zram = dev_to_zram(dev);
-
-        return sprintf(buf, "%llu\n",
-                (u64)atomic64_read(&zram->stats.invalid_io));
-}
-
-static ssize_t notify_free_show(struct device *dev,
-                struct device_attribute *attr, char *buf)
-{
-        struct zram *zram = dev_to_zram(dev);
-
-        return sprintf(buf, "%llu\n",
-                (u64)atomic64_read(&zram->stats.notify_free));
-}
-
-static ssize_t zero_pages_show(struct device *dev,
-                struct device_attribute *attr, char *buf)
-{
-        struct zram *zram = dev_to_zram(dev);
-
-        return sprintf(buf, "%u\n", zram->stats.pages_zero);
-}
-
-static ssize_t orig_data_size_show(struct device *dev,
-                struct device_attribute *attr, char *buf)
-{
-        struct zram *zram = dev_to_zram(dev);
-
-        return sprintf(buf, "%llu\n",
-                (u64)(zram->stats.pages_stored) << PAGE_SHIFT);
-}
-
-static ssize_t compr_data_size_show(struct device *dev,
-                struct device_attribute *attr, char *buf)
-{
-        struct zram *zram = dev_to_zram(dev);
-
-        return sprintf(buf, "%llu\n",
-                (u64)atomic64_read(&zram->stats.compr_size));
-}
-
-static ssize_t mem_used_total_show(struct device *dev,
-                struct device_attribute *attr, char *buf)
-{
-        u64 val = 0;
-        struct zram *zram = dev_to_zram(dev);
-        struct zram_meta *meta = zram->meta;
-
-        down_read(&zram->init_lock);
-        if (zram->init_done)
-                val = zs_get_total_size_bytes(meta->mem_pool);
-        up_read(&zram->init_lock);
-
-        return sprintf(buf, "%llu\n", val);
-}
-
-static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
-                disksize_show, disksize_store);
-static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
-static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
-static DEVICE_ATTR(num_reads, S_IRUGO, num_reads_show, NULL);
-static DEVICE_ATTR(num_writes, S_IRUGO, num_writes_show, NULL);
-static DEVICE_ATTR(invalid_io, S_IRUGO, invalid_io_show, NULL);
-static DEVICE_ATTR(notify_free, S_IRUGO, notify_free_show, NULL);
-static DEVICE_ATTR(zero_pages, S_IRUGO, zero_pages_show, NULL);
-static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
-static DEVICE_ATTR(compr_data_size, S_IRUGO, compr_data_size_show, NULL);
-static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);
-
-static struct attribute *zram_disk_attrs[] = {
-        &dev_attr_disksize.attr,
-        &dev_attr_initstate.attr,
-        &dev_attr_reset.attr,
-        &dev_attr_num_reads.attr,
-        &dev_attr_num_writes.attr,
-        &dev_attr_invalid_io.attr,
-        &dev_attr_notify_free.attr,
-        &dev_attr_zero_pages.attr,
-        &dev_attr_orig_data_size.attr,
-        &dev_attr_compr_data_size.attr,
-        &dev_attr_mem_used_total.attr,
-        NULL,
-};
-
-struct attribute_group zram_disk_attr_group = {
-        .attrs = zram_disk_attrs,
-};