author     Sergey Senozhatsky <sergey.senozhatsky@gmail.com>  2015-06-25 18:00:08 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>     2015-06-25 20:00:36 -0400
commit     522698d7cadb5d208429c934f673713b7a42e925 (patch)
tree       1dacbbe5ca237f1e36412c8ea907970dbec4448e /drivers/block/zram/zram_drv.c
parent     85508ec6cbc21645927b6ac05e3b2748119a3e23 (diff)
zram: reorganize code layout
This patch looks big, but it basically just moves code blocks around;
there are no functional changes.
Our current code layout looks like a sandwich.
For example:
a) between the read/write handlers, we have the update_used_max() helper
function:
static int zram_decompress_page
static int zram_bvec_read
static inline void update_used_max
static int zram_bvec_write
static int zram_bvec_rw
b) the RW request handlers __zram_make_request/zram_bio_discard are split
apart by the sysfs attr reset_store() function and the corresponding
zram_reset_device() handler:
static void zram_bio_discard
static void zram_reset_device
static ssize_t disksize_store
static ssize_t reset_store
static void __zram_make_request
c) we first have a bunch of sysfs read/store functions, then a number of
one-liners, then helper functions, RW functions, sysfs functions, helper
functions again, and so on.
Reorganize the layout so that related functions are grouped together (a
brief description follows; "cat zram_drv.c | grep static" gives the bigger
picture, and an illustrative output sketch appears after this list):
-- one-liners: zram_test_flag/etc.
-- helpers: is_partial_io/update_position/etc
-- sysfs attr show/store functions + ZRAM_ATTR_RO() generated stats
show() functions
exception: the reset and disksize store functions must come after the
meta functions, because we perform device create/destroy actions in these
sysfs handlers.
-- "mm" functions: meta get/put, meta alloc/free, page free
static inline bool zram_meta_get
static inline void zram_meta_put
static void zram_meta_free
static struct zram_meta *zram_meta_alloc
static void zram_free_page
-- a block of I/O functions
static int zram_decompress_page
static int zram_bvec_read
static int zram_bvec_write
static void zram_bio_discard
static int zram_bvec_rw
static void __zram_make_request
static void zram_make_request
static void zram_slot_free_notify
static int zram_rw_page
-- device control: add/remove/init/reset functions (the upcoming
zram-control class will sit here)
static int zram_reset_device
static ssize_t reset_store
static ssize_t disksize_store
static int zram_add
static void zram_remove
static int __init zram_init
static void __exit zram_exit
Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/block/zram/zram_drv.c')
-rw-r--r--  drivers/block/zram/zram_drv.c | 725
1 file changed, 362 insertions(+), 363 deletions(-)
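A side note on one of the moved helpers: update_used_max() (it appears
twice in the diff below, once removed and once re-added) is the classic
lockless maximum update built on a read/compare-exchange loop. A minimal
sketch of the same pattern in plain C11 atomics, outside the kernel and
with illustrative names that are not part of this patch:

    #include <stdatomic.h>

    /* Illustrative stand-in for zram->stats.max_used_pages. */
    static _Atomic unsigned long max_used_pages;

    /* Raise the recorded maximum to 'pages' if it is larger. */
    static void update_used_max(unsigned long pages)
    {
            unsigned long cur = atomic_load(&max_used_pages);

            while (pages > cur) {
                    /* On CAS failure 'cur' is reloaded with the latest
                     * value, so the loop re-checks the comparison
                     * instead of spinning forever. */
                    if (atomic_compare_exchange_weak(&max_used_pages,
                                                     &cur, pages))
                            break;
            }
    }

The kernel version does the same thing with atomic_long_read() and
atomic_long_cmpxchg(); the loop terminates as soon as the stored maximum
is already at least 'pages'.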
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index addb18d1b8d7..82bc2ff9bd81 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -70,33 +70,117 @@ static inline struct zram *dev_to_zram(struct device *dev)
 	return (struct zram *)dev_to_disk(dev)->private_data;
 }
 
-static ssize_t compact_store(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t len)
+/* flag operations needs meta->tb_lock */
+static int zram_test_flag(struct zram_meta *meta, u32 index,
+			enum zram_pageflags flag)
 {
-	unsigned long nr_migrated;
-	struct zram *zram = dev_to_zram(dev);
-	struct zram_meta *meta;
+	return meta->table[index].value & BIT(flag);
+}
 
-	down_read(&zram->init_lock);
-	if (!init_done(zram)) {
-		up_read(&zram->init_lock);
-		return -EINVAL;
-	}
+static void zram_set_flag(struct zram_meta *meta, u32 index,
+			enum zram_pageflags flag)
+{
+	meta->table[index].value |= BIT(flag);
+}
 
-	meta = zram->meta;
-	nr_migrated = zs_compact(meta->mem_pool);
-	atomic64_add(nr_migrated, &zram->stats.num_migrated);
-	up_read(&zram->init_lock);
+static void zram_clear_flag(struct zram_meta *meta, u32 index,
+			enum zram_pageflags flag)
+{
+	meta->table[index].value &= ~BIT(flag);
+}
 
-	return len;
+static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
+{
+	return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
 }
 
-static ssize_t disksize_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
+static void zram_set_obj_size(struct zram_meta *meta,
+					u32 index, size_t size)
 {
-	struct zram *zram = dev_to_zram(dev);
+	unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;
 
-	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
+	meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
+}
+
+static inline int is_partial_io(struct bio_vec *bvec)
+{
+	return bvec->bv_len != PAGE_SIZE;
+}
+
+/*
+ * Check if request is within bounds and aligned on zram logical blocks.
+ */
+static inline int valid_io_request(struct zram *zram,
+		sector_t start, unsigned int size)
+{
+	u64 end, bound;
+
+	/* unaligned request */
+	if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
+		return 0;
+	if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
+		return 0;
+
+	end = start + (size >> SECTOR_SHIFT);
+	bound = zram->disksize >> SECTOR_SHIFT;
+	/* out of range range */
+	if (unlikely(start >= bound || end > bound || start > end))
+		return 0;
+
+	/* I/O request is valid */
+	return 1;
+}
+
+static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
+{
+	if (*offset + bvec->bv_len >= PAGE_SIZE)
+		(*index)++;
+	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
+}
+
+static inline void update_used_max(struct zram *zram,
+					const unsigned long pages)
+{
+	unsigned long old_max, cur_max;
+
+	old_max = atomic_long_read(&zram->stats.max_used_pages);
+
+	do {
+		cur_max = old_max;
+		if (pages > cur_max)
+			old_max = atomic_long_cmpxchg(
+				&zram->stats.max_used_pages, cur_max, pages);
+	} while (old_max != cur_max);
+}
+
+static int page_zero_filled(void *ptr)
+{
+	unsigned int pos;
+	unsigned long *page;
+
+	page = (unsigned long *)ptr;
+
+	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
+		if (page[pos])
+			return 0;
+	}
+
+	return 1;
+}
+
+static void handle_zero_page(struct bio_vec *bvec)
+{
+	struct page *page = bvec->bv_page;
+	void *user_mem;
+
+	user_mem = kmap_atomic(page);
+	if (is_partial_io(bvec))
+		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
+	else
+		clear_page(user_mem);
+	kunmap_atomic(user_mem);
+
+	flush_dcache_page(page);
 }
 
 static ssize_t initstate_show(struct device *dev,
@@ -112,6 +196,14 @@ static ssize_t initstate_show(struct device *dev,
 	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
 }
 
+static ssize_t disksize_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct zram *zram = dev_to_zram(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
+}
+
 static ssize_t orig_data_size_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
@@ -139,19 +231,6 @@ static ssize_t mem_used_total_show(struct device *dev,
 	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
 }
 
-static ssize_t max_comp_streams_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	int val;
-	struct zram *zram = dev_to_zram(dev);
-
-	down_read(&zram->init_lock);
-	val = zram->max_comp_streams;
-	up_read(&zram->init_lock);
-
-	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
-}
-
 static ssize_t mem_limit_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
@@ -221,6 +300,19 @@ static ssize_t mem_used_max_store(struct device *dev,
 	return len;
 }
 
+static ssize_t max_comp_streams_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int val;
+	struct zram *zram = dev_to_zram(dev);
+
+	down_read(&zram->init_lock);
+	val = zram->max_comp_streams;
+	up_read(&zram->init_lock);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
 static ssize_t max_comp_streams_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t len)
 {
@@ -278,65 +370,95 @@ static ssize_t comp_algorithm_store(struct device *dev,
 	return len;
 }
 
-/* flag operations needs meta->tb_lock */
-static int zram_test_flag(struct zram_meta *meta, u32 index,
-			enum zram_pageflags flag)
+static ssize_t compact_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
 {
-	return meta->table[index].value & BIT(flag);
-}
+	unsigned long nr_migrated;
+	struct zram *zram = dev_to_zram(dev);
+	struct zram_meta *meta;
 
-static void zram_set_flag(struct zram_meta *meta, u32 index,
-			enum zram_pageflags flag)
-{
-	meta->table[index].value |= BIT(flag);
-}
+	down_read(&zram->init_lock);
+	if (!init_done(zram)) {
+		up_read(&zram->init_lock);
+		return -EINVAL;
+	}
 
-static void zram_clear_flag(struct zram_meta *meta, u32 index,
-			enum zram_pageflags flag)
-{
-	meta->table[index].value &= ~BIT(flag);
+	meta = zram->meta;
+	nr_migrated = zs_compact(meta->mem_pool);
+	atomic64_add(nr_migrated, &zram->stats.num_migrated);
+	up_read(&zram->init_lock);
+
+	return len;
 }
 
-static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
+static ssize_t io_stat_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
 {
-	return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
+	struct zram *zram = dev_to_zram(dev);
+	ssize_t ret;
+
+	down_read(&zram->init_lock);
+	ret = scnprintf(buf, PAGE_SIZE,
+			"%8llu %8llu %8llu %8llu\n",
+			(u64)atomic64_read(&zram->stats.failed_reads),
+			(u64)atomic64_read(&zram->stats.failed_writes),
+			(u64)atomic64_read(&zram->stats.invalid_io),
+			(u64)atomic64_read(&zram->stats.notify_free));
+	up_read(&zram->init_lock);
+
+	return ret;
 }
 
-static void zram_set_obj_size(struct zram_meta *meta,
-					u32 index, size_t size)
+static ssize_t mm_stat_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
 {
-	unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;
+	struct zram *zram = dev_to_zram(dev);
+	u64 orig_size, mem_used = 0;
+	long max_used;
+	ssize_t ret;
 
-	meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
+	down_read(&zram->init_lock);
+	if (init_done(zram))
+		mem_used = zs_get_total_pages(zram->meta->mem_pool);
+
+	orig_size = atomic64_read(&zram->stats.pages_stored);
+	max_used = atomic_long_read(&zram->stats.max_used_pages);
+
+	ret = scnprintf(buf, PAGE_SIZE,
+			"%8llu %8llu %8llu %8lu %8ld %8llu %8llu\n",
+			orig_size << PAGE_SHIFT,
+			(u64)atomic64_read(&zram->stats.compr_data_size),
+			mem_used << PAGE_SHIFT,
+			zram->limit_pages << PAGE_SHIFT,
+			max_used << PAGE_SHIFT,
+			(u64)atomic64_read(&zram->stats.zero_pages),
+			(u64)atomic64_read(&zram->stats.num_migrated));
+	up_read(&zram->init_lock);
+
+	return ret;
 }
 
-static inline int is_partial_io(struct bio_vec *bvec)
+static DEVICE_ATTR_RO(io_stat);
+static DEVICE_ATTR_RO(mm_stat);
+ZRAM_ATTR_RO(num_reads);
+ZRAM_ATTR_RO(num_writes);
+ZRAM_ATTR_RO(failed_reads);
+ZRAM_ATTR_RO(failed_writes);
+ZRAM_ATTR_RO(invalid_io);
+ZRAM_ATTR_RO(notify_free);
+ZRAM_ATTR_RO(zero_pages);
+ZRAM_ATTR_RO(compr_data_size);
+
+static inline bool zram_meta_get(struct zram *zram)
 {
-	return bvec->bv_len != PAGE_SIZE;
+	if (atomic_inc_not_zero(&zram->refcount))
+		return true;
+	return false;
 }
 
-/*
- * Check if request is within bounds and aligned on zram logical blocks.
- */
-static inline int valid_io_request(struct zram *zram,
-		sector_t start, unsigned int size)
+static inline void zram_meta_put(struct zram *zram)
 {
-	u64 end, bound;
-
-	/* unaligned request */
-	if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
-		return 0;
-	if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
-		return 0;
-
-	end = start + (size >> SECTOR_SHIFT);
-	bound = zram->disksize >> SECTOR_SHIFT;
-	/* out of range range */
-	if (unlikely(start >= bound || end > bound || start > end))
-		return 0;
-
-	/* I/O request is valid */
-	return 1;
+	atomic_dec(&zram->refcount);
 }
 
 static void zram_meta_free(struct zram_meta *meta, u64 disksize)
@@ -390,56 +512,6 @@ out_error:
 	return NULL;
 }
 
-static inline bool zram_meta_get(struct zram *zram)
-{
-	if (atomic_inc_not_zero(&zram->refcount))
-		return true;
-	return false;
-}
-
-static inline void zram_meta_put(struct zram *zram)
-{
-	atomic_dec(&zram->refcount);
-}
-
-static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
-{
-	if (*offset + bvec->bv_len >= PAGE_SIZE)
-		(*index)++;
-	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
-}
-
-static int page_zero_filled(void *ptr)
-{
-	unsigned int pos;
-	unsigned long *page;
-
-	page = (unsigned long *)ptr;
-
-	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
-		if (page[pos])
-			return 0;
-	}
-
-	return 1;
-}
-
-static void handle_zero_page(struct bio_vec *bvec)
-{
-	struct page *page = bvec->bv_page;
-	void *user_mem;
-
-	user_mem = kmap_atomic(page);
-	if (is_partial_io(bvec))
-		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
-	else
-		clear_page(user_mem);
-	kunmap_atomic(user_mem);
-
-	flush_dcache_page(page);
-}
-
-
 /*
  * To protect concurrent access to the same index entry,
  * caller should hold this table index entry's bit_spinlock to
@@ -557,21 +629,6 @@ out_cleanup:
 	return ret;
 }
 
-static inline void update_used_max(struct zram *zram,
-					const unsigned long pages)
-{
-	unsigned long old_max, cur_max;
-
-	old_max = atomic_long_read(&zram->stats.max_used_pages);
-
-	do {
-		cur_max = old_max;
-		if (pages > cur_max)
-			old_max = atomic_long_cmpxchg(
-				&zram->stats.max_used_pages, cur_max, pages);
-	} while (old_max != cur_max);
-}
-
 static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 			int offset)
 {
@@ -699,35 +756,6 @@ out:
 	return ret;
 }
 
-static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
-			int offset, int rw)
-{
-	unsigned long start_time = jiffies;
-	int ret;
-
-	generic_start_io_acct(rw, bvec->bv_len >> SECTOR_SHIFT,
-			&zram->disk->part0);
-
-	if (rw == READ) {
-		atomic64_inc(&zram->stats.num_reads);
-		ret = zram_bvec_read(zram, bvec, index, offset);
-	} else {
-		atomic64_inc(&zram->stats.num_writes);
-		ret = zram_bvec_write(zram, bvec, index, offset);
-	}
-
-	generic_end_io_acct(rw, &zram->disk->part0, start_time);
-
-	if (unlikely(ret)) {
-		if (rw == READ)
-			atomic64_inc(&zram->stats.failed_reads);
-		else
-			atomic64_inc(&zram->stats.failed_writes);
-	}
-
-	return ret;
-}
-
 /*
  * zram_bio_discard - handler on discard request
  * @index: physical block index in PAGE_SIZE units
@@ -767,151 +795,32 @@ static void zram_bio_discard(struct zram *zram, u32 index,
 	}
 }
 
-static void zram_reset_device(struct zram *zram)
-{
-	struct zram_meta *meta;
-	struct zcomp *comp;
-	u64 disksize;
-
-	down_write(&zram->init_lock);
-
-	zram->limit_pages = 0;
-
-	if (!init_done(zram)) {
-		up_write(&zram->init_lock);
-		return;
-	}
-
-	meta = zram->meta;
-	comp = zram->comp;
-	disksize = zram->disksize;
-	/*
-	 * Refcount will go down to 0 eventually and r/w handler
-	 * cannot handle further I/O so it will bail out by
-	 * check zram_meta_get.
-	 */
-	zram_meta_put(zram);
-	/*
-	 * We want to free zram_meta in process context to avoid
-	 * deadlock between reclaim path and any other locks.
-	 */
-	wait_event(zram->io_done, atomic_read(&zram->refcount) == 0);
-
-	/* Reset stats */
-	memset(&zram->stats, 0, sizeof(zram->stats));
-	zram->disksize = 0;
-	zram->max_comp_streams = 1;
-
-	set_capacity(zram->disk, 0);
-	part_stat_set_all(&zram->disk->part0, 0);
-
-	up_write(&zram->init_lock);
-	/* I/O operation under all of CPU are done so let's free */
-	zram_meta_free(meta, disksize);
-	zcomp_destroy(comp);
-}
-
-static ssize_t disksize_store(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t len)
-{
-	u64 disksize;
-	struct zcomp *comp;
-	struct zram_meta *meta;
-	struct zram *zram = dev_to_zram(dev);
-	int err;
-
-	disksize = memparse(buf, NULL);
-	if (!disksize)
-		return -EINVAL;
-
-	disksize = PAGE_ALIGN(disksize);
-	meta = zram_meta_alloc(zram->disk->first_minor, disksize);
-	if (!meta)
-		return -ENOMEM;
-
-	comp = zcomp_create(zram->compressor, zram->max_comp_streams);
-	if (IS_ERR(comp)) {
-		pr_info("Cannot initialise %s compressing backend\n",
-				zram->compressor);
-		err = PTR_ERR(comp);
-		goto out_free_meta;
-	}
-
-	down_write(&zram->init_lock);
-	if (init_done(zram)) {
-		pr_info("Cannot change disksize for initialized device\n");
-		err = -EBUSY;
-		goto out_destroy_comp;
-	}
-
-	init_waitqueue_head(&zram->io_done);
-	atomic_set(&zram->refcount, 1);
-	zram->meta = meta;
-	zram->comp = comp;
-	zram->disksize = disksize;
-	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
-	up_write(&zram->init_lock);
-
-	/*
-	 * Revalidate disk out of the init_lock to avoid lockdep splat.
-	 * It's okay because disk's capacity is protected by init_lock
-	 * so that revalidate_disk always sees up-to-date capacity.
-	 */
-	revalidate_disk(zram->disk);
-
-	return len;
-
-out_destroy_comp:
-	up_write(&zram->init_lock);
-	zcomp_destroy(comp);
-out_free_meta:
-	zram_meta_free(meta, disksize);
-	return err;
-}
-
-static ssize_t reset_store(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t len)
+static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
+			int offset, int rw)
 {
+	unsigned long start_time = jiffies;
 	int ret;
-	unsigned short do_reset;
-	struct zram *zram;
-	struct block_device *bdev;
 
-	zram = dev_to_zram(dev);
-	bdev = bdget_disk(zram->disk, 0);
-
-	if (!bdev)
-		return -ENOMEM;
+	generic_start_io_acct(rw, bvec->bv_len >> SECTOR_SHIFT,
+			&zram->disk->part0);
 
-	mutex_lock(&bdev->bd_mutex);
-	/* Do not reset an active device! */
-	if (bdev->bd_openers) {
-		ret = -EBUSY;
-		goto out;
+	if (rw == READ) {
+		atomic64_inc(&zram->stats.num_reads);
+		ret = zram_bvec_read(zram, bvec, index, offset);
+	} else {
+		atomic64_inc(&zram->stats.num_writes);
+		ret = zram_bvec_write(zram, bvec, index, offset);
 	}
 
-	ret = kstrtou16(buf, 10, &do_reset);
-	if (ret)
-		goto out;
+	generic_end_io_acct(rw, &zram->disk->part0, start_time);
 
-	if (!do_reset) {
-		ret = -EINVAL;
-		goto out;
+	if (unlikely(ret)) {
+		if (rw == READ)
+			atomic64_inc(&zram->stats.failed_reads);
+		else
+			atomic64_inc(&zram->stats.failed_writes);
 	}
 
-	/* Make sure all pending I/O is finished */
-	fsync_bdev(bdev);
-	zram_reset_device(zram);
-
-	mutex_unlock(&bdev->bd_mutex);
-	revalidate_disk(zram->disk);
-	bdput(bdev);
-
-	return len;
-
-out:
-	mutex_unlock(&bdev->bd_mutex);
-	bdput(bdev);
 	return ret;
 }
 
@@ -1051,80 +960,170 @@ out:
 	return err;
 }
 
-static const struct block_device_operations zram_devops = {
-	.swap_slot_free_notify = zram_slot_free_notify,
-	.rw_page = zram_rw_page,
-	.owner = THIS_MODULE
-};
+static void zram_reset_device(struct zram *zram)
+{
+	struct zram_meta *meta;
+	struct zcomp *comp;
+	u64 disksize;
 
-static DEVICE_ATTR_WO(compact);
-static DEVICE_ATTR_RW(disksize);
-static DEVICE_ATTR_RO(initstate);
-static DEVICE_ATTR_WO(reset);
-static DEVICE_ATTR_RO(orig_data_size);
-static DEVICE_ATTR_RO(mem_used_total);
-static DEVICE_ATTR_RW(mem_limit);
-static DEVICE_ATTR_RW(mem_used_max);
-static DEVICE_ATTR_RW(max_comp_streams);
-static DEVICE_ATTR_RW(comp_algorithm);
+	down_write(&zram->init_lock);
 
-static ssize_t io_stat_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
+	zram->limit_pages = 0;
+
+	if (!init_done(zram)) {
+		up_write(&zram->init_lock);
+		return;
+	}
+
+	meta = zram->meta;
+	comp = zram->comp;
+	disksize = zram->disksize;
+	/*
+	 * Refcount will go down to 0 eventually and r/w handler
+	 * cannot handle further I/O so it will bail out by
+	 * check zram_meta_get.
+	 */
+	zram_meta_put(zram);
+	/*
+	 * We want to free zram_meta in process context to avoid
+	 * deadlock between reclaim path and any other locks.
+	 */
+	wait_event(zram->io_done, atomic_read(&zram->refcount) == 0);
+
+	/* Reset stats */
+	memset(&zram->stats, 0, sizeof(zram->stats));
+	zram->disksize = 0;
+	zram->max_comp_streams = 1;
+
+	set_capacity(zram->disk, 0);
+	part_stat_set_all(&zram->disk->part0, 0);
+
+	up_write(&zram->init_lock);
+	/* I/O operation under all of CPU are done so let's free */
+	zram_meta_free(meta, disksize);
+	zcomp_destroy(comp);
+}
+
+static ssize_t disksize_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
 {
+	u64 disksize;
+	struct zcomp *comp;
+	struct zram_meta *meta;
 	struct zram *zram = dev_to_zram(dev);
-	ssize_t ret;
+	int err;
 
-	down_read(&zram->init_lock);
-	ret = scnprintf(buf, PAGE_SIZE,
-			"%8llu %8llu %8llu %8llu\n",
-			(u64)atomic64_read(&zram->stats.failed_reads),
-			(u64)atomic64_read(&zram->stats.failed_writes),
-			(u64)atomic64_read(&zram->stats.invalid_io),
-			(u64)atomic64_read(&zram->stats.notify_free));
-	up_read(&zram->init_lock);
+	disksize = memparse(buf, NULL);
+	if (!disksize)
+		return -EINVAL;
 
-	return ret;
+	disksize = PAGE_ALIGN(disksize);
+	meta = zram_meta_alloc(zram->disk->first_minor, disksize);
+	if (!meta)
+		return -ENOMEM;
+
+	comp = zcomp_create(zram->compressor, zram->max_comp_streams);
+	if (IS_ERR(comp)) {
+		pr_info("Cannot initialise %s compressing backend\n",
+				zram->compressor);
+		err = PTR_ERR(comp);
+		goto out_free_meta;
+	}
+
+	down_write(&zram->init_lock);
+	if (init_done(zram)) {
+		pr_info("Cannot change disksize for initialized device\n");
+		err = -EBUSY;
+		goto out_destroy_comp;
+	}
+
+	init_waitqueue_head(&zram->io_done);
+	atomic_set(&zram->refcount, 1);
+	zram->meta = meta;
+	zram->comp = comp;
+	zram->disksize = disksize;
+	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
+	up_write(&zram->init_lock);
+
+	/*
+	 * Revalidate disk out of the init_lock to avoid lockdep splat.
+	 * It's okay because disk's capacity is protected by init_lock
+	 * so that revalidate_disk always sees up-to-date capacity.
+	 */
+	revalidate_disk(zram->disk);
+
+	return len;
+
+out_destroy_comp:
+	up_write(&zram->init_lock);
+	zcomp_destroy(comp);
+out_free_meta:
+	zram_meta_free(meta, disksize);
+	return err;
 }
 
-static ssize_t mm_stat_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
+static ssize_t reset_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
 {
-	struct zram *zram = dev_to_zram(dev);
-	u64 orig_size, mem_used = 0;
-	long max_used;
-	ssize_t ret;
+	int ret;
+	unsigned short do_reset;
+	struct zram *zram;
+	struct block_device *bdev;
 
-	down_read(&zram->init_lock);
-	if (init_done(zram))
-		mem_used = zs_get_total_pages(zram->meta->mem_pool);
+	zram = dev_to_zram(dev);
+	bdev = bdget_disk(zram->disk, 0);
 
-	orig_size = atomic64_read(&zram->stats.pages_stored);
-	max_used = atomic_long_read(&zram->stats.max_used_pages);
+	if (!bdev)
+		return -ENOMEM;
 
-	ret = scnprintf(buf, PAGE_SIZE,
-			"%8llu %8llu %8llu %8lu %8ld %8llu %8llu\n",
-			orig_size << PAGE_SHIFT,
-			(u64)atomic64_read(&zram->stats.compr_data_size),
-			mem_used << PAGE_SHIFT,
-			zram->limit_pages << PAGE_SHIFT,
-			max_used << PAGE_SHIFT,
-			(u64)atomic64_read(&zram->stats.zero_pages),
-			(u64)atomic64_read(&zram->stats.num_migrated));
-	up_read(&zram->init_lock);
+	mutex_lock(&bdev->bd_mutex);
+	/* Do not reset an active device! */
+	if (bdev->bd_openers) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	ret = kstrtou16(buf, 10, &do_reset);
+	if (ret)
+		goto out;
+
+	if (!do_reset) {
+		ret = -EINVAL;
+		goto out;
+	}
 
+	/* Make sure all pending I/O is finished */
+	fsync_bdev(bdev);
+	zram_reset_device(zram);
+
+	mutex_unlock(&bdev->bd_mutex);
+	revalidate_disk(zram->disk);
+	bdput(bdev);
+
+	return len;
+
+out:
+	mutex_unlock(&bdev->bd_mutex);
+	bdput(bdev);
 	return ret;
 }
 
-static DEVICE_ATTR_RO(io_stat);
-static DEVICE_ATTR_RO(mm_stat);
-ZRAM_ATTR_RO(num_reads);
-ZRAM_ATTR_RO(num_writes);
-ZRAM_ATTR_RO(failed_reads);
-ZRAM_ATTR_RO(failed_writes);
-ZRAM_ATTR_RO(invalid_io);
-ZRAM_ATTR_RO(notify_free);
-ZRAM_ATTR_RO(zero_pages);
-ZRAM_ATTR_RO(compr_data_size);
+static const struct block_device_operations zram_devops = {
+	.swap_slot_free_notify = zram_slot_free_notify,
+	.rw_page = zram_rw_page,
+	.owner = THIS_MODULE
+};
+
+static DEVICE_ATTR_WO(compact);
+static DEVICE_ATTR_RW(disksize);
+static DEVICE_ATTR_RO(initstate);
+static DEVICE_ATTR_WO(reset);
+static DEVICE_ATTR_RO(orig_data_size);
+static DEVICE_ATTR_RO(mem_used_total);
+static DEVICE_ATTR_RW(mem_limit);
+static DEVICE_ATTR_RW(mem_used_max);
+static DEVICE_ATTR_RW(max_comp_streams);
+static DEVICE_ATTR_RW(comp_algorithm);
 
 static struct attribute *zram_disk_attrs[] = {
 	&dev_attr_disksize.attr,