summaryrefslogtreecommitdiffstats
path: root/drivers/block/zram
diff options
context:
space:
mode:
authorMinchan Kim <minchan@kernel.org>2019-01-08 18:22:53 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2019-01-08 20:15:10 -0500
commit1d69a3f8ae77e3dbfdc1356225cce5ea9c366aec (patch)
tree6e070457b08c54a2ed09d66c81ecc8227e7d4e9b /drivers/block/zram
parent3bd6e94bec122a951d462c239b47954cf5f36e33 (diff)
zram: idle writeback fixes and cleanup
This patch includes some fixes and cleanup for idle-page writeback. 1. writeback_limit interface Now the writeback_limit interface is rather confusing. For example, once the writeback limit budget is exhausted, the admin can see 0 from /sys/block/zramX/writeback_limit, which has the same semantics as a disabled writeback_limit at this moment. IOW, the admin cannot tell whether that zero came from a disabled writeback limit or an exhausted writeback limit. To make the interface clear, let's separate enabling of the writeback limit into another knob - /sys/block/zram0/writeback_limit_enable * before: while true : # to re-enable writeback limit once previous one is used up echo 0 > /sys/block/zram0/writeback_limit echo $((200<<20)) > /sys/block/zram0/writeback_limit .. .. # used up the writeback limit budget * new # To enable writeback limit, from the beginning, admin should # enable it. echo $((200<<20)) > /sys/block/zram0/writeback_limit echo 1 > /sys/block/zram0/writeback_limit_enable while true : echo $((200<<20)) > /sys/block/zram0/writeback_limit .. .. # used up the writeback limit budget It's much more straightforward. 2. fix the idle/huge writeback mode condition check The mode in writeback_store is not a bit operation any more, so there is no need to use bit operations. Furthermore, the current condition check is broken in that it writes back every page regardless of huge/idle. 3. clean up idle_store No need to use goto. [minchan@kernel.org: missed spin_lock_init] Link: http://lkml.kernel.org/r/20190103001601.GA255139@google.com Link: http://lkml.kernel.org/r/20181224033529.19450-1-minchan@kernel.org Signed-off-by: Minchan Kim <minchan@kernel.org> Suggested-by: John Dias <joaodias@google.com> Cc: Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com> Cc: John Dias <joaodias@google.com> Cc: Srinivas Paladugu <srnvs@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/block/zram')
-rw-r--r--drivers/block/zram/zram_drv.c90
-rw-r--r--drivers/block/zram/zram_drv.h5
2 files changed, 69 insertions, 26 deletions
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 33c5cc879f24..04ca65912638 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -316,11 +316,9 @@ static ssize_t idle_store(struct device *dev,
316 * See the comment in writeback_store. 316 * See the comment in writeback_store.
317 */ 317 */
318 zram_slot_lock(zram, index); 318 zram_slot_lock(zram, index);
319 if (!zram_allocated(zram, index) || 319 if (zram_allocated(zram, index) &&
320 zram_test_flag(zram, index, ZRAM_UNDER_WB)) 320 !zram_test_flag(zram, index, ZRAM_UNDER_WB))
321 goto next; 321 zram_set_flag(zram, index, ZRAM_IDLE);
322 zram_set_flag(zram, index, ZRAM_IDLE);
323next:
324 zram_slot_unlock(zram, index); 322 zram_slot_unlock(zram, index);
325 } 323 }
326 324
@@ -330,6 +328,41 @@ next:
330} 328}
331 329
332#ifdef CONFIG_ZRAM_WRITEBACK 330#ifdef CONFIG_ZRAM_WRITEBACK
331static ssize_t writeback_limit_enable_store(struct device *dev,
332 struct device_attribute *attr, const char *buf, size_t len)
333{
334 struct zram *zram = dev_to_zram(dev);
335 u64 val;
336 ssize_t ret = -EINVAL;
337
338 if (kstrtoull(buf, 10, &val))
339 return ret;
340
341 down_read(&zram->init_lock);
342 spin_lock(&zram->wb_limit_lock);
343 zram->wb_limit_enable = val;
344 spin_unlock(&zram->wb_limit_lock);
345 up_read(&zram->init_lock);
346 ret = len;
347
348 return ret;
349}
350
351static ssize_t writeback_limit_enable_show(struct device *dev,
352 struct device_attribute *attr, char *buf)
353{
354 bool val;
355 struct zram *zram = dev_to_zram(dev);
356
357 down_read(&zram->init_lock);
358 spin_lock(&zram->wb_limit_lock);
359 val = zram->wb_limit_enable;
360 spin_unlock(&zram->wb_limit_lock);
361 up_read(&zram->init_lock);
362
363 return scnprintf(buf, PAGE_SIZE, "%d\n", val);
364}
365
333static ssize_t writeback_limit_store(struct device *dev, 366static ssize_t writeback_limit_store(struct device *dev,
334 struct device_attribute *attr, const char *buf, size_t len) 367 struct device_attribute *attr, const char *buf, size_t len)
335{ 368{
@@ -341,9 +374,9 @@ static ssize_t writeback_limit_store(struct device *dev,
341 return ret; 374 return ret;
342 375
343 down_read(&zram->init_lock); 376 down_read(&zram->init_lock);
344 atomic64_set(&zram->stats.bd_wb_limit, val); 377 spin_lock(&zram->wb_limit_lock);
345 if (val == 0) 378 zram->bd_wb_limit = val;
346 zram->stop_writeback = false; 379 spin_unlock(&zram->wb_limit_lock);
347 up_read(&zram->init_lock); 380 up_read(&zram->init_lock);
348 ret = len; 381 ret = len;
349 382
@@ -357,7 +390,9 @@ static ssize_t writeback_limit_show(struct device *dev,
357 struct zram *zram = dev_to_zram(dev); 390 struct zram *zram = dev_to_zram(dev);
358 391
359 down_read(&zram->init_lock); 392 down_read(&zram->init_lock);
360 val = atomic64_read(&zram->stats.bd_wb_limit); 393 spin_lock(&zram->wb_limit_lock);
394 val = zram->bd_wb_limit;
395 spin_unlock(&zram->wb_limit_lock);
361 up_read(&zram->init_lock); 396 up_read(&zram->init_lock);
362 397
363 return scnprintf(buf, PAGE_SIZE, "%llu\n", val); 398 return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
@@ -588,8 +623,8 @@ static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec,
588 return 1; 623 return 1;
589} 624}
590 625
591#define HUGE_WRITEBACK 0x1 626#define HUGE_WRITEBACK 1
592#define IDLE_WRITEBACK 0x2 627#define IDLE_WRITEBACK 2
593 628
594static ssize_t writeback_store(struct device *dev, 629static ssize_t writeback_store(struct device *dev,
595 struct device_attribute *attr, const char *buf, size_t len) 630 struct device_attribute *attr, const char *buf, size_t len)
@@ -602,7 +637,7 @@ static ssize_t writeback_store(struct device *dev,
602 struct page *page; 637 struct page *page;
603 ssize_t ret, sz; 638 ssize_t ret, sz;
604 char mode_buf[8]; 639 char mode_buf[8];
605 unsigned long mode = -1UL; 640 int mode = -1;
606 unsigned long blk_idx = 0; 641 unsigned long blk_idx = 0;
607 642
608 sz = strscpy(mode_buf, buf, sizeof(mode_buf)); 643 sz = strscpy(mode_buf, buf, sizeof(mode_buf));
@@ -618,7 +653,7 @@ static ssize_t writeback_store(struct device *dev,
618 else if (!strcmp(mode_buf, "huge")) 653 else if (!strcmp(mode_buf, "huge"))
619 mode = HUGE_WRITEBACK; 654 mode = HUGE_WRITEBACK;
620 655
621 if (mode == -1UL) 656 if (mode == -1)
622 return -EINVAL; 657 return -EINVAL;
623 658
624 down_read(&zram->init_lock); 659 down_read(&zram->init_lock);
@@ -645,10 +680,13 @@ static ssize_t writeback_store(struct device *dev,
645 bvec.bv_len = PAGE_SIZE; 680 bvec.bv_len = PAGE_SIZE;
646 bvec.bv_offset = 0; 681 bvec.bv_offset = 0;
647 682
648 if (zram->stop_writeback) { 683 spin_lock(&zram->wb_limit_lock);
684 if (zram->wb_limit_enable && !zram->bd_wb_limit) {
685 spin_unlock(&zram->wb_limit_lock);
649 ret = -EIO; 686 ret = -EIO;
650 break; 687 break;
651 } 688 }
689 spin_unlock(&zram->wb_limit_lock);
652 690
653 if (!blk_idx) { 691 if (!blk_idx) {
654 blk_idx = alloc_block_bdev(zram); 692 blk_idx = alloc_block_bdev(zram);
@@ -667,10 +705,11 @@ static ssize_t writeback_store(struct device *dev,
667 zram_test_flag(zram, index, ZRAM_UNDER_WB)) 705 zram_test_flag(zram, index, ZRAM_UNDER_WB))
668 goto next; 706 goto next;
669 707
670 if ((mode & IDLE_WRITEBACK && 708 if (mode == IDLE_WRITEBACK &&
671 !zram_test_flag(zram, index, ZRAM_IDLE)) && 709 !zram_test_flag(zram, index, ZRAM_IDLE))
672 (mode & HUGE_WRITEBACK && 710 goto next;
673 !zram_test_flag(zram, index, ZRAM_HUGE))) 711 if (mode == HUGE_WRITEBACK &&
712 !zram_test_flag(zram, index, ZRAM_HUGE))
674 goto next; 713 goto next;
675 /* 714 /*
676 * Clearing ZRAM_UNDER_WB is duty of caller. 715 * Clearing ZRAM_UNDER_WB is duty of caller.
@@ -732,11 +771,10 @@ static ssize_t writeback_store(struct device *dev,
732 zram_set_element(zram, index, blk_idx); 771 zram_set_element(zram, index, blk_idx);
733 blk_idx = 0; 772 blk_idx = 0;
734 atomic64_inc(&zram->stats.pages_stored); 773 atomic64_inc(&zram->stats.pages_stored);
735 if (atomic64_add_unless(&zram->stats.bd_wb_limit, 774 spin_lock(&zram->wb_limit_lock);
736 -1 << (PAGE_SHIFT - 12), 0)) { 775 if (zram->wb_limit_enable && zram->bd_wb_limit > 0)
737 if (atomic64_read(&zram->stats.bd_wb_limit) == 0) 776 zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12);
738 zram->stop_writeback = true; 777 spin_unlock(&zram->wb_limit_lock);
739 }
740next: 778next:
741 zram_slot_unlock(zram, index); 779 zram_slot_unlock(zram, index);
742 } 780 }
@@ -1812,6 +1850,7 @@ static DEVICE_ATTR_RW(comp_algorithm);
1812static DEVICE_ATTR_RW(backing_dev); 1850static DEVICE_ATTR_RW(backing_dev);
1813static DEVICE_ATTR_WO(writeback); 1851static DEVICE_ATTR_WO(writeback);
1814static DEVICE_ATTR_RW(writeback_limit); 1852static DEVICE_ATTR_RW(writeback_limit);
1853static DEVICE_ATTR_RW(writeback_limit_enable);
1815#endif 1854#endif
1816 1855
1817static struct attribute *zram_disk_attrs[] = { 1856static struct attribute *zram_disk_attrs[] = {
@@ -1828,6 +1867,7 @@ static struct attribute *zram_disk_attrs[] = {
1828 &dev_attr_backing_dev.attr, 1867 &dev_attr_backing_dev.attr,
1829 &dev_attr_writeback.attr, 1868 &dev_attr_writeback.attr,
1830 &dev_attr_writeback_limit.attr, 1869 &dev_attr_writeback_limit.attr,
1870 &dev_attr_writeback_limit_enable.attr,
1831#endif 1871#endif
1832 &dev_attr_io_stat.attr, 1872 &dev_attr_io_stat.attr,
1833 &dev_attr_mm_stat.attr, 1873 &dev_attr_mm_stat.attr,
@@ -1867,7 +1907,9 @@ static int zram_add(void)
1867 device_id = ret; 1907 device_id = ret;
1868 1908
1869 init_rwsem(&zram->init_lock); 1909 init_rwsem(&zram->init_lock);
1870 1910#ifdef CONFIG_ZRAM_WRITEBACK
1911 spin_lock_init(&zram->wb_limit_lock);
1912#endif
1871 queue = blk_alloc_queue(GFP_KERNEL); 1913 queue = blk_alloc_queue(GFP_KERNEL);
1872 if (!queue) { 1914 if (!queue) {
1873 pr_err("Error allocating disk queue for device %d\n", 1915 pr_err("Error allocating disk queue for device %d\n",
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 4bd3afd15e83..f2fd46daa760 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -86,7 +86,6 @@ struct zram_stats {
86 atomic64_t bd_count; /* no. of pages in backing device */ 86 atomic64_t bd_count; /* no. of pages in backing device */
87 atomic64_t bd_reads; /* no. of reads from backing device */ 87 atomic64_t bd_reads; /* no. of reads from backing device */
88 atomic64_t bd_writes; /* no. of writes from backing device */ 88 atomic64_t bd_writes; /* no. of writes from backing device */
89 atomic64_t bd_wb_limit; /* writeback limit of backing device */
90#endif 89#endif
91}; 90};
92 91
@@ -114,8 +113,10 @@ struct zram {
114 */ 113 */
115 bool claim; /* Protected by bdev->bd_mutex */ 114 bool claim; /* Protected by bdev->bd_mutex */
116 struct file *backing_dev; 115 struct file *backing_dev;
117 bool stop_writeback;
118#ifdef CONFIG_ZRAM_WRITEBACK 116#ifdef CONFIG_ZRAM_WRITEBACK
117 spinlock_t wb_limit_lock;
118 bool wb_limit_enable;
119 u64 bd_wb_limit;
119 struct block_device *bdev; 120 struct block_device *bdev;
120 unsigned int old_block_size; 121 unsigned int old_block_size;
121 unsigned long *bitmap; 122 unsigned long *bitmap;