author:    Minchan Kim <minchan@kernel.org>  2017-09-06 19:20:03 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org>  2017-09-06 20:27:25 -0400
commit:    db8ffbd4e7634cc537c8d32e73e7ce0f06248645
tree:      cecb269b4da514c29db6671da523b57c824e56c7  /drivers/block/zram
parent:    ae85a8075c5b025b9d503554ddc480a346a24536
zram: write incompressible pages to backing device
This patch enables write IO to transfer data to the backing device. For
that, it implements a write_to_bdev() function, which creates a new bio
and chains it to the parent bio so that the parent completes
asynchronously. For the rw_page path, which has no parent bio, it
submits its own bio and handles IO completion via zram_page_end_io().

This patch also defines a new flag, ZRAM_WB, to mark written pages for
later read IO.
[xieyisheng1@huawei.com: fix typo in comment]
Link: http://lkml.kernel.org/r/1502707447-6944-2-git-send-email-xieyisheng1@huawei.com
Link: http://lkml.kernel.org/r/1498459987-24562-8-git-send-email-minchan@kernel.org
Signed-off-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Yisheng Xie <xieyisheng1@huawei.com>
Cc: Juneho Choi <juno.choi@lge.com>
Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
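
For readers new to the block layer, the chaining the changelog describes relies on bio_chain(): the parent bio's completion is deferred until the chained child completes. A minimal sketch of the pattern, not taken from the patch (submit_backing_write() and its parameters are illustrative; bio_alloc(), bio_chain(), and submit_bio() are the real API of this kernel):

	#include <linux/bio.h>
	#include <linux/blkdev.h>

	/*
	 * Illustrative sketch: submit a one-page write to a backing
	 * device, either chained to a parent bio (make_request path)
	 * or standalone with an end_io callback (rw_page path).
	 */
	static int submit_backing_write(struct block_device *bdev,
					struct page *page, sector_t sector,
					struct bio *parent,
					bio_end_io_t *end_io)
	{
		struct bio *bio = bio_alloc(GFP_ATOMIC, 1);

		if (!bio)
			return -ENOMEM;

		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;	/* later kernels use bio_set_dev() */
		bio_add_page(bio, page, PAGE_SIZE, 0);

		if (parent) {
			bio->bi_opf = parent->bi_opf;	/* inherit op and flags */
			bio_chain(bio, parent);	/* parent completes after child */
		} else {
			bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
			bio->bi_end_io = end_io;	/* complete the page ourselves */
		}

		submit_bio(bio);
		return 0;
	}

Chaining is what lets the make_request path return without waiting for the backing-device write, while the rw_page path, having no bio to chain to, must complete the page itself.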
Diffstat (limited to 'drivers/block/zram')

 -rw-r--r--  drivers/block/zram/zram_drv.c | 113
 -rw-r--r--  drivers/block/zram/zram_drv.h |   3

 2 files changed, 103 insertions(+), 13 deletions(-)
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 8975f75f113d..195b3372241c 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -445,9 +445,76 @@ static void put_entry_bdev(struct zram *zram, unsigned long entry)
 	WARN_ON_ONCE(!was_set);
 }
 
+void zram_page_end_io(struct bio *bio)
+{
+	struct page *page = bio->bi_io_vec[0].bv_page;
+
+	page_endio(page, op_is_write(bio_op(bio)),
+			blk_status_to_errno(bio->bi_status));
+	bio_put(bio);
+}
+
+static int write_to_bdev(struct zram *zram, struct bio_vec *bvec,
+				u32 index, struct bio *parent,
+				unsigned long *pentry)
+{
+	struct bio *bio;
+	unsigned long entry;
+
+	bio = bio_alloc(GFP_ATOMIC, 1);
+	if (!bio)
+		return -ENOMEM;
+
+	entry = get_entry_bdev(zram);
+	if (!entry) {
+		bio_put(bio);
+		return -ENOSPC;
+	}
+
+	bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
+	bio->bi_bdev = zram->bdev;
+	if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len,
+					bvec->bv_offset)) {
+		bio_put(bio);
+		put_entry_bdev(zram, entry);
+		return -EIO;
+	}
+
+	if (!parent) {
+		bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
+		bio->bi_end_io = zram_page_end_io;
+	} else {
+		bio->bi_opf = parent->bi_opf;
+		bio_chain(bio, parent);
+	}
+
+	submit_bio(bio);
+	*pentry = entry;
+
+	return 0;
+}
+
+static void zram_wb_clear(struct zram *zram, u32 index)
+{
+	unsigned long entry;
+
+	zram_clear_flag(zram, index, ZRAM_WB);
+	entry = zram_get_element(zram, index);
+	zram_set_element(zram, index, 0);
+	put_entry_bdev(zram, entry);
+}
+
 #else
 static bool zram_wb_enabled(struct zram *zram) { return false; }
 static inline void reset_bdev(struct zram *zram) {};
+static int write_to_bdev(struct zram *zram, struct bio_vec *bvec,
+				u32 index, struct bio *parent,
+				unsigned long *pentry)
+
+{
+	return -EIO;
+}
+static void zram_wb_clear(struct zram *zram, u32 index) {}
 #endif
 
 
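A small detail in write_to_bdev() above: get_entry_bdev() returns the index of a free page-sized slot in the backing-device bitmap, and the bi_sector assignment converts that slot index to 512-byte block-layer sectors. A worked sketch of the arithmetic, assuming 4 KiB pages (the helper name is ours, not the patch's):

	#include <linux/mm.h>	/* PAGE_SIZE */
	#include <linux/types.h>	/* sector_t */

	/*
	 * Illustrative helper: with PAGE_SIZE == 4096, PAGE_SIZE >> 9 == 8
	 * sectors per slot, so slot 0 starts at sector 0, slot 1 at
	 * sector 8, slot 3 at sector 24, and so on.
	 */
	static sector_t entry_to_sector(unsigned long entry)
	{
		return entry * (PAGE_SIZE >> 9);
	}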
@@ -672,7 +739,13 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize)
  */
 static void zram_free_page(struct zram *zram, size_t index)
 {
-	unsigned long handle = zram_get_handle(zram, index);
+	unsigned long handle;
+
+	if (zram_wb_enabled(zram) && zram_test_flag(zram, index, ZRAM_WB)) {
+		zram_wb_clear(zram, index);
+		atomic64_dec(&zram->stats.pages_stored);
+		return;
+	}
 
 	/*
 	 * No memory is allocated for same element filled pages.
@@ -686,6 +759,7 @@ static void zram_free_page(struct zram *zram, size_t index)
 		return;
 	}
 
+	handle = zram_get_handle(zram, index);
 	if (!handle)
 		return;
 
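After these hunks, zram_free_page() distinguishes three slot states before touching zsmalloc. A hypothetical summary helper (ours, for illustration only; zram_test_flag() is the driver's real accessor):

	/* Illustrative only: the three places a slot's data can live. */
	static const char *slot_state(struct zram *zram, u32 index)
	{
		if (zram_test_flag(zram, index, ZRAM_WB))
			return "backing device (element holds the bdev slot)";
		if (zram_test_flag(zram, index, ZRAM_SAME))
			return "same-filled (element holds the pattern, no allocation)";
		return "zsmalloc (handle holds the compressed object)";
	}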
@@ -770,7 +844,8 @@ out:
 	return ret;
 }
 
-static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index)
+static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
+				u32 index, struct bio *bio)
 {
 	int ret = 0;
 	unsigned long alloced_pages;
@@ -781,6 +856,7 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index)
 	struct page *page = bvec->bv_page;
 	unsigned long element = 0;
 	enum zram_pageflags flags = 0;
+	bool allow_wb = true;
 
 	mem = kmap_atomic(page);
 	if (page_same_filled(mem, &element)) {
@@ -805,8 +881,20 @@ compress_again:
 		return ret;
 	}
 
-	if (unlikely(comp_len > max_zpage_size))
+	if (unlikely(comp_len > max_zpage_size)) {
+		if (zram_wb_enabled(zram) && allow_wb) {
+			zcomp_stream_put(zram->comp);
+			ret = write_to_bdev(zram, bvec, index, bio, &element);
+			if (!ret) {
+				flags = ZRAM_WB;
+				ret = 1;
+				goto out;
+			}
+			allow_wb = false;
+			goto compress_again;
+		}
 		comp_len = PAGE_SIZE;
+	}
 
 	/*
 	 * handle allocation has 2 paths:
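Worth noting in the hunk above: when write_to_bdev() fails, allow_wb is cleared and control jumps back to compress_again, so the page is compressed a second time (the compression stream was already released before the writeback attempt) and, since it still exceeds max_zpage_size, is stored uncompressed with comp_len = PAGE_SIZE. Writeback is therefore attempted at most once per written page.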
@@ -866,10 +954,11 @@ out:
 	 */
 	zram_slot_lock(zram, index);
 	zram_free_page(zram, index);
-	if (flags == ZRAM_SAME) {
-		zram_set_flag(zram, index, ZRAM_SAME);
+
+	if (flags) {
+		zram_set_flag(zram, index, flags);
 		zram_set_element(zram, index, element);
 	} else {
 		zram_set_handle(zram, index, handle);
 		zram_set_obj_size(zram, index, comp_len);
 	}
@@ -881,7 +970,7 @@ out:
 }
 
 static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
-				u32 index, int offset)
+				u32 index, int offset, struct bio *bio)
 {
 	int ret;
 	struct page *page = NULL;
@@ -914,7 +1003,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
 		vec.bv_offset = 0;
 	}
 
-	ret = __zram_bvec_write(zram, &vec, index);
+	ret = __zram_bvec_write(zram, &vec, index, bio);
 out:
 	if (is_partial_io(bvec))
 		__free_page(page);
@@ -965,7 +1054,7 @@ static void zram_bio_discard(struct zram *zram, u32 index,
  * Returns 1 if IO request was successfully submitted.
  */
 static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
-			int offset, bool is_write)
+			int offset, bool is_write, struct bio *bio)
 {
 	unsigned long start_time = jiffies;
 	int rw_acct = is_write ? REQ_OP_WRITE : REQ_OP_READ;
@@ -980,7 +1069,7 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
 		flush_dcache_page(bvec->bv_page);
 	} else {
 		atomic64_inc(&zram->stats.num_writes);
-		ret = zram_bvec_write(zram, bvec, index, offset);
+		ret = zram_bvec_write(zram, bvec, index, offset, bio);
 	}
 
 	generic_end_io_acct(rw_acct, &zram->disk->part0, start_time);
@@ -1024,7 +1113,7 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
 			bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset,
 							unwritten);
 			if (zram_bvec_rw(zram, &bv, index, offset,
-					op_is_write(bio_op(bio))) < 0)
+					op_is_write(bio_op(bio)), bio) < 0)
 				goto out;
 
 			bv.bv_offset += bv.bv_len;
@@ -1098,7 +1187,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
 	bv.bv_len = PAGE_SIZE;
 	bv.bv_offset = 0;
 
-	ret = zram_bvec_rw(zram, &bv, index, offset, is_write);
+	ret = zram_bvec_rw(zram, &bv, index, offset, is_write, NULL);
 out:
 	/*
 	 * If I/O fails, just return error(ie, non-zero) without
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 707aec0a2681..31762db861e3 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -60,9 +60,10 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
 
 /* Flags for zram pages (table[page_no].value) */
 enum zram_pageflags {
-	/* Page consists entirely of zeros */
+	/* Page consists the same element */
 	ZRAM_SAME = ZRAM_FLAG_SHIFT,
 	ZRAM_ACCESS,	/* page is now accessed */
+	ZRAM_WB,	/* page is stored on backing_device */
 
 	__NR_ZRAM_PAGEFLAGS,
 };
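
This patch only sets ZRAM_WB; nothing here reads such a page back yet. A hypothetical read-side gate (the real one lands in a follow-up patch of this series) would look roughly like:

	/* Illustrative only: route reads for written-back slots to the bdev. */
	if (zram_wb_enabled(zram) && zram_test_flag(zram, index, ZRAM_WB))
		return read_from_bdev(zram, bvec,	/* hypothetical helper */
				zram_get_element(zram, index), parent);

In practice the writeback path only triggers once a backing device has been configured via the backing_dev sysfs attribute introduced earlier in this series; without one, zram_wb_enabled() is false and incompressible pages continue to be stored uncompressed in zsmalloc.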