| author | Minchan Kim <minchan@kernel.org> | 2017-05-03 17:55:41 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-05-03 18:52:11 -0400 |
| commit | 1f7319c7427503abe2d365683588827b80f5714e (patch) | |
| tree | b07894af05b1e0c8dc69b1cd5ab894013f595b38 /drivers/block | |
| parent | e86942c7b6c1e1dd5e539f3bf3cfb63799163048 (diff) | |
zram: partial IO refactoring
On architectures where PAGE_SIZE > 4K, zram supports partial IO. However,
the code that mixes the handling of normal and partial IO is messy and
error-prone to modify as new features are added to the IO handler functions,
so this patch cleans up zram's IO handling functions.
Link: http://lkml.kernel.org/r/1492052365-16169-3-git-send-email-minchan@kernel.org
Signed-off-by: Minchan Kim <minchan@kernel.org>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/block')
-rw-r--r-- | drivers/block/zram/zram_drv.c | 337 |
1 file changed, 184 insertions(+), 153 deletions(-)
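At a high level, the refactoring moves all partial-IO handling up into the bvec read/write entry points, which read-modify-write through a temporary page, so the lower-level compress/decompress helpers only ever see whole pages. The following is a minimal, self-contained userspace sketch of that read-modify-write pattern, not the kernel code; the names (`store`, `full_page_read`, `full_page_write`, `bvec_write`) and the array-backed store are invented for illustration only.

```c
/*
 * Sketch of the partial-IO pattern this patch consolidates:
 * full-page helpers never see offsets/lengths; a partial write is
 * handled at the bvec layer via a temporary "bounce" page.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Hypothetical backing store: one slot per page index. */
static unsigned char store[16][PAGE_SIZE];

/* Full-page helpers: whole pages only. */
static int full_page_read(unsigned char *dst, size_t index)
{
	memcpy(dst, store[index], PAGE_SIZE);
	return 0;
}

static int full_page_write(const unsigned char *src, size_t index)
{
	memcpy(store[index], src, PAGE_SIZE);
	return 0;
}

/* Partial write: read the full page, splice in the new data, write back. */
static int bvec_write(const unsigned char *buf, size_t index,
		      size_t offset, size_t len)
{
	unsigned char *page;
	int ret;

	if (offset == 0 && len == PAGE_SIZE)
		return full_page_write(buf, index);

	page = malloc(PAGE_SIZE);
	if (!page)
		return -1;

	ret = full_page_read(page, index);	/* read full page first */
	if (!ret) {
		memcpy(page + offset, buf, len);/* apply the partial update */
		ret = full_page_write(page, index);
	}
	free(page);
	return ret;
}

int main(void)
{
	unsigned char out[PAGE_SIZE];

	bvec_write((const unsigned char *)"hello", 0, 100, 5);
	full_page_read(out, 0);
	printf("%.5s\n", (const char *)out + 100);	/* prints "hello" */
	return 0;
}
```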
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 8a38ff0c16a3..47e15fec3cd0 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -45,6 +45,8 @@ static const char *default_compressor = "lzo";
 /* Module params (documentation at end) */
 static unsigned int num_devices = 1;
 
+static void zram_free_page(struct zram *zram, size_t index);
+
 static inline bool init_done(struct zram *zram)
 {
         return zram->disksize;
@@ -98,10 +100,17 @@ static void zram_set_obj_size(struct zram_meta *meta,
         meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
 }
 
+#if PAGE_SIZE != 4096
 static inline bool is_partial_io(struct bio_vec *bvec)
 {
         return bvec->bv_len != PAGE_SIZE;
 }
+#else
+static inline bool is_partial_io(struct bio_vec *bvec)
+{
+        return false;
+}
+#endif
 
 static void zram_revalidate_disk(struct zram *zram)
 {
@@ -189,18 +198,6 @@ static bool page_same_filled(void *ptr, unsigned long *element)
         return true;
 }
 
-static void handle_same_page(struct bio_vec *bvec, unsigned long element)
-{
-        struct page *page = bvec->bv_page;
-        void *user_mem;
-
-        user_mem = kmap_atomic(page);
-        zram_fill_page(user_mem + bvec->bv_offset, bvec->bv_len, element);
-        kunmap_atomic(user_mem);
-
-        flush_dcache_page(page);
-}
-
 static ssize_t initstate_show(struct device *dev,
                 struct device_attribute *attr, char *buf)
 {
@@ -416,6 +413,53 @@ static DEVICE_ATTR_RO(io_stat);
 static DEVICE_ATTR_RO(mm_stat);
 static DEVICE_ATTR_RO(debug_stat);
 
+static bool zram_same_page_read(struct zram *zram, u32 index,
+                struct page *page,
+                unsigned int offset, unsigned int len)
+{
+        struct zram_meta *meta = zram->meta;
+
+        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+        if (unlikely(!meta->table[index].handle) ||
+                        zram_test_flag(meta, index, ZRAM_SAME)) {
+                void *mem;
+
+                bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+                mem = kmap_atomic(page);
+                zram_fill_page(mem + offset, len, meta->table[index].element);
+                kunmap_atomic(mem);
+                return true;
+        }
+        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+
+        return false;
+}
+
+static bool zram_same_page_write(struct zram *zram, u32 index,
+                struct page *page)
+{
+        unsigned long element;
+        void *mem = kmap_atomic(page);
+
+        if (page_same_filled(mem, &element)) {
+                struct zram_meta *meta = zram->meta;
+
+                kunmap_atomic(mem);
+                /* Free memory associated with this sector now. */
+                bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+                zram_free_page(zram, index);
+                zram_set_flag(meta, index, ZRAM_SAME);
+                zram_set_element(meta, index, element);
+                bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+
+                atomic64_inc(&zram->stats.same_pages);
+                return true;
+        }
+        kunmap_atomic(mem);
+
+        return false;
+}
+
 static void zram_meta_free(struct zram_meta *meta, u64 disksize)
 {
         size_t num_pages = disksize >> PAGE_SHIFT;
@@ -502,169 +546,103 @@ static void zram_free_page(struct zram *zram, size_t index)
         zram_set_obj_size(meta, index, 0);
 }
 
-static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
+static int zram_decompress_page(struct zram *zram, struct page *page, u32 index)
 {
-        int ret = 0;
-        unsigned char *cmem;
-        struct zram_meta *meta = zram->meta;
+        int ret;
         unsigned long handle;
         unsigned int size;
+        void *src, *dst;
+        struct zram_meta *meta = zram->meta;
+
+        if (zram_same_page_read(zram, index, page, 0, PAGE_SIZE))
+                return 0;
 
         bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
         handle = meta->table[index].handle;
         size = zram_get_obj_size(meta, index);
 
-        if (!handle || zram_test_flag(meta, index, ZRAM_SAME)) {
-                bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
-                zram_fill_page(mem, PAGE_SIZE, meta->table[index].element);
-                return 0;
-        }
-
-        cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
+        src = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
         if (size == PAGE_SIZE) {
-                memcpy(mem, cmem, PAGE_SIZE);
+                dst = kmap_atomic(page);
+                memcpy(dst, src, PAGE_SIZE);
+                kunmap_atomic(dst);
+                ret = 0;
         } else {
                 struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);
 
-                ret = zcomp_decompress(zstrm, cmem, size, mem);
+                dst = kmap_atomic(page);
+                ret = zcomp_decompress(zstrm, src, size, dst);
+                kunmap_atomic(dst);
                 zcomp_stream_put(zram->comp);
         }
         zs_unmap_object(meta->mem_pool, handle);
         bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 
         /* Should NEVER happen. Return bio error if it does. */
-        if (unlikely(ret)) {
+        if (unlikely(ret))
                 pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
-                return ret;
-        }
 
-        return 0;
+        return ret;
 }
 
 static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
                 u32 index, int offset)
 {
         int ret;
         struct page *page;
-        unsigned char *user_mem, *uncmem = NULL;
-        struct zram_meta *meta = zram->meta;
-        page = bvec->bv_page;
 
-        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
-        if (unlikely(!meta->table[index].handle) ||
-                        zram_test_flag(meta, index, ZRAM_SAME)) {
-                bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
-                handle_same_page(bvec, meta->table[index].element);
-                return 0;
+        page = bvec->bv_page;
+        if (is_partial_io(bvec)) {
+                /* Use a temporary buffer to decompress the page */
+                page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
+                if (!page)
+                        return -ENOMEM;
         }
-        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 
-        if (is_partial_io(bvec))
-                /* Use a temporary buffer to decompress the page */
-                uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
+        ret = zram_decompress_page(zram, page, index);
+        if (unlikely(ret))
+                goto out;
 
-        user_mem = kmap_atomic(page);
-        if (!is_partial_io(bvec))
-                uncmem = user_mem;
+        if (is_partial_io(bvec)) {
+                void *dst = kmap_atomic(bvec->bv_page);
+                void *src = kmap_atomic(page);
 
-        if (!uncmem) {
-                pr_err("Unable to allocate temp memory\n");
-                ret = -ENOMEM;
-                goto out_cleanup;
+                memcpy(dst + bvec->bv_offset, src + offset, bvec->bv_len);
+                kunmap_atomic(src);
+                kunmap_atomic(dst);
         }
-
-        ret = zram_decompress_page(zram, uncmem, index);
-        /* Should NEVER happen. Return bio error if it does. */
-        if (unlikely(ret))
-                goto out_cleanup;
-
+out:
         if (is_partial_io(bvec))
-                memcpy(user_mem + bvec->bv_offset, uncmem + offset,
-                                bvec->bv_len);
+                __free_page(page);
 
-        flush_dcache_page(page);
-        ret = 0;
-out_cleanup:
-        kunmap_atomic(user_mem);
-        if (is_partial_io(bvec))
-                kfree(uncmem);
         return ret;
 }
 
-static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
-                int offset)
+static int zram_compress(struct zram *zram, struct zcomp_strm **zstrm,
+                struct page *page,
+                unsigned long *out_handle, unsigned int *out_comp_len)
 {
-        int ret = 0;
-        unsigned int clen;
+        int ret;
+        unsigned int comp_len;
+        void *src;
+        unsigned long alloced_pages;
         unsigned long handle = 0;
-        struct page *page;
-        unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
         struct zram_meta *meta = zram->meta;
-        struct zcomp_strm *zstrm = NULL;
-        unsigned long alloced_pages;
-        unsigned long element;
-
-        page = bvec->bv_page;
-        if (is_partial_io(bvec)) {
-                /*
-                 * This is a partial IO. We need to read the full page
-                 * before to write the changes.
-                 */
-                uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
-                if (!uncmem) {
-                        ret = -ENOMEM;
-                        goto out;
-                }
-                ret = zram_decompress_page(zram, uncmem, index);
-                if (ret)
-                        goto out;
-        }
 
 compress_again:
-        user_mem = kmap_atomic(page);
-        if (is_partial_io(bvec)) {
-                memcpy(uncmem + offset, user_mem + bvec->bv_offset,
-                                bvec->bv_len);
-                kunmap_atomic(user_mem);
-                user_mem = NULL;
-        } else {
-                uncmem = user_mem;
-        }
-
-        if (page_same_filled(uncmem, &element)) {
-                if (user_mem)
-                        kunmap_atomic(user_mem);
-                /* Free memory associated with this sector now. */
-                bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
-                zram_free_page(zram, index);
-                zram_set_flag(meta, index, ZRAM_SAME);
-                zram_set_element(meta, index, element);
-                bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
-
-                atomic64_inc(&zram->stats.same_pages);
-                ret = 0;
-                goto out;
-        }
-
-        zstrm = zcomp_stream_get(zram->comp);
-        ret = zcomp_compress(zstrm, uncmem, &clen);
-        if (!is_partial_io(bvec)) {
-                kunmap_atomic(user_mem);
-                user_mem = NULL;
-                uncmem = NULL;
-        }
+        src = kmap_atomic(page);
+        ret = zcomp_compress(*zstrm, src, &comp_len);
+        kunmap_atomic(src);
 
         if (unlikely(ret)) {
                 pr_err("Compression failed! err=%d\n", ret);
-                goto out;
+                if (handle)
+                        zs_free(meta->mem_pool, handle);
+                return ret;
         }
 
-        src = zstrm->buffer;
-        if (unlikely(clen > max_zpage_size)) {
-                clen = PAGE_SIZE;
-                if (is_partial_io(bvec))
-                        src = uncmem;
-        }
+        if (unlikely(comp_len > max_zpage_size))
+                comp_len = PAGE_SIZE;
 
         /*
          * handle allocation has 2 paths:
@@ -680,27 +658,21 @@ compress_again:
          * from the slow path and handle has already been allocated.
          */
         if (!handle)
-                handle = zs_malloc(meta->mem_pool, clen,
+                handle = zs_malloc(meta->mem_pool, comp_len,
                                 __GFP_KSWAPD_RECLAIM |
                                 __GFP_NOWARN |
                                 __GFP_HIGHMEM |
                                 __GFP_MOVABLE);
         if (!handle) {
                 zcomp_stream_put(zram->comp);
-                zstrm = NULL;
-
                 atomic64_inc(&zram->stats.writestall);
-
-                handle = zs_malloc(meta->mem_pool, clen,
+                handle = zs_malloc(meta->mem_pool, comp_len,
                                 GFP_NOIO | __GFP_HIGHMEM |
                                 __GFP_MOVABLE);
+                *zstrm = zcomp_stream_get(zram->comp);
                 if (handle)
                         goto compress_again;
-
-                pr_err("Error allocating memory for compressed page: %u, size=%u\n",
-                                index, clen);
-                ret = -ENOMEM;
-                goto out;
+                return -ENOMEM;
         }
 
         alloced_pages = zs_get_total_pages(meta->mem_pool);
@@ -708,22 +680,45 @@ compress_again:
 
         if (zram->limit_pages && alloced_pages > zram->limit_pages) {
                 zs_free(meta->mem_pool, handle);
-                ret = -ENOMEM;
-                goto out;
+                return -ENOMEM;
         }
 
-        cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);
+        *out_handle = handle;
+        *out_comp_len = comp_len;
+        return 0;
+}
 
-        if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
+static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index)
+{
+        int ret;
+        unsigned long handle;
+        unsigned int comp_len;
+        void *src, *dst;
+        struct zcomp_strm *zstrm;
+        struct zram_meta *meta = zram->meta;
+        struct page *page = bvec->bv_page;
+
+        if (zram_same_page_write(zram, index, page))
+                return 0;
+
+        zstrm = zcomp_stream_get(zram->comp);
+        ret = zram_compress(zram, &zstrm, page, &handle, &comp_len);
+        if (ret) {
+                zcomp_stream_put(zram->comp);
+                return ret;
+        }
+
+
+        dst = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);
+
+        src = zstrm->buffer;
+        if (comp_len == PAGE_SIZE)
                 src = kmap_atomic(page);
-                memcpy(cmem, src, PAGE_SIZE);
+        memcpy(dst, src, comp_len);
+        if (comp_len == PAGE_SIZE)
                 kunmap_atomic(src);
-        } else {
-                memcpy(cmem, src, clen);
-        }
 
         zcomp_stream_put(zram->comp);
-        zstrm = NULL;
         zs_unmap_object(meta->mem_pool, handle);
 
         /*
@@ -732,19 +727,54 @@ compress_again:
          */
         bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
         zram_free_page(zram, index);
-
         meta->table[index].handle = handle;
-        zram_set_obj_size(meta, index, clen);
+        zram_set_obj_size(meta, index, comp_len);
         bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 
         /* Update stats */
-        atomic64_add(clen, &zram->stats.compr_data_size);
+        atomic64_add(comp_len, &zram->stats.compr_data_size);
         atomic64_inc(&zram->stats.pages_stored);
+        return 0;
+}
+
+static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
+                u32 index, int offset)
+{
+        int ret;
+        struct page *page = NULL;
+        void *src;
+        struct bio_vec vec;
+
+        vec = *bvec;
+        if (is_partial_io(bvec)) {
+                void *dst;
+                /*
+                 * This is a partial IO. We need to read the full page
+                 * before to write the changes.
+                 */
+                page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
+                if (!page)
+                        return -ENOMEM;
+
+                ret = zram_decompress_page(zram, page, index);
+                if (ret)
+                        goto out;
+
+                src = kmap_atomic(bvec->bv_page);
+                dst = kmap_atomic(page);
+                memcpy(dst + offset, src + bvec->bv_offset, bvec->bv_len);
+                kunmap_atomic(dst);
+                kunmap_atomic(src);
+
+                vec.bv_page = page;
+                vec.bv_len = PAGE_SIZE;
+                vec.bv_offset = 0;
+        }
+
+        ret = __zram_bvec_write(zram, &vec, index);
 out:
-        if (zstrm)
-                zcomp_stream_put(zram->comp);
         if (is_partial_io(bvec))
-                kfree(uncmem);
+                __free_page(page);
         return ret;
 }
 
@@ -800,6 +830,7 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
         if (!is_write) {
                 atomic64_inc(&zram->stats.num_reads);
                 ret = zram_bvec_read(zram, bvec, index, offset);
+                flush_dcache_page(bvec->bv_page);
         } else {
                 atomic64_inc(&zram->stats.num_writes);
                 ret = zram_bvec_write(zram, bvec, index, offset);