diff options
author | Sergey Senozhatsky <sergey.senozhatsky@gmail.com> | 2014-04-07 18:38:01 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-04-07 19:35:59 -0400 |
commit | be257c61306750d11c20d2ac567bf63304c696a3 (patch) | |
tree | d434e1a45ce4a2aff0d81da117c0fca5dcb82c58 /drivers/block | |
parent | be2d1d56c82d8cf20e6c77515eb499f8e86eb5be (diff) |
zram: do not pass rw argument to __zram_make_request()
Do not pass rw argument down the __zram_make_request() -> zram_bvec_rw()
chain, decode it in zram_bvec_rw() instead. Besides, this is the place
where we distinguish READ and WRITE bio data directions, so account zram
RW stats here, instead of in __zram_make_request(). This also allows
accounting for the real number of zram READ/WRITE operations, not just
requests (a single RW request may cause a number of zram RW ops with
separate locking, compression/decompression, etc).
Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Acked-by: Jerome Marchand <jmarchan@redhat.com>
Cc: Nitin Gupta <ngupta@vflare.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/block')
-rw-r--r-- | drivers/block/zram/zram_drv.c | 30 |
1 files changed, 12 insertions, 18 deletions
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index b3e6f072c19b..3f0d6de36f74 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c | |||
@@ -533,14 +533,18 @@ out: | |||
533 | } | 533 | } |
534 | 534 | ||
535 | static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index, | 535 | static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index, |
536 | int offset, struct bio *bio, int rw) | 536 | int offset, struct bio *bio) |
537 | { | 537 | { |
538 | int ret; | 538 | int ret; |
539 | int rw = bio_data_dir(bio); | ||
539 | 540 | ||
540 | if (rw == READ) | 541 | if (rw == READ) { |
542 | atomic64_inc(&zram->stats.num_reads); | ||
541 | ret = zram_bvec_read(zram, bvec, index, offset, bio); | 543 | ret = zram_bvec_read(zram, bvec, index, offset, bio); |
542 | else | 544 | } else { |
545 | atomic64_inc(&zram->stats.num_writes); | ||
543 | ret = zram_bvec_write(zram, bvec, index, offset); | 546 | ret = zram_bvec_write(zram, bvec, index, offset); |
547 | } | ||
544 | 548 | ||
545 | return ret; | 549 | return ret; |
546 | } | 550 | } |
@@ -672,22 +676,13 @@ out: | |||
672 | return ret; | 676 | return ret; |
673 | } | 677 | } |
674 | 678 | ||
675 | static void __zram_make_request(struct zram *zram, struct bio *bio, int rw) | 679 | static void __zram_make_request(struct zram *zram, struct bio *bio) |
676 | { | 680 | { |
677 | int offset; | 681 | int offset; |
678 | u32 index; | 682 | u32 index; |
679 | struct bio_vec bvec; | 683 | struct bio_vec bvec; |
680 | struct bvec_iter iter; | 684 | struct bvec_iter iter; |
681 | 685 | ||
682 | switch (rw) { | ||
683 | case READ: | ||
684 | atomic64_inc(&zram->stats.num_reads); | ||
685 | break; | ||
686 | case WRITE: | ||
687 | atomic64_inc(&zram->stats.num_writes); | ||
688 | break; | ||
689 | } | ||
690 | |||
691 | index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT; | 686 | index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT; |
692 | offset = (bio->bi_iter.bi_sector & | 687 | offset = (bio->bi_iter.bi_sector & |
693 | (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT; | 688 | (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT; |
@@ -706,16 +701,15 @@ static void __zram_make_request(struct zram *zram, struct bio *bio, int rw) | |||
706 | bv.bv_len = max_transfer_size; | 701 | bv.bv_len = max_transfer_size; |
707 | bv.bv_offset = bvec.bv_offset; | 702 | bv.bv_offset = bvec.bv_offset; |
708 | 703 | ||
709 | if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0) | 704 | if (zram_bvec_rw(zram, &bv, index, offset, bio) < 0) |
710 | goto out; | 705 | goto out; |
711 | 706 | ||
712 | bv.bv_len = bvec.bv_len - max_transfer_size; | 707 | bv.bv_len = bvec.bv_len - max_transfer_size; |
713 | bv.bv_offset += max_transfer_size; | 708 | bv.bv_offset += max_transfer_size; |
714 | if (zram_bvec_rw(zram, &bv, index+1, 0, bio, rw) < 0) | 709 | if (zram_bvec_rw(zram, &bv, index + 1, 0, bio) < 0) |
715 | goto out; | 710 | goto out; |
716 | } else | 711 | } else |
717 | if (zram_bvec_rw(zram, &bvec, index, offset, bio, rw) | 712 | if (zram_bvec_rw(zram, &bvec, index, offset, bio) < 0) |
718 | < 0) | ||
719 | goto out; | 713 | goto out; |
720 | 714 | ||
721 | update_position(&index, &offset, &bvec); | 715 | update_position(&index, &offset, &bvec); |
@@ -745,7 +739,7 @@ static void zram_make_request(struct request_queue *queue, struct bio *bio) | |||
745 | goto error; | 739 | goto error; |
746 | } | 740 | } |
747 | 741 | ||
748 | __zram_make_request(zram, bio, bio_data_dir(bio)); | 742 | __zram_make_request(zram, bio); |
749 | up_read(&zram->init_lock); | 743 | up_read(&zram->init_lock); |
750 | 744 | ||
751 | return; | 745 | return; |