| author | Minchan Kim <minchan@kernel.org> | 2017-05-03 17:55:44 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-05-03 18:52:11 -0400 |
| commit | 86c49814d449ebc51c7d455ac8e3d17b9fa702eb (patch) | |
| tree | 6a4a9bec78dc7596b76ae2bc825d21eb565c3d62 | |
| parent | 1f7319c7427503abe2d365683588827b80f5714e (diff) | |
zram: use zram_slot_lock instead of raw bit_spin_lock op
With this clean-up phase, I want to use zram's wrapper functions to lock
table access, which is more consistent with zram's other functions.
Link: http://lkml.kernel.org/r/1492052365-16169-4-git-send-email-minchan@kernel.org
Signed-off-by: Minchan Kim <minchan@kernel.org>
Reviewed-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- drivers/block/zram/zram_drv.c | 41 +++++++++++++++++++++++++++++++--------------
1 file changed, 27 insertions(+), 14 deletions(-)
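The whole patch is the pair of wrapper helpers plus the mechanical conversion of their call sites. For readers outside the kernel tree, the sketch below emulates the bit-spinlock pattern that zram_slot_lock() wraps, using portable C11 atomics instead of the kernel's bit_spin_lock(); LOCK_BIT, struct slot, and main() are illustrative assumptions, not zram's actual definitions.

```c
/*
 * Standalone sketch of the bit-spinlock pattern zram_slot_lock() wraps.
 * Kernel code simply calls bit_spin_lock(ZRAM_ACCESS, &word); this
 * emulation uses C11 atomics so it compiles and runs in userspace.
 * LOCK_BIT and struct slot are made up for illustration.
 */
#include <stdatomic.h>
#include <stdio.h>

#define LOCK_BIT 24	/* one spare bit in the packed per-slot word */

struct slot {
	atomic_ulong value;	/* low bits: payload; bit 24: the lock */
};

static void slot_lock(struct slot *s)
{
	/* atomically set the bit; if it was already set, spin and retry */
	while (atomic_fetch_or_explicit(&s->value, 1UL << LOCK_BIT,
					memory_order_acquire) & (1UL << LOCK_BIT))
		;	/* another thread holds the bit: keep spinning */
}

static void slot_unlock(struct slot *s)
{
	/* clear the bit with release ordering to publish our updates */
	atomic_fetch_and_explicit(&s->value, ~(1UL << LOCK_BIT),
				  memory_order_release);
}

int main(void)
{
	struct slot s = { .value = 5 };	/* pretend 5 is a stored obj size */

	slot_lock(&s);
	printf("locked,   value = 0x%lx\n", atomic_load(&s.value));
	slot_unlock(&s);
	printf("unlocked, value = 0x%lx\n", atomic_load(&s.value));
	return 0;
}
```

The kernel helper does the same test-and-set spin, but on the ZRAM_ACCESS bit inside each table entry's value word, so even a table of millions of slots needs no separate lock array.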
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 47e15fec3cd0..aac48ff69618 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -413,24 +413,38 @@ static DEVICE_ATTR_RO(io_stat);
 static DEVICE_ATTR_RO(mm_stat);
 static DEVICE_ATTR_RO(debug_stat);
 
+static void zram_slot_lock(struct zram *zram, u32 index)
+{
+        struct zram_meta *meta = zram->meta;
+
+        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+}
+
+static void zram_slot_unlock(struct zram *zram, u32 index)
+{
+        struct zram_meta *meta = zram->meta;
+
+        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+}
+
 static bool zram_same_page_read(struct zram *zram, u32 index,
                                 struct page *page,
                                 unsigned int offset, unsigned int len)
 {
         struct zram_meta *meta = zram->meta;
 
-        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+        zram_slot_lock(zram, index);
         if (unlikely(!meta->table[index].handle) ||
                         zram_test_flag(meta, index, ZRAM_SAME)) {
                 void *mem;
 
-                bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+                zram_slot_unlock(zram, index);
                 mem = kmap_atomic(page);
                 zram_fill_page(mem + offset, len, meta->table[index].element);
                 kunmap_atomic(mem);
                 return true;
         }
-        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+        zram_slot_unlock(zram, index);
 
         return false;
 }
@@ -446,11 +460,11 @@ static bool zram_same_page_write(struct zram *zram, u32 index,
 
         kunmap_atomic(mem);
         /* Free memory associated with this sector now. */
-        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+        zram_slot_lock(zram, index);
         zram_free_page(zram, index);
         zram_set_flag(meta, index, ZRAM_SAME);
         zram_set_element(meta, index, element);
-        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+        zram_slot_unlock(zram, index);
 
         atomic64_inc(&zram->stats.same_pages);
         return true;
@@ -557,7 +571,7 @@ static int zram_decompress_page(struct zram *zram, struct page *page, u32 index)
         if (zram_same_page_read(zram, index, page, 0, PAGE_SIZE))
                 return 0;
 
-        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+        zram_slot_lock(zram, index);
         handle = meta->table[index].handle;
         size = zram_get_obj_size(meta, index);
 
@@ -576,7 +590,7 @@ static int zram_decompress_page(struct zram *zram, struct page *page, u32 index)
                 zcomp_stream_put(zram->comp);
         }
         zs_unmap_object(meta->mem_pool, handle);
-        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+        zram_slot_unlock(zram, index);
 
         /* Should NEVER happen. Return bio error if it does. */
         if (unlikely(ret))
@@ -725,11 +739,11 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index)
          * Free memory associated with this sector
          * before overwriting unused sectors.
          */
-        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+        zram_slot_lock(zram, index);
         zram_free_page(zram, index);
         meta->table[index].handle = handle;
         zram_set_obj_size(meta, index, comp_len);
-        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+        zram_slot_unlock(zram, index);
 
         /* Update stats */
         atomic64_add(comp_len, &zram->stats.compr_data_size);
@@ -787,7 +801,6 @@ static void zram_bio_discard(struct zram *zram, u32 index,
                              int offset, struct bio *bio)
 {
         size_t n = bio->bi_iter.bi_size;
-        struct zram_meta *meta = zram->meta;
 
         /*
          * zram manages data in physical block size units. Because logical block
@@ -808,9 +821,9 @@ static void zram_bio_discard(struct zram *zram, u32 index,
         }
 
         while (n >= PAGE_SIZE) {
-                bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+                zram_slot_lock(zram, index);
                 zram_free_page(zram, index);
-                bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+                zram_slot_unlock(zram, index);
                 atomic64_inc(&zram->stats.notify_free);
                 index++;
                 n -= PAGE_SIZE;
@@ -924,9 +937,9 @@ static void zram_slot_free_notify(struct block_device *bdev,
         zram = bdev->bd_disk->private_data;
         meta = zram->meta;
 
-        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+        zram_slot_lock(zram, index);
         zram_free_page(zram, index);
-        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+        zram_slot_unlock(zram, index);
         atomic64_inc(&zram->stats.notify_free);
 }
 
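One payoff of this clean-up beyond readability: every table access now funnels through zram_slot_lock()/zram_slot_unlock(), so a future change of locking primitive would touch only the two wrappers. Purely as a hypothetical illustration (not from this patch or any kernel release), assuming a per-entry spinlock_t member named lock existed in zram_table_entry:

```c
/*
 * Hypothetical alternative wrapper bodies: swap the bit spinlock for a
 * per-entry spinlock_t without touching any of the call sites this
 * patch converts.  The .lock member is an assumption; the 4.11-era
 * zram_table_entry has no such field.
 */
static void zram_slot_lock(struct zram *zram, u32 index)
{
        spin_lock(&zram->meta->table[index].lock);
}

static void zram_slot_unlock(struct zram *zram, u32 index)
{
        spin_unlock(&zram->meta->table[index].lock);
}
```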