Diffstat (limited to 'drivers/md/dm-cache-target.c')
-rw-r--r--  drivers/md/dm-cache-target.c | 27 +++++++++++++--------------
1 file changed, 13 insertions(+), 14 deletions(-)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 1db375f50a13..d682a0511381 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -94,6 +94,9 @@ static void iot_io_begin(struct io_tracker *iot, sector_t len)
 
 static void __iot_io_end(struct io_tracker *iot, sector_t len)
 {
+	if (!len)
+		return;
+
 	iot->in_flight -= len;
 	if (!iot->in_flight)
 		iot->idle_time = jiffies;
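The new guard at the top of __iot_io_end() matters because, with the accountable_bio() change later in this patch, every non-discard bio is now accounted, which can include zero-length bios (an empty flush has bio_sectors() == 0); without the early return such a completion arriving while the tracker is already quiescent would reset idle_time even though nothing was in flight. Below is a minimal sketch of that bookkeeping, assuming a simplified two-field tracker with locking omitted; the real struct io_tracker in dm-cache-target.c also carries a spinlock, and the exact iot_idle_for() implementation may differ.

#include <linux/types.h>
#include <linux/jiffies.h>

/* Simplified stand-in for struct io_tracker; locking omitted. */
struct io_tracker_sketch {
	sector_t in_flight;		/* sectors currently accounted as in flight */
	unsigned long idle_time;	/* jiffies stamp of the last time in_flight hit 0 */
};

static void sketch_io_end(struct io_tracker_sketch *iot, sector_t len)
{
	if (!len)		/* zero-length (e.g. empty flush) completions */
		return;		/* must not disturb the idle timestamp */

	iot->in_flight -= len;
	if (!iot->in_flight)
		iot->idle_time = jiffies;	/* idle window starts now */
}

/* Hypothetical equivalent of iot_idle_for(): idle means nothing in flight
 * and the last completion was at least @jifs jiffies ago. */
static bool sketch_idle_for(struct io_tracker_sketch *iot, unsigned long jifs)
{
	return !iot->in_flight &&
	       time_after(jiffies, iot->idle_time + jifs);
}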
@@ -474,7 +477,7 @@ struct cache {
 	spinlock_t invalidation_lock;
 	struct list_head invalidation_requests;
 
-	struct io_tracker origin_tracker;
+	struct io_tracker tracker;
 
 	struct work_struct commit_ws;
 	struct batcher committer;
@@ -901,8 +904,7 @@ static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
 
 static bool accountable_bio(struct cache *cache, struct bio *bio)
 {
-	return ((bio->bi_bdev == cache->origin_dev->bdev) &&
-		bio_op(bio) != REQ_OP_DISCARD);
+	return bio_op(bio) != REQ_OP_DISCARD;
 }
 
 static void accounted_begin(struct cache *cache, struct bio *bio)
@@ -912,7 +914,7 @@ static void accounted_begin(struct cache *cache, struct bio *bio)
 
 	if (accountable_bio(cache, bio)) {
 		pb->len = bio_sectors(bio);
-		iot_io_begin(&cache->origin_tracker, pb->len);
+		iot_io_begin(&cache->tracker, pb->len);
 	}
 }
 
@@ -921,7 +923,7 @@ static void accounted_complete(struct cache *cache, struct bio *bio)
 	size_t pb_data_size = get_per_bio_data_size(cache);
 	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 
-	iot_io_end(&cache->origin_tracker, pb->len);
+	iot_io_end(&cache->tracker, pb->len);
 }
 
 static void accounted_request(struct cache *cache, struct bio *bio)
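With the origin-device check dropped from accountable_bio(), the tracker now sees every non-discard bio the target handles, whether it is served from the cache device or the origin device, which is why origin_tracker becomes plain tracker. A small self-contained sketch of the submit/complete pairing, using hypothetical names and omitting locking; the real code stashes the charged length in its per-bio data and ends accounting from the bio's completion path:

#include <linux/bio.h>

/* Hypothetical per-bio stash; the real target uses its per_bio_data. */
struct sketch_per_bio {
	sector_t len;			/* sectors charged to the tracker */
};

struct sketch_tracker {
	sector_t in_flight;
};

/* Charge the bio at submission: everything except discards counts. */
static void sketch_accounted_begin(struct sketch_tracker *t,
				   struct sketch_per_bio *pb, struct bio *bio)
{
	if (bio_op(bio) != REQ_OP_DISCARD) {
		pb->len = bio_sectors(bio);	/* remember what we charged */
		t->in_flight += pb->len;
	} else {
		pb->len = 0;			/* discards stay invisible */
	}
}

/* Uncharge exactly what was charged, from the completion path. */
static void sketch_accounted_complete(struct sketch_tracker *t,
				      struct sketch_per_bio *pb)
{
	t->in_flight -= pb->len;
}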
@@ -1716,20 +1718,19 @@ static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
 
 enum busy {
 	IDLE,
-	MODERATE,
 	BUSY
 };
 
 static enum busy spare_migration_bandwidth(struct cache *cache)
 {
-	bool idle = iot_idle_for(&cache->origin_tracker, HZ);
+	bool idle = iot_idle_for(&cache->tracker, HZ);
 	sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
 		cache->sectors_per_block;
 
-	if (current_volume <= cache->migration_threshold)
-		return idle ? IDLE : MODERATE;
+	if (idle && current_volume <= cache->migration_threshold)
+		return IDLE;
 	else
-		return idle ? MODERATE : BUSY;
+		return BUSY;
 }
 
 static void inc_hit_counter(struct cache *cache, struct bio *bio)
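The three-way IDLE/MODERATE/BUSY answer collapses to two states: the cache is only considered idle when the tracker has seen no accounted I/O for a full second (HZ jiffies) and one more migration would still fit under migration_threshold; any other combination is BUSY. A stand-alone sketch of the new decision with a worked example in the comments; the constants used (128-sector blocks, 2048-sector threshold) are illustrative, not taken from a particular configuration:

#include <stdbool.h>

typedef unsigned long long sector_t;	/* stand-in for the kernel type */

enum busy { IDLE, BUSY };		/* MODERATE no longer exists */

static enum busy sketch_spare_bandwidth(bool tracker_idle,
					unsigned int nr_io_migrations,
					sector_t sectors_per_block,
					sector_t migration_threshold)
{
	/* Volume if one more migration were started right now. */
	sector_t current_volume = (nr_io_migrations + 1) * sectors_per_block;

	if (tracker_idle && current_volume <= migration_threshold)
		return IDLE;
	return BUSY;
}

/*
 * Worked example with 128-sector (64 KiB) blocks and a 2048-sector threshold:
 *   - 15 migrations in flight, tracker idle: (15 + 1) * 128 = 2048 <= 2048 -> IDLE
 *   - 16 migrations in flight, tracker idle: (16 + 1) * 128 = 2176 >  2048 -> BUSY
 *   - any accounted I/O within the last HZ jiffies -> tracker not idle -> BUSY,
 *     where the old code would still have answered MODERATE below the threshold.
 */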
@@ -2045,8 +2046,6 @@ static void check_migrations(struct work_struct *ws)
 
 	for (;;) {
 		b = spare_migration_bandwidth(cache);
-		if (b == BUSY)
-			break;
 
 		r = policy_get_background_work(cache->policy, b == IDLE, &op);
 		if (r == -ENODATA)
@@ -2717,7 +2716,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 
 	batcher_init(&cache->committer, commit_op, cache,
 		     issue_op, cache, cache->wq);
-	iot_init(&cache->origin_tracker);
+	iot_init(&cache->tracker);
 
 	init_rwsem(&cache->background_work_lock);
 	prevent_background_work(cache);
@@ -2941,7 +2940,7 @@ static void cache_postsuspend(struct dm_target *ti)
 
 	cancel_delayed_work(&cache->waker);
 	flush_workqueue(cache->wq);
-	WARN_ON(cache->origin_tracker.in_flight);
+	WARN_ON(cache->tracker.in_flight);
 
 	/*
 	 * If it's a flush suspend there won't be any deferred bios, so this