author		Joe Thornber <ejt@redhat.com>		2017-05-11 08:22:31 -0400
committer	Mike Snitzer <snitzer@redhat.com>	2017-05-14 21:54:33 -0400
commit		701e03e4e180f0cd97d4139a32e2b2d879d12da2
tree		891165d167eedd13ab433a86377d9f46b2d44e97
parent		6cf4cc8f8b3b7bc9e3c04a7eab44b985d50029fc
dm cache: track all IO to the cache rather than just the origin device's IO
IO tracking used to throttle writebacks when the origin device is busy.

Even if all the IO is going to the fast device, writebacks can
significantly degrade performance.  So track all IO to gauge whether the
cache is busy or not.

Otherwise, synthetic IO tests (e.g. fio) that might send all IO to the
fast device wouldn't cause writebacks to get throttled.

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
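The io_tracker being renamed by this patch is a small helper private to
dm-cache-target.c: it counts in-flight sectors and records the moment the
count last drops to zero, so callers can ask whether the device has been
quiet for a given number of jiffies. A rough sketch of the pattern behind
the iot_* calls in the hunks below; this is a paraphrase, and field names
or locking details may differ from the exact source:

#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/types.h>

struct io_tracker {
	spinlock_t lock;
	sector_t in_flight;		/* sectors of IO currently in flight */
	unsigned long idle_time;	/* jiffies when in_flight last hit zero */
};

static void iot_init(struct io_tracker *iot)
{
	spin_lock_init(&iot->lock);
	iot->in_flight = 0;
	iot->idle_time = 0;
}

static void iot_io_begin(struct io_tracker *iot, sector_t len)
{
	unsigned long flags;

	spin_lock_irqsave(&iot->lock, flags);
	iot->in_flight += len;
	spin_unlock_irqrestore(&iot->lock, flags);
}

static void iot_io_end(struct io_tracker *iot, sector_t len)
{
	unsigned long flags;

	spin_lock_irqsave(&iot->lock, flags);
	iot->in_flight -= len;
	if (!iot->in_flight)
		iot->idle_time = jiffies;	/* device just went idle */
	spin_unlock_irqrestore(&iot->lock, flags);
}

/*
 * True only if nothing is in flight and the last completion was more
 * than 'jifs' jiffies ago.
 */
static bool iot_idle_for(struct io_tracker *iot, unsigned long jifs)
{
	unsigned long flags;
	bool r;

	spin_lock_irqsave(&iot->lock, flags);
	r = !iot->in_flight && time_after(jiffies, iot->idle_time + jifs);
	spin_unlock_irqrestore(&iot->lock, flags);

	return r;
}

The patch leaves these helpers untouched; it only widens what feeds them,
by dropping the bio->bi_bdev == cache->origin_dev->bdev filter from
accountable_bio().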
-rw-r--r--	drivers/md/dm-cache-target.c	15
1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 0760ba409c21..232078e48167 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -477,7 +477,7 @@ struct cache {
 	spinlock_t invalidation_lock;
 	struct list_head invalidation_requests;
 
-	struct io_tracker origin_tracker;
+	struct io_tracker tracker;
 
 	struct work_struct commit_ws;
 	struct batcher committer;
@@ -904,8 +904,7 @@ static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
 
 static bool accountable_bio(struct cache *cache, struct bio *bio)
 {
-	return ((bio->bi_bdev == cache->origin_dev->bdev) &&
-		bio_op(bio) != REQ_OP_DISCARD);
+	return bio_op(bio) != REQ_OP_DISCARD;
 }
 
 static void accounted_begin(struct cache *cache, struct bio *bio)
@@ -915,7 +914,7 @@ static void accounted_begin(struct cache *cache, struct bio *bio)
 
 	if (accountable_bio(cache, bio)) {
 		pb->len = bio_sectors(bio);
-		iot_io_begin(&cache->origin_tracker, pb->len);
+		iot_io_begin(&cache->tracker, pb->len);
 	}
 }
 
@@ -924,7 +923,7 @@ static void accounted_complete(struct cache *cache, struct bio *bio)
 	size_t pb_data_size = get_per_bio_data_size(cache);
 	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 
-	iot_io_end(&cache->origin_tracker, pb->len);
+	iot_io_end(&cache->tracker, pb->len);
 }
 
 static void accounted_request(struct cache *cache, struct bio *bio)
@@ -1725,7 +1724,7 @@ enum busy {
 
 static enum busy spare_migration_bandwidth(struct cache *cache)
 {
-	bool idle = iot_idle_for(&cache->origin_tracker, HZ);
+	bool idle = iot_idle_for(&cache->tracker, HZ);
 	sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
 		cache->sectors_per_block;
 
@@ -2720,7 +2719,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 
 	batcher_init(&cache->committer, commit_op, cache,
 		     issue_op, cache, cache->wq);
-	iot_init(&cache->origin_tracker);
+	iot_init(&cache->tracker);
 
 	init_rwsem(&cache->background_work_lock);
 	prevent_background_work(cache);
@@ -2944,7 +2943,7 @@ static void cache_postsuspend(struct dm_target *ti)
 
 	cancel_delayed_work(&cache->waker);
 	flush_workqueue(cache->wq);
-	WARN_ON(cache->origin_tracker.in_flight);
+	WARN_ON(cache->tracker.in_flight);
 
 	/*
 	 * If it's a flush suspend there won't be any deferred bios, so this
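For context on where the tracker's verdict is consumed: the
spare_migration_bandwidth() hunk above gates background writebacks on
iot_idle_for(&cache->tracker, HZ), i.e. on the cache having been quiet for
a full second. A sketch of how the function plausibly concludes; the tail
shown here (the migration_threshold comparison and the IDLE/BUSY returns)
is an assumption, since the diff cuts off after the first few lines:

static enum busy spare_migration_bandwidth(struct cache *cache)
{
	/* Idle only if no accounted IO completed in the last second. */
	bool idle = iot_idle_for(&cache->tracker, HZ);
	sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
		cache->sectors_per_block;

	/*
	 * Assumed tail of the function: report IDLE, letting writebacks
	 * proceed freely, only when the cache is quiescent and migration
	 * volume is within the configured threshold.
	 */
	if (idle && current_volume <= cache->migration_threshold)
		return IDLE;

	return BUSY;
}

After this patch, 'idle' reflects IO to both the origin and the fast
device, which is the point of the change: an fio job that hits only
cached blocks now keeps the tracker busy and holds writebacks back.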