author    Joe Thornber <ejt@redhat.com>    2015-05-15 10:29:58 -0400
committer Mike Snitzer <snitzer@redhat.com>    2015-05-29 14:19:07 -0400
commit    40775257b97e27305cf5c2425be7acaa6edee4ea
tree      d58225e340a166656b916f7542cfbd9c96401222 /drivers/md/dm-cache-target.c
parent    651f5fa2a3959ff5db60c09a84efd66309fe4035
dm cache: boost promotion of blocks that will be overwritten
When considering whether to move a block to the cache we already give
preferential treatment to discarded blocks, since they are cheap to
promote (no read of the origin required since the data is junk).  The
same is true of blocks that are about to be completely overwritten, so
we likewise boost their promotion chances.

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
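The "about to be completely overwritten" test is performed by the
bio_writes_complete_block() helper that the hunks below call.  Its body is
not part of this excerpt; a minimal sketch of such a check, assuming the
cache block size is held in cache->sectors_per_block as elsewhere in
dm-cache-target.c, looks like this:

static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
{
	/*
	 * A WRITE whose payload covers the whole cache block makes the
	 * origin data irrelevant, so promotion can skip the copy-in.
	 */
	return (bio_data_dir(bio) == WRITE) &&
		(bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
}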
Diffstat (limited to 'drivers/md/dm-cache-target.c')
 drivers/md/dm-cache-target.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index d2d91c164420..7829d947ef01 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -1697,17 +1697,17 @@ static void process_cell(struct cache *cache, struct prealloc *structs,
 	dm_oblock_t block = get_bio_block(cache, bio);
 	struct policy_result lookup_result;
 	bool passthrough = passthrough_mode(&cache->features);
-	bool discarded_block, can_migrate;
+	bool fast_promotion, can_migrate;
 	struct old_oblock_lock ool;
 
-	discarded_block = is_discarded_oblock(cache, block);
-	can_migrate = !passthrough && (discarded_block || spare_migration_bandwidth(cache));
+	fast_promotion = is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio);
+	can_migrate = !passthrough && (fast_promotion || spare_migration_bandwidth(cache));
 
 	ool.locker.fn = cell_locker;
 	ool.cache = cache;
 	ool.structs = structs;
 	ool.cell = NULL;
-	r = policy_map(cache->policy, block, true, can_migrate, discarded_block,
+	r = policy_map(cache->policy, block, true, can_migrate, fast_promotion,
 		       bio, &ool.locker, &lookup_result);
 
 	if (r == -EWOULDBLOCK)
@@ -2895,7 +2895,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
 	dm_oblock_t block = get_bio_block(cache, bio);
 	size_t pb_data_size = get_per_bio_data_size(cache);
 	bool can_migrate = false;
-	bool discarded_block;
+	bool fast_promotion;
 	struct policy_result lookup_result;
 	struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);
 	struct old_oblock_lock ool;
@@ -2937,9 +2937,9 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
 		return DM_MAPIO_SUBMITTED;
 	}
 
-	discarded_block = is_discarded_oblock(cache, block);
+	fast_promotion = is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio);
 
-	r = policy_map(cache->policy, block, false, can_migrate, discarded_block,
+	r = policy_map(cache->policy, block, false, can_migrate, fast_promotion,
 		       bio, &ool.locker, &lookup_result);
 	if (r == -EWOULDBLOCK) {
 		cell_defer(cache, cell, true);
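Note that fast_promotion is simply forwarded to policy_map() in the slot
that previously carried discarded_block; the cache policy decides what the
hint is worth.  The policy-side code is outside this diff.  As a purely
hypothetical sketch (should_promote, FAST_PROMOTE_THRESHOLD, struct policy
and promote_threshold are illustrative names, not taken from the actual
dm-cache policy code), a policy could honour the hint by accepting a much
lower hit count before promoting:

/*
 * Hypothetical policy-side use of the hint.  A block that is discarded
 * or about to be fully overwritten costs no copy from the origin, so a
 * single hit can be enough to justify promotion.
 */
#define FAST_PROMOTE_THRESHOLD 1	/* illustrative value */

static bool should_promote(struct policy *p, struct entry *e,
			   bool fast_promotion, int data_dir)
{
	if (fast_promotion && data_dir == WRITE)
		return e->hit_count >= FAST_PROMOTE_THRESHOLD;

	return e->hit_count >= p->promote_threshold;
}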