author    Mike Snitzer <snitzer@redhat.com>  2017-10-19 17:16:54 -0400
committer Mike Snitzer <snitzer@redhat.com>  2017-11-10 15:44:47 -0500
commit    2df3bae9a6543e90042291707b8db0cbfbae9ee9 (patch)
tree      c6f6fc8506e3d0cc38f882de23497f3961f70e4e /drivers/md/dm-cache-target.c
parent    8e3c3827776fc93728c0c8d7c7b731226dc6ee23 (diff)
dm cache: submit writethrough writes in parallel to origin and cache
Discontinue issuing writethrough write IO in series to the origin and
then cache.

Use bio_clone_fast() to create a new origin clone bio that will be
mapped to the origin device and then bio_chain() it to the bio that
gets remapped to the cache device.  The origin clone bio does _not_
have a copy of the per_bio_data -- as such check_if_tick_bio_needed()
will not be called.

The cache bio (parent bio) will not complete until the origin bio has
completed -- this fulfills bio_clone_fast()'s requirements as well as
the requirement to not complete the original IO until the write IO has
completed to both the origin and cache device.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
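The heart of the change is a clone-and-chain pattern. Below is a condensed sketch of that pattern, assuming the 4.14-era block APIs this commit targets (bio_clone_fast(), bio_chain(), bio_set_dev(), submit_bio()). The function name submit_parallel_writethrough() and its device parameters are hypothetical illustration, not names from the patch; the dm-cache remapping and discard bookkeeping are elided:

	/*
	 * Hypothetical condensation of the pattern this patch introduces:
	 * clone the write, chain the clone to the original bio, send the
	 * clone to the origin device, and let the original bio continue
	 * to the cache device.
	 */
	static void submit_parallel_writethrough(struct bio *bio, struct bio_set *bs,
						 struct block_device *origin_dev,
						 struct block_device *cache_dev)
	{
		/* GFP_NOIO from a dedicated bio_set: allocation waits rather than fails */
		struct bio *origin_bio = bio_clone_fast(bio, GFP_NOIO, bs);

		/* parent (bio) cannot complete until origin_bio completes too */
		bio_chain(origin_bio, bio);

		bio_set_dev(origin_bio, origin_dev);
		submit_bio(origin_bio);		/* origin write, issued immediately */

		bio_set_dev(bio, cache_dev);	/* original bio goes to the cache */
		/* the caller's normal remap path then issues `bio` */
	}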
Diffstat (limited to 'drivers/md/dm-cache-target.c')
 drivers/md/dm-cache-target.c | 54 ++++++++++++++++++++++----------
 1 file changed, 37 insertions(+), 17 deletions(-)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 04ffae7b2301..6d83439aa7c8 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -450,6 +450,7 @@ struct cache {
 	struct work_struct migration_worker;
 	struct delayed_work waker;
 	struct dm_bio_prison_v2 *prison;
+	struct bio_set *bs;
 
 	mempool_t *migration_pool;
 
@@ -868,16 +869,23 @@ static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
 	spin_unlock_irqrestore(&cache->lock, flags);
 }
 
-static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
-					  dm_oblock_t oblock)
+static void __remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
+					    dm_oblock_t oblock, bool bio_has_pbd)
 {
-	// FIXME: this is called way too much.
-	check_if_tick_bio_needed(cache, bio);
+	if (bio_has_pbd)
+		check_if_tick_bio_needed(cache, bio);
 	remap_to_origin(cache, bio);
 	if (bio_data_dir(bio) == WRITE)
 		clear_discard(cache, oblock_to_dblock(cache, oblock));
 }
 
+static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
+					  dm_oblock_t oblock)
+{
+	// FIXME: check_if_tick_bio_needed() is called way too much through this interface
+	__remap_to_origin_clear_discard(cache, bio, oblock, true);
+}
+
 static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
 				 dm_oblock_t oblock, dm_cblock_t cblock)
 {
@@ -971,23 +979,25 @@ static void writethrough_endio(struct bio *bio)
 }
 
 /*
- * FIXME: send in parallel, huge latency as is.
  * When running in writethrough mode we need to send writes to clean blocks
- * to both the cache and origin devices.  In future we'd like to clone the
- * bio and send them in parallel, but for now we're doing them in
- * series as this is easier.
+ * to both the cache and origin devices.  Clone the bio and send them in parallel.
  */
-static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
-				       dm_oblock_t oblock, dm_cblock_t cblock)
+static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio,
+				      dm_oblock_t oblock, dm_cblock_t cblock)
 {
-	struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
+	struct bio *origin_bio = bio_clone_fast(bio, GFP_NOIO, cache->bs);
 
-	pb->cache = cache;
-	pb->cblock = cblock;
-	dm_hook_bio(&pb->hook_info, bio, writethrough_endio, NULL);
-	dm_bio_record(&pb->bio_details, bio);
+	BUG_ON(!origin_bio);
 
-	remap_to_origin_clear_discard(pb->cache, bio, oblock);
+	bio_chain(origin_bio, bio);
+	/*
+	 * Passing false to __remap_to_origin_clear_discard() skips
+	 * all code that might use per_bio_data (since clone doesn't have it)
+	 */
+	__remap_to_origin_clear_discard(cache, origin_bio, oblock, false);
+	submit_bio(origin_bio);
+
+	remap_to_cache(cache, bio, cblock);
 }
 
 /*----------------------------------------------------------------
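For readers tracing the new completion flow: bio_chain(origin_bio, bio) makes the original bio the parent of the clone, which is what provides the ordering guarantee the commit message describes. A paraphrase of what the 4.14-era bio_chain() arranges internally (from block/bio.c, not part of this patch; bio_chain_endio is a block-layer internal):

	/*
	 * Roughly what bio_chain(child, parent) does in this kernel era:
	 *
	 *	child->bi_private = parent;
	 *	child->bi_end_io  = bio_chain_endio;	// completes into parent
	 *	bio_inc_remaining(parent);		// parent waits on child
	 *
	 * bio_endio(parent) therefore only invokes parent->bi_end_io once
	 * the child's completion has dropped the extra __bi_remaining
	 * reference, whichever device finishes first.
	 */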
@@ -1873,7 +1883,7 @@ static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
 	} else {
 		if (bio_data_dir(bio) == WRITE && writethrough_mode(cache) &&
 		    !is_dirty(cache, cblock)) {
-			remap_to_origin_then_cache(cache, bio, block, cblock);
+			remap_to_origin_and_cache(cache, bio, block, cblock);
 			accounted_begin(cache, bio);
 		} else
 			remap_to_cache_dirty(cache, bio, block, cblock);
@@ -2132,6 +2142,9 @@ static void destroy(struct cache *cache)
 		kfree(cache->ctr_args[i]);
 	kfree(cache->ctr_args);
 
+	if (cache->bs)
+		bioset_free(cache->bs);
+
 	kfree(cache);
 }
 
@@ -2578,6 +2591,13 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 	cache->features = ca->features;
 	ti->per_io_data_size = get_per_bio_data_size(cache);
 
+	if (writethrough_mode(cache)) {
+		/* Create bioset for writethrough bios issued to origin */
+		cache->bs = bioset_create(BIO_POOL_SIZE, 0, 0);
+		if (!cache->bs)
+			goto bad;
+	}
+
 	cache->callbacks.congested_fn = cache_is_congested;
 	dm_table_add_target_callbacks(ti->table, &cache->callbacks);
 
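A design note on the cache->bs field wired up above: clones allocated in the IO path must come from a private, mempool-backed bio_set so the GFP_NOIO allocation in remap_to_origin_and_cache() can always make forward progress; drawing clones from a shared pool could deadlock against the very IO the allocation is waiting on, which is the standard rationale for per-driver biosets in stacking drivers. The create/free pairing uses the pre-4.18 bioset API shown in the hunks (bioset_create()/bioset_free()); later kernels moved to an embedded bio_set managed with bioset_init()/bioset_exit().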