Diffstat (limited to 'drivers/md/dm-cache-target.c')
-rw-r--r--	drivers/md/dm-cache-target.c	68
1 file changed, 42 insertions(+), 26 deletions(-)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 1b1469ebe5cb..074b9c8e4cf0 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -85,6 +85,12 @@ static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
 {
 	bio->bi_end_io = h->bi_end_io;
 	bio->bi_private = h->bi_private;
+
+	/*
+	 * Must bump bi_remaining to allow bio to complete with
+	 * restored bi_end_io.
+	 */
+	atomic_inc(&bio->bi_remaining);
 }
 
 /*----------------------------------------------------------------*/
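Why the bump is needed: with the bio chaining that accompanies the bi_iter conversion, bio_endio() decrements bio->bi_remaining before invoking bi_end_io, so by the time a hooked completion handler runs the count has already been consumed. Restoring the saved bi_end_io and completing the bio a second time would underflow the counter without the atomic_inc(). A minimal sketch of the save/restore pair this function belongs to (dm_hook_bio is not shown in the hunk; its body below is an assumption based on the fields of struct dm_hook_info):

    /* Save the bio's completion callback/context and substitute our own. */
    static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
                            bio_end_io_t *bi_end_io, void *bi_private)
    {
            h->bi_end_io = bio->bi_end_io;
            h->bi_private = bio->bi_private;

            bio->bi_end_io = bi_end_io;
            bio->bi_private = bi_private;
    }

    /*
     * Restore the original callback.  bio_endio() already dropped
     * bi_remaining on the way into the hook, so bump it back up or the
     * second completion will underflow the counter.
     */
    static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
    {
            bio->bi_end_io = h->bi_end_io;
            bio->bi_private = h->bi_private;

            atomic_inc(&bio->bi_remaining);
    }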
@@ -283,6 +289,7 @@ struct per_bio_data {
 	bool tick:1;
 	unsigned req_nr:2;
 	struct dm_deferred_entry *all_io_entry;
+	struct dm_hook_info hook_info;
 
 	/*
 	 * writethrough fields.  These MUST remain at the end of this
@@ -291,7 +298,6 @@ struct per_bio_data {
 	 */
 	struct cache *cache;
 	dm_cblock_t cblock;
-	struct dm_hook_info hook_info;
 	struct dm_bio_details bio_details;
 };
 
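The placement matters because the writethrough tail of per_bio_data is only allocated in writethrough mode; everything above the 'cache' member is common to all modes. Moving hook_info above that boundary makes it usable from overwrite_endio() (reordered below) regardless of cache mode. A sketch of the sizing scheme, assuming get_per_bio_data_size() (referenced in later hunks) is implemented with the usual offsetof trick:

    /* Common fields only vs. common fields plus the writethrough tail. */
    #define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
    #define PB_DATA_SIZE_WT (sizeof(struct per_bio_data))

    static size_t get_per_bio_data_size(struct cache *cache)
    {
            return writethrough_mode(&cache->features) ?
                    PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
    }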
@@ -664,15 +670,18 @@ static void remap_to_origin(struct cache *cache, struct bio *bio)
 static void remap_to_cache(struct cache *cache, struct bio *bio,
 			   dm_cblock_t cblock)
 {
-	sector_t bi_sector = bio->bi_sector;
+	sector_t bi_sector = bio->bi_iter.bi_sector;
+	sector_t block = from_cblock(cblock);
 
 	bio->bi_bdev = cache->cache_dev->bdev;
 	if (!block_size_is_power_of_two(cache))
-		bio->bi_sector = (from_cblock(cblock) * cache->sectors_per_block) +
-				sector_div(bi_sector, cache->sectors_per_block);
+		bio->bi_iter.bi_sector =
+			(block * cache->sectors_per_block) +
+			sector_div(bi_sector, cache->sectors_per_block);
 	else
-		bio->bi_sector = (from_cblock(cblock) << cache->sectors_per_block_shift) |
-				(bi_sector & (cache->sectors_per_block - 1));
+		bio->bi_iter.bi_sector =
+			(block << cache->sectors_per_block_shift) |
+			(bi_sector & (cache->sectors_per_block - 1));
 }
 
 static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
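Both branches compute the same thing: cache-device sector = start of the cache block plus the bio's offset within the block; the power-of-two branch uses shift/mask where the generic branch uses multiply/divide. A worked example with hypothetical numbers (sectors_per_block = 8, so sectors_per_block_shift = 3):

    sector_t bi_sector = 1003;   /* incoming sector on the virtual device */
    sector_t block = 5;          /* from_cblock(cblock)                   */

    /* power-of-two path: offset = 1003 & 7 = 3, so (5 << 3) | 3 = 43     */
    /* generic path: sector_div(bi_sector, 8) divides bi_sector in place  */
    /* and returns the remainder 3, so (5 * 8) + 3 = 43 as well           */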
@@ -712,7 +721,7 @@ static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
 
 static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
 {
-	sector_t block_nr = bio->bi_sector;
+	sector_t block_nr = bio->bi_iter.bi_sector;
 
 	if (!block_size_is_power_of_two(cache))
 		(void) sector_div(block_nr, cache->sectors_per_block);
@@ -970,12 +979,13 @@ static void issue_copy_real(struct dm_cache_migration *mg)
 	int r;
 	struct dm_io_region o_region, c_region;
 	struct cache *cache = mg->cache;
+	sector_t cblock = from_cblock(mg->cblock);
 
 	o_region.bdev = cache->origin_dev->bdev;
 	o_region.count = cache->sectors_per_block;
 
 	c_region.bdev = cache->cache_dev->bdev;
-	c_region.sector = from_cblock(mg->cblock) * cache->sectors_per_block;
+	c_region.sector = cblock * cache->sectors_per_block;
 	c_region.count = cache->sectors_per_block;
 
 	if (mg->writeback || mg->demote) {
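Hoisting from_cblock() into a sector_t local is more than cosmetic: dm_cblock_t is a 32-bit block number (an assumption about dm-cache-block-types.h; the typedef is not part of this diff), so widening before the multiply keeps the block-to-sector conversion from wrapping on large caches. Illustration with hypothetical values:

    uint32_t cblock = 5000000;                /* 32-bit cache block number  */
    uint32_t sectors_per_block = 1024;

    uint64_t bad  = cblock * sectors_per_block;            /* wraps mod 2^32 */
    uint64_t good = (uint64_t)cblock * sectors_per_block;  /* 5120000000     */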
@@ -1002,13 +1012,15 @@ static void overwrite_endio(struct bio *bio, int err)
 	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 	unsigned long flags;
 
+	dm_unhook_bio(&pb->hook_info, bio);
+
 	if (err)
 		mg->err = true;
 
+	mg->requeue_holder = false;
+
 	spin_lock_irqsave(&cache->lock, flags);
 	list_add_tail(&mg->list, &cache->completed_migrations);
-	dm_unhook_bio(&pb->hook_info, bio);
-	mg->requeue_holder = false;
 	spin_unlock_irqrestore(&cache->lock, flags);
 
 	wake_worker(cache);
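The reorder closes a race: list_add_tail() publishes mg to the worker thread, and once the migration is on completed_migrations and the lock is dropped, another CPU may immediately process and recycle it. All writes to mg and pb must therefore finish before publication, and nothing may touch them afterwards. The pattern, reduced to its essentials:

    /* 1. finish every write to mg/pb ...                 */
    dm_unhook_bio(&pb->hook_info, bio);
    mg->requeue_holder = false;

    /* 2. ... then publish; from here the worker owns mg  */
    spin_lock_irqsave(&cache->lock, flags);
    list_add_tail(&mg->list, &cache->completed_migrations);
    spin_unlock_irqrestore(&cache->lock, flags);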
@@ -1027,7 +1039,7 @@ static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio)
 static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
 {
 	return (bio_data_dir(bio) == WRITE) &&
-		(bio->bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
+		(bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
 }
 
 static void avoid_copy(struct dm_cache_migration *mg)
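This predicate feeds the overwrite optimization that issue_overwrite() (in the hunk header above) implements: a WRITE spanning the whole cache block makes copying the old contents pointless, so the bio itself is redirected at the cache device with its completion hooked, which is what exercises hook_info and overwrite_endio(). A hedged sketch of the call shape, not the file's exact control flow:

    if (bio_writes_complete_block(cache, bio))
            issue_overwrite(mg, bio);  /* remap bio to cache, hook its endio */
    else
            issue_copy(mg);            /* copy the block, release bio later  */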
@@ -1252,7 +1264,7 @@ static void process_flush_bio(struct cache *cache, struct bio *bio)
 	size_t pb_data_size = get_per_bio_data_size(cache);
 	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 
-	BUG_ON(bio->bi_size);
+	BUG_ON(bio->bi_iter.bi_size);
 	if (!pb->req_nr)
 		remap_to_origin(cache, bio);
 	else
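The req_nr branch exists because dm-cache requests one flush clone per underlying device (cache_ctr() sets this up; it is not shown in this diff): clone 0 is steered at the origin, clone 1 at the cache device, so an empty FLUSH reaches both. The BUG_ON simply asserts the empty payload, now via the iterator:

    ti->num_flush_bios = 2;   /* clone 0 -> origin, clone 1 -> cache device */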
@@ -1275,9 +1287,9 @@ static void process_flush_bio(struct cache *cache, struct bio *bio)
  */
 static void process_discard_bio(struct cache *cache, struct bio *bio)
 {
-	dm_block_t start_block = dm_sector_div_up(bio->bi_sector,
+	dm_block_t start_block = dm_sector_div_up(bio->bi_iter.bi_sector,
 						  cache->discard_block_size);
-	dm_block_t end_block = bio->bi_sector + bio_sectors(bio);
+	dm_block_t end_block = bio_end_sector(bio);
 	dm_block_t b;
 
 	end_block = block_div(end_block, cache->discard_block_size);
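bio_end_sector(bio) expands to bi_iter.bi_sector + bio_sectors(bio), so the new line is an equivalent helper-based spelling of the old one. The surrounding rounding marks only discard blocks completely covered by the bio: the start rounds up, the end rounds down. Worked example with hypothetical geometry (discard_block_size = 128 sectors, bio covering sectors [200, 1000)):

    start_block = dm_sector_div_up(200, 128);  /* = 2: block 1 is partial   */
    end_block   = block_div(1000, 128);        /* = 7: block 7 is partial   */

    for (b = 2; b < 7; b++)      /* blocks 2..6 lie fully inside the range  */
            set_discard(cache, to_dblock(b));  /* assumed loop body; the    */
                                               /* loop sits below this hunk */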
@@ -2453,20 +2465,18 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
 	bool discarded_block;
 	struct dm_bio_prison_cell *cell;
 	struct policy_result lookup_result;
-	struct per_bio_data *pb;
+	struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);
 
-	if (from_oblock(block) > from_oblock(cache->origin_blocks)) {
+	if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
 		/*
 		 * This can only occur if the io goes to a partial block at
 		 * the end of the origin device.  We don't cache these.
 		 * Just remap to the origin and carry on.
 		 */
-		remap_to_origin_clear_discard(cache, bio, block);
+		remap_to_origin(cache, bio);
 		return DM_MAPIO_REMAPPED;
 	}
 
-	pb = init_per_bio_data(bio, pb_data_size);
-
 	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
 		defer_bio(cache, bio);
 		return DM_MAPIO_SUBMITTED;
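Two fixes share this hunk. First, the bounds test had an off-by-one: with origin_blocks == N the valid blocks are 0..N-1, so a bio landing in a trailing partial block computes block == N and must be caught with >=. Second, the early-return path now uses plain remap_to_origin(), since clearing discard state for an out-of-range block would touch bookkeeping that does not exist for it; initializing pb at its declaration also ensures the per-bio data is set up even on this path. Worked check with a hypothetical 100-block origin:

    /* origin = 100 full blocks + a partial tail => origin_blocks == 100 */
    /* a bio into the tail yields block == 100                           */
    100 >  100   /* old test: false -> partial block fell through        */
    100 >= 100   /* new test: true  -> remapped straight to the origin   */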
@@ -2826,12 +2836,13 @@ static void cache_resume(struct dm_target *ti)
 /*
  * Status format:
  *
- * <#used metadata blocks>/<#total metadata blocks>
+ * <metadata block size> <#used metadata blocks>/<#total metadata blocks>
+ * <cache block size> <#used cache blocks>/<#total cache blocks>
  * <#read hits> <#read misses> <#write hits> <#write misses>
- * <#demotions> <#promotions> <#blocks in cache> <#dirty>
+ * <#demotions> <#promotions> <#dirty>
  * <#features> <features>*
  * <#core args> <core args>
- * <#policy args> <policy args>*
+ * <policy name> <#policy args> <policy args>*
 */
 static void cache_status(struct dm_target *ti, status_type_t type,
 			 unsigned status_flags, char *result, unsigned maxlen)
@@ -2869,17 +2880,20 @@ static void cache_status(struct dm_target *ti, status_type_t type,
 
 	residency = policy_residency(cache->policy);
 
-	DMEMIT("%llu/%llu %u %u %u %u %u %u %llu %u ",
+	DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %llu ",
+	       (unsigned)(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),
 	       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
 	       (unsigned long long)nr_blocks_metadata,
+	       cache->sectors_per_block,
+	       (unsigned long long) from_cblock(residency),
+	       (unsigned long long) from_cblock(cache->cache_size),
 	       (unsigned) atomic_read(&cache->stats.read_hit),
 	       (unsigned) atomic_read(&cache->stats.read_miss),
 	       (unsigned) atomic_read(&cache->stats.write_hit),
 	       (unsigned) atomic_read(&cache->stats.write_miss),
 	       (unsigned) atomic_read(&cache->stats.demotion),
 	       (unsigned) atomic_read(&cache->stats.promotion),
-	       (unsigned long long) from_cblock(residency),
-	       cache->nr_dirty);
+	       (unsigned long long) from_cblock(cache->nr_dirty));
 
 	if (writethrough_mode(&cache->features))
 		DMEMIT("1 writethrough ");
@@ -2896,6 +2910,8 @@ static void cache_status(struct dm_target *ti, status_type_t type,
 	}
 
 	DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);
+
+	DMEMIT("%s ", dm_cache_policy_get_name(cache->policy));
 	if (sz < maxlen) {
 		r = policy_emit_config_values(cache->policy, result + sz, maxlen - sz);
 		if (r)
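Putting the pieces together, a status line in the new 1.3.0 format would read like this (all numbers hypothetical; the leading 8 is a 4KiB metadata block expressed in 512-byte sectors, and "mq" with its tunables stands in for whatever policy is loaded):

    8 123/4096 128 4231/65536 1200 344 2210 98 7 91 3 1 writethrough 2 migration_threshold 2048 mq 4 random_threshold 4 sequential_threshold 512

i.e. metadata block size, used/total metadata blocks, cache block size, used/total cache blocks, read hits/misses, write hits/misses, demotions, promotions, dirty, then features, core args, policy name and policy args.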
@@ -3129,7 +3145,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static struct target_type cache_target = {
 	.name = "cache",
-	.version = {1, 2, 0},
+	.version = {1, 3, 0},
 	.module = THIS_MODULE,
 	.ctr = cache_ctr,
 	.dtr = cache_dtr,