Diffstat (limited to 'drivers/md/dm-cache-target.c')
-rw-r--r--	drivers/md/dm-cache-target.c	169
1 file changed, 109 insertions, 60 deletions
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 0f4e84b15c30..66120bd46d15 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -142,6 +142,7 @@ struct cache {
 	spinlock_t lock;
 	struct bio_list deferred_bios;
 	struct bio_list deferred_flush_bios;
+	struct bio_list deferred_writethrough_bios;
 	struct list_head quiesced_migrations;
 	struct list_head completed_migrations;
 	struct list_head need_commit_migrations;
@@ -158,7 +159,7 @@ struct cache {
 	/*
 	 * origin_blocks entries, discarded if set.
 	 */
-	sector_t discard_block_size; /* a power of 2 times sectors per block */
+	uint32_t discard_block_size; /* a power of 2 times sectors per block */
 	dm_dblock_t discard_nr_blocks;
 	unsigned long *discard_bitset;
 
@@ -199,6 +200,11 @@ struct per_bio_data {
 	bool tick:1;
 	unsigned req_nr:2;
 	struct dm_deferred_entry *all_io_entry;
+
+	/* writethrough fields */
+	struct cache *cache;
+	dm_cblock_t cblock;
+	bio_end_io_t *saved_bi_end_io;
 };
 
 struct dm_cache_migration {
@@ -412,17 +418,24 @@ static bool block_size_is_power_of_two(struct cache *cache)
 	return cache->sectors_per_block_shift >= 0;
 }
 
+static dm_block_t block_div(dm_block_t b, uint32_t n)
+{
+	do_div(b, n);
+
+	return b;
+}
+
 static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
 {
-	sector_t discard_blocks = cache->discard_block_size;
+	uint32_t discard_blocks = cache->discard_block_size;
 	dm_block_t b = from_oblock(oblock);
 
 	if (!block_size_is_power_of_two(cache))
-		(void) sector_div(discard_blocks, cache->sectors_per_block);
+		discard_blocks = discard_blocks / cache->sectors_per_block;
 	else
 		discard_blocks >>= cache->sectors_per_block_shift;
 
-	(void) sector_div(b, discard_blocks);
+	b = block_div(b, discard_blocks);
 
 	return to_dblock(b);
 }
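The new block_div() helper exists because dm_block_t is a 64-bit quantity and 32-bit kernels cannot use the plain '/' operator on 64-bit operands; the kernel's do_div() divides a 64-bit value by a 32-bit divisor in place and returns the remainder. A minimal userspace sketch of the same idea follows; the do_div() stand-in and main() are illustrative assumptions, not kernel code.

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's do_div(): divide a 64-bit value by
 * a 32-bit divisor in place and yield the remainder (GCC/Clang extension). */
#define do_div(n, base) ({				\
	uint32_t __rem = (uint32_t)((n) % (base));	\
	(n) = (n) / (base);				\
	__rem;						\
})

typedef uint64_t dm_block_t;

static dm_block_t block_div(dm_block_t b, uint32_t n)
{
	do_div(b, n);	/* b now holds the quotient */
	return b;
}

int main(void)
{
	/* e.g. 1 TiB worth of 512-byte sectors split into 128-sector blocks */
	dm_block_t origin_sectors = 2147483648ULL;

	printf("%llu blocks\n",
	       (unsigned long long)block_div(origin_sectors, 128));
	return 0;
}
```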
@@ -609,6 +622,56 @@ static void issue(struct cache *cache, struct bio *bio)
 	spin_unlock_irqrestore(&cache->lock, flags);
 }
 
+static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cache->lock, flags);
+	bio_list_add(&cache->deferred_writethrough_bios, bio);
+	spin_unlock_irqrestore(&cache->lock, flags);
+
+	wake_worker(cache);
+}
+
+static void writethrough_endio(struct bio *bio, int err)
+{
+	struct per_bio_data *pb = get_per_bio_data(bio);
+	bio->bi_end_io = pb->saved_bi_end_io;
+
+	if (err) {
+		bio_endio(bio, err);
+		return;
+	}
+
+	remap_to_cache(pb->cache, bio, pb->cblock);
+
+	/*
+	 * We can't issue this bio directly, since we're in interrupt
+	 * context.  So it gets put on a bio list for processing by the
+	 * worker thread.
+	 */
+	defer_writethrough_bio(pb->cache, bio);
+}
+
+/*
+ * When running in writethrough mode we need to send writes to clean blocks
+ * to both the cache and origin devices.  In future we'd like to clone the
+ * bio and send them in parallel, but for now we're doing them in
+ * series as this is easier.
+ */
+static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
+				       dm_oblock_t oblock, dm_cblock_t cblock)
+{
+	struct per_bio_data *pb = get_per_bio_data(bio);
+
+	pb->cache = cache;
+	pb->cblock = cblock;
+	pb->saved_bi_end_io = bio->bi_end_io;
+	bio->bi_end_io = writethrough_endio;
+
+	remap_to_origin_clear_discard(pb->cache, bio, oblock);
+}
+
 /*----------------------------------------------------------------
  * Migration processing
  *
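The writethrough path above works by hijacking the bio's completion: remap_to_origin_then_cache() stashes the original bi_end_io, points it at writethrough_endio(), and sends the bio to the origin; when that write completes (in interrupt context) the bio is remapped to the cache device and handed to the worker thread for resubmission. The rough userspace analogue below illustrates only the callback-chaining part; all types and names are invented for the example, and the deferred resubmission is collapsed into a direct call.

```c
#include <stdio.h>

struct request;
typedef void (*end_io_fn)(struct request *r, int err);

struct request {
	const char *target;	/* where the I/O is currently aimed */
	end_io_fn end_io;	/* completion callback */
	end_io_fn saved_end_io;	/* original callback, restored later */
};

static void caller_end_io(struct request *r, int err)
{
	printf("caller sees completion of %s, err=%d\n", r->target, err);
}

static void writethrough_end_io(struct request *r, int err)
{
	r->end_io = r->saved_end_io;	/* restore the original callback */
	if (err) {
		r->end_io(r, err);	/* propagate the failure */
		return;
	}
	r->target = "cache";		/* second leg: retarget to the cache */
	r->end_io(r, 0);		/* in the kernel this step is deferred */
}

static void submit(struct request *r)
{
	r->end_io(r, 0);	/* pretend the I/O completed successfully */
}

int main(void)
{
	struct request r = { .target = "origin", .end_io = caller_end_io };

	r.saved_end_io = r.end_io;
	r.end_io = writethrough_end_io;	/* chain our handler in front */
	submit(&r);
	return 0;
}
```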
@@ -1002,7 +1065,7 @@ static void process_discard_bio(struct cache *cache, struct bio *bio)
 	dm_block_t end_block = bio->bi_sector + bio_sectors(bio);
 	dm_block_t b;
 
-	(void) sector_div(end_block, cache->discard_block_size);
+	end_block = block_div(end_block, cache->discard_block_size);
 
 	for (b = start_block; b < end_block; b++)
 		set_discard(cache, to_dblock(b));
@@ -1070,14 +1133,9 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
 		inc_hit_counter(cache, bio);
 		pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
 
-		if (is_writethrough_io(cache, bio, lookup_result.cblock)) {
-			/*
-			 * No need to mark anything dirty in write through mode.
-			 */
-			pb->req_nr == 0 ?
-				remap_to_cache(cache, bio, lookup_result.cblock) :
-				remap_to_origin_clear_discard(cache, bio, block);
-		} else
+		if (is_writethrough_io(cache, bio, lookup_result.cblock))
+			remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
+		else
 			remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
 
 		issue(cache, bio);
@@ -1086,17 +1144,8 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
 	case POLICY_MISS:
 		inc_miss_counter(cache, bio);
 		pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
-
-		if (pb->req_nr != 0) {
-			/*
-			 * This is a duplicate writethrough io that is no
-			 * longer needed because the block has been demoted.
-			 */
-			bio_endio(bio, 0);
-		} else {
-			remap_to_origin_clear_discard(cache, bio, block);
-			issue(cache, bio);
-		}
+		remap_to_origin_clear_discard(cache, bio, block);
+		issue(cache, bio);
 		break;
 
 	case POLICY_NEW:
@@ -1217,6 +1266,23 @@ static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
 		submit_bios ? generic_make_request(bio) : bio_io_error(bio);
 }
 
+static void process_deferred_writethrough_bios(struct cache *cache)
+{
+	unsigned long flags;
+	struct bio_list bios;
+	struct bio *bio;
+
+	bio_list_init(&bios);
+
+	spin_lock_irqsave(&cache->lock, flags);
+	bio_list_merge(&bios, &cache->deferred_writethrough_bios);
+	bio_list_init(&cache->deferred_writethrough_bios);
+	spin_unlock_irqrestore(&cache->lock, flags);
+
+	while ((bio = bio_list_pop(&bios)))
+		generic_make_request(bio);
+}
+
 static void writeback_some_dirty_blocks(struct cache *cache)
 {
 	int r = 0;
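process_deferred_writethrough_bios() uses the usual splice-under-lock idiom: hold the spinlock only long enough to move the whole shared bio list onto a stack-local list and reinitialise the shared one, then call generic_make_request() with no lock held. A small userspace sketch of that pattern, with invented item/queue types used purely for illustration:

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
	int id;
	struct item *next;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *deferred;	/* shared list, protected by 'lock' */

static void defer_item(int id)
{
	struct item *it = malloc(sizeof(*it));

	it->id = id;
	pthread_mutex_lock(&lock);
	it->next = deferred;	/* push onto the shared list */
	deferred = it;
	pthread_mutex_unlock(&lock);
}

static void process_deferred(void)
{
	struct item *local;

	pthread_mutex_lock(&lock);
	local = deferred;	/* take the whole list in O(1) */
	deferred = NULL;	/* shared list is empty again */
	pthread_mutex_unlock(&lock);

	while (local) {		/* slow work happens without the lock held */
		struct item *next = local->next;

		printf("submitting deferred item %d\n", local->id);
		free(local);
		local = next;
	}
}

int main(void)
{
	defer_item(1);
	defer_item(2);
	process_deferred();
	return 0;
}
```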
@@ -1313,6 +1379,7 @@ static int more_work(struct cache *cache)
 	else
 		return !bio_list_empty(&cache->deferred_bios) ||
 			!bio_list_empty(&cache->deferred_flush_bios) ||
+			!bio_list_empty(&cache->deferred_writethrough_bios) ||
 			!list_empty(&cache->quiesced_migrations) ||
 			!list_empty(&cache->completed_migrations) ||
 			!list_empty(&cache->need_commit_migrations);
@@ -1331,6 +1398,8 @@ static void do_worker(struct work_struct *ws)
 
 		writeback_some_dirty_blocks(cache);
 
+		process_deferred_writethrough_bios(cache);
+
 		if (commit_if_needed(cache)) {
 			process_deferred_flush_bios(cache, false);
 
@@ -1756,8 +1825,11 @@ static int create_cache_policy(struct cache *cache, struct cache_args *ca,
 	}
 
 	r = set_config_values(cache->policy, ca->policy_argc, ca->policy_argv);
-	if (r)
+	if (r) {
+		*error = "Error setting cache policy's config values";
 		dm_cache_policy_destroy(cache->policy);
+		cache->policy = NULL;
+	}
 
 	return r;
 }
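Besides reporting the failure through *error, the error path now clears cache->policy after destroying it, presumably so that a later teardown path that destroys any non-NULL policy does not free it a second time. A hypothetical sketch of the destroy-then-NULL idiom; the types and function names here are invented and are not the dm-cache API:

```c
#include <stdlib.h>

struct policy { int dummy; };

static void policy_destroy(struct policy *p)
{
	free(p);
}

struct cache_ctx {
	struct policy *policy;
};

static int configure(struct cache_ctx *c, int fail)
{
	c->policy = calloc(1, sizeof(*c->policy));
	if (fail) {
		policy_destroy(c->policy);
		c->policy = NULL;	/* later cleanup must not free it again */
		return -1;
	}
	return 0;
}

static void teardown(struct cache_ctx *c)
{
	if (c->policy)			/* safe because of the NULL above */
		policy_destroy(c->policy);
}

int main(void)
{
	struct cache_ctx c = { 0 };

	configure(&c, 1);	/* simulate the failing config path */
	teardown(&c);		/* no double free */
	return 0;
}
```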
@@ -1793,8 +1865,6 @@ static sector_t calculate_discard_block_size(sector_t cache_block_size,
 
 #define DEFAULT_MIGRATION_THRESHOLD (2048 * 100)
 
-static unsigned cache_num_write_bios(struct dm_target *ti, struct bio *bio);
-
 static int cache_create(struct cache_args *ca, struct cache **result)
 {
 	int r = 0;
@@ -1821,9 +1891,6 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 
 	memcpy(&cache->features, &ca->features, sizeof(cache->features));
 
-	if (cache->features.write_through)
-		ti->num_write_bios = cache_num_write_bios;
-
 	cache->callbacks.congested_fn = cache_is_congested;
 	dm_table_add_target_callbacks(ti->table, &cache->callbacks);
 
@@ -1835,7 +1902,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 
 	/* FIXME: factor out this whole section */
 	origin_blocks = cache->origin_sectors = ca->origin_sectors;
-	(void) sector_div(origin_blocks, ca->block_size);
+	origin_blocks = block_div(origin_blocks, ca->block_size);
 	cache->origin_blocks = to_oblock(origin_blocks);
 
 	cache->sectors_per_block = ca->block_size;
@@ -1848,7 +1915,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 		dm_block_t cache_size = ca->cache_sectors;
 
 		cache->sectors_per_block_shift = -1;
-		(void) sector_div(cache_size, ca->block_size);
+		cache_size = block_div(cache_size, ca->block_size);
 		cache->cache_size = to_cblock(cache_size);
 	} else {
 		cache->sectors_per_block_shift = __ffs(ca->block_size);
@@ -1873,6 +1940,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 	spin_lock_init(&cache->lock);
 	bio_list_init(&cache->deferred_bios);
 	bio_list_init(&cache->deferred_flush_bios);
+	bio_list_init(&cache->deferred_writethrough_bios);
 	INIT_LIST_HEAD(&cache->quiesced_migrations);
 	INIT_LIST_HEAD(&cache->completed_migrations);
 	INIT_LIST_HEAD(&cache->need_commit_migrations);
@@ -2002,6 +2070,8 @@ static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
 		goto out;
 
 	r = cache_create(ca, &cache);
+	if (r)
+		goto out;
 
 	r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3);
 	if (r) {
@@ -2016,20 +2086,6 @@ out:
 	return r;
 }
 
-static unsigned cache_num_write_bios(struct dm_target *ti, struct bio *bio)
-{
-	int r;
-	struct cache *cache = ti->private;
-	dm_oblock_t block = get_bio_block(cache, bio);
-	dm_cblock_t cblock;
-
-	r = policy_lookup(cache->policy, block, &cblock);
-	if (r < 0)
-		return 2; /* assume the worst */
-
-	return (!r && !is_dirty(cache, cblock)) ? 2 : 1;
-}
-
 static int cache_map(struct dm_target *ti, struct bio *bio)
 {
 	struct cache *cache = ti->private;
@@ -2097,18 +2153,12 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
 		inc_hit_counter(cache, bio);
 		pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
 
-		if (is_writethrough_io(cache, bio, lookup_result.cblock)) {
-			/*
-			 * No need to mark anything dirty in write through mode.
-			 */
-			pb->req_nr == 0 ?
-				remap_to_cache(cache, bio, lookup_result.cblock) :
-				remap_to_origin_clear_discard(cache, bio, block);
-			cell_defer(cache, cell, false);
-		} else {
-			remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
-			cell_defer(cache, cell, false);
-		}
+		if (is_writethrough_io(cache, bio, lookup_result.cblock))
+			remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
+		else
+			remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
+
+		cell_defer(cache, cell, false);
 		break;
 
 	case POLICY_MISS:
@@ -2319,8 +2369,7 @@ static int cache_preresume(struct dm_target *ti)
 	}
 
 	if (!cache->loaded_mappings) {
-		r = dm_cache_load_mappings(cache->cmd,
-					   dm_cache_policy_get_name(cache->policy),
+		r = dm_cache_load_mappings(cache->cmd, cache->policy,
 					   load_mapping, cache);
 		if (r) {
 			DMERR("could not load cache mappings");
@@ -2535,7 +2584,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static struct target_type cache_target = {
 	.name = "cache",
-	.version = {1, 0, 0},
+	.version = {1, 1, 0},
 	.module = THIS_MODULE,
 	.ctr = cache_ctr,
 	.dtr = cache_dtr,