Diffstat (limited to 'drivers/md/dm-cache-target.c')
-rw-r--r--  drivers/md/dm-cache-target.c  867
1 file changed, 678 insertions, 189 deletions
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 7755af351867..1fe93cfea7d3 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -25,44 +25,93 @@ DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(cache_copy_throttle,
25 25
26/*----------------------------------------------------------------*/ 26/*----------------------------------------------------------------*/
27 27
28/* 28#define IOT_RESOLUTION 4
29 * Glossary:
30 *
31 * oblock: index of an origin block
32 * cblock: index of a cache block
33 * promotion: movement of a block from origin to cache
34 * demotion: movement of a block from cache to origin
35 * migration: movement of a block between the origin and cache device,
36 * either direction
37 */
38 29
39/*----------------------------------------------------------------*/ 30struct io_tracker {
31 spinlock_t lock;
32
33 /*
34 * Sectors of in-flight IO.
35 */
36 sector_t in_flight;
37
38 /*
39 * The time, in jiffies, when this device became idle (if it is
40 * indeed idle).
41 */
42 unsigned long idle_time;
43 unsigned long last_update_time;
44};
45
46static void iot_init(struct io_tracker *iot)
47{
48 spin_lock_init(&iot->lock);
49 iot->in_flight = 0ul;
50 iot->idle_time = 0ul;
51 iot->last_update_time = jiffies;
52}
53
54static bool __iot_idle_for(struct io_tracker *iot, unsigned long jifs)
55{
56 if (iot->in_flight)
57 return false;
58
59 return time_after(jiffies, iot->idle_time + jifs);
60}
40 61
41static size_t bitset_size_in_bytes(unsigned nr_entries) 62static bool iot_idle_for(struct io_tracker *iot, unsigned long jifs)
42{ 63{
43 return sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG); 64 bool r;
65 unsigned long flags;
66
67 spin_lock_irqsave(&iot->lock, flags);
68 r = __iot_idle_for(iot, jifs);
69 spin_unlock_irqrestore(&iot->lock, flags);
70
71 return r;
44} 72}
45 73
46static unsigned long *alloc_bitset(unsigned nr_entries) 74static void iot_io_begin(struct io_tracker *iot, sector_t len)
47{ 75{
48 size_t s = bitset_size_in_bytes(nr_entries); 76 unsigned long flags;
49 return vzalloc(s); 77
78 spin_lock_irqsave(&iot->lock, flags);
79 iot->in_flight += len;
80 spin_unlock_irqrestore(&iot->lock, flags);
50} 81}
51 82
52static void clear_bitset(void *bitset, unsigned nr_entries) 83static void __iot_io_end(struct io_tracker *iot, sector_t len)
53{ 84{
54 size_t s = bitset_size_in_bytes(nr_entries); 85 iot->in_flight -= len;
55 memset(bitset, 0, s); 86 if (!iot->in_flight)
87 iot->idle_time = jiffies;
56} 88}
57 89
58static void free_bitset(unsigned long *bits) 90static void iot_io_end(struct io_tracker *iot, sector_t len)
59{ 91{
60 vfree(bits); 92 unsigned long flags;
93
94 spin_lock_irqsave(&iot->lock, flags);
95 __iot_io_end(iot, len);
96 spin_unlock_irqrestore(&iot->lock, flags);
61} 97}
62 98
63/*----------------------------------------------------------------*/ 99/*----------------------------------------------------------------*/
64 100
65/* 101/*
102 * Glossary:
103 *
104 * oblock: index of an origin block
105 * cblock: index of a cache block
106 * promotion: movement of a block from origin to cache
107 * demotion: movement of a block from cache to origin
108 * migration: movement of a block between the origin and cache device,
109 * either direction
110 */
111
112/*----------------------------------------------------------------*/
113
114/*
66 * There are a couple of places where we let a bio run, but want to do some 115 * There are a couple of places where we let a bio run, but want to do some
67 * work before calling its endio function. We do this by temporarily 116 * work before calling its endio function. We do this by temporarily
68 * changing the endio fn. 117 * changing the endio fn.
@@ -86,12 +135,6 @@ static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
86{ 135{
87 bio->bi_end_io = h->bi_end_io; 136 bio->bi_end_io = h->bi_end_io;
88 bio->bi_private = h->bi_private; 137 bio->bi_private = h->bi_private;
89
90 /*
91 * Must bump bi_remaining to allow bio to complete with
92 * restored bi_end_io.
93 */
94 atomic_inc(&bio->bi_remaining);
95} 138}
96 139
97/*----------------------------------------------------------------*/ 140/*----------------------------------------------------------------*/
@@ -107,12 +150,10 @@ static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
107#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT) 150#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
108#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT) 151#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
109 152
110/*
111 * FIXME: the cache is read/write for the time being.
112 */
113enum cache_metadata_mode { 153enum cache_metadata_mode {
114 CM_WRITE, /* metadata may be changed */ 154 CM_WRITE, /* metadata may be changed */
115 CM_READ_ONLY, /* metadata may not be changed */ 155 CM_READ_ONLY, /* metadata may not be changed */
156 CM_FAIL
116}; 157};
117 158
118enum cache_io_mode { 159enum cache_io_mode {
@@ -214,6 +255,7 @@ struct cache {
214 int sectors_per_block_shift; 255 int sectors_per_block_shift;
215 256
216 spinlock_t lock; 257 spinlock_t lock;
258 struct list_head deferred_cells;
217 struct bio_list deferred_bios; 259 struct bio_list deferred_bios;
218 struct bio_list deferred_flush_bios; 260 struct bio_list deferred_flush_bios;
219 struct bio_list deferred_writethrough_bios; 261 struct bio_list deferred_writethrough_bios;
@@ -288,6 +330,8 @@ struct cache {
288 */ 330 */
289 spinlock_t invalidation_lock; 331 spinlock_t invalidation_lock;
290 struct list_head invalidation_requests; 332 struct list_head invalidation_requests;
333
334 struct io_tracker origin_tracker;
291}; 335};
292 336
293struct per_bio_data { 337struct per_bio_data {
@@ -295,6 +339,7 @@ struct per_bio_data {
295 unsigned req_nr:2; 339 unsigned req_nr:2;
296 struct dm_deferred_entry *all_io_entry; 340 struct dm_deferred_entry *all_io_entry;
297 struct dm_hook_info hook_info; 341 struct dm_hook_info hook_info;
342 sector_t len;
298 343
299 /* 344 /*
300 * writethrough fields. These MUST remain at the end of this 345 * writethrough fields. These MUST remain at the end of this
@@ -338,6 +383,8 @@ struct prealloc {
338 struct dm_bio_prison_cell *cell2; 383 struct dm_bio_prison_cell *cell2;
339}; 384};
340 385
386static enum cache_metadata_mode get_cache_mode(struct cache *cache);
387
341static void wake_worker(struct cache *cache) 388static void wake_worker(struct cache *cache)
342{ 389{
343 queue_work(cache->wq, &cache->worker); 390 queue_work(cache->wq, &cache->worker);
@@ -371,10 +418,13 @@ static struct dm_cache_migration *alloc_migration(struct cache *cache)
371 418
372static void free_migration(struct dm_cache_migration *mg) 419static void free_migration(struct dm_cache_migration *mg)
373{ 420{
374 if (atomic_dec_and_test(&mg->cache->nr_allocated_migrations)) 421 struct cache *cache = mg->cache;
375 wake_up(&mg->cache->migration_wait); 422
423 if (atomic_dec_and_test(&cache->nr_allocated_migrations))
424 wake_up(&cache->migration_wait);
376 425
377 mempool_free(mg, mg->cache->migration_pool); 426 mempool_free(mg, cache->migration_pool);
427 wake_worker(cache);
378} 428}
379 429
380static int prealloc_data_structs(struct cache *cache, struct prealloc *p) 430static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
@@ -649,6 +699,9 @@ static void save_stats(struct cache *cache)
649{ 699{
650 struct dm_cache_statistics stats; 700 struct dm_cache_statistics stats;
651 701
702 if (get_cache_mode(cache) >= CM_READ_ONLY)
703 return;
704
652 stats.read_hits = atomic_read(&cache->stats.read_hit); 705 stats.read_hits = atomic_read(&cache->stats.read_hit);
653 stats.read_misses = atomic_read(&cache->stats.read_miss); 706 stats.read_misses = atomic_read(&cache->stats.read_miss);
654 stats.write_hits = atomic_read(&cache->stats.write_hit); 707 stats.write_hits = atomic_read(&cache->stats.write_hit);
@@ -701,6 +754,7 @@ static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size)
701 pb->tick = false; 754 pb->tick = false;
702 pb->req_nr = dm_bio_get_target_bio_nr(bio); 755 pb->req_nr = dm_bio_get_target_bio_nr(bio);
703 pb->all_io_entry = NULL; 756 pb->all_io_entry = NULL;
757 pb->len = 0;
704 758
705 return pb; 759 return pb;
706} 760}
@@ -798,12 +852,43 @@ static void inc_ds(struct cache *cache, struct bio *bio,
798 pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); 852 pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
799} 853}
800 854
855static bool accountable_bio(struct cache *cache, struct bio *bio)
856{
857 return ((bio->bi_bdev == cache->origin_dev->bdev) &&
858 !(bio->bi_rw & REQ_DISCARD));
859}
860
861static void accounted_begin(struct cache *cache, struct bio *bio)
862{
863 size_t pb_data_size = get_per_bio_data_size(cache);
864 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
865
866 if (accountable_bio(cache, bio)) {
867 pb->len = bio_sectors(bio);
868 iot_io_begin(&cache->origin_tracker, pb->len);
869 }
870}
871
872static void accounted_complete(struct cache *cache, struct bio *bio)
873{
874 size_t pb_data_size = get_per_bio_data_size(cache);
875 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
876
877 iot_io_end(&cache->origin_tracker, pb->len);
878}
879
880static void accounted_request(struct cache *cache, struct bio *bio)
881{
882 accounted_begin(cache, bio);
883 generic_make_request(bio);
884}
885
801static void issue(struct cache *cache, struct bio *bio) 886static void issue(struct cache *cache, struct bio *bio)
802{ 887{
803 unsigned long flags; 888 unsigned long flags;
804 889
805 if (!bio_triggers_commit(cache, bio)) { 890 if (!bio_triggers_commit(cache, bio)) {
806 generic_make_request(bio); 891 accounted_request(cache, bio);
807 return; 892 return;
808 } 893 }
809 894
@@ -876,6 +961,94 @@ static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
876} 961}
877 962
878/*---------------------------------------------------------------- 963/*----------------------------------------------------------------
964 * Failure modes
965 *--------------------------------------------------------------*/
966static enum cache_metadata_mode get_cache_mode(struct cache *cache)
967{
968 return cache->features.mode;
969}
970
971static const char *cache_device_name(struct cache *cache)
972{
973 return dm_device_name(dm_table_get_md(cache->ti->table));
974}
975
976static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mode)
977{
978 const char *descs[] = {
979 "write",
980 "read-only",
981 "fail"
982 };
983
984 dm_table_event(cache->ti->table);
985 DMINFO("%s: switching cache to %s mode",
986 cache_device_name(cache), descs[(int)mode]);
987}
988
989static void set_cache_mode(struct cache *cache, enum cache_metadata_mode new_mode)
990{
991 bool needs_check = dm_cache_metadata_needs_check(cache->cmd);
992 enum cache_metadata_mode old_mode = get_cache_mode(cache);
993
994 if (new_mode == CM_WRITE && needs_check) {
995 DMERR("%s: unable to switch cache to write mode until repaired.",
996 cache_device_name(cache));
997 if (old_mode != new_mode)
998 new_mode = old_mode;
999 else
1000 new_mode = CM_READ_ONLY;
1001 }
1002
1003 /* Never move out of fail mode */
1004 if (old_mode == CM_FAIL)
1005 new_mode = CM_FAIL;
1006
1007 switch (new_mode) {
1008 case CM_FAIL:
1009 case CM_READ_ONLY:
1010 dm_cache_metadata_set_read_only(cache->cmd);
1011 break;
1012
1013 case CM_WRITE:
1014 dm_cache_metadata_set_read_write(cache->cmd);
1015 break;
1016 }
1017
1018 cache->features.mode = new_mode;
1019
1020 if (new_mode != old_mode)
1021 notify_mode_switch(cache, new_mode);
1022}
1023
1024static void abort_transaction(struct cache *cache)
1025{
1026 const char *dev_name = cache_device_name(cache);
1027
1028 if (get_cache_mode(cache) >= CM_READ_ONLY)
1029 return;
1030
1031 if (dm_cache_metadata_set_needs_check(cache->cmd)) {
1032 DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
1033 set_cache_mode(cache, CM_FAIL);
1034 }
1035
1036 DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
1037 if (dm_cache_metadata_abort(cache->cmd)) {
1038 DMERR("%s: failed to abort metadata transaction", dev_name);
1039 set_cache_mode(cache, CM_FAIL);
1040 }
1041}
1042
1043static void metadata_operation_failed(struct cache *cache, const char *op, int r)
1044{
1045 DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
1046 cache_device_name(cache), op, r);
1047 abort_transaction(cache);
1048 set_cache_mode(cache, CM_READ_ONLY);
1049}
1050
1051/*----------------------------------------------------------------
879 * Migration processing 1052 * Migration processing
880 * 1053 *
881 * Migration covers moving data from the origin device to the cache, or 1054 * Migration covers moving data from the origin device to the cache, or
@@ -891,26 +1064,63 @@ static void dec_io_migrations(struct cache *cache)
891 atomic_dec(&cache->nr_io_migrations); 1064 atomic_dec(&cache->nr_io_migrations);
892} 1065}
893 1066
894static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, 1067static void __cell_release(struct cache *cache, struct dm_bio_prison_cell *cell,
895 bool holder) 1068 bool holder, struct bio_list *bios)
896{ 1069{
897 (holder ? dm_cell_release : dm_cell_release_no_holder) 1070 (holder ? dm_cell_release : dm_cell_release_no_holder)
898 (cache->prison, cell, &cache->deferred_bios); 1071 (cache->prison, cell, bios);
899 free_prison_cell(cache, cell); 1072 free_prison_cell(cache, cell);
900} 1073}
901 1074
902static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, 1075static bool discard_or_flush(struct bio *bio)
903 bool holder) 1076{
1077 return bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD);
1078}
1079
1080static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell)
1081{
1082 if (discard_or_flush(cell->holder))
1083 /*
1084 * We have to handle these bios
1085 * individually.
1086 */
1087 __cell_release(cache, cell, true, &cache->deferred_bios);
1088
1089 else
1090 list_add_tail(&cell->user_list, &cache->deferred_cells);
1091}
1092
1093static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, bool holder)
904{ 1094{
905 unsigned long flags; 1095 unsigned long flags;
906 1096
1097 if (!holder && dm_cell_promote_or_release(cache->prison, cell)) {
1098 /*
1099 * There was no prisoner to promote to holder, the
1100 * cell has been released.
1101 */
1102 free_prison_cell(cache, cell);
1103 return;
1104 }
1105
907 spin_lock_irqsave(&cache->lock, flags); 1106 spin_lock_irqsave(&cache->lock, flags);
908 __cell_defer(cache, cell, holder); 1107 __cell_defer(cache, cell);
909 spin_unlock_irqrestore(&cache->lock, flags); 1108 spin_unlock_irqrestore(&cache->lock, flags);
910 1109
911 wake_worker(cache); 1110 wake_worker(cache);
912} 1111}
913 1112
1113static void cell_error_with_code(struct cache *cache, struct dm_bio_prison_cell *cell, int err)
1114{
1115 dm_cell_error(cache->prison, cell, err);
1116 dm_bio_prison_free_cell(cache->prison, cell);
1117}
1118
1119static void cell_requeue(struct cache *cache, struct dm_bio_prison_cell *cell)
1120{
1121 cell_error_with_code(cache, cell, DM_ENDIO_REQUEUE);
1122}
1123
914static void free_io_migration(struct dm_cache_migration *mg) 1124static void free_io_migration(struct dm_cache_migration *mg)
915{ 1125{
916 dec_io_migrations(mg->cache); 1126 dec_io_migrations(mg->cache);
@@ -920,21 +1130,22 @@ static void free_io_migration(struct dm_cache_migration *mg)
920static void migration_failure(struct dm_cache_migration *mg) 1130static void migration_failure(struct dm_cache_migration *mg)
921{ 1131{
922 struct cache *cache = mg->cache; 1132 struct cache *cache = mg->cache;
1133 const char *dev_name = cache_device_name(cache);
923 1134
924 if (mg->writeback) { 1135 if (mg->writeback) {
925 DMWARN_LIMIT("writeback failed; couldn't copy block"); 1136 DMERR_LIMIT("%s: writeback failed; couldn't copy block", dev_name);
926 set_dirty(cache, mg->old_oblock, mg->cblock); 1137 set_dirty(cache, mg->old_oblock, mg->cblock);
927 cell_defer(cache, mg->old_ocell, false); 1138 cell_defer(cache, mg->old_ocell, false);
928 1139
929 } else if (mg->demote) { 1140 } else if (mg->demote) {
930 DMWARN_LIMIT("demotion failed; couldn't copy block"); 1141 DMERR_LIMIT("%s: demotion failed; couldn't copy block", dev_name);
931 policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock); 1142 policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock);
932 1143
933 cell_defer(cache, mg->old_ocell, mg->promote ? false : true); 1144 cell_defer(cache, mg->old_ocell, mg->promote ? false : true);
934 if (mg->promote) 1145 if (mg->promote)
935 cell_defer(cache, mg->new_ocell, true); 1146 cell_defer(cache, mg->new_ocell, true);
936 } else { 1147 } else {
937 DMWARN_LIMIT("promotion failed; couldn't copy block"); 1148 DMERR_LIMIT("%s: promotion failed; couldn't copy block", dev_name);
938 policy_remove_mapping(cache->policy, mg->new_oblock); 1149 policy_remove_mapping(cache->policy, mg->new_oblock);
939 cell_defer(cache, mg->new_ocell, true); 1150 cell_defer(cache, mg->new_ocell, true);
940 } 1151 }
@@ -944,6 +1155,7 @@ static void migration_failure(struct dm_cache_migration *mg)
944 1155
945static void migration_success_pre_commit(struct dm_cache_migration *mg) 1156static void migration_success_pre_commit(struct dm_cache_migration *mg)
946{ 1157{
1158 int r;
947 unsigned long flags; 1159 unsigned long flags;
948 struct cache *cache = mg->cache; 1160 struct cache *cache = mg->cache;
949 1161
@@ -954,8 +1166,11 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg)
954 return; 1166 return;
955 1167
956 } else if (mg->demote) { 1168 } else if (mg->demote) {
957 if (dm_cache_remove_mapping(cache->cmd, mg->cblock)) { 1169 r = dm_cache_remove_mapping(cache->cmd, mg->cblock);
958 DMWARN_LIMIT("demotion failed; couldn't update on disk metadata"); 1170 if (r) {
1171 DMERR_LIMIT("%s: demotion failed; couldn't update on disk metadata",
1172 cache_device_name(cache));
1173 metadata_operation_failed(cache, "dm_cache_remove_mapping", r);
959 policy_force_mapping(cache->policy, mg->new_oblock, 1174 policy_force_mapping(cache->policy, mg->new_oblock,
960 mg->old_oblock); 1175 mg->old_oblock);
961 if (mg->promote) 1176 if (mg->promote)
@@ -964,8 +1179,11 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg)
964 return; 1179 return;
965 } 1180 }
966 } else { 1181 } else {
967 if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) { 1182 r = dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock);
968 DMWARN_LIMIT("promotion failed; couldn't update on disk metadata"); 1183 if (r) {
1184 DMERR_LIMIT("%s: promotion failed; couldn't update on disk metadata",
1185 cache_device_name(cache));
1186 metadata_operation_failed(cache, "dm_cache_insert_mapping", r);
969 policy_remove_mapping(cache->policy, mg->new_oblock); 1187 policy_remove_mapping(cache->policy, mg->new_oblock);
970 free_io_migration(mg); 1188 free_io_migration(mg);
971 return; 1189 return;
@@ -984,7 +1202,8 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
984 struct cache *cache = mg->cache; 1202 struct cache *cache = mg->cache;
985 1203
986 if (mg->writeback) { 1204 if (mg->writeback) {
987 DMWARN("writeback unexpectedly triggered commit"); 1205 DMWARN_LIMIT("%s: writeback unexpectedly triggered commit",
1206 cache_device_name(cache));
988 return; 1207 return;
989 1208
990 } else if (mg->demote) { 1209 } else if (mg->demote) {
@@ -1060,7 +1279,7 @@ static void issue_copy(struct dm_cache_migration *mg)
1060 } 1279 }
1061 1280
1062 if (r < 0) { 1281 if (r < 0) {
1063 DMERR_LIMIT("issuing migration failed"); 1282 DMERR_LIMIT("%s: issuing migration failed", cache_device_name(cache));
1064 migration_failure(mg); 1283 migration_failure(mg);
1065 } 1284 }
1066} 1285}
@@ -1099,7 +1318,7 @@ static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio)
1099 * No need to inc_ds() here, since the cell will be held for the 1318 * No need to inc_ds() here, since the cell will be held for the
1100 * duration of the io. 1319 * duration of the io.
1101 */ 1320 */
1102 generic_make_request(bio); 1321 accounted_request(mg->cache, bio);
1103} 1322}
1104 1323
1105static bool bio_writes_complete_block(struct cache *cache, struct bio *bio) 1324static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
@@ -1445,32 +1664,154 @@ static void inc_miss_counter(struct cache *cache, struct bio *bio)
1445 &cache->stats.read_miss : &cache->stats.write_miss); 1664 &cache->stats.read_miss : &cache->stats.write_miss);
1446} 1665}
1447 1666
1448static void process_bio(struct cache *cache, struct prealloc *structs, 1667/*----------------------------------------------------------------*/
1449 struct bio *bio) 1668
1669struct inc_detail {
1670 struct cache *cache;
1671 struct bio_list bios_for_issue;
1672 struct bio_list unhandled_bios;
1673 bool any_writes;
1674};
1675
1676static void inc_fn(void *context, struct dm_bio_prison_cell *cell)
1677{
1678 struct bio *bio;
1679 struct inc_detail *detail = context;
1680 struct cache *cache = detail->cache;
1681
1682 inc_ds(cache, cell->holder, cell);
1683 if (bio_data_dir(cell->holder) == WRITE)
1684 detail->any_writes = true;
1685
1686 while ((bio = bio_list_pop(&cell->bios))) {
1687 if (discard_or_flush(bio)) {
1688 bio_list_add(&detail->unhandled_bios, bio);
1689 continue;
1690 }
1691
1692 if (bio_data_dir(bio) == WRITE)
1693 detail->any_writes = true;
1694
1695 bio_list_add(&detail->bios_for_issue, bio);
1696 inc_ds(cache, bio, cell);
1697 }
1698}
1699
1700// FIXME: refactor these two
1701static void remap_cell_to_origin_clear_discard(struct cache *cache,
1702 struct dm_bio_prison_cell *cell,
1703 dm_oblock_t oblock, bool issue_holder)
1704{
1705 struct bio *bio;
1706 unsigned long flags;
1707 struct inc_detail detail;
1708
1709 detail.cache = cache;
1710 bio_list_init(&detail.bios_for_issue);
1711 bio_list_init(&detail.unhandled_bios);
1712 detail.any_writes = false;
1713
1714 spin_lock_irqsave(&cache->lock, flags);
1715 dm_cell_visit_release(cache->prison, inc_fn, &detail, cell);
1716 bio_list_merge(&cache->deferred_bios, &detail.unhandled_bios);
1717 spin_unlock_irqrestore(&cache->lock, flags);
1718
1719 remap_to_origin(cache, cell->holder);
1720 if (issue_holder)
1721 issue(cache, cell->holder);
1722 else
1723 accounted_begin(cache, cell->holder);
1724
1725 if (detail.any_writes)
1726 clear_discard(cache, oblock_to_dblock(cache, oblock));
1727
1728 while ((bio = bio_list_pop(&detail.bios_for_issue))) {
1729 remap_to_origin(cache, bio);
1730 issue(cache, bio);
1731 }
1732}
1733
1734static void remap_cell_to_cache_dirty(struct cache *cache, struct dm_bio_prison_cell *cell,
1735 dm_oblock_t oblock, dm_cblock_t cblock, bool issue_holder)
1736{
1737 struct bio *bio;
1738 unsigned long flags;
1739 struct inc_detail detail;
1740
1741 detail.cache = cache;
1742 bio_list_init(&detail.bios_for_issue);
1743 bio_list_init(&detail.unhandled_bios);
1744 detail.any_writes = false;
1745
1746 spin_lock_irqsave(&cache->lock, flags);
1747 dm_cell_visit_release(cache->prison, inc_fn, &detail, cell);
1748 bio_list_merge(&cache->deferred_bios, &detail.unhandled_bios);
1749 spin_unlock_irqrestore(&cache->lock, flags);
1750
1751 remap_to_cache(cache, cell->holder, cblock);
1752 if (issue_holder)
1753 issue(cache, cell->holder);
1754 else
1755 accounted_begin(cache, cell->holder);
1756
1757 if (detail.any_writes) {
1758 set_dirty(cache, oblock, cblock);
1759 clear_discard(cache, oblock_to_dblock(cache, oblock));
1760 }
1761
1762 while ((bio = bio_list_pop(&detail.bios_for_issue))) {
1763 remap_to_cache(cache, bio, cblock);
1764 issue(cache, bio);
1765 }
1766}
1767
1768/*----------------------------------------------------------------*/
1769
1770struct old_oblock_lock {
1771 struct policy_locker locker;
1772 struct cache *cache;
1773 struct prealloc *structs;
1774 struct dm_bio_prison_cell *cell;
1775};
1776
1777static int null_locker(struct policy_locker *locker, dm_oblock_t b)
1778{
1779 /* This should never be called */
1780 BUG();
1781 return 0;
1782}
1783
1784static int cell_locker(struct policy_locker *locker, dm_oblock_t b)
1785{
1786 struct old_oblock_lock *l = container_of(locker, struct old_oblock_lock, locker);
1787 struct dm_bio_prison_cell *cell_prealloc = prealloc_get_cell(l->structs);
1788
1789 return bio_detain(l->cache, b, NULL, cell_prealloc,
1790 (cell_free_fn) prealloc_put_cell,
1791 l->structs, &l->cell);
1792}
1793
1794static void process_cell(struct cache *cache, struct prealloc *structs,
1795 struct dm_bio_prison_cell *new_ocell)
1450{ 1796{
1451 int r; 1797 int r;
1452 bool release_cell = true; 1798 bool release_cell = true;
1799 struct bio *bio = new_ocell->holder;
1453 dm_oblock_t block = get_bio_block(cache, bio); 1800 dm_oblock_t block = get_bio_block(cache, bio);
1454 struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;
1455 struct policy_result lookup_result; 1801 struct policy_result lookup_result;
1456 bool passthrough = passthrough_mode(&cache->features); 1802 bool passthrough = passthrough_mode(&cache->features);
1457 bool discarded_block, can_migrate; 1803 bool fast_promotion, can_migrate;
1804 struct old_oblock_lock ool;
1458 1805
1459 /* 1806 fast_promotion = is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio);
1460 * Check to see if that block is currently migrating. 1807 can_migrate = !passthrough && (fast_promotion || spare_migration_bandwidth(cache));
1461 */
1462 cell_prealloc = prealloc_get_cell(structs);
1463 r = bio_detain(cache, block, bio, cell_prealloc,
1464 (cell_free_fn) prealloc_put_cell,
1465 structs, &new_ocell);
1466 if (r > 0)
1467 return;
1468 1808
1469 discarded_block = is_discarded_oblock(cache, block); 1809 ool.locker.fn = cell_locker;
1470 can_migrate = !passthrough && (discarded_block || spare_migration_bandwidth(cache)); 1810 ool.cache = cache;
1471 1811 ool.structs = structs;
1472 r = policy_map(cache->policy, block, true, can_migrate, discarded_block, 1812 ool.cell = NULL;
1473 bio, &lookup_result); 1813 r = policy_map(cache->policy, block, true, can_migrate, fast_promotion,
1814 bio, &ool.locker, &lookup_result);
1474 1815
1475 if (r == -EWOULDBLOCK) 1816 if (r == -EWOULDBLOCK)
1476 /* migration has been denied */ 1817 /* migration has been denied */
@@ -1506,9 +1847,9 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
1506 remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock); 1847 remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
1507 inc_and_issue(cache, bio, new_ocell); 1848 inc_and_issue(cache, bio, new_ocell);
1508 1849
1509 } else { 1850 } else {
1510 remap_to_cache_dirty(cache, bio, block, lookup_result.cblock); 1851 remap_cell_to_cache_dirty(cache, new_ocell, block, lookup_result.cblock, true);
1511 inc_and_issue(cache, bio, new_ocell); 1852 release_cell = false;
1512 } 1853 }
1513 } 1854 }
1514 1855
@@ -1516,8 +1857,8 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
1516 1857
1517 case POLICY_MISS: 1858 case POLICY_MISS:
1518 inc_miss_counter(cache, bio); 1859 inc_miss_counter(cache, bio);
1519 remap_to_origin_clear_discard(cache, bio, block); 1860 remap_cell_to_origin_clear_discard(cache, new_ocell, block, true);
1520 inc_and_issue(cache, bio, new_ocell); 1861 release_cell = false;
1521 break; 1862 break;
1522 1863
1523 case POLICY_NEW: 1864 case POLICY_NEW:
@@ -1527,32 +1868,17 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
1527 break; 1868 break;
1528 1869
1529 case POLICY_REPLACE: 1870 case POLICY_REPLACE:
1530 cell_prealloc = prealloc_get_cell(structs);
1531 r = bio_detain(cache, lookup_result.old_oblock, bio, cell_prealloc,
1532 (cell_free_fn) prealloc_put_cell,
1533 structs, &old_ocell);
1534 if (r > 0) {
1535 /*
1536 * We have to be careful to avoid lock inversion of
1537 * the cells. So we back off, and wait for the
1538 * old_ocell to become free.
1539 */
1540 policy_force_mapping(cache->policy, block,
1541 lookup_result.old_oblock);
1542 atomic_inc(&cache->stats.cache_cell_clash);
1543 break;
1544 }
1545 atomic_inc(&cache->stats.demotion); 1871 atomic_inc(&cache->stats.demotion);
1546 atomic_inc(&cache->stats.promotion); 1872 atomic_inc(&cache->stats.promotion);
1547
1548 demote_then_promote(cache, structs, lookup_result.old_oblock, 1873 demote_then_promote(cache, structs, lookup_result.old_oblock,
1549 block, lookup_result.cblock, 1874 block, lookup_result.cblock,
1550 old_ocell, new_ocell); 1875 ool.cell, new_ocell);
1551 release_cell = false; 1876 release_cell = false;
1552 break; 1877 break;
1553 1878
1554 default: 1879 default:
1555 DMERR_LIMIT("%s: erroring bio, unknown policy op: %u", __func__, 1880 DMERR_LIMIT("%s: %s: erroring bio, unknown policy op: %u",
1881 cache_device_name(cache), __func__,
1556 (unsigned) lookup_result.op); 1882 (unsigned) lookup_result.op);
1557 bio_io_error(bio); 1883 bio_io_error(bio);
1558 } 1884 }
@@ -1561,10 +1887,48 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
1561 cell_defer(cache, new_ocell, false); 1887 cell_defer(cache, new_ocell, false);
1562} 1888}
1563 1889
1890static void process_bio(struct cache *cache, struct prealloc *structs,
1891 struct bio *bio)
1892{
1893 int r;
1894 dm_oblock_t block = get_bio_block(cache, bio);
1895 struct dm_bio_prison_cell *cell_prealloc, *new_ocell;
1896
1897 /*
1898 * Check to see if that block is currently migrating.
1899 */
1900 cell_prealloc = prealloc_get_cell(structs);
1901 r = bio_detain(cache, block, bio, cell_prealloc,
1902 (cell_free_fn) prealloc_put_cell,
1903 structs, &new_ocell);
1904 if (r > 0)
1905 return;
1906
1907 process_cell(cache, structs, new_ocell);
1908}
1909
1564static int need_commit_due_to_time(struct cache *cache) 1910static int need_commit_due_to_time(struct cache *cache)
1565{ 1911{
1566 return !time_in_range(jiffies, cache->last_commit_jiffies, 1912 return jiffies < cache->last_commit_jiffies ||
1567 cache->last_commit_jiffies + COMMIT_PERIOD); 1913 jiffies > cache->last_commit_jiffies + COMMIT_PERIOD;
1914}
1915
1916/*
1917 * A non-zero return indicates read_only or fail_io mode.
1918 */
1919static int commit(struct cache *cache, bool clean_shutdown)
1920{
1921 int r;
1922
1923 if (get_cache_mode(cache) >= CM_READ_ONLY)
1924 return -EINVAL;
1925
1926 atomic_inc(&cache->stats.commit_count);
1927 r = dm_cache_commit(cache->cmd, clean_shutdown);
1928 if (r)
1929 metadata_operation_failed(cache, "dm_cache_commit", r);
1930
1931 return r;
1568} 1932}
1569 1933
1570static int commit_if_needed(struct cache *cache) 1934static int commit_if_needed(struct cache *cache)
@@ -1573,9 +1937,8 @@ static int commit_if_needed(struct cache *cache)
1573 1937
1574 if ((cache->commit_requested || need_commit_due_to_time(cache)) && 1938 if ((cache->commit_requested || need_commit_due_to_time(cache)) &&
1575 dm_cache_changed_this_transaction(cache->cmd)) { 1939 dm_cache_changed_this_transaction(cache->cmd)) {
1576 atomic_inc(&cache->stats.commit_count); 1940 r = commit(cache, false);
1577 cache->commit_requested = false; 1941 cache->commit_requested = false;
1578 r = dm_cache_commit(cache->cmd, false);
1579 cache->last_commit_jiffies = jiffies; 1942 cache->last_commit_jiffies = jiffies;
1580 } 1943 }
1581 1944
@@ -1584,6 +1947,7 @@ static int commit_if_needed(struct cache *cache)
1584 1947
1585static void process_deferred_bios(struct cache *cache) 1948static void process_deferred_bios(struct cache *cache)
1586{ 1949{
1950 bool prealloc_used = false;
1587 unsigned long flags; 1951 unsigned long flags;
1588 struct bio_list bios; 1952 struct bio_list bios;
1589 struct bio *bio; 1953 struct bio *bio;
@@ -1603,6 +1967,7 @@ static void process_deferred_bios(struct cache *cache)
1603 * this bio might require one, we pause until there are some 1967 * this bio might require one, we pause until there are some
1604 * prepared mappings to process. 1968 * prepared mappings to process.
1605 */ 1969 */
1970 prealloc_used = true;
1606 if (prealloc_data_structs(cache, &structs)) { 1971 if (prealloc_data_structs(cache, &structs)) {
1607 spin_lock_irqsave(&cache->lock, flags); 1972 spin_lock_irqsave(&cache->lock, flags);
1608 bio_list_merge(&cache->deferred_bios, &bios); 1973 bio_list_merge(&cache->deferred_bios, &bios);
@@ -1620,7 +1985,45 @@ static void process_deferred_bios(struct cache *cache)
1620 process_bio(cache, &structs, bio); 1985 process_bio(cache, &structs, bio);
1621 } 1986 }
1622 1987
1623 prealloc_free_structs(cache, &structs); 1988 if (prealloc_used)
1989 prealloc_free_structs(cache, &structs);
1990}
1991
1992static void process_deferred_cells(struct cache *cache)
1993{
1994 bool prealloc_used = false;
1995 unsigned long flags;
1996 struct dm_bio_prison_cell *cell, *tmp;
1997 struct list_head cells;
1998 struct prealloc structs;
1999
2000 memset(&structs, 0, sizeof(structs));
2001
2002 INIT_LIST_HEAD(&cells);
2003
2004 spin_lock_irqsave(&cache->lock, flags);
2005 list_splice_init(&cache->deferred_cells, &cells);
2006 spin_unlock_irqrestore(&cache->lock, flags);
2007
2008 list_for_each_entry_safe(cell, tmp, &cells, user_list) {
2009 /*
2010 * If we've got no free migration structs, and processing
2011 * this bio might require one, we pause until there are some
2012 * prepared mappings to process.
2013 */
2014 prealloc_used = true;
2015 if (prealloc_data_structs(cache, &structs)) {
2016 spin_lock_irqsave(&cache->lock, flags);
2017 list_splice(&cells, &cache->deferred_cells);
2018 spin_unlock_irqrestore(&cache->lock, flags);
2019 break;
2020 }
2021
2022 process_cell(cache, &structs, cell);
2023 }
2024
2025 if (prealloc_used)
2026 prealloc_free_structs(cache, &structs);
1624} 2027}
1625 2028
1626static void process_deferred_flush_bios(struct cache *cache, bool submit_bios) 2029static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
@@ -1640,7 +2043,7 @@ static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
1640 * These bios have already been through inc_ds() 2043 * These bios have already been through inc_ds()
1641 */ 2044 */
1642 while ((bio = bio_list_pop(&bios))) 2045 while ((bio = bio_list_pop(&bios)))
1643 submit_bios ? generic_make_request(bio) : bio_io_error(bio); 2046 submit_bios ? accounted_request(cache, bio) : bio_io_error(bio);
1644} 2047}
1645 2048
1646static void process_deferred_writethrough_bios(struct cache *cache) 2049static void process_deferred_writethrough_bios(struct cache *cache)
@@ -1660,29 +2063,27 @@ static void process_deferred_writethrough_bios(struct cache *cache)
1660 * These bios have already been through inc_ds() 2063 * These bios have already been through inc_ds()
1661 */ 2064 */
1662 while ((bio = bio_list_pop(&bios))) 2065 while ((bio = bio_list_pop(&bios)))
1663 generic_make_request(bio); 2066 accounted_request(cache, bio);
1664} 2067}
1665 2068
1666static void writeback_some_dirty_blocks(struct cache *cache) 2069static void writeback_some_dirty_blocks(struct cache *cache)
1667{ 2070{
1668 int r = 0; 2071 bool prealloc_used = false;
1669 dm_oblock_t oblock; 2072 dm_oblock_t oblock;
1670 dm_cblock_t cblock; 2073 dm_cblock_t cblock;
1671 struct prealloc structs; 2074 struct prealloc structs;
1672 struct dm_bio_prison_cell *old_ocell; 2075 struct dm_bio_prison_cell *old_ocell;
2076 bool busy = !iot_idle_for(&cache->origin_tracker, HZ);
1673 2077
1674 memset(&structs, 0, sizeof(structs)); 2078 memset(&structs, 0, sizeof(structs));
1675 2079
1676 while (spare_migration_bandwidth(cache)) { 2080 while (spare_migration_bandwidth(cache)) {
1677 if (prealloc_data_structs(cache, &structs)) 2081 if (policy_writeback_work(cache->policy, &oblock, &cblock, busy))
1678 break; 2082 break; /* no work to do */
1679 2083
1680 r = policy_writeback_work(cache->policy, &oblock, &cblock); 2084 prealloc_used = true;
1681 if (r) 2085 if (prealloc_data_structs(cache, &structs) ||
1682 break; 2086 get_cell(cache, oblock, &structs, &old_ocell)) {
1683
1684 r = get_cell(cache, oblock, &structs, &old_ocell);
1685 if (r) {
1686 policy_set_dirty(cache->policy, oblock); 2087 policy_set_dirty(cache->policy, oblock);
1687 break; 2088 break;
1688 } 2089 }
@@ -1690,7 +2091,8 @@ static void writeback_some_dirty_blocks(struct cache *cache)
1690 writeback(cache, &structs, oblock, cblock, old_ocell); 2091 writeback(cache, &structs, oblock, cblock, old_ocell);
1691 } 2092 }
1692 2093
1693 prealloc_free_structs(cache, &structs); 2094 if (prealloc_used)
2095 prealloc_free_structs(cache, &structs);
1694} 2096}
1695 2097
1696/*---------------------------------------------------------------- 2098/*----------------------------------------------------------------
@@ -1708,15 +2110,17 @@ static void process_invalidation_request(struct cache *cache, struct invalidatio
1708 r = policy_remove_cblock(cache->policy, to_cblock(begin)); 2110 r = policy_remove_cblock(cache->policy, to_cblock(begin));
1709 if (!r) { 2111 if (!r) {
1710 r = dm_cache_remove_mapping(cache->cmd, to_cblock(begin)); 2112 r = dm_cache_remove_mapping(cache->cmd, to_cblock(begin));
1711 if (r) 2113 if (r) {
2114 metadata_operation_failed(cache, "dm_cache_remove_mapping", r);
1712 break; 2115 break;
2116 }
1713 2117
1714 } else if (r == -ENODATA) { 2118 } else if (r == -ENODATA) {
1715 /* harmless, already unmapped */ 2119 /* harmless, already unmapped */
1716 r = 0; 2120 r = 0;
1717 2121
1718 } else { 2122 } else {
1719 DMERR("policy_remove_cblock failed"); 2123 DMERR("%s: policy_remove_cblock failed", cache_device_name(cache));
1720 break; 2124 break;
1721 } 2125 }
1722 2126
@@ -1789,7 +2193,22 @@ static void stop_worker(struct cache *cache)
1789 flush_workqueue(cache->wq); 2193 flush_workqueue(cache->wq);
1790} 2194}
1791 2195
1792static void requeue_deferred_io(struct cache *cache) 2196static void requeue_deferred_cells(struct cache *cache)
2197{
2198 unsigned long flags;
2199 struct list_head cells;
2200 struct dm_bio_prison_cell *cell, *tmp;
2201
2202 INIT_LIST_HEAD(&cells);
2203 spin_lock_irqsave(&cache->lock, flags);
2204 list_splice_init(&cache->deferred_cells, &cells);
2205 spin_unlock_irqrestore(&cache->lock, flags);
2206
2207 list_for_each_entry_safe(cell, tmp, &cells, user_list)
2208 cell_requeue(cache, cell);
2209}
2210
2211static void requeue_deferred_bios(struct cache *cache)
1793{ 2212{
1794 struct bio *bio; 2213 struct bio *bio;
1795 struct bio_list bios; 2214 struct bio_list bios;
@@ -1810,6 +2229,7 @@ static int more_work(struct cache *cache)
1810 !list_empty(&cache->need_commit_migrations); 2229 !list_empty(&cache->need_commit_migrations);
1811 else 2230 else
1812 return !bio_list_empty(&cache->deferred_bios) || 2231 return !bio_list_empty(&cache->deferred_bios) ||
2232 !list_empty(&cache->deferred_cells) ||
1813 !bio_list_empty(&cache->deferred_flush_bios) || 2233 !bio_list_empty(&cache->deferred_flush_bios) ||
1814 !bio_list_empty(&cache->deferred_writethrough_bios) || 2234 !bio_list_empty(&cache->deferred_writethrough_bios) ||
1815 !list_empty(&cache->quiesced_migrations) || 2235 !list_empty(&cache->quiesced_migrations) ||
@@ -1827,6 +2247,7 @@ static void do_worker(struct work_struct *ws)
1827 writeback_some_dirty_blocks(cache); 2247 writeback_some_dirty_blocks(cache);
1828 process_deferred_writethrough_bios(cache); 2248 process_deferred_writethrough_bios(cache);
1829 process_deferred_bios(cache); 2249 process_deferred_bios(cache);
2250 process_deferred_cells(cache);
1830 process_invalidation_requests(cache); 2251 process_invalidation_requests(cache);
1831 } 2252 }
1832 2253
@@ -1836,11 +2257,6 @@ static void do_worker(struct work_struct *ws)
1836 if (commit_if_needed(cache)) { 2257 if (commit_if_needed(cache)) {
1837 process_deferred_flush_bios(cache, false); 2258 process_deferred_flush_bios(cache, false);
1838 process_migrations(cache, &cache->need_commit_migrations, migration_failure); 2259 process_migrations(cache, &cache->need_commit_migrations, migration_failure);
1839
1840 /*
1841 * FIXME: rollback metadata or just go into a
1842 * failure mode and error everything
1843 */
1844 } else { 2260 } else {
1845 process_deferred_flush_bios(cache, true); 2261 process_deferred_flush_bios(cache, true);
1846 process_migrations(cache, &cache->need_commit_migrations, 2262 process_migrations(cache, &cache->need_commit_migrations,
@@ -1859,7 +2275,7 @@ static void do_worker(struct work_struct *ws)
1859static void do_waker(struct work_struct *ws) 2275static void do_waker(struct work_struct *ws)
1860{ 2276{
1861 struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker); 2277 struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
1862 policy_tick(cache->policy); 2278 policy_tick(cache->policy, true);
1863 wake_worker(cache); 2279 wake_worker(cache);
1864 queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD); 2280 queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
1865} 2281}
@@ -2413,6 +2829,12 @@ static int cache_create(struct cache_args *ca, struct cache **result)
2413 goto bad; 2829 goto bad;
2414 } 2830 }
2415 cache->cmd = cmd; 2831 cache->cmd = cmd;
2832 set_cache_mode(cache, CM_WRITE);
2833 if (get_cache_mode(cache) != CM_WRITE) {
2834 *error = "Unable to get write access to metadata, please check/repair metadata.";
2835 r = -EINVAL;
2836 goto bad;
2837 }
2416 2838
2417 if (passthrough_mode(&cache->features)) { 2839 if (passthrough_mode(&cache->features)) {
2418 bool all_clean; 2840 bool all_clean;
@@ -2431,6 +2853,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
2431 } 2853 }
2432 2854
2433 spin_lock_init(&cache->lock); 2855 spin_lock_init(&cache->lock);
2856 INIT_LIST_HEAD(&cache->deferred_cells);
2434 bio_list_init(&cache->deferred_bios); 2857 bio_list_init(&cache->deferred_bios);
2435 bio_list_init(&cache->deferred_flush_bios); 2858 bio_list_init(&cache->deferred_flush_bios);
2436 bio_list_init(&cache->deferred_writethrough_bios); 2859 bio_list_init(&cache->deferred_writethrough_bios);
@@ -2520,6 +2943,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)
2520 spin_lock_init(&cache->invalidation_lock); 2943 spin_lock_init(&cache->invalidation_lock);
2521 INIT_LIST_HEAD(&cache->invalidation_requests); 2944 INIT_LIST_HEAD(&cache->invalidation_requests);
2522 2945
2946 iot_init(&cache->origin_tracker);
2947
2523 *result = cache; 2948 *result = cache;
2524 return 0; 2949 return 0;
2525 2950
@@ -2586,15 +3011,23 @@ out:
2586 return r; 3011 return r;
2587} 3012}
2588 3013
2589static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_prison_cell **cell) 3014/*----------------------------------------------------------------*/
3015
3016static int cache_map(struct dm_target *ti, struct bio *bio)
2590{ 3017{
3018 struct cache *cache = ti->private;
3019
2591 int r; 3020 int r;
3021 struct dm_bio_prison_cell *cell = NULL;
2592 dm_oblock_t block = get_bio_block(cache, bio); 3022 dm_oblock_t block = get_bio_block(cache, bio);
2593 size_t pb_data_size = get_per_bio_data_size(cache); 3023 size_t pb_data_size = get_per_bio_data_size(cache);
2594 bool can_migrate = false; 3024 bool can_migrate = false;
2595 bool discarded_block; 3025 bool fast_promotion;
2596 struct policy_result lookup_result; 3026 struct policy_result lookup_result;
2597 struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size); 3027 struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);
3028 struct old_oblock_lock ool;
3029
3030 ool.locker.fn = null_locker;
2598 3031
2599 if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) { 3032 if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
2600 /* 3033 /*
@@ -2603,10 +3036,11 @@ static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_priso
2603 * Just remap to the origin and carry on. 3036 * Just remap to the origin and carry on.
2604 */ 3037 */
2605 remap_to_origin(cache, bio); 3038 remap_to_origin(cache, bio);
3039 accounted_begin(cache, bio);
2606 return DM_MAPIO_REMAPPED; 3040 return DM_MAPIO_REMAPPED;
2607 } 3041 }
2608 3042
2609 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) { 3043 if (discard_or_flush(bio)) {
2610 defer_bio(cache, bio); 3044 defer_bio(cache, bio);
2611 return DM_MAPIO_SUBMITTED; 3045 return DM_MAPIO_SUBMITTED;
2612 } 3046 }
@@ -2614,15 +3048,15 @@ static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_priso
2614 /* 3048 /*
2615 * Check to see if that block is currently migrating. 3049 * Check to see if that block is currently migrating.
2616 */ 3050 */
2617 *cell = alloc_prison_cell(cache); 3051 cell = alloc_prison_cell(cache);
2618 if (!*cell) { 3052 if (!cell) {
2619 defer_bio(cache, bio); 3053 defer_bio(cache, bio);
2620 return DM_MAPIO_SUBMITTED; 3054 return DM_MAPIO_SUBMITTED;
2621 } 3055 }
2622 3056
2623 r = bio_detain(cache, block, bio, *cell, 3057 r = bio_detain(cache, block, bio, cell,
2624 (cell_free_fn) free_prison_cell, 3058 (cell_free_fn) free_prison_cell,
2625 cache, cell); 3059 cache, &cell);
2626 if (r) { 3060 if (r) {
2627 if (r < 0) 3061 if (r < 0)
2628 defer_bio(cache, bio); 3062 defer_bio(cache, bio);
@@ -2630,17 +3064,18 @@ static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_priso
2630 return DM_MAPIO_SUBMITTED; 3064 return DM_MAPIO_SUBMITTED;
2631 } 3065 }
2632 3066
2633 discarded_block = is_discarded_oblock(cache, block); 3067 fast_promotion = is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio);
2634 3068
2635 r = policy_map(cache->policy, block, false, can_migrate, discarded_block, 3069 r = policy_map(cache->policy, block, false, can_migrate, fast_promotion,
2636 bio, &lookup_result); 3070 bio, &ool.locker, &lookup_result);
2637 if (r == -EWOULDBLOCK) { 3071 if (r == -EWOULDBLOCK) {
2638 cell_defer(cache, *cell, true); 3072 cell_defer(cache, cell, true);
2639 return DM_MAPIO_SUBMITTED; 3073 return DM_MAPIO_SUBMITTED;
2640 3074
2641 } else if (r) { 3075 } else if (r) {
2642 DMERR_LIMIT("Unexpected return from cache replacement policy: %d", r); 3076 DMERR_LIMIT("%s: Unexpected return from cache replacement policy: %d",
2643 cell_defer(cache, *cell, false); 3077 cache_device_name(cache), r);
3078 cell_defer(cache, cell, false);
2644 bio_io_error(bio); 3079 bio_io_error(bio);
2645 return DM_MAPIO_SUBMITTED; 3080 return DM_MAPIO_SUBMITTED;
2646 } 3081 }
@@ -2654,21 +3089,30 @@ static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_priso
2654 * We need to invalidate this block, so 3089 * We need to invalidate this block, so
2655 * defer for the worker thread. 3090 * defer for the worker thread.
2656 */ 3091 */
2657 cell_defer(cache, *cell, true); 3092 cell_defer(cache, cell, true);
2658 r = DM_MAPIO_SUBMITTED; 3093 r = DM_MAPIO_SUBMITTED;
2659 3094
2660 } else { 3095 } else {
2661 inc_miss_counter(cache, bio); 3096 inc_miss_counter(cache, bio);
2662 remap_to_origin_clear_discard(cache, bio, block); 3097 remap_to_origin_clear_discard(cache, bio, block);
3098 accounted_begin(cache, bio);
3099 inc_ds(cache, bio, cell);
3100 // FIXME: we want to remap hits or misses straight
3101 // away rather than passing over to the worker.
3102 cell_defer(cache, cell, false);
2663 } 3103 }
2664 3104
2665 } else { 3105 } else {
2666 inc_hit_counter(cache, bio); 3106 inc_hit_counter(cache, bio);
2667 if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) && 3107 if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) &&
2668 !is_dirty(cache, lookup_result.cblock)) 3108 !is_dirty(cache, lookup_result.cblock)) {
2669 remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock); 3109 remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
2670 else 3110 accounted_begin(cache, bio);
2671 remap_to_cache_dirty(cache, bio, block, lookup_result.cblock); 3111 inc_ds(cache, bio, cell);
3112 cell_defer(cache, cell, false);
3113
3114 } else
3115 remap_cell_to_cache_dirty(cache, cell, block, lookup_result.cblock, false);
2672 } 3116 }
2673 break; 3117 break;
2674 3118
@@ -2680,18 +3124,19 @@ static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_priso
2680 * longer needed because the block has been demoted. 3124 * longer needed because the block has been demoted.
2681 */ 3125 */
2682 bio_endio(bio, 0); 3126 bio_endio(bio, 0);
2683 cell_defer(cache, *cell, false); 3127 // FIXME: remap everything as a miss
3128 cell_defer(cache, cell, false);
2684 r = DM_MAPIO_SUBMITTED; 3129 r = DM_MAPIO_SUBMITTED;
2685 3130
2686 } else 3131 } else
2687 remap_to_origin_clear_discard(cache, bio, block); 3132 remap_cell_to_origin_clear_discard(cache, cell, block, false);
2688
2689 break; 3133 break;
2690 3134
2691 default: 3135 default:
2692 DMERR_LIMIT("%s: erroring bio: unknown policy op: %u", __func__, 3136 DMERR_LIMIT("%s: %s: erroring bio: unknown policy op: %u",
3137 cache_device_name(cache), __func__,
2693 (unsigned) lookup_result.op); 3138 (unsigned) lookup_result.op);
2694 cell_defer(cache, *cell, false); 3139 cell_defer(cache, cell, false);
2695 bio_io_error(bio); 3140 bio_io_error(bio);
2696 r = DM_MAPIO_SUBMITTED; 3141 r = DM_MAPIO_SUBMITTED;
2697 } 3142 }
@@ -2699,21 +3144,6 @@ static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_priso
2699 return r; 3144 return r;
2700} 3145}
2701 3146
2702static int cache_map(struct dm_target *ti, struct bio *bio)
2703{
2704 int r;
2705 struct dm_bio_prison_cell *cell = NULL;
2706 struct cache *cache = ti->private;
2707
2708 r = __cache_map(cache, bio, &cell);
2709 if (r == DM_MAPIO_REMAPPED && cell) {
2710 inc_ds(cache, bio, cell);
2711 cell_defer(cache, cell, false);
2712 }
2713
2714 return r;
2715}
2716
2717static int cache_end_io(struct dm_target *ti, struct bio *bio, int error) 3147static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
2718{ 3148{
2719 struct cache *cache = ti->private; 3149 struct cache *cache = ti->private;
@@ -2722,7 +3152,7 @@ static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
2722 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); 3152 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
2723 3153
2724 if (pb->tick) { 3154 if (pb->tick) {
2725 policy_tick(cache->policy); 3155 policy_tick(cache->policy, false);
2726 3156
2727 spin_lock_irqsave(&cache->lock, flags); 3157 spin_lock_irqsave(&cache->lock, flags);
2728 cache->need_tick_bio = true; 3158 cache->need_tick_bio = true;
@@ -2730,6 +3160,7 @@ static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
2730 } 3160 }
2731 3161
2732 check_for_quiesced_migrations(cache, pb); 3162 check_for_quiesced_migrations(cache, pb);
3163 accounted_complete(cache, bio);
2733 3164
2734 return 0; 3165 return 0;
2735} 3166}
@@ -2738,11 +3169,16 @@ static int write_dirty_bitset(struct cache *cache)
2738{ 3169{
2739 unsigned i, r; 3170 unsigned i, r;
2740 3171
3172 if (get_cache_mode(cache) >= CM_READ_ONLY)
3173 return -EINVAL;
3174
2741 for (i = 0; i < from_cblock(cache->cache_size); i++) { 3175 for (i = 0; i < from_cblock(cache->cache_size); i++) {
2742 r = dm_cache_set_dirty(cache->cmd, to_cblock(i), 3176 r = dm_cache_set_dirty(cache->cmd, to_cblock(i),
2743 is_dirty(cache, to_cblock(i))); 3177 is_dirty(cache, to_cblock(i)));
2744 if (r) 3178 if (r) {
3179 metadata_operation_failed(cache, "dm_cache_set_dirty", r);
2745 return r; 3180 return r;
3181 }
2746 } 3182 }
2747 3183
2748 return 0; 3184 return 0;
@@ -2752,18 +3188,40 @@ static int write_discard_bitset(struct cache *cache)
2752{ 3188{
2753 unsigned i, r; 3189 unsigned i, r;
2754 3190
3191 if (get_cache_mode(cache) >= CM_READ_ONLY)
3192 return -EINVAL;
3193
2755 r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size, 3194 r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size,
2756 cache->discard_nr_blocks); 3195 cache->discard_nr_blocks);
2757 if (r) { 3196 if (r) {
2758 DMERR("could not resize on-disk discard bitset"); 3197 DMERR("%s: could not resize on-disk discard bitset", cache_device_name(cache));
3198 metadata_operation_failed(cache, "dm_cache_discard_bitset_resize", r);
2759 return r; 3199 return r;
2760 } 3200 }
2761 3201
2762 for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) { 3202 for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) {
2763 r = dm_cache_set_discard(cache->cmd, to_dblock(i), 3203 r = dm_cache_set_discard(cache->cmd, to_dblock(i),
2764 is_discarded(cache, to_dblock(i))); 3204 is_discarded(cache, to_dblock(i)));
2765 if (r) 3205 if (r) {
3206 metadata_operation_failed(cache, "dm_cache_set_discard", r);
2766 return r; 3207 return r;
3208 }
3209 }
3210
3211 return 0;
3212}
3213
3214static int write_hints(struct cache *cache)
3215{
3216 int r;
3217
3218 if (get_cache_mode(cache) >= CM_READ_ONLY)
3219 return -EINVAL;
3220
3221 r = dm_cache_write_hints(cache->cmd, cache->policy);
3222 if (r) {
3223 metadata_operation_failed(cache, "dm_cache_write_hints", r);
3224 return r;
2767 } 3225 }
2768 3226
2769 return 0; 3227 return 0;
@@ -2778,26 +3236,26 @@ static bool sync_metadata(struct cache *cache)
2778 3236
2779 r1 = write_dirty_bitset(cache); 3237 r1 = write_dirty_bitset(cache);
2780 if (r1) 3238 if (r1)
2781 DMERR("could not write dirty bitset"); 3239 DMERR("%s: could not write dirty bitset", cache_device_name(cache));
2782 3240
2783 r2 = write_discard_bitset(cache); 3241 r2 = write_discard_bitset(cache);
2784 if (r2) 3242 if (r2)
2785 DMERR("could not write discard bitset"); 3243 DMERR("%s: could not write discard bitset", cache_device_name(cache));
2786 3244
2787 save_stats(cache); 3245 save_stats(cache);
2788 3246
2789 r3 = dm_cache_write_hints(cache->cmd, cache->policy); 3247 r3 = write_hints(cache);
2790 if (r3) 3248 if (r3)
2791 DMERR("could not write hints"); 3249 DMERR("%s: could not write hints", cache_device_name(cache));
2792 3250
2793 /* 3251 /*
2794 * If writing the above metadata failed, we still commit, but don't 3252 * If writing the above metadata failed, we still commit, but don't
2795 * set the clean shutdown flag. This will effectively force every 3253 * set the clean shutdown flag. This will effectively force every
2796 * dirty bit to be set on reload. 3254 * dirty bit to be set on reload.
2797 */ 3255 */
2798 r4 = dm_cache_commit(cache->cmd, !r1 && !r2 && !r3); 3256 r4 = commit(cache, !r1 && !r2 && !r3);
2799 if (r4) 3257 if (r4)
2800 DMERR("could not write cache metadata. Data loss may occur."); 3258 DMERR("%s: could not write cache metadata", cache_device_name(cache));
2801 3259
2802 return !r1 && !r2 && !r3 && !r4; 3260 return !r1 && !r2 && !r3 && !r4;
2803} 3261}
@@ -2809,10 +3267,12 @@ static void cache_postsuspend(struct dm_target *ti)
2809 start_quiescing(cache); 3267 start_quiescing(cache);
2810 wait_for_migrations(cache); 3268 wait_for_migrations(cache);
2811 stop_worker(cache); 3269 stop_worker(cache);
2812 requeue_deferred_io(cache); 3270 requeue_deferred_bios(cache);
3271 requeue_deferred_cells(cache);
2813 stop_quiescing(cache); 3272 stop_quiescing(cache);
2814 3273
2815 (void) sync_metadata(cache); 3274 if (get_cache_mode(cache) == CM_WRITE)
3275 (void) sync_metadata(cache);
2816} 3276}
2817 3277
2818static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock, 3278static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
@@ -2935,7 +3395,8 @@ static bool can_resize(struct cache *cache, dm_cblock_t new_size)
2935 while (from_cblock(new_size) < from_cblock(cache->cache_size)) { 3395 while (from_cblock(new_size) < from_cblock(cache->cache_size)) {
2936 new_size = to_cblock(from_cblock(new_size) + 1); 3396 new_size = to_cblock(from_cblock(new_size) + 1);
2937 if (is_dirty(cache, new_size)) { 3397 if (is_dirty(cache, new_size)) {
2938 DMERR("unable to shrink cache; cache block %llu is dirty", 3398 DMERR("%s: unable to shrink cache; cache block %llu is dirty",
3399 cache_device_name(cache),
2939 (unsigned long long) from_cblock(new_size)); 3400 (unsigned long long) from_cblock(new_size));
2940 return false; 3401 return false;
2941 } 3402 }
@@ -2950,7 +3411,8 @@ static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
2950 3411
2951 r = dm_cache_resize(cache->cmd, new_size); 3412 r = dm_cache_resize(cache->cmd, new_size);
2952 if (r) { 3413 if (r) {
2953 DMERR("could not resize cache metadata"); 3414 DMERR("%s: could not resize cache metadata", cache_device_name(cache));
3415 metadata_operation_failed(cache, "dm_cache_resize", r);
2954 return r; 3416 return r;
2955 } 3417 }
2956 3418
@@ -2988,7 +3450,8 @@ static int cache_preresume(struct dm_target *ti)
2988 r = dm_cache_load_mappings(cache->cmd, cache->policy, 3450 r = dm_cache_load_mappings(cache->cmd, cache->policy,
2989 load_mapping, cache); 3451 load_mapping, cache);
2990 if (r) { 3452 if (r) {
2991 DMERR("could not load cache mappings"); 3453 DMERR("%s: could not load cache mappings", cache_device_name(cache));
3454 metadata_operation_failed(cache, "dm_cache_load_mappings", r);
2992 return r; 3455 return r;
2993 } 3456 }
2994 3457
@@ -3008,7 +3471,8 @@ static int cache_preresume(struct dm_target *ti)
3008 discard_load_info_init(cache, &li); 3471 discard_load_info_init(cache, &li);
3009 r = dm_cache_load_discards(cache->cmd, load_discard, &li); 3472 r = dm_cache_load_discards(cache->cmd, load_discard, &li);
3010 if (r) { 3473 if (r) {
3011 DMERR("could not load origin discards"); 3474 DMERR("%s: could not load origin discards", cache_device_name(cache));
3475 metadata_operation_failed(cache, "dm_cache_load_discards", r);
3012 return r; 3476 return r;
3013 } 3477 }
3014 set_discard_range(&li); 3478 set_discard_range(&li);
@@ -3036,7 +3500,7 @@ static void cache_resume(struct dm_target *ti)
3036 * <#demotions> <#promotions> <#dirty> 3500 * <#demotions> <#promotions> <#dirty>
3037 * <#features> <features>* 3501 * <#features> <features>*
3038 * <#core args> <core args> 3502 * <#core args> <core args>
3039 * <policy name> <#policy args> <policy args>* 3503 * <policy name> <#policy args> <policy args>* <cache metadata mode> <needs_check>
3040 */ 3504 */
3041static void cache_status(struct dm_target *ti, status_type_t type, 3505static void cache_status(struct dm_target *ti, status_type_t type,
3042 unsigned status_flags, char *result, unsigned maxlen) 3506 unsigned status_flags, char *result, unsigned maxlen)
@@ -3052,23 +3516,26 @@ static void cache_status(struct dm_target *ti, status_type_t type,
3052 3516
3053 switch (type) { 3517 switch (type) {
3054 case STATUSTYPE_INFO: 3518 case STATUSTYPE_INFO:
3055 /* Commit to ensure statistics aren't out-of-date */ 3519 if (get_cache_mode(cache) == CM_FAIL) {
3056 if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) { 3520 DMEMIT("Fail");
3057 r = dm_cache_commit(cache->cmd, false); 3521 break;
3058 if (r)
3059 DMERR("could not commit metadata for accurate status");
3060 } 3522 }
3061 3523
3062 r = dm_cache_get_free_metadata_block_count(cache->cmd, 3524 /* Commit to ensure statistics aren't out-of-date */
3063 &nr_free_blocks_metadata); 3525 if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
3526 (void) commit(cache, false);
3527
3528 r = dm_cache_get_free_metadata_block_count(cache->cmd, &nr_free_blocks_metadata);
3064 if (r) { 3529 if (r) {
3065 DMERR("could not get metadata free block count"); 3530 DMERR("%s: dm_cache_get_free_metadata_block_count returned %d",
3531 cache_device_name(cache), r);
3066 goto err; 3532 goto err;
3067 } 3533 }
3068 3534
3069 r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata); 3535 r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata);
3070 if (r) { 3536 if (r) {
3071 DMERR("could not get metadata device size"); 3537 DMERR("%s: dm_cache_get_metadata_dev_size returned %d",
3538 cache_device_name(cache), r);
3072 goto err; 3539 goto err;
3073 } 3540 }
3074 3541
@@ -3099,7 +3566,8 @@ static void cache_status(struct dm_target *ti, status_type_t type,
3099 DMEMIT("1 writeback "); 3566 DMEMIT("1 writeback ");
3100 3567
3101 else { 3568 else {
3102 DMERR("internal error: unknown io mode: %d", (int) cache->features.io_mode); 3569 DMERR("%s: internal error: unknown io mode: %d",
3570 cache_device_name(cache), (int) cache->features.io_mode);
3103 goto err; 3571 goto err;
3104 } 3572 }
3105 3573
@@ -3107,11 +3575,22 @@ static void cache_status(struct dm_target *ti, status_type_t type,
3107 3575
3108 DMEMIT("%s ", dm_cache_policy_get_name(cache->policy)); 3576 DMEMIT("%s ", dm_cache_policy_get_name(cache->policy));
3109 if (sz < maxlen) { 3577 if (sz < maxlen) {
3110 r = policy_emit_config_values(cache->policy, result + sz, maxlen - sz); 3578 r = policy_emit_config_values(cache->policy, result, maxlen, &sz);
3111 if (r) 3579 if (r)
3112 DMERR("policy_emit_config_values returned %d", r); 3580 DMERR("%s: policy_emit_config_values returned %d",
3581 cache_device_name(cache), r);
3113 } 3582 }
3114 3583
3584 if (get_cache_mode(cache) == CM_READ_ONLY)
3585 DMEMIT("ro ");
3586 else
3587 DMEMIT("rw ");
3588
3589 if (dm_cache_metadata_needs_check(cache->cmd))
3590 DMEMIT("needs_check ");
3591 else
3592 DMEMIT("- ");
3593
3115 break; 3594 break;
3116 3595
3117 case STATUSTYPE_TABLE: 3596 case STATUSTYPE_TABLE:
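For illustration, the two new trailing INFO fields are the metadata mode ("rw" or "ro", with a failed cache reporting only "Fail" instead of the usual statistics) followed by "needs_check" or "-". A loose userspace sketch of that emission, with snprintf() standing in for DMEMIT() and the mode and flag passed in directly (the buffer contents in main() are made up):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

enum cache_mode { CM_WRITE, CM_READ_ONLY, CM_FAIL };

/* Append the new trailing status fields: metadata mode, then needs_check. */
static void emit_extra_status(char *buf, size_t maxlen,
			      enum cache_mode mode, bool needs_check)
{
	size_t sz = strlen(buf);

	if (mode == CM_FAIL) {
		/* A failed cache reports nothing but "Fail". */
		snprintf(buf, maxlen, "Fail");
		return;
	}

	sz += snprintf(buf + sz, maxlen - sz, "%s ",
		       mode == CM_READ_ONLY ? "ro" : "rw");
	snprintf(buf + sz, maxlen - sz, "%s ", needs_check ? "needs_check" : "-");
}

int main(void)
{
	char status[128] = "smq 0 ";   /* made-up policy name and arg count */

	emit_extra_status(status, sizeof(status), CM_READ_ONLY, true);
	printf("%s\n", status);        /* prints "smq 0 ro needs_check " */
	return 0;
}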
@@ -3173,7 +3652,7 @@ static int parse_cblock_range(struct cache *cache, const char *str,
3173 return 0; 3652 return 0;
3174 } 3653 }
3175 3654
3176 DMERR("invalid cblock range '%s'", str); 3655 DMERR("%s: invalid cblock range '%s'", cache_device_name(cache), str);
3177 return -EINVAL; 3656 return -EINVAL;
3178} 3657}
3179 3658
@@ -3184,17 +3663,20 @@ static int validate_cblock_range(struct cache *cache, struct cblock_range *range
3184 uint64_t n = from_cblock(cache->cache_size); 3663 uint64_t n = from_cblock(cache->cache_size);
3185 3664
3186 if (b >= n) { 3665 if (b >= n) {
3187 DMERR("begin cblock out of range: %llu >= %llu", b, n); 3666 DMERR("%s: begin cblock out of range: %llu >= %llu",
3667 cache_device_name(cache), b, n);
3188 return -EINVAL; 3668 return -EINVAL;
3189 } 3669 }
3190 3670
3191 if (e > n) { 3671 if (e > n) {
3192 DMERR("end cblock out of range: %llu > %llu", e, n); 3672 DMERR("%s: end cblock out of range: %llu > %llu",
3673 cache_device_name(cache), e, n);
3193 return -EINVAL; 3674 return -EINVAL;
3194 } 3675 }
3195 3676
3196 if (b >= e) { 3677 if (b >= e) {
3197 DMERR("invalid cblock range: %llu >= %llu", b, e); 3678 DMERR("%s: invalid cblock range: %llu >= %llu",
3679 cache_device_name(cache), b, e);
3198 return -EINVAL; 3680 return -EINVAL;
3199 } 3681 }
3200 3682
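validate_cblock_range() reduces to three ordered checks against the cache size: the begin block must lie inside the cache, the end bound must not run past it, and the range must be non-empty. A compact standalone version of that predicate, with uint64_t standing in for dm_cblock_t and the range treated as half-open (end is one past the last block):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A half-open cblock range [begin, end). */
struct cblock_range {
	uint64_t begin;
	uint64_t end;
};

static bool cblock_range_valid(struct cblock_range *r, uint64_t cache_size)
{
	if (r->begin >= cache_size) {
		fprintf(stderr, "begin cblock out of range: %llu >= %llu\n",
			(unsigned long long) r->begin,
			(unsigned long long) cache_size);
		return false;
	}

	if (r->end > cache_size) {
		fprintf(stderr, "end cblock out of range: %llu > %llu\n",
			(unsigned long long) r->end,
			(unsigned long long) cache_size);
		return false;
	}

	if (r->begin >= r->end) {
		fprintf(stderr, "invalid cblock range: %llu >= %llu\n",
			(unsigned long long) r->begin,
			(unsigned long long) r->end);
		return false;
	}

	return true;
}

int main(void)
{
	struct cblock_range ok = { 10, 20 }, bad = { 20, 10 };

	printf("10-20: %d\n", cblock_range_valid(&ok, 64));
	printf("20-10: %d\n", cblock_range_valid(&bad, 64));
	return 0;
}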
@@ -3228,7 +3710,8 @@ static int process_invalidate_cblocks_message(struct cache *cache, unsigned coun
3228 struct cblock_range range; 3710 struct cblock_range range;
3229 3711
3230 if (!passthrough_mode(&cache->features)) { 3712 if (!passthrough_mode(&cache->features)) {
3231 DMERR("cache has to be in passthrough mode for invalidation"); 3713 DMERR("%s: cache has to be in passthrough mode for invalidation",
3714 cache_device_name(cache));
3232 return -EPERM; 3715 return -EPERM;
3233 } 3716 }
3234 3717
@@ -3267,6 +3750,12 @@ static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
3267 if (!argc) 3750 if (!argc)
3268 return -EINVAL; 3751 return -EINVAL;
3269 3752
3753 if (get_cache_mode(cache) >= CM_READ_ONLY) {
3754 DMERR("%s: unable to service cache target messages in READ_ONLY or FAIL mode",
3755 cache_device_name(cache));
3756 return -EOPNOTSUPP;
3757 }
3758
3270 if (!strcasecmp(argv[0], "invalidate_cblocks")) 3759 if (!strcasecmp(argv[0], "invalidate_cblocks"))
3271 return process_invalidate_cblocks_message(cache, argc - 1, (const char **) argv + 1); 3760 return process_invalidate_cblocks_message(cache, argc - 1, (const char **) argv + 1);
3272 3761
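cache_message() now rejects every target message once the cache has degraded to read-only or fail mode, because servicing them would need writable metadata. A small standalone model of that guard plus the invalidate_cblocks dispatch — the handler is a stub, and other messages are handed to the policy in the real target:

#include <errno.h>
#include <stdio.h>
#include <strings.h>   /* strcasecmp() */

enum cache_mode { CM_WRITE, CM_READ_ONLY, CM_FAIL };

/* Stub for the real invalidate_cblocks handler. */
static int process_invalidate_cblocks(int argc, const char **argv)
{
	printf("would invalidate %d range(s), first: %s\n", argc, argv[0]);
	return 0;
}

static int cache_message(enum cache_mode mode, int argc, const char **argv)
{
	if (!argc)
		return -EINVAL;

	/* Messages need writable metadata; refuse once the cache has degraded. */
	if (mode >= CM_READ_ONLY) {
		fprintf(stderr, "unable to service messages in READ_ONLY or FAIL mode\n");
		return -EOPNOTSUPP;
	}

	if (!strcasecmp(argv[0], "invalidate_cblocks"))
		return process_invalidate_cblocks(argc - 1, argv + 1);

	/* Anything else would be passed on to the policy in the real target. */
	return -EINVAL;
}

int main(void)
{
	const char *argv[] = { "invalidate_cblocks", "10-20" };

	cache_message(CM_WRITE, 2, argv);      /* serviced */
	cache_message(CM_READ_ONLY, 2, argv);  /* refused */
	return 0;
}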
@@ -3340,7 +3829,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
3340 3829
3341static struct target_type cache_target = { 3830static struct target_type cache_target = {
3342 .name = "cache", 3831 .name = "cache",
3343 .version = {1, 6, 0}, 3832 .version = {1, 8, 0},
3344 .module = THIS_MODULE, 3833 .module = THIS_MODULE,
3345 .ctr = cache_ctr, 3834 .ctr = cache_ctr,
3346 .dtr = cache_dtr, 3835 .dtr = cache_dtr,