Diffstat (limited to 'drivers/md/dm-thin.c')
-rw-r--r--	drivers/md/dm-thin.c | 65 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++----------
1 file changed, 55 insertions(+), 10 deletions(-)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index dadd9696340c..e83b63608262 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -257,6 +257,7 @@ struct pool {
 
 	spinlock_t lock;
 	struct bio_list deferred_flush_bios;
+	struct bio_list deferred_flush_completions;
 	struct list_head prepared_mappings;
 	struct list_head prepared_discards;
 	struct list_head prepared_discards_pt2;
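
For readers new to the type: struct bio_list, used for the new deferred_flush_completions field, is the block layer's lightweight singly linked list of bios. A sketch of its definition and initializer as they appear in include/linux/bio.h of this era (paraphrased here for reference, not part of this patch):

	struct bio_list {
		struct bio *head;
		struct bio *tail;
	};

	static inline void bio_list_init(struct bio_list *bl)
	{
		bl->head = bl->tail = NULL;	/* an empty list */
	}

Appending is O(1) via the tail pointer, which is why the pool can cheaply batch completions under its spinlock.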
@@ -956,6 +957,39 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
 	mempool_free(m, &m->tc->pool->mapping_pool);
 }
 
+static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio)
+{
+	struct pool *pool = tc->pool;
+	unsigned long flags;
+
+	/*
+	 * If the bio has the REQ_FUA flag set we must commit the metadata
+	 * before signaling its completion.
+	 */
+	if (!bio_triggers_commit(tc, bio)) {
+		bio_endio(bio);
+		return;
+	}
+
+	/*
+	 * Complete bio with an error if earlier I/O caused changes to the
+	 * metadata that can't be committed, e.g., due to I/O errors on the
+	 * metadata device.
+	 */
+	if (dm_thin_aborted_changes(tc->td)) {
+		bio_io_error(bio);
+		return;
+	}
+
+	/*
+	 * Batch together any bios that trigger commits and then issue a
+	 * single commit for them in process_deferred_bios().
+	 */
+	spin_lock_irqsave(&pool->lock, flags);
+	bio_list_add(&pool->deferred_flush_completions, bio);
+	spin_unlock_irqrestore(&pool->lock, flags);
+}
+
 static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 {
 	struct thin_c *tc = m->tc;
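
For context, bio_triggers_commit(), tested at the top of the new helper, is defined earlier in dm-thin.c and is untouched by this patch. In kernels of this vintage it reads roughly as follows (a sketch; the exact flag names have shifted across releases):

	static bool bio_triggers_commit(struct thin_c *tc, struct bio *bio)
	{
		/*
		 * Only flush/FUA bios need the metadata committed, and only
		 * if the current transaction has actually changed anything.
		 */
		return (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) &&
			dm_thin_changed_this_transaction(tc->td);
	}

So a plain overwrite of a freshly provisioned block still completes immediately; only REQ_FUA/REQ_PREFLUSH overwrites take the new deferred-completion path.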
@@ -988,7 +1022,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 	 */
 	if (bio) {
 		inc_remap_and_issue_cell(tc, m->cell, m->data_block);
-		bio_endio(bio);
+		complete_overwrite_bio(tc, bio);
 	} else {
 		inc_all_io_entry(tc->pool, m->cell->holder);
 		remap_and_issue(tc, m->cell->holder, m->data_block);
@@ -1048,7 +1082,7 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
 	 * passdown we have to check that these blocks are now unused.
 	 */
 	int r = 0;
-	bool used = true;
+	bool shared = true;
 	struct thin_c *tc = m->tc;
 	struct pool *pool = tc->pool;
 	dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin;
@@ -1058,11 +1092,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
 	while (b != end) {
 		/* find start of unmapped run */
 		for (; b < end; b++) {
-			r = dm_pool_block_is_used(pool->pmd, b, &used);
+			r = dm_pool_block_is_shared(pool->pmd, b, &shared);
 			if (r)
 				goto out;
 
-			if (!used)
+			if (!shared)
 				break;
 		}
 
@@ -1071,11 +1105,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
 
 		/* find end of run */
 		for (e = b + 1; e != end; e++) {
-			r = dm_pool_block_is_used(pool->pmd, e, &used);
+			r = dm_pool_block_is_shared(pool->pmd, e, &shared);
 			if (r)
 				goto out;
 
-			if (used)
+			if (shared)
 				break;
 		}
 
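
The rename from dm_pool_block_is_used() to dm_pool_block_is_shared() makes the helper's name match the question the loop actually needs answered: not whether the block is mapped at all, but whether other snapshots still hold a reference to it, in which case the discard must not be passed down. A sketch of what the metadata-side helper plausibly looks like, assuming the dm_sm_get_count() space-map accessor and the pmd->root_lock convention used elsewhere in dm-thin-metadata.c:

	int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
	{
		int r;
		uint32_t ref_count;

		down_read(&pmd->root_lock);
		r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
		if (!r)
			*result = (ref_count > 1);	/* shared iff more than one holder */
		up_read(&pmd->root_lock);

		return r;
	}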
@@ -2317,7 +2351,7 @@ static void process_deferred_bios(struct pool *pool)
 {
 	unsigned long flags;
 	struct bio *bio;
-	struct bio_list bios;
+	struct bio_list bios, bio_completions;
 	struct thin_c *tc;
 
 	tc = get_first_thin(pool);
@@ -2328,26 +2362,36 @@ static void process_deferred_bios(struct pool *pool)
 	}
 
 	/*
-	 * If there are any deferred flush bios, we must commit
-	 * the metadata before issuing them.
+	 * If there are any deferred flush bios, we must commit the metadata
+	 * before issuing them or signaling their completion.
 	 */
 	bio_list_init(&bios);
+	bio_list_init(&bio_completions);
+
 	spin_lock_irqsave(&pool->lock, flags);
 	bio_list_merge(&bios, &pool->deferred_flush_bios);
 	bio_list_init(&pool->deferred_flush_bios);
+
+	bio_list_merge(&bio_completions, &pool->deferred_flush_completions);
+	bio_list_init(&pool->deferred_flush_completions);
 	spin_unlock_irqrestore(&pool->lock, flags);
 
-	if (bio_list_empty(&bios) &&
+	if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
 	    !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
 		return;
 
 	if (commit(pool)) {
+		bio_list_merge(&bios, &bio_completions);
+
 		while ((bio = bio_list_pop(&bios)))
 			bio_io_error(bio);
 		return;
 	}
 	pool->last_commit_jiffies = jiffies;
 
+	while ((bio = bio_list_pop(&bio_completions)))
+		bio_endio(bio);
+
 	while ((bio = bio_list_pop(&bios)))
 		generic_make_request(bio);
 }
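
The ordering here is the core of the fix: commit() must complete before any bio on deferred_flush_completions is ended, so a REQ_FUA overwrite is only acknowledged once the block mapping it depends on has reached stable metadata storage; on commit failure both lists are failed together. For reference, commit() is a thin wrapper defined earlier in this file; its shape is roughly the following (a sketch, assuming the dm_pool_commit_metadata() and metadata_operation_failed() helpers; the exact pool-mode check varies by release):

	static int commit(struct pool *pool)
	{
		int r;

		/* Refuse to commit once the pool has degraded. */
		if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE)
			return -EINVAL;

		r = dm_pool_commit_metadata(pool->pmd);
		if (r)
			metadata_operation_failed(pool, "dm_pool_commit_metadata", r);

		return r;
	}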
@@ -2954,6 +2998,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 	INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
 	spin_lock_init(&pool->lock);
 	bio_list_init(&pool->deferred_flush_bios);
+	bio_list_init(&pool->deferred_flush_completions);
 	INIT_LIST_HEAD(&pool->prepared_mappings);
 	INIT_LIST_HEAD(&pool->prepared_discards);
 	INIT_LIST_HEAD(&pool->prepared_discards_pt2);