author     Linus Torvalds <torvalds@linux-foundation.org>  2019-02-15 11:50:48 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-02-15 11:50:48 -0500
commit     ae3fa8bd73c9b64416816ec0e9951cd8695c9854 (patch)
tree       1b293b8c193c7da65ecd3b1904cad4164d8d6182
parent     dfeae3379836fd0609488e73f4df260af4af2672 (diff)
parent     4ae280b4ee3463fa57bbe6eede26b97daff8a0f1 (diff)
Merge tag 'for-5.0/dm-fixes-3' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull device mapper fixes from Mike Snitzer:

 - Fix bug in DM crypt's sizing of its block integrity tag space,
   resulting in less memory use when DM crypt layers on DM integrity.

 - Fix a long-standing DM thinp crash consistency bug that was due to
   improper handling of FUA. This issue is specific to writes that fill
   an entire thinp block which needs to be allocated.

* tag 'for-5.0/dm-fixes-3' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm thin: fix bug where bio that overwrites thin block ignores FUA
  dm crypt: don't overallocate the integrity tag space
-rw-r--r--  drivers/md/dm-crypt.c  |  2
-rw-r--r--  drivers/md/dm-thin.c   | 55
2 files changed, 51 insertions, 6 deletions
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 47d4e0d30bf0..dd538e6b2748 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -932,7 +932,7 @@ static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
 	if (IS_ERR(bip))
 		return PTR_ERR(bip);
 
-	tag_len = io->cc->on_disk_tag_size * bio_sectors(bio);
+	tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift);
 
 	bip->bip_iter.bi_size = tag_len;
 	bip->bip_iter.bi_sector = io->cc->start + io->sector;
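
For context on the one-line sizing change above: dm-crypt may use encryption sectors larger than 512 bytes, while bio_sectors() always counts 512-byte sectors, so multiplying the per-crypt-sector tag size by bio_sectors() overallocates the integrity tag space by the 512-byte-to-crypt-sector ratio. The following is a minimal user-space sketch of that arithmetic, not kernel code; the variable names mirror dm-crypt's on_disk_tag_size and sector_shift fields, and the values are assumptions chosen for the example.

/*
 * Illustrative sketch only: the overallocation removed by the fix above.
 * All values are assumed for the example, not taken from a real table.
 */
#include <stdio.h>

int main(void)
{
        unsigned int on_disk_tag_size = 4;  /* tag bytes per crypt sector (assumed) */
        unsigned int sector_shift = 3;      /* crypt sector = 512 << 3 = 4096 bytes (assumed) */
        unsigned int bio_sectors = 64;      /* bio length in 512-byte sectors (32 KiB) */

        /* Old sizing: per-crypt-sector tag size times the 512-byte sector count. */
        unsigned int old_tag_len = on_disk_tag_size * bio_sectors;

        /* Fixed sizing: scale the sector count down to crypt sectors first. */
        unsigned int new_tag_len = on_disk_tag_size * (bio_sectors >> sector_shift);

        printf("old tag_len = %u bytes, fixed tag_len = %u bytes\n",
               old_tag_len, new_tag_len);  /* prints 256 vs. 32 */
        return 0;
}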
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index ca8af21bf644..e83b63608262 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -257,6 +257,7 @@ struct pool {
 
 	spinlock_t lock;
 	struct bio_list deferred_flush_bios;
+	struct bio_list deferred_flush_completions;
 	struct list_head prepared_mappings;
 	struct list_head prepared_discards;
 	struct list_head prepared_discards_pt2;
@@ -956,6 +957,39 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
 	mempool_free(m, &m->tc->pool->mapping_pool);
 }
 
+static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio)
+{
+	struct pool *pool = tc->pool;
+	unsigned long flags;
+
+	/*
+	 * If the bio has the REQ_FUA flag set we must commit the metadata
+	 * before signaling its completion.
+	 */
+	if (!bio_triggers_commit(tc, bio)) {
+		bio_endio(bio);
+		return;
+	}
+
+	/*
+	 * Complete bio with an error if earlier I/O caused changes to the
+	 * metadata that can't be committed, e.g, due to I/O errors on the
+	 * metadata device.
+	 */
+	if (dm_thin_aborted_changes(tc->td)) {
+		bio_io_error(bio);
+		return;
+	}
+
+	/*
+	 * Batch together any bios that trigger commits and then issue a
+	 * single commit for them in process_deferred_bios().
+	 */
+	spin_lock_irqsave(&pool->lock, flags);
+	bio_list_add(&pool->deferred_flush_completions, bio);
+	spin_unlock_irqrestore(&pool->lock, flags);
+}
+
 static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 {
 	struct thin_c *tc = m->tc;
@@ -988,7 +1022,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 	 */
 	if (bio) {
 		inc_remap_and_issue_cell(tc, m->cell, m->data_block);
-		bio_endio(bio);
+		complete_overwrite_bio(tc, bio);
 	} else {
 		inc_all_io_entry(tc->pool, m->cell->holder);
 		remap_and_issue(tc, m->cell->holder, m->data_block);
@@ -2317,7 +2351,7 @@ static void process_deferred_bios(struct pool *pool)
 {
 	unsigned long flags;
 	struct bio *bio;
-	struct bio_list bios;
+	struct bio_list bios, bio_completions;
 	struct thin_c *tc;
 
 	tc = get_first_thin(pool);
@@ -2328,26 +2362,36 @@ static void process_deferred_bios(struct pool *pool)
 	}
 
 	/*
-	 * If there are any deferred flush bios, we must commit
-	 * the metadata before issuing them.
+	 * If there are any deferred flush bios, we must commit the metadata
+	 * before issuing them or signaling their completion.
 	 */
 	bio_list_init(&bios);
+	bio_list_init(&bio_completions);
+
 	spin_lock_irqsave(&pool->lock, flags);
 	bio_list_merge(&bios, &pool->deferred_flush_bios);
 	bio_list_init(&pool->deferred_flush_bios);
+
+	bio_list_merge(&bio_completions, &pool->deferred_flush_completions);
+	bio_list_init(&pool->deferred_flush_completions);
 	spin_unlock_irqrestore(&pool->lock, flags);
 
-	if (bio_list_empty(&bios) &&
+	if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
 	    !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
 		return;
 
 	if (commit(pool)) {
+		bio_list_merge(&bios, &bio_completions);
+
 		while ((bio = bio_list_pop(&bios)))
 			bio_io_error(bio);
 		return;
 	}
 	pool->last_commit_jiffies = jiffies;
 
+	while ((bio = bio_list_pop(&bio_completions)))
+		bio_endio(bio);
+
 	while ((bio = bio_list_pop(&bios)))
 		generic_make_request(bio);
 }
@@ -2954,6 +2998,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 	INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
 	spin_lock_init(&pool->lock);
 	bio_list_init(&pool->deferred_flush_bios);
+	bio_list_init(&pool->deferred_flush_completions);
 	INIT_LIST_HEAD(&pool->prepared_mappings);
 	INIT_LIST_HEAD(&pool->prepared_discards);
 	INIT_LIST_HEAD(&pool->prepared_discards_pt2);
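
Taken together, the thinp change enforces one ordering rule: a bio that overwrites a freshly allocated block and carries REQ_FUA may only be completed after the metadata that maps that block has been committed, which is why such bios are now parked on deferred_flush_completions and only ended in process_deferred_bios() once commit() has succeeded. The sketch below is a minimal user-space model of that rule with stand-in types for the kernel's bio and bio_list machinery; it is not the kernel implementation, and the helper names are invented for the example.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for struct bio: only the FUA flag matters for the example. */
struct bio { bool fua; const char *name; };

#define MAX_BIOS 16
static struct bio *deferred_flush_completions[MAX_BIOS];
static int n_deferred;

/* Stand-in for commit(pool); assumed to succeed in this sketch. */
static bool commit_metadata(void)
{
        return true;
}

static void end_bio(struct bio *bio)
{
        printf("completed: %s\n", bio->name);
}

/* Mirrors the shape of complete_overwrite_bio(): FUA bios are deferred. */
static void complete_overwrite(struct bio *bio)
{
        if (!bio->fua) {
                end_bio(bio);           /* no commit needed, complete now */
                return;
        }
        deferred_flush_completions[n_deferred++] = bio;
}

/* Mirrors process_deferred_bios(): one commit, then signal completions. */
static void process_deferred(void)
{
        if (n_deferred == 0)
                return;
        if (!commit_metadata())
                return;                 /* the real code errors the bios here */
        for (int i = 0; i < n_deferred; i++)
                end_bio(deferred_flush_completions[i]);
        n_deferred = 0;
}

int main(void)
{
        struct bio a = { .fua = false, .name = "plain overwrite" };
        struct bio b = { .fua = true,  .name = "FUA overwrite" };

        complete_overwrite(&a);         /* completes immediately */
        complete_overwrite(&b);         /* held back until after the commit */
        process_deferred();
        return 0;
}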