Diffstat (limited to 'drivers/md/dm-thin.c')

 drivers/md/dm-thin.c | 108 ++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 71 insertions(+), 37 deletions(-)

diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 598a78ba894b..fc803d50f9f0 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -334,26 +334,55 @@ static sector_t block_to_sectors(struct pool *pool, dm_block_t b)
 		(b * pool->sectors_per_block);
 }
 
-static int issue_discard(struct thin_c *tc, dm_block_t data_b, dm_block_t data_e,
-			 struct bio *parent_bio)
+/*----------------------------------------------------------------*/
+
+struct discard_op {
+	struct thin_c *tc;
+	struct blk_plug plug;
+	struct bio *parent_bio;
+	struct bio *bio;
+};
+
+static void begin_discard(struct discard_op *op, struct thin_c *tc, struct bio *parent)
+{
+	BUG_ON(!parent);
+
+	op->tc = tc;
+	blk_start_plug(&op->plug);
+	op->parent_bio = parent;
+	op->bio = NULL;
+}
+
+static int issue_discard(struct discard_op *op, dm_block_t data_b, dm_block_t data_e)
 {
-	int type = REQ_WRITE | REQ_DISCARD;
+	struct thin_c *tc = op->tc;
 	sector_t s = block_to_sectors(tc->pool, data_b);
 	sector_t len = block_to_sectors(tc->pool, data_e - data_b);
-	struct bio *bio = NULL;
-	struct blk_plug plug;
-	int ret;
 
-	blk_start_plug(&plug);
-	ret = __blkdev_issue_discard(tc->pool_dev->bdev, s, len,
-				     GFP_NOWAIT, type, &bio);
-	if (!ret && bio) {
-		bio_chain(bio, parent_bio);
-		submit_bio(type, bio);
+	return __blkdev_issue_discard(tc->pool_dev->bdev, s, len,
+				      GFP_NOWAIT, REQ_WRITE | REQ_DISCARD, &op->bio);
+}
+
+static void end_discard(struct discard_op *op, int r)
+{
+	if (op->bio) {
+		/*
+		 * Even if one of the calls to issue_discard failed, we
+		 * need to wait for the chain to complete.
+		 */
+		bio_chain(op->bio, op->parent_bio);
+		submit_bio(REQ_WRITE | REQ_DISCARD, op->bio);
 	}
-	blk_finish_plug(&plug);
 
-	return ret;
+	blk_finish_plug(&op->plug);
+
+	/*
+	 * Even if r is set, there could be sub discards in flight that we
+	 * need to wait for.
+	 */
+	if (r && !op->parent_bio->bi_error)
+		op->parent_bio->bi_error = r;
+	bio_endio(op->parent_bio);
 }
 
 /*----------------------------------------------------------------*/
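
The open-coded plug/chain/submit sequence is split here into a begin/issue/end
lifecycle, so several discard runs can be chained onto one parent bio before it
is completed. A minimal sketch of the calling pattern (tc, parent, data_b and
data_e are assumed to be in scope; this mirrors the non-shared passdown path
further down):

	struct discard_op op;
	int r;

	begin_discard(&op, tc, parent);		/* plug the queue, remember the parent bio */
	r = issue_discard(&op, data_b, data_e);	/* may be called repeatedly; chains bios into op->bio */
	end_discard(&op, r);			/* chain to parent, submit, unplug, complete parent */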
@@ -968,24 +997,28 @@ static void process_prepared_discard_no_passdown(struct dm_thin_new_mapping *m)
 	mempool_free(m, tc->pool->mapping_pool);
 }
 
-static int passdown_double_checking_shared_status(struct dm_thin_new_mapping *m)
+/*----------------------------------------------------------------*/
+
+static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m)
 {
 	/*
 	 * We've already unmapped this range of blocks, but before we
 	 * passdown we have to check that these blocks are now unused.
 	 */
-	int r;
+	int r = 0;
 	bool used = true;
 	struct thin_c *tc = m->tc;
 	struct pool *pool = tc->pool;
 	dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin;
+	struct discard_op op;
 
+	begin_discard(&op, tc, m->bio);
 	while (b != end) {
 		/* find start of unmapped run */
 		for (; b < end; b++) {
 			r = dm_pool_block_is_used(pool->pmd, b, &used);
 			if (r)
-				return r;
+				goto out;
 
 			if (!used)
 				break;
@@ -998,20 +1031,20 @@ static int passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
 		for (e = b + 1; e != end; e++) {
 			r = dm_pool_block_is_used(pool->pmd, e, &used);
 			if (r)
-				return r;
+				goto out;
 
 			if (used)
 				break;
 		}
 
-		r = issue_discard(tc, b, e, m->bio);
+		r = issue_discard(&op, b, e);
 		if (r)
-			return r;
+			goto out;
 
 		b = e;
 	}
-
-	return 0;
+out:
+	end_discard(&op, r);
 }
 
 static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
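
Read in isolation, the rewritten loop walks the data blocks and coalesces each
maximal run of now-unused blocks into a single issue_discard() call. A
simplified sketch of that shape, with a hypothetical is_used() standing in for
dm_pool_block_is_used() and the goto-based error handling elided:

	static void discard_unused_runs(struct discard_op *op, dm_block_t b, dm_block_t end)
	{
		dm_block_t e;

		while (b != end) {
			/* skip blocks that are still mapped (e.g. shared with a snapshot) */
			while (b < end && is_used(b))
				b++;
			if (b == end)
				break;

			/* extend the run while blocks remain unused */
			for (e = b + 1; e != end && !is_used(e); e++)
				;

			issue_discard(op, b, e);	/* one discard for the whole run [b, e) */
			b = e;
		}
	}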
@@ -1021,20 +1054,21 @@ static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
 	struct pool *pool = tc->pool;
 
 	r = dm_thin_remove_range(tc->td, m->virt_begin, m->virt_end);
-	if (r)
+	if (r) {
 		metadata_operation_failed(pool, "dm_thin_remove_range", r);
+		bio_io_error(m->bio);
 
-	else if (m->maybe_shared)
-		r = passdown_double_checking_shared_status(m);
-	else
-		r = issue_discard(tc, m->data_block, m->data_block + (m->virt_end - m->virt_begin), m->bio);
+	} else if (m->maybe_shared) {
+		passdown_double_checking_shared_status(m);
+
+	} else {
+		struct discard_op op;
+		begin_discard(&op, tc, m->bio);
+		r = issue_discard(&op, m->data_block,
+				  m->data_block + (m->virt_end - m->virt_begin));
+		end_discard(&op, r);
+	}
 
-	/*
-	 * Even if r is set, there could be sub discards in flight that we
-	 * need to wait for.
-	 */
-	m->bio->bi_error = r;
-	bio_endio(m->bio);
 	cell_defer_no_holder(tc, m->cell);
 	mempool_free(m, pool->mapping_pool);
 }
@@ -1505,11 +1539,11 @@ static void break_up_discard_bio(struct thin_c *tc, dm_block_t begin, dm_block_t
 
 	/*
 	 * The parent bio must not complete before sub discard bios are
-	 * chained to it (see issue_discard's bio_chain)!
+	 * chained to it (see end_discard's bio_chain)!
 	 *
 	 * This per-mapping bi_remaining increment is paired with
 	 * the implicit decrement that occurs via bio_endio() in
-	 * process_prepared_discard_passdown().
+	 * end_discard().
 	 */
 	bio_inc_remaining(bio);
 	if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list))
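
The comment fix-ups track where bio_chain() and bio_endio() now live. The
completion counting they describe works out like this (a sketch, not code from
the patch; assume one parent discard bio split across N mappings):

	bio_inc_remaining(parent);	/* once per mapping, in break_up_discard_bio() */
	...
	bio_chain(op->bio, parent);	/* each mapping's sub-discard chain holds a reference */
	bio_endio(parent);		/* once per mapping, in end_discard() */

bio_endio() only completes the parent once its remaining count drops back to
zero, so the parent bio cannot finish before every mapping's chained
sub-discards have completed.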
@@ -3850,7 +3884,7 @@ static struct target_type pool_target = {
 	.name = "thin-pool",
 	.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
 		    DM_TARGET_IMMUTABLE,
-	.version = {1, 18, 0},
+	.version = {1, 19, 0},
 	.module = THIS_MODULE,
 	.ctr = pool_ctr,
 	.dtr = pool_dtr,
@@ -4224,7 +4258,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static struct target_type thin_target = {
 	.name = "thin",
-	.version = {1, 18, 0},
+	.version = {1, 19, 0},
 	.module = THIS_MODULE,
 	.ctr = thin_ctr,
 	.dtr = thin_dtr,