author      Christoph Hellwig <hch@lst.de>    2017-06-03 03:38:06 -0400
committer   Jens Axboe <axboe@fb.com>         2017-06-09 11:27:32 -0400
commit      4e4cbee93d56137ebff722be022cae5f70ef84fb (patch)
tree        4fa7345155599fc6bdd653fca8c5224ddf90a5be /drivers/md/dm-thin.c
parent      fc17b6534eb8395f0b3133eb31d87deec32c642b (diff)
block: switch bios to blk_status_t
Replace bi_error with a new bi_status to allow for a clear conversion.
Note that device mapper overloaded bi_error with a private value, which
we'll have to keep around at least for now and thus propagate to a
proper blk_status_t value.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
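For illustration only (not part of this commit): a minimal sketch of the completion pattern the conversion establishes, modeled on end_discard() in the diff below. errno_to_blk_status(), bio_endio() and the bio->bi_status field are the real interfaces used by this series; the helper name example_complete_bio is made up here.

/*
 * Illustrative sketch: translate an integer errno from a helper into the
 * new blk_status_t before storing it in the bio, record only the first
 * failure, then complete the bio.
 */
#include <linux/bio.h>
#include <linux/blk_types.h>

static void example_complete_bio(struct bio *bio, int r)
{
        if (r && !bio->bi_status)
                bio->bi_status = errno_to_blk_status(r);
        bio_endio(bio);
}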
Diffstat (limited to 'drivers/md/dm-thin.c')
-rw-r--r--    drivers/md/dm-thin.c    65
1 file changed, 32 insertions, 33 deletions
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 22b1a64c44b7..3490b300cbff 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -383,8 +383,8 @@ static void end_discard(struct discard_op *op, int r)
          * Even if r is set, there could be sub discards in flight that we
          * need to wait for.
          */
-        if (r && !op->parent_bio->bi_error)
-                op->parent_bio->bi_error = r;
+        if (r && !op->parent_bio->bi_status)
+                op->parent_bio->bi_status = errno_to_blk_status(r);
         bio_endio(op->parent_bio);
 }
 
@@ -450,22 +450,20 @@ static void cell_release_no_holder(struct pool *pool,
 }
 
 static void cell_error_with_code(struct pool *pool,
-                                 struct dm_bio_prison_cell *cell, int error_code)
+                struct dm_bio_prison_cell *cell, blk_status_t error_code)
 {
         dm_cell_error(pool->prison, cell, error_code);
         dm_bio_prison_free_cell(pool->prison, cell);
 }
 
-static int get_pool_io_error_code(struct pool *pool)
+static blk_status_t get_pool_io_error_code(struct pool *pool)
 {
-        return pool->out_of_data_space ? -ENOSPC : -EIO;
+        return pool->out_of_data_space ? BLK_STS_NOSPC : BLK_STS_IOERR;
 }
 
 static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
 {
-        int error = get_pool_io_error_code(pool);
-
-        cell_error_with_code(pool, cell, error);
+        cell_error_with_code(pool, cell, get_pool_io_error_code(pool));
 }
 
 static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
@@ -475,7 +473,7 @@ static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
 
 static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell)
 {
-        cell_error_with_code(pool, cell, DM_ENDIO_REQUEUE);
+        cell_error_with_code(pool, cell, BLK_STS_DM_REQUEUE);
 }
 
 /*----------------------------------------------------------------*/
@@ -555,17 +553,18 @@ static void __merge_bio_list(struct bio_list *bios, struct bio_list *master)
         bio_list_init(master);
 }
 
-static void error_bio_list(struct bio_list *bios, int error)
+static void error_bio_list(struct bio_list *bios, blk_status_t error)
 {
         struct bio *bio;
 
         while ((bio = bio_list_pop(bios))) {
-                bio->bi_error = error;
+                bio->bi_status = error;
                 bio_endio(bio);
         }
 }
 
-static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master, int error)
+static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master,
+                blk_status_t error)
 {
         struct bio_list bios;
         unsigned long flags;
@@ -608,11 +607,11 @@ static void requeue_io(struct thin_c *tc)
         __merge_bio_list(&bios, &tc->retry_on_resume_list);
         spin_unlock_irqrestore(&tc->lock, flags);
 
-        error_bio_list(&bios, DM_ENDIO_REQUEUE);
+        error_bio_list(&bios, BLK_STS_DM_REQUEUE);
         requeue_deferred_cells(tc);
 }
 
-static void error_retry_list_with_code(struct pool *pool, int error)
+static void error_retry_list_with_code(struct pool *pool, blk_status_t error)
 {
         struct thin_c *tc;
 
@@ -624,9 +623,7 @@ static void error_retry_list_with_code(struct pool *pool, int error)
 
 static void error_retry_list(struct pool *pool)
 {
-        int error = get_pool_io_error_code(pool);
-
-        error_retry_list_with_code(pool, error);
+        error_retry_list_with_code(pool, get_pool_io_error_code(pool));
 }
 
 /*
@@ -774,7 +771,7 @@ struct dm_thin_new_mapping {
          */
         atomic_t prepare_actions;
 
-        int err;
+        blk_status_t status;
         struct thin_c *tc;
         dm_block_t virt_begin, virt_end;
         dm_block_t data_block;
@@ -814,7 +811,7 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
 {
         struct dm_thin_new_mapping *m = context;
 
-        m->err = read_err || write_err ? -EIO : 0;
+        m->status = read_err || write_err ? BLK_STS_IOERR : 0;
         complete_mapping_preparation(m);
 }
 
@@ -825,7 +822,7 @@ static void overwrite_endio(struct bio *bio)
 
         bio->bi_end_io = m->saved_bi_end_io;
 
-        m->err = bio->bi_error;
+        m->status = bio->bi_status;
         complete_mapping_preparation(m);
 }
 
@@ -925,7 +922,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
         struct bio *bio = m->bio;
         int r;
 
-        if (m->err) {
+        if (m->status) {
                 cell_error(pool, m->cell);
                 goto out;
         }
@@ -1495,7 +1492,7 @@ static void retry_on_resume(struct bio *bio)
         spin_unlock_irqrestore(&tc->lock, flags);
 }
 
-static int should_error_unserviceable_bio(struct pool *pool)
+static blk_status_t should_error_unserviceable_bio(struct pool *pool)
 {
         enum pool_mode m = get_pool_mode(pool);
 
@@ -1503,27 +1500,27 @@ static int should_error_unserviceable_bio(struct pool *pool)
         case PM_WRITE:
                 /* Shouldn't get here */
                 DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
-                return -EIO;
+                return BLK_STS_IOERR;
 
         case PM_OUT_OF_DATA_SPACE:
-                return pool->pf.error_if_no_space ? -ENOSPC : 0;
+                return pool->pf.error_if_no_space ? BLK_STS_NOSPC : 0;
 
         case PM_READ_ONLY:
         case PM_FAIL:
-                return -EIO;
+                return BLK_STS_IOERR;
         default:
                 /* Shouldn't get here */
                 DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
-                return -EIO;
+                return BLK_STS_IOERR;
         }
 }
 
 static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
 {
-        int error = should_error_unserviceable_bio(pool);
+        blk_status_t error = should_error_unserviceable_bio(pool);
 
         if (error) {
-                bio->bi_error = error;
+                bio->bi_status = error;
                 bio_endio(bio);
         } else
                 retry_on_resume(bio);
@@ -1533,7 +1530,7 @@ static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *c
 {
         struct bio *bio;
         struct bio_list bios;
-        int error;
+        blk_status_t error;
 
         error = should_error_unserviceable_bio(pool);
         if (error) {
@@ -2071,7 +2068,8 @@ static void process_thin_deferred_bios(struct thin_c *tc)
         unsigned count = 0;
 
         if (tc->requeue_mode) {
-                error_thin_bio_list(tc, &tc->deferred_bio_list, DM_ENDIO_REQUEUE);
+                error_thin_bio_list(tc, &tc->deferred_bio_list,
+                                BLK_STS_DM_REQUEUE);
                 return;
         }
 
@@ -2322,7 +2320,7 @@ static void do_no_space_timeout(struct work_struct *ws)
         if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) {
                 pool->pf.error_if_no_space = true;
                 notify_of_pool_mode_change_to_oods(pool);
-                error_retry_list_with_code(pool, -ENOSPC);
+                error_retry_list_with_code(pool, BLK_STS_NOSPC);
         }
 }
 
@@ -2624,7 +2622,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
         thin_hook_bio(tc, bio);
 
         if (tc->requeue_mode) {
-                bio->bi_error = DM_ENDIO_REQUEUE;
+                bio->bi_status = BLK_STS_DM_REQUEUE;
                 bio_endio(bio);
                 return DM_MAPIO_SUBMITTED;
         }
@@ -4177,7 +4175,8 @@ static int thin_map(struct dm_target *ti, struct bio *bio)
         return thin_bio_map(ti, bio);
 }
 
-static int thin_endio(struct dm_target *ti, struct bio *bio, int *err)
+static int thin_endio(struct dm_target *ti, struct bio *bio,
+                blk_status_t *err)
 {
         unsigned long flags;
         struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));