author		Christoph Hellwig <hch@lst.de>	2015-07-20 09:29:37 -0400
committer	Jens Axboe <axboe@fb.com>	2015-07-29 10:55:15 -0400
commit		4246a0b63bd8f56a1469b12eafeb875b1041a451
tree		3281bb158d658ef7f208ad380c0ecee600a5ab5e	/drivers/md/dm-thin.c
parent		0034af036554c39eefd14d835a8ec3496ac46712
block: add a bi_error field to struct bio
Currently we have two different ways to signal an I/O error on a BIO:

 (1) by clearing the BIO_UPTODATE flag
 (2) by returning a Linux errno value to the bi_end_io callback

The first one has the drawback of only communicating a single possible
error (-EIO), and the second one has the drawback of not being persistent
when bios are queued up, and of not being passed along from child to parent
bio in the ever more popular chaining scenario.  Having both mechanisms
available has the additional drawback of utterly confusing driver authors
and introducing bugs where various I/O submitters only deal with one of
them, and the others have to add boilerplate code to deal with both kinds
of error returns.

So add a new bi_error field to store an errno value directly in struct
bio and remove the existing mechanisms to clean all this up.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: NeilBrown <neilb@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
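For driver and target authors, the practical change is that completion status now travels inside the bio itself: set bio->bi_error to 0 or a negative errno, then call the single-argument bio_endio(). The following is a minimal sketch of that pattern, not code from this patch; my_driver_complete() and struct my_request are hypothetical names invented for the example.

/*
 * Minimal sketch of the completion pattern introduced by this patch.
 * my_driver_complete() and struct my_request are hypothetical names,
 * not taken from dm-thin.c or from this commit.
 *
 * Before this patch a driver would have written:
 *	bio_endio(rq->bio, error);
 * or cleared BIO_UPTODATE and relied on the callback seeing -EIO.
 */
#include <linux/bio.h>

struct my_request {
	struct bio *bio;
};

static void my_driver_complete(struct my_request *rq, int error)
{
	rq->bio->bi_error = error;	/* 0 on success, negative errno on failure */
	bio_endio(rq->bio);		/* bi_error travels with the bio, including across chained bios */
}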
Diffstat (limited to 'drivers/md/dm-thin.c')
 drivers/md/dm-thin.c | 41 +++++++++++++++++++++++------------------
 1 file changed, 23 insertions(+), 18 deletions(-)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index c33f61a4cc28..2ade2c46dca9 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -614,8 +614,10 @@ static void error_bio_list(struct bio_list *bios, int error)
 {
 	struct bio *bio;
 
-	while ((bio = bio_list_pop(bios)))
-		bio_endio(bio, error);
+	while ((bio = bio_list_pop(bios))) {
+		bio->bi_error = error;
+		bio_endio(bio);
+	}
 }
 
 static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master, int error)
@@ -864,14 +866,14 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
 	complete_mapping_preparation(m);
 }
 
-static void overwrite_endio(struct bio *bio, int err)
+static void overwrite_endio(struct bio *bio)
 {
 	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 	struct dm_thin_new_mapping *m = h->overwrite_mapping;
 
 	bio->bi_end_io = m->saved_bi_end_io;
 
-	m->err = err;
+	m->err = bio->bi_error;
 	complete_mapping_preparation(m);
 }
 
@@ -996,7 +998,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 	 */
 	if (bio) {
 		inc_remap_and_issue_cell(tc, m->cell, m->data_block);
-		bio_endio(bio, 0);
+		bio_endio(bio);
 	} else {
 		inc_all_io_entry(tc->pool, m->cell->holder);
 		remap_and_issue(tc, m->cell->holder, m->data_block);
@@ -1026,7 +1028,7 @@ static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
 
 static void process_prepared_discard_success(struct dm_thin_new_mapping *m)
 {
-	bio_endio(m->bio, 0);
+	bio_endio(m->bio);
 	free_discard_mapping(m);
 }
 
@@ -1040,7 +1042,7 @@ static void process_prepared_discard_no_passdown(struct dm_thin_new_mapping *m)
 		metadata_operation_failed(tc->pool, "dm_thin_remove_range", r);
 		bio_io_error(m->bio);
 	} else
-		bio_endio(m->bio, 0);
+		bio_endio(m->bio);
 
 	cell_defer_no_holder(tc, m->cell);
 	mempool_free(m, tc->pool->mapping_pool);
@@ -1111,7 +1113,8 @@ static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
 	 * Even if r is set, there could be sub discards in flight that we
 	 * need to wait for.
 	 */
-	bio_endio(m->bio, r);
+	m->bio->bi_error = r;
+	bio_endio(m->bio);
 	cell_defer_no_holder(tc, m->cell);
 	mempool_free(m, pool->mapping_pool);
 }
@@ -1487,9 +1490,10 @@ static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
 {
 	int error = should_error_unserviceable_bio(pool);
 
-	if (error)
-		bio_endio(bio, error);
-	else
+	if (error) {
+		bio->bi_error = error;
+		bio_endio(bio);
+	} else
 		retry_on_resume(bio);
 }
 
@@ -1625,7 +1629,7 @@ static void process_discard_cell_passdown(struct thin_c *tc, struct dm_bio_priso
 	 * will prevent completion until the sub range discards have
 	 * completed.
 	 */
-	bio_endio(bio, 0);
+	bio_endio(bio);
 }
 
 static void process_discard_bio(struct thin_c *tc, struct bio *bio)
@@ -1639,7 +1643,7 @@ static void process_discard_bio(struct thin_c *tc, struct bio *bio)
 		/*
 		 * The discard covers less than a block.
 		 */
-		bio_endio(bio, 0);
+		bio_endio(bio);
 		return;
 	}
 
@@ -1784,7 +1788,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
 	if (bio_data_dir(bio) == READ) {
 		zero_fill_bio(bio);
 		cell_defer_no_holder(tc, cell);
-		bio_endio(bio, 0);
+		bio_endio(bio);
 		return;
 	}
 
@@ -1849,7 +1853,7 @@ static void process_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
 
 			} else {
 				zero_fill_bio(bio);
-				bio_endio(bio, 0);
+				bio_endio(bio);
 			}
 		} else
 			provision_block(tc, bio, block, cell);
@@ -1920,7 +1924,7 @@ static void __process_bio_read_only(struct thin_c *tc, struct bio *bio,
 		}
 
 		zero_fill_bio(bio);
-		bio_endio(bio, 0);
+		bio_endio(bio);
 		break;
 
 	default:
@@ -1945,7 +1949,7 @@ static void process_cell_read_only(struct thin_c *tc, struct dm_bio_prison_cell
 
 static void process_bio_success(struct thin_c *tc, struct bio *bio)
 {
-	bio_endio(bio, 0);
+	bio_endio(bio);
 }
 
 static void process_bio_fail(struct thin_c *tc, struct bio *bio)
@@ -2581,7 +2585,8 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
 	thin_hook_bio(tc, bio);
 
 	if (tc->requeue_mode) {
-		bio_endio(bio, DM_ENDIO_REQUEUE);
+		bio->bi_error = DM_ENDIO_REQUEUE;
+		bio_endio(bio);
 		return DM_MAPIO_SUBMITTED;
 	}
 