aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/md/dm-cache-target.c
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2015-07-20 09:29:37 -0400
committerJens Axboe <axboe@fb.com>2015-07-29 10:55:15 -0400
commit4246a0b63bd8f56a1469b12eafeb875b1041a451 (patch)
tree3281bb158d658ef7f208ad380c0ecee600a5ab5e /drivers/md/dm-cache-target.c
parent0034af036554c39eefd14d835a8ec3496ac46712 (diff)
block: add a bi_error field to struct bio
Currently we have two different ways to signal an I/O error on a BIO: (1) by clearing the BIO_UPTODATE flag (2) by returning a Linux errno value to the bi_end_io callback The first one has the drawback of only communicating a single possible error (-EIO), and the second one has the drawback of not being persistent when bios are queued up, and are not passed along from child to parent bio in the ever more popular chaining scenario. Having both mechanisms available has the additional drawback of utterly confusing driver authors and introducing bugs where various I/O submitters only deal with one of them, and the others have to add boilerplate code to deal with both kinds of error returns. So add a new bi_error field to store an errno value directly in struct bio and remove the existing mechanisms to clean all this up. Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Hannes Reinecke <hare@suse.de> Reviewed-by: NeilBrown <neilb@suse.com> Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'drivers/md/dm-cache-target.c')
-rw-r--r--drivers/md/dm-cache-target.c24
1 files changed, 13 insertions, 11 deletions
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 1b4e1756b169..04d0dadc48b1 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -919,14 +919,14 @@ static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
919 wake_worker(cache); 919 wake_worker(cache);
920} 920}
921 921
922static void writethrough_endio(struct bio *bio, int err) 922static void writethrough_endio(struct bio *bio)
923{ 923{
924 struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT); 924 struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
925 925
926 dm_unhook_bio(&pb->hook_info, bio); 926 dm_unhook_bio(&pb->hook_info, bio);
927 927
928 if (err) { 928 if (bio->bi_error) {
929 bio_endio(bio, err); 929 bio_endio(bio);
930 return; 930 return;
931 } 931 }
932 932
@@ -1231,7 +1231,7 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
1231 * The block was promoted via an overwrite, so it's dirty. 1231 * The block was promoted via an overwrite, so it's dirty.
1232 */ 1232 */
1233 set_dirty(cache, mg->new_oblock, mg->cblock); 1233 set_dirty(cache, mg->new_oblock, mg->cblock);
1234 bio_endio(mg->new_ocell->holder, 0); 1234 bio_endio(mg->new_ocell->holder);
1235 cell_defer(cache, mg->new_ocell, false); 1235 cell_defer(cache, mg->new_ocell, false);
1236 } 1236 }
1237 free_io_migration(mg); 1237 free_io_migration(mg);
@@ -1284,7 +1284,7 @@ static void issue_copy(struct dm_cache_migration *mg)
1284 } 1284 }
1285} 1285}
1286 1286
1287static void overwrite_endio(struct bio *bio, int err) 1287static void overwrite_endio(struct bio *bio)
1288{ 1288{
1289 struct dm_cache_migration *mg = bio->bi_private; 1289 struct dm_cache_migration *mg = bio->bi_private;
1290 struct cache *cache = mg->cache; 1290 struct cache *cache = mg->cache;
@@ -1294,7 +1294,7 @@ static void overwrite_endio(struct bio *bio, int err)
1294 1294
1295 dm_unhook_bio(&pb->hook_info, bio); 1295 dm_unhook_bio(&pb->hook_info, bio);
1296 1296
1297 if (err) 1297 if (bio->bi_error)
1298 mg->err = true; 1298 mg->err = true;
1299 1299
1300 mg->requeue_holder = false; 1300 mg->requeue_holder = false;
@@ -1358,7 +1358,7 @@ static void issue_discard(struct dm_cache_migration *mg)
1358 b = to_dblock(from_dblock(b) + 1); 1358 b = to_dblock(from_dblock(b) + 1);
1359 } 1359 }
1360 1360
1361 bio_endio(bio, 0); 1361 bio_endio(bio);
1362 cell_defer(mg->cache, mg->new_ocell, false); 1362 cell_defer(mg->cache, mg->new_ocell, false);
1363 free_migration(mg); 1363 free_migration(mg);
1364} 1364}
@@ -1631,7 +1631,7 @@ static void process_discard_bio(struct cache *cache, struct prealloc *structs,
1631 1631
1632 calc_discard_block_range(cache, bio, &b, &e); 1632 calc_discard_block_range(cache, bio, &b, &e);
1633 if (b == e) { 1633 if (b == e) {
1634 bio_endio(bio, 0); 1634 bio_endio(bio);
1635 return; 1635 return;
1636 } 1636 }
1637 1637
@@ -2213,8 +2213,10 @@ static void requeue_deferred_bios(struct cache *cache)
2213 bio_list_merge(&bios, &cache->deferred_bios); 2213 bio_list_merge(&bios, &cache->deferred_bios);
2214 bio_list_init(&cache->deferred_bios); 2214 bio_list_init(&cache->deferred_bios);
2215 2215
2216 while ((bio = bio_list_pop(&bios))) 2216 while ((bio = bio_list_pop(&bios))) {
2217 bio_endio(bio, DM_ENDIO_REQUEUE); 2217 bio->bi_error = DM_ENDIO_REQUEUE;
2218 bio_endio(bio);
2219 }
2218} 2220}
2219 2221
2220static int more_work(struct cache *cache) 2222static int more_work(struct cache *cache)
@@ -3119,7 +3121,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
3119 * This is a duplicate writethrough io that is no 3121 * This is a duplicate writethrough io that is no
3120 * longer needed because the block has been demoted. 3122 * longer needed because the block has been demoted.
3121 */ 3123 */
3122 bio_endio(bio, 0); 3124 bio_endio(bio);
3123 // FIXME: remap everything as a miss 3125 // FIXME: remap everything as a miss
3124 cell_defer(cache, cell, false); 3126 cell_defer(cache, cell, false);
3125 r = DM_MAPIO_SUBMITTED; 3127 r = DM_MAPIO_SUBMITTED;