path: root/drivers/md/bcache/request.c
author     Christoph Hellwig <hch@lst.de>    2015-07-20 09:29:37 -0400
committer  Jens Axboe <axboe@fb.com>         2015-07-29 10:55:15 -0400
commit     4246a0b63bd8f56a1469b12eafeb875b1041a451 (patch)
tree       3281bb158d658ef7f208ad380c0ecee600a5ab5e /drivers/md/bcache/request.c
parent     0034af036554c39eefd14d835a8ec3496ac46712 (diff)
block: add a bi_error field to struct bio
Currently we have two different ways to signal an I/O error on a BIO:

 (1) by clearing the BIO_UPTODATE flag
 (2) by returning a Linux errno value to the bi_end_io callback

The first one has the drawback of only communicating a single possible
error (-EIO), and the second one has the drawback of not being persistent
when bios are queued up, and of not being passed along from child to
parent bio in the ever more popular chaining scenario. Having both
mechanisms available has the additional drawback of utterly confusing
driver authors and introducing bugs where various I/O submitters only
deal with one of them, and the others have to add boilerplate code to
deal with both kinds of error returns.

So add a new bi_error field to store an errno value directly in struct
bio and remove the existing mechanisms to clean all this up.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: NeilBrown <neilb@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
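For illustration only (this sketch is not part of the patch), the change to a
bio completion callback under the new scheme looks as follows; the
example_endio names and the pr_err messages are hypothetical:

/* Old style: the errno arrives as a separate bi_end_io argument. */
static void example_endio_old(struct bio *bio, int error)
{
        if (error)
                pr_err("example: I/O failed: %d\n", error);
        bio_put(bio);
}

/* New style: the errno is stored in the bio itself, so it persists
 * while the bio is queued and is visible across a bio chain. */
static void example_endio_new(struct bio *bio)
{
        if (bio->bi_error)
                pr_err("example: I/O failed: %d\n", bio->bi_error);
        bio_put(bio);
}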
Diffstat (limited to 'drivers/md/bcache/request.c')
-rw-r--r--    drivers/md/bcache/request.c    27
1 file changed, 14 insertions(+), 13 deletions(-)
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index f292790997d7..a09b9462ff49 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -173,22 +173,22 @@ static void bch_data_insert_error(struct closure *cl)
 	bch_data_insert_keys(cl);
 }
 
-static void bch_data_insert_endio(struct bio *bio, int error)
+static void bch_data_insert_endio(struct bio *bio)
 {
 	struct closure *cl = bio->bi_private;
 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 
-	if (error) {
+	if (bio->bi_error) {
 		/* TODO: We could try to recover from this. */
 		if (op->writeback)
-			op->error = error;
+			op->error = bio->bi_error;
 		else if (!op->replace)
 			set_closure_fn(cl, bch_data_insert_error, op->wq);
 		else
 			set_closure_fn(cl, NULL, NULL);
 	}
 
-	bch_bbio_endio(op->c, bio, error, "writing data to cache");
+	bch_bbio_endio(op->c, bio, bio->bi_error, "writing data to cache");
 }
 
 static void bch_data_insert_start(struct closure *cl)
@@ -477,7 +477,7 @@ struct search {
 	struct data_insert_op	iop;
 };
 
-static void bch_cache_read_endio(struct bio *bio, int error)
+static void bch_cache_read_endio(struct bio *bio)
 {
 	struct bbio *b = container_of(bio, struct bbio, bio);
 	struct closure *cl = bio->bi_private;
@@ -490,15 +490,15 @@ static void bch_cache_read_endio(struct bio *bio, int error)
 	 * from the backing device.
 	 */
 
-	if (error)
-		s->iop.error = error;
+	if (bio->bi_error)
+		s->iop.error = bio->bi_error;
 	else if (!KEY_DIRTY(&b->key) &&
 		 ptr_stale(s->iop.c, &b->key, 0)) {
 		atomic_long_inc(&s->iop.c->cache_read_races);
 		s->iop.error = -EINTR;
 	}
 
-	bch_bbio_endio(s->iop.c, bio, error, "reading from cache");
+	bch_bbio_endio(s->iop.c, bio, bio->bi_error, "reading from cache");
 }
 
 /*
@@ -591,13 +591,13 @@ static void cache_lookup(struct closure *cl)
 
 /* Common code for the make_request functions */
 
-static void request_endio(struct bio *bio, int error)
+static void request_endio(struct bio *bio)
 {
 	struct closure *cl = bio->bi_private;
 
-	if (error) {
+	if (bio->bi_error) {
 		struct search *s = container_of(cl, struct search, cl);
-		s->iop.error = error;
+		s->iop.error = bio->bi_error;
 		/* Only cache read errors are recoverable */
 		s->recoverable = false;
 	}
@@ -613,7 +613,8 @@ static void bio_complete(struct search *s)
 			    &s->d->disk->part0, s->start_time);
 
 		trace_bcache_request_end(s->d, s->orig_bio);
-		bio_endio(s->orig_bio, s->iop.error);
+		s->orig_bio->bi_error = s->iop.error;
+		bio_endio(s->orig_bio);
 		s->orig_bio = NULL;
 	}
 }
@@ -992,7 +993,7 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
 	} else {
 		if ((bio->bi_rw & REQ_DISCARD) &&
 		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
-			bio_endio(bio, 0);
+			bio_endio(bio);
 		else
 			bch_generic_make_request(bio, &d->bio_split_hook);
 	}