author    Christoph Hellwig <hch@lst.de>  2017-06-03 03:38:06 -0400
committer Jens Axboe <axboe@fb.com>       2017-06-09 11:27:32 -0400
commit    4e4cbee93d56137ebff722be022cae5f70ef84fb
tree      4fa7345155599fc6bdd653fca8c5224ddf90a5be
parent    fc17b6534eb8395f0b3133eb31d87deec32c642b
block: switch bios to blk_status_t
Replace bi_error with a new bi_status to allow for a clear conversion.
Note that device mapper overloaded bi_error with a private value, which
we'll have to keep around at least for now and thus propagate to a
proper blk_status_t value.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
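(A minimal sketch of the resulting driver-side idiom, not part of this
patch: completion paths now read bio->bi_status instead of bio->bi_error,
and errno values are translated at the boundary with the
errno_to_blk_status()/blk_status_to_errno() helpers added earlier in this
series. my_endio() and my_complete() are hypothetical names used only for
illustration.)

    #include <linux/bio.h>
    #include <linux/blk_types.h>

    /* Completion side: bi_status carries a blk_status_t, not a negative errno. */
    static void my_endio(struct bio *bio)
    {
            if (bio->bi_status)
                    pr_err("I/O error: %d\n",
                           blk_status_to_errno(bio->bi_status));
            bio_put(bio);
    }

    /* Error-reporting side: map an internal errno to a block status code. */
    static void my_complete(struct bio *bio, int err)
    {
            /* errno_to_blk_status() maps e.g. -ENOMEM to BLK_STS_RESOURCE
             * and falls back to BLK_STS_IOERR for unrecognized values. */
            bio->bi_status = errno_to_blk_status(err);
            bio_endio(bio);
    }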
Diffstat (limited to 'drivers/md/bcache/request.c')
-rw-r--r--  drivers/md/bcache/request.c | 28
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 709c9cc34369..019b3df9f1c6 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -81,7 +81,7 @@ static void bch_data_insert_keys(struct closure *cl)
 	if (ret == -ESRCH) {
 		op->replace_collision = true;
 	} else if (ret) {
-		op->error		= -ENOMEM;
+		op->status		= BLK_STS_RESOURCE;
 		op->insert_data_done	= true;
 	}
 
@@ -178,17 +178,17 @@ static void bch_data_insert_endio(struct bio *bio)
 	struct closure *cl = bio->bi_private;
 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		/* TODO: We could try to recover from this. */
 		if (op->writeback)
-			op->error = bio->bi_error;
+			op->status = bio->bi_status;
 		else if (!op->replace)
 			set_closure_fn(cl, bch_data_insert_error, op->wq);
 		else
 			set_closure_fn(cl, NULL, NULL);
 	}
 
-	bch_bbio_endio(op->c, bio, bio->bi_error, "writing data to cache");
+	bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
 }
 
 static void bch_data_insert_start(struct closure *cl)
@@ -488,15 +488,15 @@ static void bch_cache_read_endio(struct bio *bio)
 	 * from the backing device.
 	 */
 
-	if (bio->bi_error)
-		s->iop.error = bio->bi_error;
+	if (bio->bi_status)
+		s->iop.status = bio->bi_status;
 	else if (!KEY_DIRTY(&b->key) &&
 		 ptr_stale(s->iop.c, &b->key, 0)) {
 		atomic_long_inc(&s->iop.c->cache_read_races);
-		s->iop.error = -EINTR;
+		s->iop.status = BLK_STS_IOERR;
 	}
 
-	bch_bbio_endio(s->iop.c, bio, bio->bi_error, "reading from cache");
+	bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
 }
 
 /*
@@ -593,9 +593,9 @@ static void request_endio(struct bio *bio)
 {
 	struct closure *cl = bio->bi_private;
 
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		struct search *s = container_of(cl, struct search, cl);
-		s->iop.error = bio->bi_error;
+		s->iop.status = bio->bi_status;
 		/* Only cache read errors are recoverable */
 		s->recoverable = false;
 	}
@@ -611,7 +611,7 @@ static void bio_complete(struct search *s)
 				    &s->d->disk->part0, s->start_time);
 
 		trace_bcache_request_end(s->d, s->orig_bio);
-		s->orig_bio->bi_error = s->iop.error;
+		s->orig_bio->bi_status = s->iop.status;
 		bio_endio(s->orig_bio);
 		s->orig_bio = NULL;
 	}
@@ -664,7 +664,7 @@ static inline struct search *search_alloc(struct bio *bio,
 	s->iop.inode		= d->id;
 	s->iop.write_point	= hash_long((unsigned long) current, 16);
 	s->iop.write_prio	= 0;
-	s->iop.error		= 0;
+	s->iop.status		= 0;
 	s->iop.flags		= 0;
 	s->iop.flush_journal	= op_is_flush(bio->bi_opf);
 	s->iop.wq		= bcache_wq;
@@ -707,7 +707,7 @@ static void cached_dev_read_error(struct closure *cl)
 	/* Retry from the backing device: */
 	trace_bcache_read_retry(s->orig_bio);
 
-	s->iop.error = 0;
+	s->iop.status = 0;
 	do_bio_hook(s, s->orig_bio);
 
 	/* XXX: invalidate cache */
@@ -767,7 +767,7 @@ static void cached_dev_read_done_bh(struct closure *cl)
 				  !s->cache_miss, s->iop.bypass);
 	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);
 
-	if (s->iop.error)
+	if (s->iop.status)
 		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
 	else if (s->iop.bio || verify(dc, &s->bio.bio))
 		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);