Diffstat (limited to 'drivers/md')
33 files changed, 248 insertions, 238 deletions
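This diff converts the bio completion path in drivers/md from errno-style reporting (the int bio->bi_error field) to the dedicated blk_status_t type (bio->bi_status). Most hunks are mechanical renames; where an errno-returning helper meets the new type, errno_to_blk_status()/blk_status_to_errno() translate at the boundary, and dm's old trick of stuffing DM_ENDIO_REQUEUE into bi_error becomes the explicit BLK_STS_DM_REQUEUE status. The recurring endio shape, as a minimal sketch (example_endio() and handle_error() are illustrative names, not from any one driver):

    /* before: a negative errno lives in the bio */
    static void example_endio(struct bio *bio)
    {
            if (bio->bi_error)
                    handle_error(bio->bi_error);    /* e.g. -EIO */
    }

    /* after: a blk_status_t code lives in the bio */
    static void example_endio(struct bio *bio)
    {
            if (bio->bi_status)
                    handle_error(blk_status_to_errno(bio->bi_status));
    }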
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index c3ea03c9a1a8..dee542fff68e 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -849,10 +849,11 @@ static inline void wake_up_allocators(struct cache_set *c)
 
 /* Forward declarations */
 
-void bch_count_io_errors(struct cache *, int, const char *);
+void bch_count_io_errors(struct cache *, blk_status_t, const char *);
 void bch_bbio_count_io_errors(struct cache_set *, struct bio *,
-			      int, const char *);
-void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *);
+			      blk_status_t, const char *);
+void bch_bbio_endio(struct cache_set *, struct bio *, blk_status_t,
+		    const char *);
 void bch_bbio_free(struct bio *, struct cache_set *);
 struct bio *bch_bbio_alloc(struct cache_set *);
 
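For reference, the status codes this diff substitutes for errnos correspond as below; this is the mapping implemented by errno_to_blk_status()/blk_status_to_errno(), and since BLK_STS_OK is 0, truth-value tests such as `if (bio->bi_status)` keep working unchanged:

    /*
     *   errno           blk_status_t
     *   0           <-> BLK_STS_OK (0)
     *   -EIO        <-> BLK_STS_IOERR
     *   -ENOMEM     <-> BLK_STS_RESOURCE
     *   -EOPNOTSUPP <-> BLK_STS_NOTSUPP
     *   -ENOSPC     <-> BLK_STS_NOSPC
     *   -EILSEQ     <-> BLK_STS_PROTECTION
     */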
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 450d0e848ae4..866dcf78ff8e 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -307,7 +307,7 @@ static void bch_btree_node_read(struct btree *b)
 	bch_submit_bbio(bio, b->c, &b->key, 0);
 	closure_sync(&cl);
 
-	if (bio->bi_error)
+	if (bio->bi_status)
 		set_btree_node_io_error(b);
 
 	bch_bbio_free(bio, b->c);
@@ -374,10 +374,10 @@ static void btree_node_write_endio(struct bio *bio)
 	struct closure *cl = bio->bi_private;
 	struct btree *b = container_of(cl, struct btree, io);
 
-	if (bio->bi_error)
+	if (bio->bi_status)
 		set_btree_node_io_error(b);
 
-	bch_bbio_count_io_errors(b->c, bio, bio->bi_error, "writing btree");
+	bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
 	closure_put(cl);
 }
 
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index db45a88c0ce9..6a9b85095e7b 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -50,7 +50,7 @@ void bch_submit_bbio(struct bio *bio, struct cache_set *c,
 
 /* IO errors */
 
-void bch_count_io_errors(struct cache *ca, int error, const char *m)
+void bch_count_io_errors(struct cache *ca, blk_status_t error, const char *m)
 {
 	/*
 	 * The halflife of an error is:
@@ -103,7 +103,7 @@ void bch_count_io_errors(struct cache *ca, int error, const char *m)
 }
 
 void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
-			      int error, const char *m)
+			      blk_status_t error, const char *m)
 {
 	struct bbio *b = container_of(bio, struct bbio, bio);
 	struct cache *ca = PTR_CACHE(c, &b->key, 0);
@@ -132,7 +132,7 @@ void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
 }
 
 void bch_bbio_endio(struct cache_set *c, struct bio *bio,
-		    int error, const char *m)
+		    blk_status_t error, const char *m)
 {
 	struct closure *cl = bio->bi_private;
 
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 1198e53d5670..0352d05e495c 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -549,7 +549,7 @@ static void journal_write_endio(struct bio *bio)
 {
 	struct journal_write *w = bio->bi_private;
 
-	cache_set_err_on(bio->bi_error, w->c, "journal io error");
+	cache_set_err_on(bio->bi_status, w->c, "journal io error");
 	closure_put(&w->c->journal.io);
 }
 
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 13b8a907006d..f633b30c962e 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -63,14 +63,14 @@ static void read_moving_endio(struct bio *bio)
 	struct moving_io *io = container_of(bio->bi_private,
 					    struct moving_io, cl);
 
-	if (bio->bi_error)
-		io->op.error = bio->bi_error;
+	if (bio->bi_status)
+		io->op.status = bio->bi_status;
 	else if (!KEY_DIRTY(&b->key) &&
 		 ptr_stale(io->op.c, &b->key, 0)) {
-		io->op.error = -EINTR;
+		io->op.status = BLK_STS_IOERR;
 	}
 
-	bch_bbio_endio(io->op.c, bio, bio->bi_error, "reading data to move");
+	bch_bbio_endio(io->op.c, bio, bio->bi_status, "reading data to move");
 }
 
 static void moving_init(struct moving_io *io)
@@ -92,7 +92,7 @@ static void write_moving(struct closure *cl)
 	struct moving_io *io = container_of(cl, struct moving_io, cl);
 	struct data_insert_op *op = &io->op;
 
-	if (!op->error) {
+	if (!op->status) {
 		moving_init(io);
 
 		io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key);
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 709c9cc34369..019b3df9f1c6 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -81,7 +81,7 @@ static void bch_data_insert_keys(struct closure *cl)
 	if (ret == -ESRCH) {
 		op->replace_collision = true;
 	} else if (ret) {
-		op->error = -ENOMEM;
+		op->status = BLK_STS_RESOURCE;
 		op->insert_data_done = true;
 	}
 
@@ -178,17 +178,17 @@ static void bch_data_insert_endio(struct bio *bio)
 	struct closure *cl = bio->bi_private;
 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		/* TODO: We could try to recover from this. */
 		if (op->writeback)
-			op->error = bio->bi_error;
+			op->status = bio->bi_status;
 		else if (!op->replace)
 			set_closure_fn(cl, bch_data_insert_error, op->wq);
 		else
 			set_closure_fn(cl, NULL, NULL);
 	}
 
-	bch_bbio_endio(op->c, bio, bio->bi_error, "writing data to cache");
+	bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
 }
 
 static void bch_data_insert_start(struct closure *cl)
@@ -488,15 +488,15 @@ static void bch_cache_read_endio(struct bio *bio)
 	 * from the backing device.
 	 */
 
-	if (bio->bi_error)
-		s->iop.error = bio->bi_error;
+	if (bio->bi_status)
+		s->iop.status = bio->bi_status;
 	else if (!KEY_DIRTY(&b->key) &&
 		 ptr_stale(s->iop.c, &b->key, 0)) {
 		atomic_long_inc(&s->iop.c->cache_read_races);
-		s->iop.error = -EINTR;
+		s->iop.status = BLK_STS_IOERR;
 	}
 
-	bch_bbio_endio(s->iop.c, bio, bio->bi_error, "reading from cache");
+	bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
 }
 
 /*
@@ -593,9 +593,9 @@ static void request_endio(struct bio *bio)
 {
 	struct closure *cl = bio->bi_private;
 
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		struct search *s = container_of(cl, struct search, cl);
-		s->iop.error = bio->bi_error;
+		s->iop.status = bio->bi_status;
 		/* Only cache read errors are recoverable */
 		s->recoverable = false;
 	}
@@ -611,7 +611,7 @@ static void bio_complete(struct search *s)
 				    &s->d->disk->part0, s->start_time);
 
 		trace_bcache_request_end(s->d, s->orig_bio);
-		s->orig_bio->bi_error = s->iop.error;
+		s->orig_bio->bi_status = s->iop.status;
 		bio_endio(s->orig_bio);
 		s->orig_bio = NULL;
 	}
@@ -664,7 +664,7 @@ static inline struct search *search_alloc(struct bio *bio,
 	s->iop.inode		= d->id;
 	s->iop.write_point	= hash_long((unsigned long) current, 16);
 	s->iop.write_prio	= 0;
-	s->iop.error		= 0;
+	s->iop.status		= 0;
 	s->iop.flags		= 0;
 	s->iop.flush_journal	= op_is_flush(bio->bi_opf);
 	s->iop.wq		= bcache_wq;
@@ -707,7 +707,7 @@ static void cached_dev_read_error(struct closure *cl)
 		/* Retry from the backing device: */
 		trace_bcache_read_retry(s->orig_bio);
 
-		s->iop.error = 0;
+		s->iop.status = 0;
 		do_bio_hook(s, s->orig_bio);
 
 		/* XXX: invalidate cache */
@@ -767,7 +767,7 @@ static void cached_dev_read_done_bh(struct closure *cl)
 			   !s->cache_miss, s->iop.bypass);
 	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);
 
-	if (s->iop.error)
+	if (s->iop.status)
 		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
 	else if (s->iop.bio || verify(dc, &s->bio.bio))
 		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index 1ff36875c2b3..7689176951ce 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -10,7 +10,7 @@ struct data_insert_op {
 	unsigned		inode;
 	uint16_t		write_point;
 	uint16_t		write_prio;
-	short			error;
+	blk_status_t		status;
 
 	union {
 		uint16_t	flags;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index e57353e39168..fbc4f5412dec 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -271,7 +271,7 @@ static void write_super_endio(struct bio *bio)
 {
 	struct cache *ca = bio->bi_private;
 
-	bch_count_io_errors(ca, bio->bi_error, "writing superblock");
+	bch_count_io_errors(ca, bio->bi_status, "writing superblock");
 	closure_put(&ca->set->sb_write);
 }
 
@@ -321,7 +321,7 @@ static void uuid_endio(struct bio *bio)
 	struct closure *cl = bio->bi_private;
 	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
 
-	cache_set_err_on(bio->bi_error, c, "accessing uuids");
+	cache_set_err_on(bio->bi_status, c, "accessing uuids");
 	bch_bbio_free(bio, c);
 	closure_put(cl);
 }
@@ -494,7 +494,7 @@ static void prio_endio(struct bio *bio)
 {
 	struct cache *ca = bio->bi_private;
 
-	cache_set_err_on(bio->bi_error, ca->set, "accessing priorities");
+	cache_set_err_on(bio->bi_status, ca->set, "accessing priorities");
 	bch_bbio_free(bio, ca->set);
 	closure_put(&ca->prio);
 }
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 6ac2e48b9235..42c66e76f05e 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -167,7 +167,7 @@ static void dirty_endio(struct bio *bio)
 	struct keybuf_key *w = bio->bi_private;
 	struct dirty_io *io = w->private;
 
-	if (bio->bi_error)
+	if (bio->bi_status)
 		SET_KEY_DIRTY(&w->key, false);
 
 	closure_put(&io->cl);
@@ -195,7 +195,7 @@ static void read_dirty_endio(struct bio *bio)
 	struct dirty_io *io = w->private;
 
 	bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
-			    bio->bi_error, "reading dirty data from cache");
+			    bio->bi_status, "reading dirty data from cache");
 
 	dirty_endio(bio);
 }
diff --git a/drivers/md/dm-bio-prison-v1.c b/drivers/md/dm-bio-prison-v1.c
index ae7da2c30a57..82d27384d31f 100644
--- a/drivers/md/dm-bio-prison-v1.c
+++ b/drivers/md/dm-bio-prison-v1.c
@@ -229,7 +229,7 @@ void dm_cell_release_no_holder(struct dm_bio_prison *prison,
 EXPORT_SYMBOL_GPL(dm_cell_release_no_holder);
 
 void dm_cell_error(struct dm_bio_prison *prison,
-		   struct dm_bio_prison_cell *cell, int error)
+		   struct dm_bio_prison_cell *cell, blk_status_t error)
 {
 	struct bio_list bios;
 	struct bio *bio;
@@ -238,7 +238,7 @@ void dm_cell_error(struct dm_bio_prison *prison,
 	dm_cell_release(prison, cell, &bios);
 
 	while ((bio = bio_list_pop(&bios))) {
-		bio->bi_error = error;
+		bio->bi_status = error;
 		bio_endio(bio);
 	}
 }
diff --git a/drivers/md/dm-bio-prison-v1.h b/drivers/md/dm-bio-prison-v1.h
index cddd4ac07e2c..cec52ac5e1ae 100644
--- a/drivers/md/dm-bio-prison-v1.h
+++ b/drivers/md/dm-bio-prison-v1.h
@@ -91,7 +91,7 @@ void dm_cell_release_no_holder(struct dm_bio_prison *prison,
 			       struct dm_bio_prison_cell *cell,
 			       struct bio_list *inmates);
 void dm_cell_error(struct dm_bio_prison *prison,
-		   struct dm_bio_prison_cell *cell, int error);
+		   struct dm_bio_prison_cell *cell, blk_status_t error);
 
 /*
  * Visits the cell and then releases. Guarantees no new inmates are
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index cd8139593ccd..0902d2fd1743 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -145,8 +145,8 @@ struct dm_buffer {
 	enum data_mode data_mode;
 	unsigned char list_mode;		/* LIST_* */
 	unsigned hold_count;
-	int read_error;
-	int write_error;
+	blk_status_t read_error;
+	blk_status_t write_error;
 	unsigned long state;
 	unsigned long last_accessed;
 	struct dm_bufio_client *c;
@@ -555,7 +555,7 @@ static void dmio_complete(unsigned long error, void *context)
 {
 	struct dm_buffer *b = context;
 
-	b->bio.bi_error = error ? -EIO : 0;
+	b->bio.bi_status = error ? BLK_STS_IOERR : 0;
 	b->bio.bi_end_io(&b->bio);
 }
 
@@ -588,7 +588,7 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
 
 	r = dm_io(&io_req, 1, &region, NULL);
 	if (r) {
-		b->bio.bi_error = r;
+		b->bio.bi_status = errno_to_blk_status(r);
 		end_io(&b->bio);
 	}
 }
@@ -596,7 +596,7 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
 static void inline_endio(struct bio *bio)
 {
 	bio_end_io_t *end_fn = bio->bi_private;
-	int error = bio->bi_error;
+	blk_status_t status = bio->bi_status;
 
 	/*
 	 * Reset the bio to free any attached resources
@@ -604,7 +604,7 @@ static void inline_endio(struct bio *bio)
 	 */
 	bio_reset(bio);
 
-	bio->bi_error = error;
+	bio->bi_status = status;
 	end_fn(bio);
 }
 
@@ -685,11 +685,12 @@ static void write_endio(struct bio *bio)
 {
 	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
 
-	b->write_error = bio->bi_error;
-	if (unlikely(bio->bi_error)) {
+	b->write_error = bio->bi_status;
+	if (unlikely(bio->bi_status)) {
 		struct dm_bufio_client *c = b->c;
-		int error = bio->bi_error;
-		(void)cmpxchg(&c->async_write_error, 0, error);
+
+		(void)cmpxchg(&c->async_write_error, 0,
+				blk_status_to_errno(bio->bi_status));
 	}
 
 	BUG_ON(!test_bit(B_WRITING, &b->state));
@@ -1063,7 +1064,7 @@ static void read_endio(struct bio *bio)
 {
 	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
 
-	b->read_error = bio->bi_error;
+	b->read_error = bio->bi_status;
 
 	BUG_ON(!test_bit(B_READING, &b->state));
 
@@ -1107,7 +1108,7 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
 	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
 
 	if (b->read_error) {
-		int error = b->read_error;
+		int error = blk_status_to_errno(b->read_error);
 
 		dm_bufio_release(b);
 
@@ -1257,7 +1258,8 @@ EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
  */
 int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
 {
-	int a, f;
+	blk_status_t a;
+	int f;
 	unsigned long buffers_processed = 0;
 	struct dm_buffer *b, *tmp;
 
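dm-bufio now stores blk_status_t in its buffers but keeps errnos in its public API, so translation happens at the edges: new_read() converts read_error back with blk_status_to_errno(), and write_endio() records the first asynchronous write error, as an errno, with a lock-free cmpxchg. Condensed sketch of that pattern from the hunks above:

    /* first error wins; later errors do not overwrite it */
    (void)cmpxchg(&c->async_write_error, 0,
                  blk_status_to_errno(bio->bi_status));

The cmpxchg against 0 means concurrent endio handlers can race safely without a lock; only the error seen first is preserved for the caller.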
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index c48612e6d525..c5ea03fc7ee1 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -119,7 +119,7 @@ static void iot_io_end(struct io_tracker *iot, sector_t len)
  */
 struct continuation {
 	struct work_struct ws;
-	int input;
+	blk_status_t input;
 };
 
 static inline void init_continuation(struct continuation *k,
@@ -145,7 +145,7 @@ struct batcher {
 	/*
 	 * The operation that everyone is waiting for.
 	 */
-	int (*commit_op)(void *context);
+	blk_status_t (*commit_op)(void *context);
 	void *commit_context;
 
 	/*
@@ -171,8 +171,7 @@ struct batcher {
 static void __commit(struct work_struct *_ws)
 {
 	struct batcher *b = container_of(_ws, struct batcher, commit_work);
-
-	int r;
+	blk_status_t r;
 	unsigned long flags;
 	struct list_head work_items;
 	struct work_struct *ws, *tmp;
@@ -205,7 +204,7 @@ static void __commit(struct work_struct *_ws)
 
 	while ((bio = bio_list_pop(&bios))) {
 		if (r) {
-			bio->bi_error = r;
+			bio->bi_status = r;
 			bio_endio(bio);
 		} else
 			b->issue_op(bio, b->issue_context);
@@ -213,7 +212,7 @@ static void __commit(struct work_struct *_ws)
 }
 
 static void batcher_init(struct batcher *b,
-			 int (*commit_op)(void *),
+			 blk_status_t (*commit_op)(void *),
 			 void *commit_context,
 			 void (*issue_op)(struct bio *bio, void *),
 			 void *issue_context,
@@ -955,7 +954,7 @@ static void writethrough_endio(struct bio *bio)
 
 	dm_unhook_bio(&pb->hook_info, bio);
 
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		bio_endio(bio);
 		return;
 	}
@@ -1220,7 +1219,7 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
 	struct dm_cache_migration *mg = container_of(context, struct dm_cache_migration, k);
 
 	if (read_err || write_err)
-		mg->k.input = -EIO;
+		mg->k.input = BLK_STS_IOERR;
 
 	queue_continuation(mg->cache->wq, &mg->k);
 }
@@ -1266,8 +1265,8 @@ static void overwrite_endio(struct bio *bio)
 
 	dm_unhook_bio(&pb->hook_info, bio);
 
-	if (bio->bi_error)
-		mg->k.input = bio->bi_error;
+	if (bio->bi_status)
+		mg->k.input = bio->bi_status;
 
 	queue_continuation(mg->cache->wq, &mg->k);
 }
@@ -1323,8 +1322,10 @@ static void mg_complete(struct dm_cache_migration *mg, bool success)
 	if (mg->overwrite_bio) {
 		if (success)
 			force_set_dirty(cache, cblock);
+		else if (mg->k.input)
+			mg->overwrite_bio->bi_status = mg->k.input;
 		else
-			mg->overwrite_bio->bi_error = (mg->k.input ? : -EIO);
+			mg->overwrite_bio->bi_status = BLK_STS_IOERR;
 		bio_endio(mg->overwrite_bio);
 	} else {
 		if (success)
@@ -1504,7 +1505,7 @@ static void mg_copy(struct work_struct *ws)
 		r = copy(mg, is_policy_promote);
 		if (r) {
 			DMERR_LIMIT("%s: migration copy failed", cache_device_name(cache));
			mg->k.input = BLK_STS_IOERR;
 			mg_complete(mg, false);
 		}
 	}
@@ -1907,12 +1908,12 @@ static int commit(struct cache *cache, bool clean_shutdown)
 /*
  * Used by the batcher.
  */
-static int commit_op(void *context)
+static blk_status_t commit_op(void *context)
 {
 	struct cache *cache = context;
 
 	if (dm_cache_changed_this_transaction(cache->cmd))
-		return commit(cache, false);
+		return errno_to_blk_status(commit(cache, false));
 
 	return 0;
 }
@@ -2018,7 +2019,7 @@ static void requeue_deferred_bios(struct cache *cache)
 	bio_list_init(&cache->deferred_bios);
 
 	while ((bio = bio_list_pop(&bios))) {
-		bio->bi_error = DM_ENDIO_REQUEUE;
+		bio->bi_status = BLK_STS_DM_REQUEUE;
 		bio_endio(bio);
 	}
 }
@@ -2820,7 +2821,8 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
 	return r;
 }
 
-static int cache_end_io(struct dm_target *ti, struct bio *bio, int *error)
+static int cache_end_io(struct dm_target *ti, struct bio *bio,
+			blk_status_t *error)
 {
 	struct cache *cache = ti->private;
 	unsigned long flags;
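The batcher's commit_op callback changes type rather than value: commit() still returns an errno, so commit_op() wraps it with errno_to_blk_status(), and __commit() can assign the result straight into bio->bi_status. A condensed sketch of the wrapping pattern (do_commit() is a stand-in for any errno-returning helper, not a real function):

    static blk_status_t example_commit_op(void *context)
    {
            int r = do_commit(context);     /* returns 0 or -errno */

            return errno_to_blk_status(r);  /* 0 maps to BLK_STS_OK */
    }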
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index f4b51809db21..586cef085c6a 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -71,7 +71,7 @@ struct dm_crypt_io {
 	struct convert_context ctx;
 
 	atomic_t io_pending;
-	int error;
+	blk_status_t error;
 	sector_t sector;
 
 	struct rb_node rb_node;
@@ -1292,7 +1292,7 @@ static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_bio)
 /*
  * Encrypt / decrypt data from one bio to another one (can be the same one)
  */
-static int crypt_convert(struct crypt_config *cc,
+static blk_status_t crypt_convert(struct crypt_config *cc,
 			 struct convert_context *ctx)
 {
 	unsigned int tag_offset = 0;
@@ -1343,13 +1343,13 @@ static int crypt_convert(struct crypt_config *cc,
 		 */
 		case -EBADMSG:
 			atomic_dec(&ctx->cc_pending);
-			return -EILSEQ;
+			return BLK_STS_PROTECTION;
 		/*
 		 * There was an error while processing the request.
 		 */
 		default:
 			atomic_dec(&ctx->cc_pending);
-			return -EIO;
+			return BLK_STS_IOERR;
 		}
 	}
 
@@ -1463,7 +1463,7 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->cc;
 	struct bio *base_bio = io->base_bio;
-	int error = io->error;
+	blk_status_t error = io->error;
 
 	if (!atomic_dec_and_test(&io->io_pending))
 		return;
@@ -1476,7 +1476,7 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
 	else
 		kfree(io->integrity_metadata);
 
-	base_bio->bi_error = error;
+	base_bio->bi_status = error;
 	bio_endio(base_bio);
 }
 
@@ -1502,7 +1502,7 @@ static void crypt_endio(struct bio *clone)
 	struct dm_crypt_io *io = clone->bi_private;
 	struct crypt_config *cc = io->cc;
 	unsigned rw = bio_data_dir(clone);
-	int error;
+	blk_status_t error;
 
 	/*
 	 * free the processed pages
@@ -1510,7 +1510,7 @@ static void crypt_endio(struct bio *clone)
 	if (rw == WRITE)
 		crypt_free_buffer_pages(cc, clone);
 
-	error = clone->bi_error;
+	error = clone->bi_status;
 	bio_put(clone);
 
 	if (rw == READ && !error) {
@@ -1570,7 +1570,7 @@ static void kcryptd_io_read_work(struct work_struct *work)
 
 	crypt_inc_pending(io);
 	if (kcryptd_io_read(io, GFP_NOIO))
-		io->error = -ENOMEM;
+		io->error = BLK_STS_RESOURCE;
 	crypt_dec_pending(io);
 }
 
@@ -1656,7 +1656,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
 	sector_t sector;
 	struct rb_node **rbp, *parent;
 
-	if (unlikely(io->error < 0)) {
+	if (unlikely(io->error)) {
 		crypt_free_buffer_pages(cc, clone);
 		bio_put(clone);
 		crypt_dec_pending(io);
@@ -1697,7 +1697,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 	struct bio *clone;
 	int crypt_finished;
 	sector_t sector = io->sector;
-	int r;
+	blk_status_t r;
 
 	/*
 	 * Prevent io from disappearing until this function completes.
@@ -1707,7 +1707,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 
 	clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
 	if (unlikely(!clone)) {
-		io->error = -EIO;
+		io->error = BLK_STS_IOERR;
 		goto dec;
 	}
 
@@ -1718,7 +1718,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 
 	crypt_inc_pending(io);
 	r = crypt_convert(cc, &io->ctx);
-	if (r < 0)
+	if (r)
 		io->error = r;
 	crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
 
@@ -1740,7 +1740,7 @@ static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
 static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->cc;
-	int r = 0;
+	blk_status_t r;
 
 	crypt_inc_pending(io);
 
@@ -1748,7 +1748,7 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 		 io->sector);
 
 	r = crypt_convert(cc, &io->ctx);
-	if (r < 0)
+	if (r)
 		io->error = r;
 
 	if (atomic_dec_and_test(&io->ctx.cc_pending))
@@ -1781,9 +1781,9 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
 	if (error == -EBADMSG) {
 		DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu",
 			    (unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq)));
-		io->error = -EILSEQ;
+		io->error = BLK_STS_PROTECTION;
 	} else if (error < 0)
-		io->error = -EIO;
+		io->error = BLK_STS_IOERR;
 
 	crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
 
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index c9539917a59b..3d04d5ce19d9 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -358,7 +358,8 @@ map_bio:
 	return DM_MAPIO_REMAPPED;
 }
 
-static int flakey_end_io(struct dm_target *ti, struct bio *bio, int *error)
+static int flakey_end_io(struct dm_target *ti, struct bio *bio,
+			 blk_status_t *error)
 {
 	struct flakey_c *fc = ti->private;
 	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
@@ -377,7 +378,7 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int *error)
 		 * Error read during the down_interval if drop_writes
 		 * and error_writes were not configured.
 		 */
-		*error = -EIO;
+		*error = BLK_STS_IOERR;
 	}
 }
 
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index ee78fb471229..ccc6ef4d00b9 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -246,7 +246,7 @@ struct dm_integrity_io {
 	unsigned metadata_offset;
 
 	atomic_t in_flight;
-	int bi_error;
+	blk_status_t bi_status;
 
 	struct completion *completion;
 
@@ -1114,8 +1114,8 @@ static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
 static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
 {
 	int r = dm_integrity_failed(ic);
-	if (unlikely(r) && !bio->bi_error)
-		bio->bi_error = r;
+	if (unlikely(r) && !bio->bi_status)
+		bio->bi_status = errno_to_blk_status(r);
 	bio_endio(bio);
 }
 
@@ -1123,7 +1123,7 @@ static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
 {
 	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
 
-	if (unlikely(dio->fua) && likely(!bio->bi_error) && likely(!dm_integrity_failed(ic)))
+	if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
 		submit_flush_bio(ic, dio);
 	else
 		do_endio(ic, bio);
@@ -1142,9 +1142,9 @@ static void dec_in_flight(struct dm_integrity_io *dio)
 
 		bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
 
-		if (unlikely(dio->bi_error) && !bio->bi_error)
-			bio->bi_error = dio->bi_error;
-		if (likely(!bio->bi_error) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
+		if (unlikely(dio->bi_status) && !bio->bi_status)
+			bio->bi_status = dio->bi_status;
+		if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
 			dio->range.logical_sector += dio->range.n_sectors;
 			bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
 			INIT_WORK(&dio->work, integrity_bio_wait);
@@ -1318,7 +1318,7 @@ skip_io:
 	dec_in_flight(dio);
 	return;
 error:
-	dio->bi_error = r;
+	dio->bi_status = errno_to_blk_status(r);
 	dec_in_flight(dio);
 }
 
@@ -1331,7 +1331,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
 	sector_t area, offset;
 
 	dio->ic = ic;
-	dio->bi_error = 0;
+	dio->bi_status = 0;
 
 	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
 		submit_flush_bio(ic, dio);
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 3702e502466d..c8f8f3004085 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -124,7 +124,7 @@ static void complete_io(struct io *io)
 	fn(error_bits, context);
 }
 
-static void dec_count(struct io *io, unsigned int region, int error)
+static void dec_count(struct io *io, unsigned int region, blk_status_t error)
 {
 	if (error)
 		set_bit(region, &io->error_bits);
@@ -137,9 +137,9 @@ static void endio(struct bio *bio)
 {
 	struct io *io;
 	unsigned region;
-	int error;
+	blk_status_t error;
 
-	if (bio->bi_error && bio_data_dir(bio) == READ)
+	if (bio->bi_status && bio_data_dir(bio) == READ)
 		zero_fill_bio(bio);
 
 	/*
@@ -147,7 +147,7 @@ static void endio(struct bio *bio)
 	 */
 	retrieve_io_and_region_from_bio(bio, &io, &region);
 
-	error = bio->bi_error;
+	error = bio->bi_status;
 	bio_put(bio);
 
 	dec_count(io, region, error);
@@ -319,7 +319,7 @@ static void do_region(int op, int op_flags, unsigned region,
 	if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES ||
 	     op == REQ_OP_WRITE_SAME) &&
 	    special_cmd_max_sectors == 0) {
-		dec_count(io, region, -EOPNOTSUPP);
+		dec_count(io, region, BLK_STS_NOTSUPP);
 		return;
 	}
 
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index cc57c7fa1268..a1da0eb58a93 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -150,10 +150,10 @@ static void log_end_io(struct bio *bio)
 {
 	struct log_writes_c *lc = bio->bi_private;
 
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		unsigned long flags;
 
-		DMERR("Error writing log block, error=%d", bio->bi_error);
+		DMERR("Error writing log block, error=%d", bio->bi_status);
 		spin_lock_irqsave(&lc->blocks_lock, flags);
 		lc->logging_enabled = false;
 		spin_unlock_irqrestore(&lc->blocks_lock, flags);
@@ -664,7 +664,8 @@ map_bio:
 	return DM_MAPIO_REMAPPED;
 }
 
-static int normal_end_io(struct dm_target *ti, struct bio *bio, int *error)
+static int normal_end_io(struct dm_target *ti, struct bio *bio,
+			 blk_status_t *error)
 {
 	struct log_writes_c *lc = ti->private;
 	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 39262e344ae1..a7d2e0840cc5 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -565,7 +565,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_mpath_io *mpio)
 	mpio->pgpath = pgpath;
 	mpio->nr_bytes = nr_bytes;
 
-	bio->bi_error = 0;
+	bio->bi_status = 0;
 	bio->bi_bdev = pgpath->path.dev->bdev;
 	bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
 
@@ -623,10 +623,10 @@ static void process_queued_bios(struct work_struct *work)
 		r = __multipath_map_bio(m, bio, get_mpio_from_bio(bio));
 		switch (r) {
 		case DM_MAPIO_KILL:
-			r = -EIO;
-			/*FALLTHRU*/
+			bio->bi_status = BLK_STS_IOERR;
+			bio_endio(bio);
 		case DM_MAPIO_REQUEUE:
-			bio->bi_error = r;
+			bio->bi_status = BLK_STS_DM_REQUEUE;
 			bio_endio(bio);
 			break;
 		case DM_MAPIO_REMAPPED:
@@ -1510,7 +1510,8 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
 	return r;
 }
 
-static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone, int *error)
+static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
+				blk_status_t *error)
 {
 	struct multipath *m = ti->private;
 	struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
@@ -1518,7 +1519,7 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone, int *error)
 	unsigned long flags;
 	int r = DM_ENDIO_DONE;
 
-	if (!*error || noretry_error(errno_to_blk_status(*error)))
+	if (!*error || noretry_error(*error))
 		goto done;
 
 	if (pgpath)
@@ -1527,7 +1528,7 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone, int *error)
 	if (atomic_read(&m->nr_valid_paths) == 0 &&
 	    !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
 		dm_report_EIO(m);
-		*error = -EIO;
+		*error = BLK_STS_IOERR;
 		goto done;
 	}
 
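BLK_STS_DM_REQUEUE makes the requeue request an explicit status instead of the old DM_ENDIO_REQUEUE value smuggled through bi_error: the dm core recognizes it in its own completion path and puts the bio back on a deferred list rather than failing it upward. A sketch of the consumer side (illustrative only; requeue_bio() is a stand-in name, and the real handling lives in dm core's completion code):

    if (bio->bi_status == BLK_STS_DM_REQUEUE) {
            /* do not complete; resubmit the bio later */
            requeue_bio(bio);               /* illustrative helper */
            return;
    }
    bio_endio(bio);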
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 77bcf50ce75f..0822e4a6f67d 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -490,9 +490,9 @@ static void hold_bio(struct mirror_set *ms, struct bio *bio)
 		 * If device is suspended, complete the bio.
 		 */
 		if (dm_noflush_suspending(ms->ti))
-			bio->bi_error = DM_ENDIO_REQUEUE;
+			bio->bi_status = BLK_STS_DM_REQUEUE;
 		else
-			bio->bi_error = -EIO;
+			bio->bi_status = BLK_STS_IOERR;
 
 		bio_endio(bio);
 		return;
@@ -626,7 +626,7 @@ static void write_callback(unsigned long error, void *context)
 	 * degrade the array.
 	 */
 	if (bio_op(bio) == REQ_OP_DISCARD) {
-		bio->bi_error = -EOPNOTSUPP;
+		bio->bi_status = BLK_STS_NOTSUPP;
 		bio_endio(bio);
 		return;
 	}
@@ -1236,7 +1236,8 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
 	return DM_MAPIO_REMAPPED;
 }
 
-static int mirror_end_io(struct dm_target *ti, struct bio *bio, int *error)
+static int mirror_end_io(struct dm_target *ti, struct bio *bio,
+			 blk_status_t *error)
 {
 	int rw = bio_data_dir(bio);
 	struct mirror_set *ms = (struct mirror_set *) ti->private;
@@ -1255,7 +1256,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int *error)
 		return DM_ENDIO_DONE;
 	}
 
-	if (*error == -EOPNOTSUPP)
+	if (*error == BLK_STS_NOTSUPP)
 		return DM_ENDIO_DONE;
 
 	if (bio->bi_opf & REQ_RAHEAD)
@@ -1277,7 +1278,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int *error)
 		bd = &bio_record->details;
 
 		dm_bio_restore(bd, bio);
-		bio->bi_error = 0;
+		bio->bi_status = 0;
 
 		queue_bio(ms, bio, rw);
 		return DM_ENDIO_INCOMPLETE;
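mirror_end_io() also shows the retry idiom under the new end_io signature: on a failed read the target restores the bio, clears bi_status, requeues it internally, and returns DM_ENDIO_INCOMPLETE so the dm core does not complete the original bio yet. Condensed from the hunk above:

    dm_bio_restore(bd, bio);
    bio->bi_status = 0;                     /* forget the failure */

    queue_bio(ms, bio, rw);                 /* retry from another mirror */
    return DM_ENDIO_INCOMPLETE;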
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 63402f8a38de..fafd5326e572 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -119,7 +119,7 @@ static void end_clone_bio(struct bio *clone)
 	struct dm_rq_target_io *tio = info->tio;
 	struct bio *bio = info->orig;
 	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
-	blk_status_t error = errno_to_blk_status(clone->bi_error);
+	blk_status_t error = clone->bi_status;
 
 	bio_put(clone);
 
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 79a845798e2f..1ba41048b438 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1590,7 +1590,7 @@ static void full_bio_end_io(struct bio *bio)
 {
 	void *callback_data = bio->bi_private;
 
-	dm_kcopyd_do_callback(callback_data, 0, bio->bi_error ? 1 : 0);
+	dm_kcopyd_do_callback(callback_data, 0, bio->bi_status ? 1 : 0);
 }
 
 static void start_full_bio(struct dm_snap_pending_exception *pe,
@@ -1851,7 +1851,8 @@ out_unlock:
 	return r;
 }
 
-static int snapshot_end_io(struct dm_target *ti, struct bio *bio, int *error)
+static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
+			   blk_status_t *error)
 {
 	struct dm_snapshot *s = ti->private;
 
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 49888bc2c909..11621a0af887 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -375,7 +375,8 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
 	}
 }
 
-static int stripe_end_io(struct dm_target *ti, struct bio *bio, int *error)
+static int stripe_end_io(struct dm_target *ti, struct bio *bio,
+			 blk_status_t *error)
 {
 	unsigned i;
 	char major_minor[16];
@@ -387,7 +388,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int *error)
 	if (bio->bi_opf & REQ_RAHEAD)
 		return DM_ENDIO_DONE;
 
-	if (*error == -EOPNOTSUPP)
+	if (*error == BLK_STS_NOTSUPP)
 		return DM_ENDIO_DONE;
 
 	memset(major_minor, 0, sizeof(major_minor));
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 22b1a64c44b7..3490b300cbff 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c | |||
@@ -383,8 +383,8 @@ static void end_discard(struct discard_op *op, int r) | |||
383 | * Even if r is set, there could be sub discards in flight that we | 383 | * Even if r is set, there could be sub discards in flight that we |
384 | * need to wait for. | 384 | * need to wait for. |
385 | */ | 385 | */ |
386 | if (r && !op->parent_bio->bi_error) | 386 | if (r && !op->parent_bio->bi_status) |
387 | op->parent_bio->bi_error = r; | 387 | op->parent_bio->bi_status = errno_to_blk_status(r); |
388 | bio_endio(op->parent_bio); | 388 | bio_endio(op->parent_bio); |
389 | } | 389 | } |
390 | 390 | ||
@@ -450,22 +450,20 @@ static void cell_release_no_holder(struct pool *pool, | |||
450 | } | 450 | } |
451 | 451 | ||
452 | static void cell_error_with_code(struct pool *pool, | 452 | static void cell_error_with_code(struct pool *pool, |
453 | struct dm_bio_prison_cell *cell, int error_code) | 453 | struct dm_bio_prison_cell *cell, blk_status_t error_code) |
454 | { | 454 | { |
455 | dm_cell_error(pool->prison, cell, error_code); | 455 | dm_cell_error(pool->prison, cell, error_code); |
456 | dm_bio_prison_free_cell(pool->prison, cell); | 456 | dm_bio_prison_free_cell(pool->prison, cell); |
457 | } | 457 | } |
458 | 458 | ||
459 | static int get_pool_io_error_code(struct pool *pool) | 459 | static blk_status_t get_pool_io_error_code(struct pool *pool) |
460 | { | 460 | { |
461 | return pool->out_of_data_space ? -ENOSPC : -EIO; | 461 | return pool->out_of_data_space ? BLK_STS_NOSPC : BLK_STS_IOERR; |
462 | } | 462 | } |
463 | 463 | ||
464 | static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell) | 464 | static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell) |
465 | { | 465 | { |
466 | int error = get_pool_io_error_code(pool); | 466 | cell_error_with_code(pool, cell, get_pool_io_error_code(pool)); |
467 | |||
468 | cell_error_with_code(pool, cell, error); | ||
469 | } | 467 | } |
470 | 468 | ||
471 | static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell) | 469 | static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell) |
@@ -475,7 +473,7 @@ static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell) | |||
475 | 473 | ||
476 | static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell) | 474 | static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell) |
477 | { | 475 | { |
478 | cell_error_with_code(pool, cell, DM_ENDIO_REQUEUE); | 476 | cell_error_with_code(pool, cell, BLK_STS_DM_REQUEUE); |
479 | } | 477 | } |
480 | 478 | ||
481 | /*----------------------------------------------------------------*/ | 479 | /*----------------------------------------------------------------*/ |
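
Two small points in the hunks above: every pool error path now funnels through one helper that picks between BLK_STS_NOSPC and BLK_STS_IOERR, and cell_requeue() stops smuggling the positive DM_ENDIO_REQUEUE constant through an errno-typed field now that a dedicated BLK_STS_DM_REQUEUE code exists. A toy model of that funnel, with fail_cell() standing in for cell_error_with_code()/dm_cell_error() and illustrative constant values:

    #include <stdio.h>

    typedef unsigned int blk_status_t;

    #define BLK_STS_NOSPC      ((blk_status_t)3)
    #define BLK_STS_IOERR      ((blk_status_t)10)
    #define BLK_STS_DM_REQUEUE ((blk_status_t)11)   /* dm-internal requeue code */

    struct pool { int out_of_data_space; };

    /* Single chokepoint that picks the status for every pool error path */
    static blk_status_t get_pool_io_error_code(const struct pool *pool)
    {
    	return pool->out_of_data_space ? BLK_STS_NOSPC : BLK_STS_IOERR;
    }

    /* Stands in for failing all bios imprisoned in a cell */
    static void fail_cell(blk_status_t code)
    {
    	printf("failing prisoner bios with status %u\n", code);
    }

    int main(void)
    {
    	struct pool p = { .out_of_data_space = 1 };

    	fail_cell(get_pool_io_error_code(&p));   /* cell_error() */
    	fail_cell(BLK_STS_DM_REQUEUE);           /* cell_requeue() */
    	return 0;
    }
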
@@ -555,17 +553,18 @@ static void __merge_bio_list(struct bio_list *bios, struct bio_list *master) | |||
555 | bio_list_init(master); | 553 | bio_list_init(master); |
556 | } | 554 | } |
557 | 555 | ||
558 | static void error_bio_list(struct bio_list *bios, int error) | 556 | static void error_bio_list(struct bio_list *bios, blk_status_t error) |
559 | { | 557 | { |
560 | struct bio *bio; | 558 | struct bio *bio; |
561 | 559 | ||
562 | while ((bio = bio_list_pop(bios))) { | 560 | while ((bio = bio_list_pop(bios))) { |
563 | bio->bi_error = error; | 561 | bio->bi_status = error; |
564 | bio_endio(bio); | 562 | bio_endio(bio); |
565 | } | 563 | } |
566 | } | 564 | } |
567 | 565 | ||
568 | static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master, int error) | 566 | static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master, |
567 | blk_status_t error) | ||
569 | { | 568 | { |
570 | struct bio_list bios; | 569 | struct bio_list bios; |
571 | unsigned long flags; | 570 | unsigned long flags; |
@@ -608,11 +607,11 @@ static void requeue_io(struct thin_c *tc) | |||
608 | __merge_bio_list(&bios, &tc->retry_on_resume_list); | 607 | __merge_bio_list(&bios, &tc->retry_on_resume_list); |
609 | spin_unlock_irqrestore(&tc->lock, flags); | 608 | spin_unlock_irqrestore(&tc->lock, flags); |
610 | 609 | ||
611 | error_bio_list(&bios, DM_ENDIO_REQUEUE); | 610 | error_bio_list(&bios, BLK_STS_DM_REQUEUE); |
612 | requeue_deferred_cells(tc); | 611 | requeue_deferred_cells(tc); |
613 | } | 612 | } |
614 | 613 | ||
615 | static void error_retry_list_with_code(struct pool *pool, int error) | 614 | static void error_retry_list_with_code(struct pool *pool, blk_status_t error) |
616 | { | 615 | { |
617 | struct thin_c *tc; | 616 | struct thin_c *tc; |
618 | 617 | ||
@@ -624,9 +623,7 @@ static void error_retry_list_with_code(struct pool *pool, int error) | |||
624 | 623 | ||
625 | static void error_retry_list(struct pool *pool) | 624 | static void error_retry_list(struct pool *pool) |
626 | { | 625 | { |
627 | int error = get_pool_io_error_code(pool); | 626 | error_retry_list_with_code(pool, get_pool_io_error_code(pool)); |
628 | |||
629 | error_retry_list_with_code(pool, error); | ||
630 | } | 627 | } |
631 | 628 | ||
632 | /* | 629 | /* |
@@ -774,7 +771,7 @@ struct dm_thin_new_mapping { | |||
774 | */ | 771 | */ |
775 | atomic_t prepare_actions; | 772 | atomic_t prepare_actions; |
776 | 773 | ||
777 | int err; | 774 | blk_status_t status; |
778 | struct thin_c *tc; | 775 | struct thin_c *tc; |
779 | dm_block_t virt_begin, virt_end; | 776 | dm_block_t virt_begin, virt_end; |
780 | dm_block_t data_block; | 777 | dm_block_t data_block; |
@@ -814,7 +811,7 @@ static void copy_complete(int read_err, unsigned long write_err, void *context) | |||
814 | { | 811 | { |
815 | struct dm_thin_new_mapping *m = context; | 812 | struct dm_thin_new_mapping *m = context; |
816 | 813 | ||
817 | m->err = read_err || write_err ? -EIO : 0; | 814 | m->status = read_err || write_err ? BLK_STS_IOERR : 0; |
818 | complete_mapping_preparation(m); | 815 | complete_mapping_preparation(m); |
819 | } | 816 | } |
820 | 817 | ||
@@ -825,7 +822,7 @@ static void overwrite_endio(struct bio *bio) | |||
825 | 822 | ||
826 | bio->bi_end_io = m->saved_bi_end_io; | 823 | bio->bi_end_io = m->saved_bi_end_io; |
827 | 824 | ||
828 | m->err = bio->bi_error; | 825 | m->status = bio->bi_status; |
829 | complete_mapping_preparation(m); | 826 | complete_mapping_preparation(m); |
830 | } | 827 | } |
831 | 828 | ||
@@ -925,7 +922,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m) | |||
925 | struct bio *bio = m->bio; | 922 | struct bio *bio = m->bio; |
926 | int r; | 923 | int r; |
927 | 924 | ||
928 | if (m->err) { | 925 | if (m->status) { |
929 | cell_error(pool, m->cell); | 926 | cell_error(pool, m->cell); |
930 | goto out; | 927 | goto out; |
931 | } | 928 | } |
@@ -1495,7 +1492,7 @@ static void retry_on_resume(struct bio *bio) | |||
1495 | spin_unlock_irqrestore(&tc->lock, flags); | 1492 | spin_unlock_irqrestore(&tc->lock, flags); |
1496 | } | 1493 | } |
1497 | 1494 | ||
1498 | static int should_error_unserviceable_bio(struct pool *pool) | 1495 | static blk_status_t should_error_unserviceable_bio(struct pool *pool) |
1499 | { | 1496 | { |
1500 | enum pool_mode m = get_pool_mode(pool); | 1497 | enum pool_mode m = get_pool_mode(pool); |
1501 | 1498 | ||
@@ -1503,27 +1500,27 @@ static int should_error_unserviceable_bio(struct pool *pool) | |||
1503 | case PM_WRITE: | 1500 | case PM_WRITE: |
1504 | /* Shouldn't get here */ | 1501 | /* Shouldn't get here */ |
1505 | DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode"); | 1502 | DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode"); |
1506 | return -EIO; | 1503 | return BLK_STS_IOERR; |
1507 | 1504 | ||
1508 | case PM_OUT_OF_DATA_SPACE: | 1505 | case PM_OUT_OF_DATA_SPACE: |
1509 | return pool->pf.error_if_no_space ? -ENOSPC : 0; | 1506 | return pool->pf.error_if_no_space ? BLK_STS_NOSPC : 0; |
1510 | 1507 | ||
1511 | case PM_READ_ONLY: | 1508 | case PM_READ_ONLY: |
1512 | case PM_FAIL: | 1509 | case PM_FAIL: |
1513 | return -EIO; | 1510 | return BLK_STS_IOERR; |
1514 | default: | 1511 | default: |
1515 | /* Shouldn't get here */ | 1512 | /* Shouldn't get here */ |
1516 | DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode"); | 1513 | DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode"); |
1517 | return -EIO; | 1514 | return BLK_STS_IOERR; |
1518 | } | 1515 | } |
1519 | } | 1516 | } |
1520 | 1517 | ||
1521 | static void handle_unserviceable_bio(struct pool *pool, struct bio *bio) | 1518 | static void handle_unserviceable_bio(struct pool *pool, struct bio *bio) |
1522 | { | 1519 | { |
1523 | int error = should_error_unserviceable_bio(pool); | 1520 | blk_status_t error = should_error_unserviceable_bio(pool); |
1524 | 1521 | ||
1525 | if (error) { | 1522 | if (error) { |
1526 | bio->bi_error = error; | 1523 | bio->bi_status = error; |
1527 | bio_endio(bio); | 1524 | bio_endio(bio); |
1528 | } else | 1525 | } else |
1529 | retry_on_resume(bio); | 1526 | retry_on_resume(bio); |
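
The retyping of should_error_unserviceable_bio() works because BLK_STS_OK is 0: callers keep testing the result for truth, failing the bio on a nonzero status and parking it for retry otherwise. A compact model of that decision, with the pool modes reduced to an enum and the status values illustrative:

    #include <stdio.h>

    typedef unsigned int blk_status_t;

    #define BLK_STS_OK    ((blk_status_t)0)
    #define BLK_STS_NOSPC ((blk_status_t)3)
    #define BLK_STS_IOERR ((blk_status_t)10)

    enum pool_mode { PM_WRITE, PM_OUT_OF_DATA_SPACE, PM_READ_ONLY, PM_FAIL };

    static blk_status_t should_error(enum pool_mode mode, int error_if_no_space)
    {
    	switch (mode) {
    	case PM_OUT_OF_DATA_SPACE:
    		/* BLK_STS_OK (0) now plays the old "return 0" role */
    		return error_if_no_space ? BLK_STS_NOSPC : BLK_STS_OK;
    	case PM_WRITE:           /* an unserviceable bio here is a bug */
    	case PM_READ_ONLY:
    	case PM_FAIL:
    	default:
    		return BLK_STS_IOERR;
    	}
    }

    int main(void)
    {
    	blk_status_t st = should_error(PM_OUT_OF_DATA_SPACE, 0);

    	if (st)
    		printf("bio_endio() with bi_status %u\n", st);
    	else
    		printf("retry_on_resume()\n");
    	return 0;
    }
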
@@ -1533,7 +1530,7 @@ static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *c | |||
1533 | { | 1530 | { |
1534 | struct bio *bio; | 1531 | struct bio *bio; |
1535 | struct bio_list bios; | 1532 | struct bio_list bios; |
1536 | int error; | 1533 | blk_status_t error; |
1537 | 1534 | ||
1538 | error = should_error_unserviceable_bio(pool); | 1535 | error = should_error_unserviceable_bio(pool); |
1539 | if (error) { | 1536 | if (error) { |
@@ -2071,7 +2068,8 @@ static void process_thin_deferred_bios(struct thin_c *tc) | |||
2071 | unsigned count = 0; | 2068 | unsigned count = 0; |
2072 | 2069 | ||
2073 | if (tc->requeue_mode) { | 2070 | if (tc->requeue_mode) { |
2074 | error_thin_bio_list(tc, &tc->deferred_bio_list, DM_ENDIO_REQUEUE); | 2071 | error_thin_bio_list(tc, &tc->deferred_bio_list, |
2072 | BLK_STS_DM_REQUEUE); | ||
2075 | return; | 2073 | return; |
2076 | } | 2074 | } |
2077 | 2075 | ||
@@ -2322,7 +2320,7 @@ static void do_no_space_timeout(struct work_struct *ws) | |||
2322 | if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) { | 2320 | if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) { |
2323 | pool->pf.error_if_no_space = true; | 2321 | pool->pf.error_if_no_space = true; |
2324 | notify_of_pool_mode_change_to_oods(pool); | 2322 | notify_of_pool_mode_change_to_oods(pool); |
2325 | error_retry_list_with_code(pool, -ENOSPC); | 2323 | error_retry_list_with_code(pool, BLK_STS_NOSPC); |
2326 | } | 2324 | } |
2327 | } | 2325 | } |
2328 | 2326 | ||
@@ -2624,7 +2622,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio) | |||
2624 | thin_hook_bio(tc, bio); | 2622 | thin_hook_bio(tc, bio); |
2625 | 2623 | ||
2626 | if (tc->requeue_mode) { | 2624 | if (tc->requeue_mode) { |
2627 | bio->bi_error = DM_ENDIO_REQUEUE; | 2625 | bio->bi_status = BLK_STS_DM_REQUEUE; |
2628 | bio_endio(bio); | 2626 | bio_endio(bio); |
2629 | return DM_MAPIO_SUBMITTED; | 2627 | return DM_MAPIO_SUBMITTED; |
2630 | } | 2628 | } |
@@ -4177,7 +4175,8 @@ static int thin_map(struct dm_target *ti, struct bio *bio) | |||
4177 | return thin_bio_map(ti, bio); | 4175 | return thin_bio_map(ti, bio); |
4178 | } | 4176 | } |
4179 | 4177 | ||
4180 | static int thin_endio(struct dm_target *ti, struct bio *bio, int *err) | 4178 | static int thin_endio(struct dm_target *ti, struct bio *bio, |
4179 | blk_status_t *err) | ||
4181 | { | 4180 | { |
4182 | unsigned long flags; | 4181 | unsigned long flags; |
4183 | struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); | 4182 | struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); |
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index 9ed55468b98b..2dca66eb67e1 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c | |||
@@ -538,13 +538,13 @@ static int verity_verify_io(struct dm_verity_io *io) | |||
538 | /* | 538 | /* |
539 | * End one "io" structure with a given error. | 539 | * End one "io" structure with a given error. |
540 | */ | 540 | */ |
541 | static void verity_finish_io(struct dm_verity_io *io, int error) | 541 | static void verity_finish_io(struct dm_verity_io *io, blk_status_t status) |
542 | { | 542 | { |
543 | struct dm_verity *v = io->v; | 543 | struct dm_verity *v = io->v; |
544 | struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size); | 544 | struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size); |
545 | 545 | ||
546 | bio->bi_end_io = io->orig_bi_end_io; | 546 | bio->bi_end_io = io->orig_bi_end_io; |
547 | bio->bi_error = error; | 547 | bio->bi_status = status; |
548 | 548 | ||
549 | verity_fec_finish_io(io); | 549 | verity_fec_finish_io(io); |
550 | 550 | ||
@@ -555,15 +555,15 @@ static void verity_work(struct work_struct *w) | |||
555 | { | 555 | { |
556 | struct dm_verity_io *io = container_of(w, struct dm_verity_io, work); | 556 | struct dm_verity_io *io = container_of(w, struct dm_verity_io, work); |
557 | 557 | ||
558 | verity_finish_io(io, verity_verify_io(io)); | 558 | verity_finish_io(io, errno_to_blk_status(verity_verify_io(io))); |
559 | } | 559 | } |
560 | 560 | ||
561 | static void verity_end_io(struct bio *bio) | 561 | static void verity_end_io(struct bio *bio) |
562 | { | 562 | { |
563 | struct dm_verity_io *io = bio->bi_private; | 563 | struct dm_verity_io *io = bio->bi_private; |
564 | 564 | ||
565 | if (bio->bi_error && !verity_fec_is_enabled(io->v)) { | 565 | if (bio->bi_status && !verity_fec_is_enabled(io->v)) { |
566 | verity_finish_io(io, bio->bi_error); | 566 | verity_finish_io(io, bio->bi_status); |
567 | return; | 567 | return; |
568 | } | 568 | } |
569 | 569 | ||
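
dm-verity intercepts completion by stashing the submitter's bi_end_io and restoring it before ending the bio; with the retyping, the int errno returned by verity_verify_io() is translated exactly once, in verity_work(), via errno_to_blk_status(). A toy version of the stash-and-restore pattern; struct toy_io, finish_io() and the constant values are inventions for illustration:

    #include <stdio.h>

    typedef unsigned int blk_status_t;

    #define BLK_STS_OK    ((blk_status_t)0)
    #define BLK_STS_IOERR ((blk_status_t)10)

    struct bio;
    typedef void (*bio_end_io_t)(struct bio *);

    struct bio {
    	blk_status_t bi_status;
    	bio_end_io_t bi_end_io;
    };

    struct toy_io {
    	struct bio   *bio;
    	bio_end_io_t  orig_bi_end_io;   /* stashed at map time */
    };

    static void submitter_end_io(struct bio *bio)
    {
    	printf("submitter sees bi_status %u\n", bio->bi_status);
    }

    /* verity_finish_io()-style completion: restore, stamp, complete */
    static void finish_io(struct toy_io *io, blk_status_t status)
    {
    	struct bio *bio = io->bio;

    	bio->bi_end_io = io->orig_bi_end_io;  /* undo the interception */
    	bio->bi_status = status;
    	bio->bi_end_io(bio);                  /* stands in for bio_endio() */
    }

    int main(void)
    {
    	struct bio bio = { BLK_STS_OK, submitter_end_io };
    	struct toy_io io = { &bio, submitter_end_io };

    	finish_io(&io, BLK_STS_IOERR);        /* e.g. hash mismatch, no FEC */
    	return 0;
    }
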
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 7a7047211c64..f38f9dd5cbdd 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -63,7 +63,7 @@ static struct workqueue_struct *deferred_remove_workqueue; | |||
63 | */ | 63 | */ |
64 | struct dm_io { | 64 | struct dm_io { |
65 | struct mapped_device *md; | 65 | struct mapped_device *md; |
66 | int error; | 66 | blk_status_t status; |
67 | atomic_t io_count; | 67 | atomic_t io_count; |
68 | struct bio *bio; | 68 | struct bio *bio; |
69 | unsigned long start_time; | 69 | unsigned long start_time; |
@@ -768,23 +768,24 @@ static int __noflush_suspending(struct mapped_device *md) | |||
768 | * Decrements the number of outstanding ios that a bio has been | 768 | * Decrements the number of outstanding ios that a bio has been |
769 | * cloned into, completing the original io if necc. | 769 | * cloned into, completing the original io if necc. |
770 | */ | 770 | */ |
771 | static void dec_pending(struct dm_io *io, int error) | 771 | static void dec_pending(struct dm_io *io, blk_status_t error) |
772 | { | 772 | { |
773 | unsigned long flags; | 773 | unsigned long flags; |
774 | int io_error; | 774 | blk_status_t io_error; |
775 | struct bio *bio; | 775 | struct bio *bio; |
776 | struct mapped_device *md = io->md; | 776 | struct mapped_device *md = io->md; |
777 | 777 | ||
778 | /* Push-back supersedes any I/O errors */ | 778 | /* Push-back supersedes any I/O errors */ |
779 | if (unlikely(error)) { | 779 | if (unlikely(error)) { |
780 | spin_lock_irqsave(&io->endio_lock, flags); | 780 | spin_lock_irqsave(&io->endio_lock, flags); |
781 | if (!(io->error > 0 && __noflush_suspending(md))) | 781 | if (!(io->status == BLK_STS_DM_REQUEUE && |
782 | io->error = error; | 782 | __noflush_suspending(md))) |
783 | io->status = error; | ||
783 | spin_unlock_irqrestore(&io->endio_lock, flags); | 784 | spin_unlock_irqrestore(&io->endio_lock, flags); |
784 | } | 785 | } |
785 | 786 | ||
786 | if (atomic_dec_and_test(&io->io_count)) { | 787 | if (atomic_dec_and_test(&io->io_count)) { |
787 | if (io->error == DM_ENDIO_REQUEUE) { | 788 | if (io->status == BLK_STS_DM_REQUEUE) { |
788 | /* | 789 | /* |
789 | * Target requested pushing back the I/O. | 790 | * Target requested pushing back the I/O. |
790 | */ | 791 | */ |
@@ -793,16 +794,16 @@ static void dec_pending(struct dm_io *io, int error) | |||
793 | bio_list_add_head(&md->deferred, io->bio); | 794 | bio_list_add_head(&md->deferred, io->bio); |
794 | else | 795 | else |
795 | /* noflush suspend was interrupted. */ | 796 | /* noflush suspend was interrupted. */ |
796 | io->error = -EIO; | 797 | io->status = BLK_STS_IOERR; |
797 | spin_unlock_irqrestore(&md->deferred_lock, flags); | 798 | spin_unlock_irqrestore(&md->deferred_lock, flags); |
798 | } | 799 | } |
799 | 800 | ||
800 | io_error = io->error; | 801 | io_error = io->status; |
801 | bio = io->bio; | 802 | bio = io->bio; |
802 | end_io_acct(io); | 803 | end_io_acct(io); |
803 | free_io(md, io); | 804 | free_io(md, io); |
804 | 805 | ||
805 | if (io_error == DM_ENDIO_REQUEUE) | 806 | if (io_error == BLK_STS_DM_REQUEUE) |
806 | return; | 807 | return; |
807 | 808 | ||
808 | if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) { | 809 | if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) { |
@@ -814,7 +815,7 @@ static void dec_pending(struct dm_io *io, int error) | |||
814 | queue_io(md, bio); | 815 | queue_io(md, bio); |
815 | } else { | 816 | } else { |
816 | /* done with normal IO or empty flush */ | 817 | /* done with normal IO or empty flush */ |
817 | bio->bi_error = io_error; | 818 | bio->bi_status = io_error; |
818 | bio_endio(bio); | 819 | bio_endio(bio); |
819 | } | 820 | } |
820 | } | 821 | } |
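
dec_pending() used to tell a genuine errno apart from a push-back request by sign: io->error > 0 could only mean DM_ENDIO_REQUEUE. blk_status_t is unsigned, so the test becomes an explicit comparison with BLK_STS_DM_REQUEUE, keeping the same two rules as before: the requeue status must not be clobbered by a later error while a noflush suspend is in progress, and it degrades to BLK_STS_IOERR if the suspend was interrupted. A single-threaded model of that logic, with locking and atomics elided and constant values illustrative:

    #include <stdio.h>

    typedef unsigned int blk_status_t;

    #define BLK_STS_OK         ((blk_status_t)0)
    #define BLK_STS_IOERR      ((blk_status_t)10)
    #define BLK_STS_DM_REQUEUE ((blk_status_t)11)

    struct toy_io {
    	blk_status_t status;
    	int io_count;             /* atomic_t in the kernel */
    	int noflush_suspending;
    };

    static void dec_pending_like(struct toy_io *io, blk_status_t error)
    {
    	/* Push-back supersedes any I/O error, but only while a noflush
    	 * suspend is in progress (spinlock elided in this model). */
    	if (error &&
    	    !(io->status == BLK_STS_DM_REQUEUE && io->noflush_suspending))
    		io->status = error;

    	if (--io->io_count)
    		return;

    	if (io->status == BLK_STS_DM_REQUEUE) {
    		if (io->noflush_suspending) {
    			printf("defer bio until resume\n");
    			return;
    		}
    		io->status = BLK_STS_IOERR;   /* suspend was interrupted */
    	}
    	printf("bio_endio() with bi_status %u\n", io->status);
    }

    int main(void)
    {
    	struct toy_io io = { BLK_STS_OK, 2, 1 };

    	dec_pending_like(&io, BLK_STS_DM_REQUEUE);  /* one clone pushed back */
    	dec_pending_like(&io, BLK_STS_IOERR);       /* must not clobber it */
    	return 0;
    }
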
@@ -838,14 +839,13 @@ void disable_write_zeroes(struct mapped_device *md) | |||
838 | 839 | ||
839 | static void clone_endio(struct bio *bio) | 840 | static void clone_endio(struct bio *bio) |
840 | { | 841 | { |
841 | int error = bio->bi_error; | 842 | blk_status_t error = bio->bi_status; |
842 | int r = error; | ||
843 | struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); | 843 | struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); |
844 | struct dm_io *io = tio->io; | 844 | struct dm_io *io = tio->io; |
845 | struct mapped_device *md = tio->io->md; | 845 | struct mapped_device *md = tio->io->md; |
846 | dm_endio_fn endio = tio->ti->type->end_io; | 846 | dm_endio_fn endio = tio->ti->type->end_io; |
847 | 847 | ||
848 | if (unlikely(error == -EREMOTEIO)) { | 848 | if (unlikely(error == BLK_STS_TARGET)) { |
849 | if (bio_op(bio) == REQ_OP_WRITE_SAME && | 849 | if (bio_op(bio) == REQ_OP_WRITE_SAME && |
850 | !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors) | 850 | !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors) |
851 | disable_write_same(md); | 851 | disable_write_same(md); |
@@ -855,10 +855,10 @@ static void clone_endio(struct bio *bio) | |||
855 | } | 855 | } |
856 | 856 | ||
857 | if (endio) { | 857 | if (endio) { |
858 | r = endio(tio->ti, bio, &error); | 858 | int r = endio(tio->ti, bio, &error); |
859 | switch (r) { | 859 | switch (r) { |
860 | case DM_ENDIO_REQUEUE: | 860 | case DM_ENDIO_REQUEUE: |
861 | error = DM_ENDIO_REQUEUE; | 861 | error = BLK_STS_DM_REQUEUE; |
862 | /*FALLTHRU*/ | 862 | /*FALLTHRU*/ |
863 | case DM_ENDIO_DONE: | 863 | case DM_ENDIO_DONE: |
864 | break; | 864 | break; |
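
On the consuming side, the switch above keeps the target's return value as a pure disposition: DM_ENDIO_REQUEUE no longer doubles as a completion status but is translated to BLK_STS_DM_REQUEUE before falling through into the DONE case. A sketch of that consumption, where consume_endio() is a hypothetical stand-in for the tail of clone_endio() and the constants use the same illustrative values as the earlier sketches:

    #include <stdio.h>

    typedef unsigned int blk_status_t;

    #define BLK_STS_DM_REQUEUE ((blk_status_t)11)

    #define DM_ENDIO_DONE       0
    #define DM_ENDIO_INCOMPLETE 1
    #define DM_ENDIO_REQUEUE    2

    /* Core-side consumption of a target's ->end_io() verdict; returns 1
     * when the clone is finished and dec_pending() should run. */
    static int consume_endio(int r, blk_status_t *error)
    {
    	switch (r) {
    	case DM_ENDIO_REQUEUE:
    		*error = BLK_STS_DM_REQUEUE;
    		/* fall through: a requeued clone is still finished */
    	case DM_ENDIO_DONE:
    		return 1;
    	case DM_ENDIO_INCOMPLETE:
    		return 0;        /* the target will complete it later */
    	default:
    		fprintf(stderr, "unimplemented endio return %d\n", r);
    		return 1;        /* the kernel BUG()s here instead */
    	}
    }

    int main(void)
    {
    	blk_status_t st = 0;
    	int done = consume_endio(DM_ENDIO_REQUEUE, &st);

    	printf("done=%d status=%u\n", done, st);
    	return 0;
    }
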
@@ -1094,11 +1094,11 @@ static void __map_bio(struct dm_target_io *tio) | |||
1094 | generic_make_request(clone); | 1094 | generic_make_request(clone); |
1095 | break; | 1095 | break; |
1096 | case DM_MAPIO_KILL: | 1096 | case DM_MAPIO_KILL: |
1097 | r = -EIO; | 1097 | dec_pending(tio->io, BLK_STS_IOERR); |
1098 | /*FALLTHRU*/ | 1098 | free_tio(tio); |
1099 | break; | ||
1099 | case DM_MAPIO_REQUEUE: | 1100 | case DM_MAPIO_REQUEUE: |
1100 | /* error the io and bail out, or requeue it if needed */ | 1101 | dec_pending(tio->io, BLK_STS_DM_REQUEUE); |
1101 | dec_pending(tio->io, r); | ||
1102 | free_tio(tio); | 1102 | free_tio(tio); |
1103 | break; | 1103 | break; |
1104 | default: | 1104 | default: |
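
The submission side gets the mirror-image cleanup: DM_MAPIO_KILL previously borrowed r = -EIO and fell through into the requeue arm, while the rewrite gives each verdict its own arm with an explicit status. A sketch of the dispatch with the surrounding machinery reduced to prints; dispatch_map_return() is hypothetical and the DM_MAPIO_*/BLK_STS_* values shown are illustrative:

    #include <stdio.h>

    typedef unsigned int blk_status_t;

    #define BLK_STS_IOERR      ((blk_status_t)10)
    #define BLK_STS_DM_REQUEUE ((blk_status_t)11)

    #define DM_MAPIO_SUBMITTED 0
    #define DM_MAPIO_REMAPPED  1
    #define DM_MAPIO_REQUEUE   2
    #define DM_MAPIO_KILL      4

    static void dispatch_map_return(int r)
    {
    	switch (r) {
    	case DM_MAPIO_SUBMITTED:
    		break;                          /* target owns the clone */
    	case DM_MAPIO_REMAPPED:
    		printf("generic_make_request(clone)\n");
    		break;
    	case DM_MAPIO_KILL:                     /* its own arm, no -EIO detour */
    		printf("dec_pending(io, %u); free_tio()\n", BLK_STS_IOERR);
    		break;
    	case DM_MAPIO_REQUEUE:
    		printf("dec_pending(io, %u); free_tio()\n", BLK_STS_DM_REQUEUE);
    		break;
    	default:
    		fprintf(stderr, "unknown map return value %d\n", r);
    	}
    }

    int main(void)
    {
    	dispatch_map_return(DM_MAPIO_KILL);
    	dispatch_map_return(DM_MAPIO_REQUEUE);
    	return 0;
    }
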
@@ -1366,7 +1366,7 @@ static void __split_and_process_bio(struct mapped_device *md, | |||
1366 | ci.map = map; | 1366 | ci.map = map; |
1367 | ci.md = md; | 1367 | ci.md = md; |
1368 | ci.io = alloc_io(md); | 1368 | ci.io = alloc_io(md); |
1369 | ci.io->error = 0; | 1369 | ci.io->status = 0; |
1370 | atomic_set(&ci.io->io_count, 1); | 1370 | atomic_set(&ci.io->io_count, 1); |
1371 | ci.io->bio = bio; | 1371 | ci.io->bio = bio; |
1372 | ci.io->md = md; | 1372 | ci.io->md = md; |
diff --git a/drivers/md/md.c b/drivers/md/md.c index 10367ffe92e3..6452e83fd650 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -273,7 +273,7 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) | |||
273 | } | 273 | } |
274 | if (mddev->ro == 1 && unlikely(rw == WRITE)) { | 274 | if (mddev->ro == 1 && unlikely(rw == WRITE)) { |
275 | if (bio_sectors(bio) != 0) | 275 | if (bio_sectors(bio) != 0) |
276 | bio->bi_error = -EROFS; | 276 | bio->bi_status = BLK_STS_IOERR; |
277 | bio_endio(bio); | 277 | bio_endio(bio); |
278 | return BLK_QC_T_NONE; | 278 | return BLK_QC_T_NONE; |
279 | } | 279 | } |
@@ -719,8 +719,8 @@ static void super_written(struct bio *bio) | |||
719 | struct md_rdev *rdev = bio->bi_private; | 719 | struct md_rdev *rdev = bio->bi_private; |
720 | struct mddev *mddev = rdev->mddev; | 720 | struct mddev *mddev = rdev->mddev; |
721 | 721 | ||
722 | if (bio->bi_error) { | 722 | if (bio->bi_status) { |
723 | pr_err("md: super_written gets error=%d\n", bio->bi_error); | 723 | pr_err("md: super_written gets error=%d\n", bio->bi_status); |
724 | md_error(mddev, rdev); | 724 | md_error(mddev, rdev); |
725 | if (!test_bit(Faulty, &rdev->flags) | 725 | if (!test_bit(Faulty, &rdev->flags) |
726 | && (bio->bi_opf & MD_FAILFAST)) { | 726 | && (bio->bi_opf & MD_FAILFAST)) { |
@@ -801,7 +801,7 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, | |||
801 | 801 | ||
802 | submit_bio_wait(bio); | 802 | submit_bio_wait(bio); |
803 | 803 | ||
804 | ret = !bio->bi_error; | 804 | ret = !bio->bi_status; |
805 | bio_put(bio); | 805 | bio_put(bio); |
806 | return ret; | 806 | return ret; |
807 | } | 807 | } |
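
One side effect worth noting in super_written(): blk_status_t is not an errno, so the unchanged error=%d format now prints the block-layer code (BLK_STS_IOERR, 10 in this sketch's illustrative numbering) rather than -5. When an errno is genuinely wanted, for a log line or a boolean-ish return like sync_page_io()'s ret = !bio->bi_status, the reverse helper blk_status_to_errno() exists. An illustrative round trip under the same stubbed values as the earlier sketches; the authoritative table is in block/blk-core.c:

    #include <errno.h>
    #include <stdio.h>

    typedef unsigned int blk_status_t;

    #define BLK_STS_OK    ((blk_status_t)0)
    #define BLK_STS_NOSPC ((blk_status_t)3)
    #define BLK_STS_IOERR ((blk_status_t)10)

    /* Illustrative inverse of errno_to_blk_status() */
    static int blk_status_to_errno(blk_status_t status)
    {
    	switch (status) {
    	case BLK_STS_OK:    return 0;
    	case BLK_STS_NOSPC: return -ENOSPC;
    	default:            return -EIO;
    	}
    }

    int main(void)
    {
    	blk_status_t st = BLK_STS_IOERR;

    	/* a bare %d prints the block-layer code, not a familiar errno */
    	printf("raw=%u as-errno=%d\n", st, blk_status_to_errno(st));
    	return 0;
    }
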
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index e95d521d93e9..68d036e64041 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c | |||
@@ -73,12 +73,12 @@ static void multipath_reschedule_retry (struct multipath_bh *mp_bh) | |||
73 | * operation and are ready to return a success/failure code to the buffer | 73 | * operation and are ready to return a success/failure code to the buffer |
74 | * cache layer. | 74 | * cache layer. |
75 | */ | 75 | */ |
76 | static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err) | 76 | static void multipath_end_bh_io(struct multipath_bh *mp_bh, blk_status_t status) |
77 | { | 77 | { |
78 | struct bio *bio = mp_bh->master_bio; | 78 | struct bio *bio = mp_bh->master_bio; |
79 | struct mpconf *conf = mp_bh->mddev->private; | 79 | struct mpconf *conf = mp_bh->mddev->private; |
80 | 80 | ||
81 | bio->bi_error = err; | 81 | bio->bi_status = status; |
82 | bio_endio(bio); | 82 | bio_endio(bio); |
83 | mempool_free(mp_bh, conf->pool); | 83 | mempool_free(mp_bh, conf->pool); |
84 | } | 84 | } |
@@ -89,7 +89,7 @@ static void multipath_end_request(struct bio *bio) | |||
89 | struct mpconf *conf = mp_bh->mddev->private; | 89 | struct mpconf *conf = mp_bh->mddev->private; |
90 | struct md_rdev *rdev = conf->multipaths[mp_bh->path].rdev; | 90 | struct md_rdev *rdev = conf->multipaths[mp_bh->path].rdev; |
91 | 91 | ||
92 | if (!bio->bi_error) | 92 | if (!bio->bi_status) |
93 | multipath_end_bh_io(mp_bh, 0); | 93 | multipath_end_bh_io(mp_bh, 0); |
94 | else if (!(bio->bi_opf & REQ_RAHEAD)) { | 94 | else if (!(bio->bi_opf & REQ_RAHEAD)) { |
95 | /* | 95 | /* |
@@ -102,7 +102,7 @@ static void multipath_end_request(struct bio *bio) | |||
102 | (unsigned long long)bio->bi_iter.bi_sector); | 102 | (unsigned long long)bio->bi_iter.bi_sector); |
103 | multipath_reschedule_retry(mp_bh); | 103 | multipath_reschedule_retry(mp_bh); |
104 | } else | 104 | } else |
105 | multipath_end_bh_io(mp_bh, bio->bi_error); | 105 | multipath_end_bh_io(mp_bh, bio->bi_status); |
106 | rdev_dec_pending(rdev, conf->mddev); | 106 | rdev_dec_pending(rdev, conf->mddev); |
107 | } | 107 | } |
108 | 108 | ||
@@ -347,7 +347,7 @@ static void multipathd(struct md_thread *thread) | |||
347 | pr_err("multipath: %s: unrecoverable IO read error for block %llu\n", | 347 | pr_err("multipath: %s: unrecoverable IO read error for block %llu\n", |
348 | bdevname(bio->bi_bdev,b), | 348 | bdevname(bio->bi_bdev,b), |
349 | (unsigned long long)bio->bi_iter.bi_sector); | 349 | (unsigned long long)bio->bi_iter.bi_sector); |
350 | multipath_end_bh_io(mp_bh, -EIO); | 350 | multipath_end_bh_io(mp_bh, BLK_STS_IOERR); |
351 | } else { | 351 | } else { |
352 | pr_err("multipath: %s: redirecting sector %llu to another IO path\n", | 352 | pr_err("multipath: %s: redirecting sector %llu to another IO path\n", |
353 | bdevname(bio->bi_bdev,b), | 353 | bdevname(bio->bi_bdev,b), |
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index af5056d56878..94b87c4d0f7b 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -277,7 +277,7 @@ static void call_bio_endio(struct r1bio *r1_bio) | |||
277 | struct r1conf *conf = r1_bio->mddev->private; | 277 | struct r1conf *conf = r1_bio->mddev->private; |
278 | 278 | ||
279 | if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) | 279 | if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) |
280 | bio->bi_error = -EIO; | 280 | bio->bi_status = BLK_STS_IOERR; |
281 | 281 | ||
282 | bio_endio(bio); | 282 | bio_endio(bio); |
283 | /* | 283 | /* |
@@ -335,7 +335,7 @@ static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio) | |||
335 | 335 | ||
336 | static void raid1_end_read_request(struct bio *bio) | 336 | static void raid1_end_read_request(struct bio *bio) |
337 | { | 337 | { |
338 | int uptodate = !bio->bi_error; | 338 | int uptodate = !bio->bi_status; |
339 | struct r1bio *r1_bio = bio->bi_private; | 339 | struct r1bio *r1_bio = bio->bi_private; |
340 | struct r1conf *conf = r1_bio->mddev->private; | 340 | struct r1conf *conf = r1_bio->mddev->private; |
341 | struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev; | 341 | struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev; |
@@ -426,12 +426,12 @@ static void raid1_end_write_request(struct bio *bio) | |||
426 | struct md_rdev *rdev = conf->mirrors[mirror].rdev; | 426 | struct md_rdev *rdev = conf->mirrors[mirror].rdev; |
427 | bool discard_error; | 427 | bool discard_error; |
428 | 428 | ||
429 | discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD; | 429 | discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD; |
430 | 430 | ||
431 | /* | 431 | /* |
432 | * 'one mirror IO has finished' event handler: | 432 | * 'one mirror IO has finished' event handler: |
433 | */ | 433 | */ |
434 | if (bio->bi_error && !discard_error) { | 434 | if (bio->bi_status && !discard_error) { |
435 | set_bit(WriteErrorSeen, &rdev->flags); | 435 | set_bit(WriteErrorSeen, &rdev->flags); |
436 | if (!test_and_set_bit(WantReplacement, &rdev->flags)) | 436 | if (!test_and_set_bit(WantReplacement, &rdev->flags)) |
437 | set_bit(MD_RECOVERY_NEEDED, & | 437 | set_bit(MD_RECOVERY_NEEDED, & |
@@ -802,7 +802,7 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio) | |||
802 | bio->bi_next = NULL; | 802 | bio->bi_next = NULL; |
803 | bio->bi_bdev = rdev->bdev; | 803 | bio->bi_bdev = rdev->bdev; |
804 | if (test_bit(Faulty, &rdev->flags)) { | 804 | if (test_bit(Faulty, &rdev->flags)) { |
805 | bio->bi_error = -EIO; | 805 | bio->bi_status = BLK_STS_IOERR; |
806 | bio_endio(bio); | 806 | bio_endio(bio); |
807 | } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && | 807 | } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && |
808 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) | 808 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) |
@@ -1856,7 +1856,7 @@ static void end_sync_read(struct bio *bio) | |||
1856 | * or re-read if the read failed. | 1856 | * or re-read if the read failed. |
1857 | * We don't do much here, just schedule handling by raid1d | 1857 | * We don't do much here, just schedule handling by raid1d |
1858 | */ | 1858 | */ |
1859 | if (!bio->bi_error) | 1859 | if (!bio->bi_status) |
1860 | set_bit(R1BIO_Uptodate, &r1_bio->state); | 1860 | set_bit(R1BIO_Uptodate, &r1_bio->state); |
1861 | 1861 | ||
1862 | if (atomic_dec_and_test(&r1_bio->remaining)) | 1862 | if (atomic_dec_and_test(&r1_bio->remaining)) |
@@ -1865,7 +1865,7 @@ static void end_sync_read(struct bio *bio) | |||
1865 | 1865 | ||
1866 | static void end_sync_write(struct bio *bio) | 1866 | static void end_sync_write(struct bio *bio) |
1867 | { | 1867 | { |
1868 | int uptodate = !bio->bi_error; | 1868 | int uptodate = !bio->bi_status; |
1869 | struct r1bio *r1_bio = get_resync_r1bio(bio); | 1869 | struct r1bio *r1_bio = get_resync_r1bio(bio); |
1870 | struct mddev *mddev = r1_bio->mddev; | 1870 | struct mddev *mddev = r1_bio->mddev; |
1871 | struct r1conf *conf = mddev->private; | 1871 | struct r1conf *conf = mddev->private; |
@@ -2058,7 +2058,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio) | |||
2058 | idx ++; | 2058 | idx ++; |
2059 | } | 2059 | } |
2060 | set_bit(R1BIO_Uptodate, &r1_bio->state); | 2060 | set_bit(R1BIO_Uptodate, &r1_bio->state); |
2061 | bio->bi_error = 0; | 2061 | bio->bi_status = 0; |
2062 | return 1; | 2062 | return 1; |
2063 | } | 2063 | } |
2064 | 2064 | ||
@@ -2082,16 +2082,16 @@ static void process_checks(struct r1bio *r1_bio) | |||
2082 | for (i = 0; i < conf->raid_disks * 2; i++) { | 2082 | for (i = 0; i < conf->raid_disks * 2; i++) { |
2083 | int j; | 2083 | int j; |
2084 | int size; | 2084 | int size; |
2085 | int error; | 2085 | blk_status_t status; |
2086 | struct bio_vec *bi; | 2086 | struct bio_vec *bi; |
2087 | struct bio *b = r1_bio->bios[i]; | 2087 | struct bio *b = r1_bio->bios[i]; |
2088 | struct resync_pages *rp = get_resync_pages(b); | 2088 | struct resync_pages *rp = get_resync_pages(b); |
2089 | if (b->bi_end_io != end_sync_read) | 2089 | if (b->bi_end_io != end_sync_read) |
2090 | continue; | 2090 | continue; |
2091 | /* fixup the bio for reuse, but preserve errno */ | 2091 | /* fixup the bio for reuse, but preserve errno */ |
2092 | error = b->bi_error; | 2092 | status = b->bi_status; |
2093 | bio_reset(b); | 2093 | bio_reset(b); |
2094 | b->bi_error = error; | 2094 | b->bi_status = status; |
2095 | b->bi_vcnt = vcnt; | 2095 | b->bi_vcnt = vcnt; |
2096 | b->bi_iter.bi_size = r1_bio->sectors << 9; | 2096 | b->bi_iter.bi_size = r1_bio->sectors << 9; |
2097 | b->bi_iter.bi_sector = r1_bio->sector + | 2097 | b->bi_iter.bi_sector = r1_bio->sector + |
@@ -2113,7 +2113,7 @@ static void process_checks(struct r1bio *r1_bio) | |||
2113 | } | 2113 | } |
2114 | for (primary = 0; primary < conf->raid_disks * 2; primary++) | 2114 | for (primary = 0; primary < conf->raid_disks * 2; primary++) |
2115 | if (r1_bio->bios[primary]->bi_end_io == end_sync_read && | 2115 | if (r1_bio->bios[primary]->bi_end_io == end_sync_read && |
2116 | !r1_bio->bios[primary]->bi_error) { | 2116 | !r1_bio->bios[primary]->bi_status) { |
2117 | r1_bio->bios[primary]->bi_end_io = NULL; | 2117 | r1_bio->bios[primary]->bi_end_io = NULL; |
2118 | rdev_dec_pending(conf->mirrors[primary].rdev, mddev); | 2118 | rdev_dec_pending(conf->mirrors[primary].rdev, mddev); |
2119 | break; | 2119 | break; |
@@ -2123,7 +2123,7 @@ static void process_checks(struct r1bio *r1_bio) | |||
2123 | int j; | 2123 | int j; |
2124 | struct bio *pbio = r1_bio->bios[primary]; | 2124 | struct bio *pbio = r1_bio->bios[primary]; |
2125 | struct bio *sbio = r1_bio->bios[i]; | 2125 | struct bio *sbio = r1_bio->bios[i]; |
2126 | int error = sbio->bi_error; | 2126 | blk_status_t status = sbio->bi_status; |
2127 | struct page **ppages = get_resync_pages(pbio)->pages; | 2127 | struct page **ppages = get_resync_pages(pbio)->pages; |
2128 | struct page **spages = get_resync_pages(sbio)->pages; | 2128 | struct page **spages = get_resync_pages(sbio)->pages; |
2129 | struct bio_vec *bi; | 2129 | struct bio_vec *bi; |
@@ -2132,12 +2132,12 @@ static void process_checks(struct r1bio *r1_bio) | |||
2132 | if (sbio->bi_end_io != end_sync_read) | 2132 | if (sbio->bi_end_io != end_sync_read) |
2133 | continue; | 2133 | continue; |
2134 | /* Now we can 'fixup' the error value */ | 2134 | /* Now we can 'fixup' the error value */ |
2135 | sbio->bi_error = 0; | 2135 | sbio->bi_status = 0; |
2136 | 2136 | ||
2137 | bio_for_each_segment_all(bi, sbio, j) | 2137 | bio_for_each_segment_all(bi, sbio, j) |
2138 | page_len[j] = bi->bv_len; | 2138 | page_len[j] = bi->bv_len; |
2139 | 2139 | ||
2140 | if (!error) { | 2140 | if (!status) { |
2141 | for (j = vcnt; j-- ; ) { | 2141 | for (j = vcnt; j-- ; ) { |
2142 | if (memcmp(page_address(ppages[j]), | 2142 | if (memcmp(page_address(ppages[j]), |
2143 | page_address(spages[j]), | 2143 | page_address(spages[j]), |
@@ -2149,7 +2149,7 @@ static void process_checks(struct r1bio *r1_bio) | |||
2149 | if (j >= 0) | 2149 | if (j >= 0) |
2150 | atomic64_add(r1_bio->sectors, &mddev->resync_mismatches); | 2150 | atomic64_add(r1_bio->sectors, &mddev->resync_mismatches); |
2151 | if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) | 2151 | if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) |
2152 | && !error)) { | 2152 | && !status)) { |
2153 | /* No need to write to this device. */ | 2153 | /* No need to write to this device. */ |
2154 | sbio->bi_end_io = NULL; | 2154 | sbio->bi_end_io = NULL; |
2155 | rdev_dec_pending(conf->mirrors[i].rdev, mddev); | 2155 | rdev_dec_pending(conf->mirrors[i].rdev, mddev); |
@@ -2400,11 +2400,11 @@ static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio | |||
2400 | struct bio *bio = r1_bio->bios[m]; | 2400 | struct bio *bio = r1_bio->bios[m]; |
2401 | if (bio->bi_end_io == NULL) | 2401 | if (bio->bi_end_io == NULL) |
2402 | continue; | 2402 | continue; |
2403 | if (!bio->bi_error && | 2403 | if (!bio->bi_status && |
2404 | test_bit(R1BIO_MadeGood, &r1_bio->state)) { | 2404 | test_bit(R1BIO_MadeGood, &r1_bio->state)) { |
2405 | rdev_clear_badblocks(rdev, r1_bio->sector, s, 0); | 2405 | rdev_clear_badblocks(rdev, r1_bio->sector, s, 0); |
2406 | } | 2406 | } |
2407 | if (bio->bi_error && | 2407 | if (bio->bi_status && |
2408 | test_bit(R1BIO_WriteError, &r1_bio->state)) { | 2408 | test_bit(R1BIO_WriteError, &r1_bio->state)) { |
2409 | if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0)) | 2409 | if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0)) |
2410 | md_error(conf->mddev, rdev); | 2410 | md_error(conf->mddev, rdev); |
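
process_checks() reuses the resync read bios for the subsequent writes, and the status field sits inside the region that bio_reset() zeroes, so the hunk above saves bi_status by hand and reinstates it after the reset, exactly as the old code did for bi_error. A toy illustration of that save/reset/restore dance; struct toy_bio and toy_bio_reset() are inventions, and the real bio_reset() only zeroes struct bio up to an internal reset mark rather than the whole struct:

    #include <stdio.h>
    #include <string.h>

    typedef unsigned int blk_status_t;

    #define BLK_STS_IOERR ((blk_status_t)10)

    struct toy_bio {
    	blk_status_t bi_status;
    	int          bi_vcnt;
    	unsigned int bi_size;
    	/* ...everything else a reset is expected to wipe... */
    };

    /* Stand-in for bio_reset(): the tracked state goes back to zero */
    static void toy_bio_reset(struct toy_bio *b)
    {
    	memset(b, 0, sizeof(*b));
    }

    int main(void)
    {
    	struct toy_bio b = { BLK_STS_IOERR, 4, 4096 };

    	/* fixup the bio for reuse, but preserve the status */
    	blk_status_t status = b.bi_status;
    	toy_bio_reset(&b);
    	b.bi_status = status;

    	printf("after reset: bi_status=%u bi_vcnt=%d\n", b.bi_status, b.bi_vcnt);
    	return 0;
    }
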
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 4343d7ff9916..89ad1cd29037 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -336,7 +336,7 @@ static void raid_end_bio_io(struct r10bio *r10_bio) | |||
336 | struct r10conf *conf = r10_bio->mddev->private; | 336 | struct r10conf *conf = r10_bio->mddev->private; |
337 | 337 | ||
338 | if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) | 338 | if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) |
339 | bio->bi_error = -EIO; | 339 | bio->bi_status = BLK_STS_IOERR; |
340 | 340 | ||
341 | bio_endio(bio); | 341 | bio_endio(bio); |
342 | /* | 342 | /* |
@@ -389,7 +389,7 @@ static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio, | |||
389 | 389 | ||
390 | static void raid10_end_read_request(struct bio *bio) | 390 | static void raid10_end_read_request(struct bio *bio) |
391 | { | 391 | { |
392 | int uptodate = !bio->bi_error; | 392 | int uptodate = !bio->bi_status; |
393 | struct r10bio *r10_bio = bio->bi_private; | 393 | struct r10bio *r10_bio = bio->bi_private; |
394 | int slot, dev; | 394 | int slot, dev; |
395 | struct md_rdev *rdev; | 395 | struct md_rdev *rdev; |
@@ -477,7 +477,7 @@ static void raid10_end_write_request(struct bio *bio) | |||
477 | struct bio *to_put = NULL; | 477 | struct bio *to_put = NULL; |
478 | bool discard_error; | 478 | bool discard_error; |
479 | 479 | ||
480 | discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD; | 480 | discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD; |
481 | 481 | ||
482 | dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl); | 482 | dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl); |
483 | 483 | ||
@@ -491,7 +491,7 @@ static void raid10_end_write_request(struct bio *bio) | |||
491 | /* | 491 | /* |
492 | * this branch is our 'one mirror IO has finished' event handler: | 492 | * this branch is our 'one mirror IO has finished' event handler: |
493 | */ | 493 | */ |
494 | if (bio->bi_error && !discard_error) { | 494 | if (bio->bi_status && !discard_error) { |
495 | if (repl) | 495 | if (repl) |
496 | /* Never record new bad blocks to replacement, | 496 | /* Never record new bad blocks to replacement, |
497 | * just fail it. | 497 | * just fail it. |
@@ -913,7 +913,7 @@ static void flush_pending_writes(struct r10conf *conf) | |||
913 | bio->bi_next = NULL; | 913 | bio->bi_next = NULL; |
914 | bio->bi_bdev = rdev->bdev; | 914 | bio->bi_bdev = rdev->bdev; |
915 | if (test_bit(Faulty, &rdev->flags)) { | 915 | if (test_bit(Faulty, &rdev->flags)) { |
916 | bio->bi_error = -EIO; | 916 | bio->bi_status = BLK_STS_IOERR; |
917 | bio_endio(bio); | 917 | bio_endio(bio); |
918 | } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && | 918 | } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && |
919 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) | 919 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) |
@@ -1098,7 +1098,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) | |||
1098 | bio->bi_next = NULL; | 1098 | bio->bi_next = NULL; |
1099 | bio->bi_bdev = rdev->bdev; | 1099 | bio->bi_bdev = rdev->bdev; |
1100 | if (test_bit(Faulty, &rdev->flags)) { | 1100 | if (test_bit(Faulty, &rdev->flags)) { |
1101 | bio->bi_error = -EIO; | 1101 | bio->bi_status = BLK_STS_IOERR; |
1102 | bio_endio(bio); | 1102 | bio_endio(bio); |
1103 | } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && | 1103 | } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && |
1104 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) | 1104 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) |
@@ -1888,7 +1888,7 @@ static void __end_sync_read(struct r10bio *r10_bio, struct bio *bio, int d) | |||
1888 | { | 1888 | { |
1889 | struct r10conf *conf = r10_bio->mddev->private; | 1889 | struct r10conf *conf = r10_bio->mddev->private; |
1890 | 1890 | ||
1891 | if (!bio->bi_error) | 1891 | if (!bio->bi_status) |
1892 | set_bit(R10BIO_Uptodate, &r10_bio->state); | 1892 | set_bit(R10BIO_Uptodate, &r10_bio->state); |
1893 | else | 1893 | else |
1894 | /* The write handler will notice the lack of | 1894 | /* The write handler will notice the lack of |
@@ -1972,7 +1972,7 @@ static void end_sync_write(struct bio *bio) | |||
1972 | else | 1972 | else |
1973 | rdev = conf->mirrors[d].rdev; | 1973 | rdev = conf->mirrors[d].rdev; |
1974 | 1974 | ||
1975 | if (bio->bi_error) { | 1975 | if (bio->bi_status) { |
1976 | if (repl) | 1976 | if (repl) |
1977 | md_error(mddev, rdev); | 1977 | md_error(mddev, rdev); |
1978 | else { | 1978 | else { |
@@ -2021,7 +2021,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) | |||
2021 | 2021 | ||
2022 | /* find the first device with a block */ | 2022 | /* find the first device with a block */ |
2023 | for (i=0; i<conf->copies; i++) | 2023 | for (i=0; i<conf->copies; i++) |
2024 | if (!r10_bio->devs[i].bio->bi_error) | 2024 | if (!r10_bio->devs[i].bio->bi_status) |
2025 | break; | 2025 | break; |
2026 | 2026 | ||
2027 | if (i == conf->copies) | 2027 | if (i == conf->copies) |
@@ -2050,7 +2050,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) | |||
2050 | tpages = get_resync_pages(tbio)->pages; | 2050 | tpages = get_resync_pages(tbio)->pages; |
2051 | d = r10_bio->devs[i].devnum; | 2051 | d = r10_bio->devs[i].devnum; |
2052 | rdev = conf->mirrors[d].rdev; | 2052 | rdev = conf->mirrors[d].rdev; |
2053 | if (!r10_bio->devs[i].bio->bi_error) { | 2053 | if (!r10_bio->devs[i].bio->bi_status) { |
2054 | /* We know that the bi_io_vec layout is the same for | 2054 | /* We know that the bi_io_vec layout is the same for |
2055 | * both 'first' and 'i', so we just compare them. | 2055 | * both 'first' and 'i', so we just compare them. |
2056 | * All vec entries are PAGE_SIZE; | 2056 | * All vec entries are PAGE_SIZE; |
@@ -2633,7 +2633,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) | |||
2633 | rdev = conf->mirrors[dev].rdev; | 2633 | rdev = conf->mirrors[dev].rdev; |
2634 | if (r10_bio->devs[m].bio == NULL) | 2634 | if (r10_bio->devs[m].bio == NULL) |
2635 | continue; | 2635 | continue; |
2636 | if (!r10_bio->devs[m].bio->bi_error) { | 2636 | if (!r10_bio->devs[m].bio->bi_status) { |
2637 | rdev_clear_badblocks( | 2637 | rdev_clear_badblocks( |
2638 | rdev, | 2638 | rdev, |
2639 | r10_bio->devs[m].addr, | 2639 | r10_bio->devs[m].addr, |
@@ -2649,7 +2649,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) | |||
2649 | if (r10_bio->devs[m].repl_bio == NULL) | 2649 | if (r10_bio->devs[m].repl_bio == NULL) |
2650 | continue; | 2650 | continue; |
2651 | 2651 | ||
2652 | if (!r10_bio->devs[m].repl_bio->bi_error) { | 2652 | if (!r10_bio->devs[m].repl_bio->bi_status) { |
2653 | rdev_clear_badblocks( | 2653 | rdev_clear_badblocks( |
2654 | rdev, | 2654 | rdev, |
2655 | r10_bio->devs[m].addr, | 2655 | r10_bio->devs[m].addr, |
@@ -2675,7 +2675,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) | |||
2675 | r10_bio->devs[m].addr, | 2675 | r10_bio->devs[m].addr, |
2676 | r10_bio->sectors, 0); | 2676 | r10_bio->sectors, 0); |
2677 | rdev_dec_pending(rdev, conf->mddev); | 2677 | rdev_dec_pending(rdev, conf->mddev); |
2678 | } else if (bio != NULL && bio->bi_error) { | 2678 | } else if (bio != NULL && bio->bi_status) { |
2679 | fail = true; | 2679 | fail = true; |
2680 | if (!narrow_write_error(r10_bio, m)) { | 2680 | if (!narrow_write_error(r10_bio, m)) { |
2681 | md_error(conf->mddev, rdev); | 2681 | md_error(conf->mddev, rdev); |
@@ -3267,7 +3267,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, | |||
3267 | r10_bio->devs[i].repl_bio->bi_end_io = NULL; | 3267 | r10_bio->devs[i].repl_bio->bi_end_io = NULL; |
3268 | 3268 | ||
3269 | bio = r10_bio->devs[i].bio; | 3269 | bio = r10_bio->devs[i].bio; |
3270 | bio->bi_error = -EIO; | 3270 | bio->bi_status = BLK_STS_IOERR; |
3271 | rcu_read_lock(); | 3271 | rcu_read_lock(); |
3272 | rdev = rcu_dereference(conf->mirrors[d].rdev); | 3272 | rdev = rcu_dereference(conf->mirrors[d].rdev); |
3273 | if (rdev == NULL || test_bit(Faulty, &rdev->flags)) { | 3273 | if (rdev == NULL || test_bit(Faulty, &rdev->flags)) { |
@@ -3309,7 +3309,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, | |||
3309 | 3309 | ||
3310 | /* Need to set up for writing to the replacement */ | 3310 | /* Need to set up for writing to the replacement */ |
3311 | bio = r10_bio->devs[i].repl_bio; | 3311 | bio = r10_bio->devs[i].repl_bio; |
3312 | bio->bi_error = -EIO; | 3312 | bio->bi_status = BLK_STS_IOERR; |
3313 | 3313 | ||
3314 | sector = r10_bio->devs[i].addr; | 3314 | sector = r10_bio->devs[i].addr; |
3315 | bio->bi_next = biolist; | 3315 | bio->bi_next = biolist; |
@@ -3375,7 +3375,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, | |||
3375 | 3375 | ||
3376 | if (bio->bi_end_io == end_sync_read) { | 3376 | if (bio->bi_end_io == end_sync_read) { |
3377 | md_sync_acct(bio->bi_bdev, nr_sectors); | 3377 | md_sync_acct(bio->bi_bdev, nr_sectors); |
3378 | bio->bi_error = 0; | 3378 | bio->bi_status = 0; |
3379 | generic_make_request(bio); | 3379 | generic_make_request(bio); |
3380 | } | 3380 | } |
3381 | } | 3381 | } |
@@ -4394,7 +4394,7 @@ read_more: | |||
4394 | read_bio->bi_end_io = end_reshape_read; | 4394 | read_bio->bi_end_io = end_reshape_read; |
4395 | bio_set_op_attrs(read_bio, REQ_OP_READ, 0); | 4395 | bio_set_op_attrs(read_bio, REQ_OP_READ, 0); |
4396 | read_bio->bi_flags &= (~0UL << BIO_RESET_BITS); | 4396 | read_bio->bi_flags &= (~0UL << BIO_RESET_BITS); |
4397 | read_bio->bi_error = 0; | 4397 | read_bio->bi_status = 0; |
4398 | read_bio->bi_vcnt = 0; | 4398 | read_bio->bi_vcnt = 0; |
4399 | read_bio->bi_iter.bi_size = 0; | 4399 | read_bio->bi_iter.bi_size = 0; |
4400 | r10_bio->master_bio = read_bio; | 4400 | r10_bio->master_bio = read_bio; |
@@ -4638,7 +4638,7 @@ static void end_reshape_write(struct bio *bio) | |||
4638 | rdev = conf->mirrors[d].rdev; | 4638 | rdev = conf->mirrors[d].rdev; |
4639 | } | 4639 | } |
4640 | 4640 | ||
4641 | if (bio->bi_error) { | 4641 | if (bio->bi_status) { |
4642 | /* FIXME should record badblock */ | 4642 | /* FIXME should record badblock */ |
4643 | md_error(mddev, rdev); | 4643 | md_error(mddev, rdev); |
4644 | } | 4644 | } |
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 4c00bc248287..3ed6a0d89db8 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c | |||
@@ -572,7 +572,7 @@ static void r5l_log_endio(struct bio *bio) | |||
572 | struct r5l_log *log = io->log; | 572 | struct r5l_log *log = io->log; |
573 | unsigned long flags; | 573 | unsigned long flags; |
574 | 574 | ||
575 | if (bio->bi_error) | 575 | if (bio->bi_status) |
576 | md_error(log->rdev->mddev, log->rdev); | 576 | md_error(log->rdev->mddev, log->rdev); |
577 | 577 | ||
578 | bio_put(bio); | 578 | bio_put(bio); |
@@ -1247,7 +1247,7 @@ static void r5l_log_flush_endio(struct bio *bio) | |||
1247 | unsigned long flags; | 1247 | unsigned long flags; |
1248 | struct r5l_io_unit *io; | 1248 | struct r5l_io_unit *io; |
1249 | 1249 | ||
1250 | if (bio->bi_error) | 1250 | if (bio->bi_status) |
1251 | md_error(log->rdev->mddev, log->rdev); | 1251 | md_error(log->rdev->mddev, log->rdev); |
1252 | 1252 | ||
1253 | spin_lock_irqsave(&log->io_list_lock, flags); | 1253 | spin_lock_irqsave(&log->io_list_lock, flags); |
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c index 5d25bebf3328..09e04be34e5f 100644 --- a/drivers/md/raid5-ppl.c +++ b/drivers/md/raid5-ppl.c | |||
@@ -397,7 +397,7 @@ static void ppl_log_endio(struct bio *bio) | |||
397 | 397 | ||
398 | pr_debug("%s: seq: %llu\n", __func__, io->seq); | 398 | pr_debug("%s: seq: %llu\n", __func__, io->seq); |
399 | 399 | ||
400 | if (bio->bi_error) | 400 | if (bio->bi_status) |
401 | md_error(ppl_conf->mddev, log->rdev); | 401 | md_error(ppl_conf->mddev, log->rdev); |
402 | 402 | ||
403 | list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) { | 403 | list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) { |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 9c4f7659f8b1..e1bdc320f664 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -2476,7 +2476,7 @@ static void raid5_end_read_request(struct bio * bi) | |||
2476 | 2476 | ||
2477 | pr_debug("end_read_request %llu/%d, count: %d, error %d.\n", | 2477 | pr_debug("end_read_request %llu/%d, count: %d, error %d.\n", |
2478 | (unsigned long long)sh->sector, i, atomic_read(&sh->count), | 2478 | (unsigned long long)sh->sector, i, atomic_read(&sh->count), |
2479 | bi->bi_error); | 2479 | bi->bi_status); |
2480 | if (i == disks) { | 2480 | if (i == disks) { |
2481 | bio_reset(bi); | 2481 | bio_reset(bi); |
2482 | BUG(); | 2482 | BUG(); |
@@ -2496,7 +2496,7 @@ static void raid5_end_read_request(struct bio * bi) | |||
2496 | s = sh->sector + rdev->new_data_offset; | 2496 | s = sh->sector + rdev->new_data_offset; |
2497 | else | 2497 | else |
2498 | s = sh->sector + rdev->data_offset; | 2498 | s = sh->sector + rdev->data_offset; |
2499 | if (!bi->bi_error) { | 2499 | if (!bi->bi_status) { |
2500 | set_bit(R5_UPTODATE, &sh->dev[i].flags); | 2500 | set_bit(R5_UPTODATE, &sh->dev[i].flags); |
2501 | if (test_bit(R5_ReadError, &sh->dev[i].flags)) { | 2501 | if (test_bit(R5_ReadError, &sh->dev[i].flags)) { |
2502 | /* Note that this cannot happen on a | 2502 | /* Note that this cannot happen on a |
@@ -2613,7 +2613,7 @@ static void raid5_end_write_request(struct bio *bi) | |||
2613 | } | 2613 | } |
2614 | pr_debug("end_write_request %llu/%d, count %d, error: %d.\n", | 2614 | pr_debug("end_write_request %llu/%d, count %d, error: %d.\n", |
2615 | (unsigned long long)sh->sector, i, atomic_read(&sh->count), | 2615 | (unsigned long long)sh->sector, i, atomic_read(&sh->count), |
2616 | bi->bi_error); | 2616 | bi->bi_status); |
2617 | if (i == disks) { | 2617 | if (i == disks) { |
2618 | bio_reset(bi); | 2618 | bio_reset(bi); |
2619 | BUG(); | 2619 | BUG(); |
@@ -2621,14 +2621,14 @@ static void raid5_end_write_request(struct bio *bi) | |||
2621 | } | 2621 | } |
2622 | 2622 | ||
2623 | if (replacement) { | 2623 | if (replacement) { |
2624 | if (bi->bi_error) | 2624 | if (bi->bi_status) |
2625 | md_error(conf->mddev, rdev); | 2625 | md_error(conf->mddev, rdev); |
2626 | else if (is_badblock(rdev, sh->sector, | 2626 | else if (is_badblock(rdev, sh->sector, |
2627 | STRIPE_SECTORS, | 2627 | STRIPE_SECTORS, |
2628 | &first_bad, &bad_sectors)) | 2628 | &first_bad, &bad_sectors)) |
2629 | set_bit(R5_MadeGoodRepl, &sh->dev[i].flags); | 2629 | set_bit(R5_MadeGoodRepl, &sh->dev[i].flags); |
2630 | } else { | 2630 | } else { |
2631 | if (bi->bi_error) { | 2631 | if (bi->bi_status) { |
2632 | set_bit(STRIPE_DEGRADED, &sh->state); | 2632 | set_bit(STRIPE_DEGRADED, &sh->state); |
2633 | set_bit(WriteErrorSeen, &rdev->flags); | 2633 | set_bit(WriteErrorSeen, &rdev->flags); |
2634 | set_bit(R5_WriteError, &sh->dev[i].flags); | 2634 | set_bit(R5_WriteError, &sh->dev[i].flags); |
@@ -2649,7 +2649,7 @@ static void raid5_end_write_request(struct bio *bi) | |||
2649 | } | 2649 | } |
2650 | rdev_dec_pending(rdev, conf->mddev); | 2650 | rdev_dec_pending(rdev, conf->mddev); |
2651 | 2651 | ||
2652 | if (sh->batch_head && bi->bi_error && !replacement) | 2652 | if (sh->batch_head && bi->bi_status && !replacement) |
2653 | set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state); | 2653 | set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state); |
2654 | 2654 | ||
2655 | bio_reset(bi); | 2655 | bio_reset(bi); |
@@ -3381,7 +3381,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, | |||
3381 | sh->dev[i].sector + STRIPE_SECTORS) { | 3381 | sh->dev[i].sector + STRIPE_SECTORS) { |
3382 | struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); | 3382 | struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); |
3383 | 3383 | ||
3384 | bi->bi_error = -EIO; | 3384 | bi->bi_status = BLK_STS_IOERR; |
3385 | md_write_end(conf->mddev); | 3385 | md_write_end(conf->mddev); |
3386 | bio_endio(bi); | 3386 | bio_endio(bi); |
3387 | bi = nextbi; | 3387 | bi = nextbi; |
@@ -3403,7 +3403,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, | |||
3403 | sh->dev[i].sector + STRIPE_SECTORS) { | 3403 | sh->dev[i].sector + STRIPE_SECTORS) { |
3404 | struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); | 3404 | struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); |
3405 | 3405 | ||
3406 | bi->bi_error = -EIO; | 3406 | bi->bi_status = BLK_STS_IOERR; |
3407 | md_write_end(conf->mddev); | 3407 | md_write_end(conf->mddev); |
3408 | bio_endio(bi); | 3408 | bio_endio(bi); |
3409 | bi = bi2; | 3409 | bi = bi2; |
@@ -3429,7 +3429,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, | |||
3429 | struct bio *nextbi = | 3429 | struct bio *nextbi = |
3430 | r5_next_bio(bi, sh->dev[i].sector); | 3430 | r5_next_bio(bi, sh->dev[i].sector); |
3431 | 3431 | ||
3432 | bi->bi_error = -EIO; | 3432 | bi->bi_status = BLK_STS_IOERR; |
3433 | bio_endio(bi); | 3433 | bio_endio(bi); |
3434 | bi = nextbi; | 3434 | bi = nextbi; |
3435 | } | 3435 | } |
@@ -5144,7 +5144,7 @@ static void raid5_align_endio(struct bio *bi) | |||
5144 | struct mddev *mddev; | 5144 | struct mddev *mddev; |
5145 | struct r5conf *conf; | 5145 | struct r5conf *conf; |
5146 | struct md_rdev *rdev; | 5146 | struct md_rdev *rdev; |
5147 | int error = bi->bi_error; | 5147 | blk_status_t error = bi->bi_status; |
5148 | 5148 | ||
5149 | bio_put(bi); | 5149 | bio_put(bi); |
5150 | 5150 | ||
@@ -5721,7 +5721,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi) | |||
5721 | release_stripe_plug(mddev, sh); | 5721 | release_stripe_plug(mddev, sh); |
5722 | } else { | 5722 | } else { |
5723 | /* cannot get stripe for read-ahead, just give-up */ | 5723 | /* cannot get stripe for read-ahead, just give-up */ |
5724 | bi->bi_error = -EIO; | 5724 | bi->bi_status = BLK_STS_IOERR; |
5725 | break; | 5725 | break; |
5726 | } | 5726 | } |
5727 | } | 5727 | } |