summaryrefslogtreecommitdiffstats
path: root/drivers/md/raid10.c
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2017-06-03 03:38:06 -0400
committerJens Axboe <axboe@fb.com>2017-06-09 11:27:32 -0400
commit4e4cbee93d56137ebff722be022cae5f70ef84fb (patch)
tree4fa7345155599fc6bdd653fca8c5224ddf90a5be /drivers/md/raid10.c
parentfc17b6534eb8395f0b3133eb31d87deec32c642b (diff)
block: switch bios to blk_status_t
Replace bi_error with a new bi_status to allow for a clear conversion. Note that device mapper overloaded bi_error with a private value, which we'll have to keep around at least for now and thus propagate to a proper blk_status_t value. Signed-off-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'drivers/md/raid10.c')
-rw-r--r--drivers/md/raid10.c36
1 files changed, 18 insertions, 18 deletions
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 4343d7ff9916..89ad1cd29037 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -336,7 +336,7 @@ static void raid_end_bio_io(struct r10bio *r10_bio)
336 struct r10conf *conf = r10_bio->mddev->private; 336 struct r10conf *conf = r10_bio->mddev->private;
337 337
338 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) 338 if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
339 bio->bi_error = -EIO; 339 bio->bi_status = BLK_STS_IOERR;
340 340
341 bio_endio(bio); 341 bio_endio(bio);
342 /* 342 /*
@@ -389,7 +389,7 @@ static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
389 389
390static void raid10_end_read_request(struct bio *bio) 390static void raid10_end_read_request(struct bio *bio)
391{ 391{
392 int uptodate = !bio->bi_error; 392 int uptodate = !bio->bi_status;
393 struct r10bio *r10_bio = bio->bi_private; 393 struct r10bio *r10_bio = bio->bi_private;
394 int slot, dev; 394 int slot, dev;
395 struct md_rdev *rdev; 395 struct md_rdev *rdev;
@@ -477,7 +477,7 @@ static void raid10_end_write_request(struct bio *bio)
477 struct bio *to_put = NULL; 477 struct bio *to_put = NULL;
478 bool discard_error; 478 bool discard_error;
479 479
480 discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD; 480 discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
481 481
482 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl); 482 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
483 483
@@ -491,7 +491,7 @@ static void raid10_end_write_request(struct bio *bio)
491 /* 491 /*
492 * this branch is our 'one mirror IO has finished' event handler: 492 * this branch is our 'one mirror IO has finished' event handler:
493 */ 493 */
494 if (bio->bi_error && !discard_error) { 494 if (bio->bi_status && !discard_error) {
495 if (repl) 495 if (repl)
496 /* Never record new bad blocks to replacement, 496 /* Never record new bad blocks to replacement,
497 * just fail it. 497 * just fail it.
@@ -913,7 +913,7 @@ static void flush_pending_writes(struct r10conf *conf)
913 bio->bi_next = NULL; 913 bio->bi_next = NULL;
914 bio->bi_bdev = rdev->bdev; 914 bio->bi_bdev = rdev->bdev;
915 if (test_bit(Faulty, &rdev->flags)) { 915 if (test_bit(Faulty, &rdev->flags)) {
916 bio->bi_error = -EIO; 916 bio->bi_status = BLK_STS_IOERR;
917 bio_endio(bio); 917 bio_endio(bio);
918 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && 918 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
919 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) 919 !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
@@ -1098,7 +1098,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
1098 bio->bi_next = NULL; 1098 bio->bi_next = NULL;
1099 bio->bi_bdev = rdev->bdev; 1099 bio->bi_bdev = rdev->bdev;
1100 if (test_bit(Faulty, &rdev->flags)) { 1100 if (test_bit(Faulty, &rdev->flags)) {
1101 bio->bi_error = -EIO; 1101 bio->bi_status = BLK_STS_IOERR;
1102 bio_endio(bio); 1102 bio_endio(bio);
1103 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && 1103 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
1104 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) 1104 !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
@@ -1888,7 +1888,7 @@ static void __end_sync_read(struct r10bio *r10_bio, struct bio *bio, int d)
1888{ 1888{
1889 struct r10conf *conf = r10_bio->mddev->private; 1889 struct r10conf *conf = r10_bio->mddev->private;
1890 1890
1891 if (!bio->bi_error) 1891 if (!bio->bi_status)
1892 set_bit(R10BIO_Uptodate, &r10_bio->state); 1892 set_bit(R10BIO_Uptodate, &r10_bio->state);
1893 else 1893 else
1894 /* The write handler will notice the lack of 1894 /* The write handler will notice the lack of
@@ -1972,7 +1972,7 @@ static void end_sync_write(struct bio *bio)
1972 else 1972 else
1973 rdev = conf->mirrors[d].rdev; 1973 rdev = conf->mirrors[d].rdev;
1974 1974
1975 if (bio->bi_error) { 1975 if (bio->bi_status) {
1976 if (repl) 1976 if (repl)
1977 md_error(mddev, rdev); 1977 md_error(mddev, rdev);
1978 else { 1978 else {
@@ -2021,7 +2021,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2021 2021
2022 /* find the first device with a block */ 2022 /* find the first device with a block */
2023 for (i=0; i<conf->copies; i++) 2023 for (i=0; i<conf->copies; i++)
2024 if (!r10_bio->devs[i].bio->bi_error) 2024 if (!r10_bio->devs[i].bio->bi_status)
2025 break; 2025 break;
2026 2026
2027 if (i == conf->copies) 2027 if (i == conf->copies)
@@ -2050,7 +2050,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2050 tpages = get_resync_pages(tbio)->pages; 2050 tpages = get_resync_pages(tbio)->pages;
2051 d = r10_bio->devs[i].devnum; 2051 d = r10_bio->devs[i].devnum;
2052 rdev = conf->mirrors[d].rdev; 2052 rdev = conf->mirrors[d].rdev;
2053 if (!r10_bio->devs[i].bio->bi_error) { 2053 if (!r10_bio->devs[i].bio->bi_status) {
2054 /* We know that the bi_io_vec layout is the same for 2054 /* We know that the bi_io_vec layout is the same for
2055 * both 'first' and 'i', so we just compare them. 2055 * both 'first' and 'i', so we just compare them.
2056 * All vec entries are PAGE_SIZE; 2056 * All vec entries are PAGE_SIZE;
@@ -2633,7 +2633,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
2633 rdev = conf->mirrors[dev].rdev; 2633 rdev = conf->mirrors[dev].rdev;
2634 if (r10_bio->devs[m].bio == NULL) 2634 if (r10_bio->devs[m].bio == NULL)
2635 continue; 2635 continue;
2636 if (!r10_bio->devs[m].bio->bi_error) { 2636 if (!r10_bio->devs[m].bio->bi_status) {
2637 rdev_clear_badblocks( 2637 rdev_clear_badblocks(
2638 rdev, 2638 rdev,
2639 r10_bio->devs[m].addr, 2639 r10_bio->devs[m].addr,
@@ -2649,7 +2649,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
2649 if (r10_bio->devs[m].repl_bio == NULL) 2649 if (r10_bio->devs[m].repl_bio == NULL)
2650 continue; 2650 continue;
2651 2651
2652 if (!r10_bio->devs[m].repl_bio->bi_error) { 2652 if (!r10_bio->devs[m].repl_bio->bi_status) {
2653 rdev_clear_badblocks( 2653 rdev_clear_badblocks(
2654 rdev, 2654 rdev,
2655 r10_bio->devs[m].addr, 2655 r10_bio->devs[m].addr,
@@ -2675,7 +2675,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
2675 r10_bio->devs[m].addr, 2675 r10_bio->devs[m].addr,
2676 r10_bio->sectors, 0); 2676 r10_bio->sectors, 0);
2677 rdev_dec_pending(rdev, conf->mddev); 2677 rdev_dec_pending(rdev, conf->mddev);
2678 } else if (bio != NULL && bio->bi_error) { 2678 } else if (bio != NULL && bio->bi_status) {
2679 fail = true; 2679 fail = true;
2680 if (!narrow_write_error(r10_bio, m)) { 2680 if (!narrow_write_error(r10_bio, m)) {
2681 md_error(conf->mddev, rdev); 2681 md_error(conf->mddev, rdev);
@@ -3267,7 +3267,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
3267 r10_bio->devs[i].repl_bio->bi_end_io = NULL; 3267 r10_bio->devs[i].repl_bio->bi_end_io = NULL;
3268 3268
3269 bio = r10_bio->devs[i].bio; 3269 bio = r10_bio->devs[i].bio;
3270 bio->bi_error = -EIO; 3270 bio->bi_status = BLK_STS_IOERR;
3271 rcu_read_lock(); 3271 rcu_read_lock();
3272 rdev = rcu_dereference(conf->mirrors[d].rdev); 3272 rdev = rcu_dereference(conf->mirrors[d].rdev);
3273 if (rdev == NULL || test_bit(Faulty, &rdev->flags)) { 3273 if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
@@ -3309,7 +3309,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
3309 3309
3310 /* Need to set up for writing to the replacement */ 3310 /* Need to set up for writing to the replacement */
3311 bio = r10_bio->devs[i].repl_bio; 3311 bio = r10_bio->devs[i].repl_bio;
3312 bio->bi_error = -EIO; 3312 bio->bi_status = BLK_STS_IOERR;
3313 3313
3314 sector = r10_bio->devs[i].addr; 3314 sector = r10_bio->devs[i].addr;
3315 bio->bi_next = biolist; 3315 bio->bi_next = biolist;
@@ -3375,7 +3375,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
3375 3375
3376 if (bio->bi_end_io == end_sync_read) { 3376 if (bio->bi_end_io == end_sync_read) {
3377 md_sync_acct(bio->bi_bdev, nr_sectors); 3377 md_sync_acct(bio->bi_bdev, nr_sectors);
3378 bio->bi_error = 0; 3378 bio->bi_status = 0;
3379 generic_make_request(bio); 3379 generic_make_request(bio);
3380 } 3380 }
3381 } 3381 }
@@ -4394,7 +4394,7 @@ read_more:
4394 read_bio->bi_end_io = end_reshape_read; 4394 read_bio->bi_end_io = end_reshape_read;
4395 bio_set_op_attrs(read_bio, REQ_OP_READ, 0); 4395 bio_set_op_attrs(read_bio, REQ_OP_READ, 0);
4396 read_bio->bi_flags &= (~0UL << BIO_RESET_BITS); 4396 read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
4397 read_bio->bi_error = 0; 4397 read_bio->bi_status = 0;
4398 read_bio->bi_vcnt = 0; 4398 read_bio->bi_vcnt = 0;
4399 read_bio->bi_iter.bi_size = 0; 4399 read_bio->bi_iter.bi_size = 0;
4400 r10_bio->master_bio = read_bio; 4400 r10_bio->master_bio = read_bio;
@@ -4638,7 +4638,7 @@ static void end_reshape_write(struct bio *bio)
4638 rdev = conf->mirrors[d].rdev; 4638 rdev = conf->mirrors[d].rdev;
4639 } 4639 }
4640 4640
4641 if (bio->bi_error) { 4641 if (bio->bi_status) {
4642 /* FIXME should record badblock */ 4642 /* FIXME should record badblock */
4643 md_error(mddev, rdev); 4643 md_error(mddev, rdev);
4644 } 4644 }