author		Christoph Hellwig <hch@lst.de>		2017-06-03 03:38:06 -0400
committer	Jens Axboe <axboe@fb.com>		2017-06-09 11:27:32 -0400
commit		4e4cbee93d56137ebff722be022cae5f70ef84fb (patch)
tree		4fa7345155599fc6bdd653fca8c5224ddf90a5be /drivers/md/dm.c
parent		fc17b6534eb8395f0b3133eb31d87deec32c642b (diff)
block: switch bios to blk_status_t
Replace bi_error with a new bi_status to allow for a clear conversion.
Note that device mapper overloaded bi_error with a private value, which
we'll have to keep around at least for now and thus propagate to a
proper blk_status_t value.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
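
For readers following the conversion, the sketch below distills the rule this patch establishes in clone_endio() and dec_pending(): device mapper's "push back this I/O" verdict, which previously overloaded the int error field, now maps onto the dedicated status code BLK_STS_DM_REQUEUE. This is an illustrative userspace mock, not kernel code; the numeric enum values are stand-ins, and the real definitions live in include/linux/blk_types.h and include/linux/device-mapper.h.

/*
 * Illustrative sketch only -- not part of the patch. Mimics the
 * clone_endio() rule: a target's DM_ENDIO_REQUEUE verdict is
 * expressed as the dedicated BLK_STS_DM_REQUEUE status instead of
 * being smuggled through an int error field.
 */
#include <stdio.h>

typedef unsigned int blk_status_t;	/* kernel: __bitwise type in blk_types.h */

enum {					/* illustrative values, not the kernel's */
	BLK_STS_OK = 0,
	BLK_STS_IOERR,
	BLK_STS_TARGET,
	BLK_STS_DM_REQUEUE,
};

enum dm_endio_return {			/* per-target end_io verdicts */
	DM_ENDIO_DONE,
	DM_ENDIO_REQUEUE,
};

/* Fold a per-target end_io verdict into the bio's status. */
static blk_status_t fold_endio_verdict(enum dm_endio_return r, blk_status_t error)
{
	switch (r) {
	case DM_ENDIO_REQUEUE:
		return BLK_STS_DM_REQUEUE;	/* push back; not an error report */
	case DM_ENDIO_DONE:
	default:
		return error;			/* keep what the clone reported */
	}
}

int main(void)
{
	printf("requeue -> %u\n", fold_endio_verdict(DM_ENDIO_REQUEUE, BLK_STS_OK));
	printf("done    -> %u\n", fold_endio_verdict(DM_ENDIO_DONE, BLK_STS_IOERR));
	return 0;
}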
Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r--	drivers/md/dm.c	40
1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 7a7047211c64..f38f9dd5cbdd 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -63,7 +63,7 @@ static struct workqueue_struct *deferred_remove_workqueue;
  */
 struct dm_io {
 	struct mapped_device *md;
-	int error;
+	blk_status_t status;
 	atomic_t io_count;
 	struct bio *bio;
 	unsigned long start_time;
@@ -768,23 +768,24 @@ static int __noflush_suspending(struct mapped_device *md)
  * Decrements the number of outstanding ios that a bio has been
  * cloned into, completing the original io if necc.
  */
-static void dec_pending(struct dm_io *io, int error)
+static void dec_pending(struct dm_io *io, blk_status_t error)
 {
 	unsigned long flags;
-	int io_error;
+	blk_status_t io_error;
 	struct bio *bio;
 	struct mapped_device *md = io->md;
 
 	/* Push-back supersedes any I/O errors */
 	if (unlikely(error)) {
 		spin_lock_irqsave(&io->endio_lock, flags);
-		if (!(io->error > 0 && __noflush_suspending(md)))
-			io->error = error;
+		if (!(io->status == BLK_STS_DM_REQUEUE &&
+		      __noflush_suspending(md)))
+			io->status = error;
 		spin_unlock_irqrestore(&io->endio_lock, flags);
 	}
 
 	if (atomic_dec_and_test(&io->io_count)) {
-		if (io->error == DM_ENDIO_REQUEUE) {
+		if (io->status == BLK_STS_DM_REQUEUE) {
 			/*
 			 * Target requested pushing back the I/O.
 			 */
@@ -793,16 +794,16 @@ static void dec_pending(struct dm_io *io, int error)
 			bio_list_add_head(&md->deferred, io->bio);
 		else
 			/* noflush suspend was interrupted. */
-			io->error = -EIO;
+			io->status = BLK_STS_IOERR;
 		spin_unlock_irqrestore(&md->deferred_lock, flags);
 	}
 
-	io_error = io->error;
+	io_error = io->status;
 	bio = io->bio;
 	end_io_acct(io);
 	free_io(md, io);
 
-	if (io_error == DM_ENDIO_REQUEUE)
+	if (io_error == BLK_STS_DM_REQUEUE)
 		return;
 
 	if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
@@ -814,7 +815,7 @@ static void dec_pending(struct dm_io *io, int error)
 		queue_io(md, bio);
 	} else {
 		/* done with normal IO or empty flush */
-		bio->bi_error = io_error;
+		bio->bi_status = io_error;
 		bio_endio(bio);
 	}
 }
@@ -838,14 +839,13 @@ void disable_write_zeroes(struct mapped_device *md)
 
 static void clone_endio(struct bio *bio)
 {
-	int error = bio->bi_error;
-	int r = error;
+	blk_status_t error = bio->bi_status;
 	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
 	struct dm_io *io = tio->io;
 	struct mapped_device *md = tio->io->md;
 	dm_endio_fn endio = tio->ti->type->end_io;
 
-	if (unlikely(error == -EREMOTEIO)) {
+	if (unlikely(error == BLK_STS_TARGET)) {
 		if (bio_op(bio) == REQ_OP_WRITE_SAME &&
 		    !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors)
 			disable_write_same(md);
@@ -855,10 +855,10 @@ static void clone_endio(struct bio *bio)
 	}
 
 	if (endio) {
-		r = endio(tio->ti, bio, &error);
+		int r = endio(tio->ti, bio, &error);
 		switch (r) {
 		case DM_ENDIO_REQUEUE:
-			error = DM_ENDIO_REQUEUE;
+			error = BLK_STS_DM_REQUEUE;
 			/*FALLTHRU*/
 		case DM_ENDIO_DONE:
 			break;
@@ -1094,11 +1094,11 @@ static void __map_bio(struct dm_target_io *tio)
 		generic_make_request(clone);
 		break;
 	case DM_MAPIO_KILL:
-		r = -EIO;
-		/*FALLTHRU*/
+		dec_pending(tio->io, BLK_STS_IOERR);
+		free_tio(tio);
+		break;
 	case DM_MAPIO_REQUEUE:
-		/* error the io and bail out, or requeue it if needed */
-		dec_pending(tio->io, r);
+		dec_pending(tio->io, BLK_STS_DM_REQUEUE);
 		free_tio(tio);
 		break;
 	default:
@@ -1366,7 +1366,7 @@ static void __split_and_process_bio(struct mapped_device *md,
 	ci.map = map;
 	ci.md = md;
 	ci.io = alloc_io(md);
-	ci.io->error = 0;
+	ci.io->status = 0;
 	atomic_set(&ci.io->io_count, 1);
 	ci.io->bio = bio;
 	ci.io->md = md;
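
One design note on the dec_pending() hunk above worth spelling out: during a noflush suspend, a status of BLK_STS_DM_REQUEUE must survive errors reported later by other clones, which is what the "push-back supersedes any I/O errors" guard enforces. Below is a minimal self-contained sketch of that guard; record_io_status() is a hypothetical name for illustration, not a kernel function, and the BLK_STS_DM_REQUEUE value is an illustrative stand-in.

#include <stdbool.h>

typedef unsigned int blk_status_t;	/* illustrative stand-in for the kernel type */
#define BLK_STS_DM_REQUEUE 11u		/* illustrative value, not the kernel's */

/* Hypothetical helper mirroring the guarded store in dec_pending(). */
static void record_io_status(blk_status_t *status, blk_status_t error,
			     bool noflush_suspending)
{
	/* "Push-back supersedes any I/O errors": once the io is marked
	 * for requeue during a noflush suspend, a later clone error must
	 * not overwrite it, or the requeue request would be lost. */
	if (!(*status == BLK_STS_DM_REQUEUE && noflush_suspending))
		*status = error;
}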