diff options
author | Mikulas Patocka <mpatocka@redhat.com> | 2009-04-02 14:55:38 -0400 |
---|---|---|
committer | Alasdair G Kergon <agk@redhat.com> | 2009-04-02 14:55:38 -0400 |
commit | f0b9a4502baa18f8a255a2866bb4e0655fb35974 (patch) | |
tree | 28d45275783b68d608d632a84371aa6e625c08f5 /drivers/md | |
parent | 8a53c28db42853591edbe8103e2ce3c4f2917f42 (diff) |
dm: move bio_io_error into __split_and_process_bio
Move the bio_io_error() calls directly into __split_and_process_bio().
This avoids some code duplication in later patches.
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Diffstat (limited to 'drivers/md')
-rw-r--r-- | drivers/md/dm.c | 21 |
1 file changed, 10 insertions, 11 deletions
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 75d710493b7b..385c2e8f90c8 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -828,18 +828,20 @@ static int __clone_and_map(struct clone_info *ci) | |||
828 | /* | 828 | /* |
829 | * Split the bio into several clones and submit it to targets. | 829 | * Split the bio into several clones and submit it to targets. |
830 | */ | 830 | */ |
831 | static int __split_and_process_bio(struct mapped_device *md, struct bio *bio) | 831 | static void __split_and_process_bio(struct mapped_device *md, struct bio *bio) |
832 | { | 832 | { |
833 | struct clone_info ci; | 833 | struct clone_info ci; |
834 | int error = 0; | 834 | int error = 0; |
835 | 835 | ||
836 | ci.map = dm_get_table(md); | 836 | ci.map = dm_get_table(md); |
837 | if (unlikely(!ci.map)) | 837 | if (unlikely(!ci.map)) { |
838 | return -EIO; | 838 | bio_io_error(bio); |
839 | return; | ||
840 | } | ||
839 | if (unlikely(bio_barrier(bio) && !dm_table_barrier_ok(ci.map))) { | 841 | if (unlikely(bio_barrier(bio) && !dm_table_barrier_ok(ci.map))) { |
840 | dm_table_put(ci.map); | 842 | dm_table_put(ci.map); |
841 | bio_endio(bio, -EOPNOTSUPP); | 843 | bio_endio(bio, -EOPNOTSUPP); |
842 | return 0; | 844 | return; |
843 | } | 845 | } |
844 | ci.md = md; | 846 | ci.md = md; |
845 | ci.bio = bio; | 847 | ci.bio = bio; |
@@ -859,8 +861,6 @@ static int __split_and_process_bio(struct mapped_device *md, struct bio *bio) | |||
859 | /* drop the extra reference count */ | 861 | /* drop the extra reference count */ |
860 | dec_pending(ci.io, error); | 862 | dec_pending(ci.io, error); |
861 | dm_table_put(ci.map); | 863 | dm_table_put(ci.map); |
862 | |||
863 | return 0; | ||
864 | } | 864 | } |
865 | /*----------------------------------------------------------------- | 865 | /*----------------------------------------------------------------- |
866 | * CRUD END | 866 | * CRUD END |
@@ -951,8 +951,9 @@ static int dm_request(struct request_queue *q, struct bio *bio) | |||
951 | down_read(&md->io_lock); | 951 | down_read(&md->io_lock); |
952 | } | 952 | } |
953 | 953 | ||
954 | r = __split_and_process_bio(md, bio); | 954 | __split_and_process_bio(md, bio); |
955 | up_read(&md->io_lock); | 955 | up_read(&md->io_lock); |
956 | return 0; | ||
956 | 957 | ||
957 | out_req: | 958 | out_req: |
958 | if (r < 0) | 959 | if (r < 0) |
@@ -1404,10 +1405,8 @@ static void __flush_deferred_io(struct mapped_device *md) | |||
1404 | { | 1405 | { |
1405 | struct bio *c; | 1406 | struct bio *c; |
1406 | 1407 | ||
1407 | while ((c = bio_list_pop(&md->deferred))) { | 1408 | while ((c = bio_list_pop(&md->deferred))) |
1408 | if (__split_and_process_bio(md, c)) | 1409 | __split_and_process_bio(md, c); |
1409 | bio_io_error(c); | ||
1410 | } | ||
1411 | 1410 | ||
1412 | clear_bit(DMF_BLOCK_IO, &md->flags); | 1411 | clear_bit(DMF_BLOCK_IO, &md->flags); |
1413 | } | 1412 | } |