Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-crypt.c    21
-rw-r--r--  drivers/md/dm-emc.c       5
-rw-r--r--  drivers/md/dm-io.c        8
-rw-r--r--  drivers/md/dm-mpath.c     4
-rw-r--r--  drivers/md/dm-raid1.c     4
-rw-r--r--  drivers/md/dm-snap.c      2
-rw-r--r--  drivers/md/dm-zero.c      2
-rw-r--r--  drivers/md/dm.c          18
-rw-r--r--  drivers/md/faulty.c      10
-rw-r--r--  drivers/md/linear.c       4
-rw-r--r--  drivers/md/md.c          25
-rw-r--r--  drivers/md/multipath.c   13
-rw-r--r--  drivers/md/raid0.c        4
-rw-r--r--  drivers/md/raid1.c       30
-rw-r--r--  drivers/md/raid10.c      31
-rw-r--r--  drivers/md/raid5.c       48
16 files changed, 73 insertions, 156 deletions
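
Every hunk below is the same mechanical conversion: bi_end_io callbacks drop the bytes_done argument and the int return value, the partial-completion check (if (bio->bi_size) return 1;) goes away, and callers of bio_endio()/bio_io_error() stop passing bio->bi_size, because completion now happens exactly once for the whole bio. A minimal sketch of the shape of that conversion, using a hypothetical driver callback (my_endio_old/new, struct my_request and finish_request are illustrative names, not taken from this patch):

/* Old convention: could be called for partial completions; returning 1
 * told the block layer this bio was not finished yet. */
static int my_endio_old(struct bio *bio, unsigned int bytes_done, int error)
{
	struct my_request *req = bio->bi_private;

	if (bio->bi_size)		/* more of the bio still outstanding */
		return 1;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;
	finish_request(req, error);	/* hypothetical helper */
	return 0;
}

/* New convention: called once, for the whole bio, with only the error code. */
static void my_endio_new(struct bio *bio, int error)
{
	struct my_request *req = bio->bi_private;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;
	finish_request(req, error);	/* hypothetical helper */
}
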
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index bdc52d6922b7..8216a6f75be5 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -489,7 +489,7 @@ static void dec_pending(struct dm_crypt_io *io, int error)
489 if (!atomic_dec_and_test(&io->pending)) 489 if (!atomic_dec_and_test(&io->pending))
490 return; 490 return;
491 491
492 bio_endio(io->base_bio, io->base_bio->bi_size, io->error); 492 bio_endio(io->base_bio, io->error);
493 493
494 mempool_free(io, cc->io_pool); 494 mempool_free(io, cc->io_pool);
495} 495}
@@ -509,25 +509,19 @@ static void kcryptd_queue_io(struct dm_crypt_io *io)
509 queue_work(_kcryptd_workqueue, &io->work); 509 queue_work(_kcryptd_workqueue, &io->work);
510} 510}
511 511
512static int crypt_endio(struct bio *clone, unsigned int done, int error) 512static void crypt_endio(struct bio *clone, int error)
513{ 513{
514 struct dm_crypt_io *io = clone->bi_private; 514 struct dm_crypt_io *io = clone->bi_private;
515 struct crypt_config *cc = io->target->private; 515 struct crypt_config *cc = io->target->private;
516 unsigned read_io = bio_data_dir(clone) == READ; 516 unsigned read_io = bio_data_dir(clone) == READ;
517 517
518 /* 518 /*
519 * free the processed pages, even if 519 * free the processed pages
520 * it's only a partially completed write
521 */ 520 */
522 if (!read_io) 521 if (!read_io) {
523 crypt_free_buffer_pages(cc, clone, done); 522 crypt_free_buffer_pages(cc, clone, clone->bi_size);
524
525 /* keep going - not finished yet */
526 if (unlikely(clone->bi_size))
527 return 1;
528
529 if (!read_io)
530 goto out; 523 goto out;
524 }
531 525
532 if (unlikely(!bio_flagged(clone, BIO_UPTODATE))) { 526 if (unlikely(!bio_flagged(clone, BIO_UPTODATE))) {
533 error = -EIO; 527 error = -EIO;
@@ -537,12 +531,11 @@ static int crypt_endio(struct bio *clone, unsigned int done, int error)
537 bio_put(clone); 531 bio_put(clone);
538 io->post_process = 1; 532 io->post_process = 1;
539 kcryptd_queue_io(io); 533 kcryptd_queue_io(io);
540 return 0; 534 return;
541 535
542out: 536out:
543 bio_put(clone); 537 bio_put(clone);
544 dec_pending(io, error); 538 dec_pending(io, error);
545 return error;
546} 539}
547 540
548static void clone_init(struct dm_crypt_io *io, struct bio *clone) 541static void clone_init(struct dm_crypt_io *io, struct bio *clone)
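
One behavioural point in the dm-crypt hunk above: the old crypt_endio could fire several times for a single write clone, freeing only the pages covered by the bytes already done, whereas the new one runs once and frees everything the clone carried. A condensed sketch of the new write-completion branch, reusing the fields visible above (cc, io, clone, error):

	/* write clone finished: all of its encrypted pages go back to the pool */
	if (bio_data_dir(clone) == WRITE) {
		crypt_free_buffer_pages(cc, clone, clone->bi_size);
		bio_put(clone);
		dec_pending(io, error);	/* ends io->base_bio once the pending count hits zero */
		return;
	}
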
diff --git a/drivers/md/dm-emc.c b/drivers/md/dm-emc.c
index 71cc858b7860..a2191a4fcf77 100644
--- a/drivers/md/dm-emc.c
+++ b/drivers/md/dm-emc.c
@@ -38,13 +38,10 @@ static inline void free_bio(struct bio *bio)
38 bio_put(bio); 38 bio_put(bio);
39} 39}
40 40
41static int emc_endio(struct bio *bio, unsigned int bytes_done, int error) 41static void emc_endio(struct bio *bio, int error)
42{ 42{
43 struct dm_path *path = bio->bi_private; 43 struct dm_path *path = bio->bi_private;
44 44
45 if (bio->bi_size)
46 return 1;
47
48 /* We also need to look at the sense keys here whether or not to 45 /* We also need to look at the sense keys here whether or not to
49 * switch to the next PG etc. 46 * switch to the next PG etc.
50 * 47 *
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index f3a772486437..b8e342fe7586 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -124,15 +124,11 @@ static void dec_count(struct io *io, unsigned int region, int error)
124 } 124 }
125} 125}
126 126
127static int endio(struct bio *bio, unsigned int done, int error) 127static void endio(struct bio *bio, int error)
128{ 128{
129 struct io *io; 129 struct io *io;
130 unsigned region; 130 unsigned region;
131 131
132 /* keep going until we've finished */
133 if (bio->bi_size)
134 return 1;
135
136 if (error && bio_data_dir(bio) == READ) 132 if (error && bio_data_dir(bio) == READ)
137 zero_fill_bio(bio); 133 zero_fill_bio(bio);
138 134
@@ -146,8 +142,6 @@ static int endio(struct bio *bio, unsigned int done, int error)
146 bio_put(bio); 142 bio_put(bio);
147 143
148 dec_count(io, region, error); 144 dec_count(io, region, error);
149
150 return 0;
151} 145}
152 146
153/*----------------------------------------------------------------- 147/*-----------------------------------------------------------------
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index d6ca9d0a6fd1..31056abca89d 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -390,11 +390,11 @@ static void dispatch_queued_ios(struct multipath *m)
390 390
391 r = map_io(m, bio, mpio, 1); 391 r = map_io(m, bio, mpio, 1);
392 if (r < 0) 392 if (r < 0)
393 bio_endio(bio, bio->bi_size, r); 393 bio_endio(bio, r);
394 else if (r == DM_MAPIO_REMAPPED) 394 else if (r == DM_MAPIO_REMAPPED)
395 generic_make_request(bio); 395 generic_make_request(bio);
396 else if (r == DM_MAPIO_REQUEUE) 396 else if (r == DM_MAPIO_REQUEUE)
397 bio_endio(bio, bio->bi_size, -EIO); 397 bio_endio(bio, -EIO);
398 398
399 bio = next; 399 bio = next;
400 } 400 }
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 144071e70a93..d09ff15490a5 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -820,7 +820,7 @@ static void write_callback(unsigned long error, void *context)
820 break; 820 break;
821 } 821 }
822 } 822 }
823 bio_endio(bio, bio->bi_size, 0); 823 bio_endio(bio, 0);
824} 824}
825 825
826static void do_write(struct mirror_set *ms, struct bio *bio) 826static void do_write(struct mirror_set *ms, struct bio *bio)
@@ -900,7 +900,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
900 */ 900 */
901 if (unlikely(ms->log_failure)) 901 if (unlikely(ms->log_failure))
902 while ((bio = bio_list_pop(&sync))) 902 while ((bio = bio_list_pop(&sync)))
903 bio_endio(bio, bio->bi_size, -EIO); 903 bio_endio(bio, -EIO);
904 else while ((bio = bio_list_pop(&sync))) 904 else while ((bio = bio_list_pop(&sync)))
905 do_write(ms, bio); 905 do_write(ms, bio);
906 906
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 83ddbfe6b8a4..98a633f3d6b0 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -636,7 +636,7 @@ static void error_bios(struct bio *bio)
636 while (bio) { 636 while (bio) {
637 n = bio->bi_next; 637 n = bio->bi_next;
638 bio->bi_next = NULL; 638 bio->bi_next = NULL;
639 bio_io_error(bio, bio->bi_size); 639 bio_io_error(bio);
640 bio = n; 640 bio = n;
641 } 641 }
642} 642}
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index f314d7dc9c26..bdec206c404b 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -43,7 +43,7 @@ static int zero_map(struct dm_target *ti, struct bio *bio,
43 break; 43 break;
44 } 44 }
45 45
46 bio_endio(bio, bio->bi_size, 0); 46 bio_endio(bio, 0);
47 47
48 /* accepted bio, don't make new request */ 48 /* accepted bio, don't make new request */
49 return DM_MAPIO_SUBMITTED; 49 return DM_MAPIO_SUBMITTED;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 2120155929a6..167765c47747 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -484,23 +484,20 @@ static void dec_pending(struct dm_io *io, int error)
484 blk_add_trace_bio(io->md->queue, io->bio, 484 blk_add_trace_bio(io->md->queue, io->bio,
485 BLK_TA_COMPLETE); 485 BLK_TA_COMPLETE);
486 486
487 bio_endio(io->bio, io->bio->bi_size, io->error); 487 bio_endio(io->bio, io->error);
488 } 488 }
489 489
490 free_io(io->md, io); 490 free_io(io->md, io);
491 } 491 }
492} 492}
493 493
494static int clone_endio(struct bio *bio, unsigned int done, int error) 494static void clone_endio(struct bio *bio, int error)
495{ 495{
496 int r = 0; 496 int r = 0;
497 struct dm_target_io *tio = bio->bi_private; 497 struct dm_target_io *tio = bio->bi_private;
498 struct mapped_device *md = tio->io->md; 498 struct mapped_device *md = tio->io->md;
499 dm_endio_fn endio = tio->ti->type->end_io; 499 dm_endio_fn endio = tio->ti->type->end_io;
500 500
501 if (bio->bi_size)
502 return 1;
503
504 if (!bio_flagged(bio, BIO_UPTODATE) && !error) 501 if (!bio_flagged(bio, BIO_UPTODATE) && !error)
505 error = -EIO; 502 error = -EIO;
506 503
@@ -514,7 +511,7 @@ static int clone_endio(struct bio *bio, unsigned int done, int error)
514 error = r; 511 error = r;
515 else if (r == DM_ENDIO_INCOMPLETE) 512 else if (r == DM_ENDIO_INCOMPLETE)
516 /* The target will handle the io */ 513 /* The target will handle the io */
517 return 1; 514 return;
518 else if (r) { 515 else if (r) {
519 DMWARN("unimplemented target endio return value: %d", r); 516 DMWARN("unimplemented target endio return value: %d", r);
520 BUG(); 517 BUG();
@@ -530,7 +527,6 @@ static int clone_endio(struct bio *bio, unsigned int done, int error)
530 527
531 bio_put(bio); 528 bio_put(bio);
532 free_tio(md, tio); 529 free_tio(md, tio);
533 return r;
534} 530}
535 531
536static sector_t max_io_len(struct mapped_device *md, 532static sector_t max_io_len(struct mapped_device *md,
@@ -761,7 +757,7 @@ static void __split_bio(struct mapped_device *md, struct bio *bio)
761 757
762 ci.map = dm_get_table(md); 758 ci.map = dm_get_table(md);
763 if (!ci.map) { 759 if (!ci.map) {
764 bio_io_error(bio, bio->bi_size); 760 bio_io_error(bio);
765 return; 761 return;
766 } 762 }
767 763
@@ -803,7 +799,7 @@ static int dm_request(struct request_queue *q, struct bio *bio)
803 * guarantee it is (or can be) handled by the targets correctly. 799 * guarantee it is (or can be) handled by the targets correctly.
804 */ 800 */
805 if (unlikely(bio_barrier(bio))) { 801 if (unlikely(bio_barrier(bio))) {
806 bio_endio(bio, bio->bi_size, -EOPNOTSUPP); 802 bio_endio(bio, -EOPNOTSUPP);
807 return 0; 803 return 0;
808 } 804 }
809 805
@@ -820,13 +816,13 @@ static int dm_request(struct request_queue *q, struct bio *bio)
820 up_read(&md->io_lock); 816 up_read(&md->io_lock);
821 817
822 if (bio_rw(bio) == READA) { 818 if (bio_rw(bio) == READA) {
823 bio_io_error(bio, bio->bi_size); 819 bio_io_error(bio);
824 return 0; 820 return 0;
825 } 821 }
826 822
827 r = queue_io(md, bio); 823 r = queue_io(md, bio);
828 if (r < 0) { 824 if (r < 0) {
829 bio_io_error(bio, bio->bi_size); 825 bio_io_error(bio);
830 return 0; 826 return 0;
831 827
832 } else if (r == 0) 828 } else if (r == 0)
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index cb059cf14c2e..cf2ddce34118 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -65,18 +65,16 @@
65#include <linux/raid/md.h> 65#include <linux/raid/md.h>
66 66
67 67
68static int faulty_fail(struct bio *bio, unsigned int bytes_done, int error) 68static void faulty_fail(struct bio *bio, int error)
69{ 69{
70 struct bio *b = bio->bi_private; 70 struct bio *b = bio->bi_private;
71 71
72 b->bi_size = bio->bi_size; 72 b->bi_size = bio->bi_size;
73 b->bi_sector = bio->bi_sector; 73 b->bi_sector = bio->bi_sector;
74 74
75 if (bio->bi_size == 0) 75 bio_put(bio);
76 bio_put(bio);
77 76
78 clear_bit(BIO_UPTODATE, &b->bi_flags); 77 bio_io_error(b);
79 return (b->bi_end_io)(b, bytes_done, -EIO);
80} 78}
81 79
82typedef struct faulty_conf { 80typedef struct faulty_conf {
@@ -179,7 +177,7 @@ static int make_request(struct request_queue *q, struct bio *bio)
179 /* special case - don't decrement, don't generic_make_request, 177 /* special case - don't decrement, don't generic_make_request,
180 * just fail immediately 178 * just fail immediately
181 */ 179 */
182 bio_endio(bio, bio->bi_size, -EIO); 180 bio_endio(bio, -EIO);
183 return 0; 181 return 0;
184 } 182 }
185 183
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 17f795c3e0ab..550148770bb2 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -338,7 +338,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
338 sector_t block; 338 sector_t block;
339 339
340 if (unlikely(bio_barrier(bio))) { 340 if (unlikely(bio_barrier(bio))) {
341 bio_endio(bio, bio->bi_size, -EOPNOTSUPP); 341 bio_endio(bio, -EOPNOTSUPP);
342 return 0; 342 return 0;
343 } 343 }
344 344
@@ -358,7 +358,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
358 bdevname(tmp_dev->rdev->bdev, b), 358 bdevname(tmp_dev->rdev->bdev, b),
359 (unsigned long long)tmp_dev->size, 359 (unsigned long long)tmp_dev->size,
360 (unsigned long long)tmp_dev->offset); 360 (unsigned long long)tmp_dev->offset);
361 bio_io_error(bio, bio->bi_size); 361 bio_io_error(bio);
362 return 0; 362 return 0;
363 } 363 }
364 if (unlikely(bio->bi_sector + (bio->bi_size >> 9) > 364 if (unlikely(bio->bi_sector + (bio->bi_size >> 9) >
diff --git a/drivers/md/md.c b/drivers/md/md.c
index f883b7e37f3d..e8f102ea9b03 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -213,7 +213,7 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
213 213
214static int md_fail_request (struct request_queue *q, struct bio *bio) 214static int md_fail_request (struct request_queue *q, struct bio *bio)
215{ 215{
216 bio_io_error(bio, bio->bi_size); 216 bio_io_error(bio);
217 return 0; 217 return 0;
218} 218}
219 219
@@ -384,12 +384,10 @@ static void free_disk_sb(mdk_rdev_t * rdev)
384} 384}
385 385
386 386
387static int super_written(struct bio *bio, unsigned int bytes_done, int error) 387static void super_written(struct bio *bio, int error)
388{ 388{
389 mdk_rdev_t *rdev = bio->bi_private; 389 mdk_rdev_t *rdev = bio->bi_private;
390 mddev_t *mddev = rdev->mddev; 390 mddev_t *mddev = rdev->mddev;
391 if (bio->bi_size)
392 return 1;
393 391
394 if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) { 392 if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
395 printk("md: super_written gets error=%d, uptodate=%d\n", 393 printk("md: super_written gets error=%d, uptodate=%d\n",
@@ -401,16 +399,13 @@ static int super_written(struct bio *bio, unsigned int bytes_done, int error)
401 if (atomic_dec_and_test(&mddev->pending_writes)) 399 if (atomic_dec_and_test(&mddev->pending_writes))
402 wake_up(&mddev->sb_wait); 400 wake_up(&mddev->sb_wait);
403 bio_put(bio); 401 bio_put(bio);
404 return 0;
405} 402}
406 403
407static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int error) 404static void super_written_barrier(struct bio *bio, int error)
408{ 405{
409 struct bio *bio2 = bio->bi_private; 406 struct bio *bio2 = bio->bi_private;
410 mdk_rdev_t *rdev = bio2->bi_private; 407 mdk_rdev_t *rdev = bio2->bi_private;
411 mddev_t *mddev = rdev->mddev; 408 mddev_t *mddev = rdev->mddev;
412 if (bio->bi_size)
413 return 1;
414 409
415 if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && 410 if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
416 error == -EOPNOTSUPP) { 411 error == -EOPNOTSUPP) {
@@ -424,11 +419,11 @@ static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int e
424 spin_unlock_irqrestore(&mddev->write_lock, flags); 419 spin_unlock_irqrestore(&mddev->write_lock, flags);
425 wake_up(&mddev->sb_wait); 420 wake_up(&mddev->sb_wait);
426 bio_put(bio); 421 bio_put(bio);
427 return 0; 422 } else {
423 bio_put(bio2);
424 bio->bi_private = rdev;
425 super_written(bio, error);
428 } 426 }
429 bio_put(bio2);
430 bio->bi_private = rdev;
431 return super_written(bio, bytes_done, error);
432} 427}
433 428
434void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, 429void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
@@ -489,13 +484,9 @@ void md_super_wait(mddev_t *mddev)
489 finish_wait(&mddev->sb_wait, &wq); 484 finish_wait(&mddev->sb_wait, &wq);
490} 485}
491 486
492static int bi_complete(struct bio *bio, unsigned int bytes_done, int error) 487static void bi_complete(struct bio *bio, int error)
493{ 488{
494 if (bio->bi_size)
495 return 1;
496
497 complete((struct completion*)bio->bi_private); 489 complete((struct completion*)bio->bi_private);
498 return 0;
499} 490}
500 491
501int sync_page_io(struct block_device *bdev, sector_t sector, int size, 492int sync_page_io(struct block_device *bdev, sector_t sector, int size,
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 1e2af43a73b9..f2a63f394ad9 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -82,21 +82,17 @@ static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err)
82 struct bio *bio = mp_bh->master_bio; 82 struct bio *bio = mp_bh->master_bio;
83 multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev); 83 multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev);
84 84
85 bio_endio(bio, bio->bi_size, err); 85 bio_endio(bio, err);
86 mempool_free(mp_bh, conf->pool); 86 mempool_free(mp_bh, conf->pool);
87} 87}
88 88
89static int multipath_end_request(struct bio *bio, unsigned int bytes_done, 89static void multipath_end_request(struct bio *bio, int error)
90 int error)
91{ 90{
92 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 91 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
93 struct multipath_bh * mp_bh = (struct multipath_bh *)(bio->bi_private); 92 struct multipath_bh * mp_bh = (struct multipath_bh *)(bio->bi_private);
94 multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev); 93 multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev);
95 mdk_rdev_t *rdev = conf->multipaths[mp_bh->path].rdev; 94 mdk_rdev_t *rdev = conf->multipaths[mp_bh->path].rdev;
96 95
97 if (bio->bi_size)
98 return 1;
99
100 if (uptodate) 96 if (uptodate)
101 multipath_end_bh_io(mp_bh, 0); 97 multipath_end_bh_io(mp_bh, 0);
102 else if (!bio_rw_ahead(bio)) { 98 else if (!bio_rw_ahead(bio)) {
@@ -112,7 +108,6 @@ static int multipath_end_request(struct bio *bio, unsigned int bytes_done,
112 } else 108 } else
113 multipath_end_bh_io(mp_bh, error); 109 multipath_end_bh_io(mp_bh, error);
114 rdev_dec_pending(rdev, conf->mddev); 110 rdev_dec_pending(rdev, conf->mddev);
115 return 0;
116} 111}
117 112
118static void unplug_slaves(mddev_t *mddev) 113static void unplug_slaves(mddev_t *mddev)
@@ -155,7 +150,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
155 const int rw = bio_data_dir(bio); 150 const int rw = bio_data_dir(bio);
156 151
157 if (unlikely(bio_barrier(bio))) { 152 if (unlikely(bio_barrier(bio))) {
158 bio_endio(bio, bio->bi_size, -EOPNOTSUPP); 153 bio_endio(bio, -EOPNOTSUPP);
159 return 0; 154 return 0;
160 } 155 }
161 156
@@ -169,7 +164,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
169 164
170 mp_bh->path = multipath_map(conf); 165 mp_bh->path = multipath_map(conf);
171 if (mp_bh->path < 0) { 166 if (mp_bh->path < 0) {
172 bio_endio(bio, bio->bi_size, -EIO); 167 bio_endio(bio, -EIO);
173 mempool_free(mp_bh, conf->pool); 168 mempool_free(mp_bh, conf->pool);
174 return 0; 169 return 0;
175 } 170 }
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index b8216bc6db45..ef0da2d84959 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -420,7 +420,7 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
420 const int rw = bio_data_dir(bio); 420 const int rw = bio_data_dir(bio);
421 421
422 if (unlikely(bio_barrier(bio))) { 422 if (unlikely(bio_barrier(bio))) {
423 bio_endio(bio, bio->bi_size, -EOPNOTSUPP); 423 bio_endio(bio, -EOPNOTSUPP);
424 return 0; 424 return 0;
425 } 425 }
426 426
@@ -490,7 +490,7 @@ bad_map:
490 " or bigger than %dk %llu %d\n", chunk_size, 490 " or bigger than %dk %llu %d\n", chunk_size,
491 (unsigned long long)bio->bi_sector, bio->bi_size >> 10); 491 (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
492 492
493 bio_io_error(bio, bio->bi_size); 493 bio_io_error(bio);
494 return 0; 494 return 0;
495} 495}
496 496
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index f33a729960ca..6d03bea6fa58 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -238,7 +238,7 @@ static void raid_end_bio_io(r1bio_t *r1_bio)
238 (unsigned long long) bio->bi_sector + 238 (unsigned long long) bio->bi_sector +
239 (bio->bi_size >> 9) - 1); 239 (bio->bi_size >> 9) - 1);
240 240
241 bio_endio(bio, bio->bi_size, 241 bio_endio(bio,
242 test_bit(R1BIO_Uptodate, &r1_bio->state) ? 0 : -EIO); 242 test_bit(R1BIO_Uptodate, &r1_bio->state) ? 0 : -EIO);
243 } 243 }
244 free_r1bio(r1_bio); 244 free_r1bio(r1_bio);
@@ -255,16 +255,13 @@ static inline void update_head_pos(int disk, r1bio_t *r1_bio)
255 r1_bio->sector + (r1_bio->sectors); 255 r1_bio->sector + (r1_bio->sectors);
256} 256}
257 257
258static int raid1_end_read_request(struct bio *bio, unsigned int bytes_done, int error) 258static void raid1_end_read_request(struct bio *bio, int error)
259{ 259{
260 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 260 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
261 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); 261 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
262 int mirror; 262 int mirror;
263 conf_t *conf = mddev_to_conf(r1_bio->mddev); 263 conf_t *conf = mddev_to_conf(r1_bio->mddev);
264 264
265 if (bio->bi_size)
266 return 1;
267
268 mirror = r1_bio->read_disk; 265 mirror = r1_bio->read_disk;
269 /* 266 /*
270 * this branch is our 'one mirror IO has finished' event handler: 267 * this branch is our 'one mirror IO has finished' event handler:
@@ -301,10 +298,9 @@ static int raid1_end_read_request(struct bio *bio, unsigned int bytes_done, int
301 } 298 }
302 299
303 rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev); 300 rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
304 return 0;
305} 301}
306 302
307static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int error) 303static void raid1_end_write_request(struct bio *bio, int error)
308{ 304{
309 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 305 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
310 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); 306 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
@@ -312,8 +308,6 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
312 conf_t *conf = mddev_to_conf(r1_bio->mddev); 308 conf_t *conf = mddev_to_conf(r1_bio->mddev);
313 struct bio *to_put = NULL; 309 struct bio *to_put = NULL;
314 310
315 if (bio->bi_size)
316 return 1;
317 311
318 for (mirror = 0; mirror < conf->raid_disks; mirror++) 312 for (mirror = 0; mirror < conf->raid_disks; mirror++)
319 if (r1_bio->bios[mirror] == bio) 313 if (r1_bio->bios[mirror] == bio)
@@ -366,7 +360,7 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
366 (unsigned long long) mbio->bi_sector, 360 (unsigned long long) mbio->bi_sector,
367 (unsigned long long) mbio->bi_sector + 361 (unsigned long long) mbio->bi_sector +
368 (mbio->bi_size >> 9) - 1); 362 (mbio->bi_size >> 9) - 1);
369 bio_endio(mbio, mbio->bi_size, 0); 363 bio_endio(mbio, 0);
370 } 364 }
371 } 365 }
372 } 366 }
@@ -400,8 +394,6 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
400 394
401 if (to_put) 395 if (to_put)
402 bio_put(to_put); 396 bio_put(to_put);
403
404 return 0;
405} 397}
406 398
407 399
@@ -796,7 +788,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
796 if (unlikely(!mddev->barriers_work && bio_barrier(bio))) { 788 if (unlikely(!mddev->barriers_work && bio_barrier(bio))) {
797 if (rw == WRITE) 789 if (rw == WRITE)
798 md_write_end(mddev); 790 md_write_end(mddev);
799 bio_endio(bio, bio->bi_size, -EOPNOTSUPP); 791 bio_endio(bio, -EOPNOTSUPP);
800 return 0; 792 return 0;
801 } 793 }
802 794
@@ -1137,14 +1129,11 @@ abort:
1137} 1129}
1138 1130
1139 1131
1140static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error) 1132static void end_sync_read(struct bio *bio, int error)
1141{ 1133{
1142 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); 1134 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
1143 int i; 1135 int i;
1144 1136
1145 if (bio->bi_size)
1146 return 1;
1147
1148 for (i=r1_bio->mddev->raid_disks; i--; ) 1137 for (i=r1_bio->mddev->raid_disks; i--; )
1149 if (r1_bio->bios[i] == bio) 1138 if (r1_bio->bios[i] == bio)
1150 break; 1139 break;
@@ -1160,10 +1149,9 @@ static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
1160 1149
1161 if (atomic_dec_and_test(&r1_bio->remaining)) 1150 if (atomic_dec_and_test(&r1_bio->remaining))
1162 reschedule_retry(r1_bio); 1151 reschedule_retry(r1_bio);
1163 return 0;
1164} 1152}
1165 1153
1166static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error) 1154static void end_sync_write(struct bio *bio, int error)
1167{ 1155{
1168 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 1156 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1169 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); 1157 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
@@ -1172,9 +1160,6 @@ static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
1172 int i; 1160 int i;
1173 int mirror=0; 1161 int mirror=0;
1174 1162
1175 if (bio->bi_size)
1176 return 1;
1177
1178 for (i = 0; i < conf->raid_disks; i++) 1163 for (i = 0; i < conf->raid_disks; i++)
1179 if (r1_bio->bios[i] == bio) { 1164 if (r1_bio->bios[i] == bio) {
1180 mirror = i; 1165 mirror = i;
@@ -1200,7 +1185,6 @@ static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
1200 md_done_sync(mddev, r1_bio->sectors, uptodate); 1185 md_done_sync(mddev, r1_bio->sectors, uptodate);
1201 put_buf(r1_bio); 1186 put_buf(r1_bio);
1202 } 1187 }
1203 return 0;
1204} 1188}
1205 1189
1206static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio) 1190static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 4e53792aa520..25a96c42bdb0 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -227,7 +227,7 @@ static void raid_end_bio_io(r10bio_t *r10_bio)
227{ 227{
228 struct bio *bio = r10_bio->master_bio; 228 struct bio *bio = r10_bio->master_bio;
229 229
230 bio_endio(bio, bio->bi_size, 230 bio_endio(bio,
231 test_bit(R10BIO_Uptodate, &r10_bio->state) ? 0 : -EIO); 231 test_bit(R10BIO_Uptodate, &r10_bio->state) ? 0 : -EIO);
232 free_r10bio(r10_bio); 232 free_r10bio(r10_bio);
233} 233}
@@ -243,15 +243,13 @@ static inline void update_head_pos(int slot, r10bio_t *r10_bio)
243 r10_bio->devs[slot].addr + (r10_bio->sectors); 243 r10_bio->devs[slot].addr + (r10_bio->sectors);
244} 244}
245 245
246static int raid10_end_read_request(struct bio *bio, unsigned int bytes_done, int error) 246static void raid10_end_read_request(struct bio *bio, int error)
247{ 247{
248 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 248 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
249 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private); 249 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
250 int slot, dev; 250 int slot, dev;
251 conf_t *conf = mddev_to_conf(r10_bio->mddev); 251 conf_t *conf = mddev_to_conf(r10_bio->mddev);
252 252
253 if (bio->bi_size)
254 return 1;
255 253
256 slot = r10_bio->read_slot; 254 slot = r10_bio->read_slot;
257 dev = r10_bio->devs[slot].devnum; 255 dev = r10_bio->devs[slot].devnum;
@@ -284,19 +282,15 @@ static int raid10_end_read_request(struct bio *bio, unsigned int bytes_done, int
284 } 282 }
285 283
286 rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev); 284 rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
287 return 0;
288} 285}
289 286
290static int raid10_end_write_request(struct bio *bio, unsigned int bytes_done, int error) 287static void raid10_end_write_request(struct bio *bio, int error)
291{ 288{
292 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 289 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
293 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private); 290 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
294 int slot, dev; 291 int slot, dev;
295 conf_t *conf = mddev_to_conf(r10_bio->mddev); 292 conf_t *conf = mddev_to_conf(r10_bio->mddev);
296 293
297 if (bio->bi_size)
298 return 1;
299
300 for (slot = 0; slot < conf->copies; slot++) 294 for (slot = 0; slot < conf->copies; slot++)
301 if (r10_bio->devs[slot].bio == bio) 295 if (r10_bio->devs[slot].bio == bio)
302 break; 296 break;
@@ -339,7 +333,6 @@ static int raid10_end_write_request(struct bio *bio, unsigned int bytes_done, in
339 } 333 }
340 334
341 rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev); 335 rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
342 return 0;
343} 336}
344 337
345 338
@@ -787,7 +780,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
787 unsigned long flags; 780 unsigned long flags;
788 781
789 if (unlikely(bio_barrier(bio))) { 782 if (unlikely(bio_barrier(bio))) {
790 bio_endio(bio, bio->bi_size, -EOPNOTSUPP); 783 bio_endio(bio, -EOPNOTSUPP);
791 return 0; 784 return 0;
792 } 785 }
793 786
@@ -819,7 +812,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
819 " or bigger than %dk %llu %d\n", chunk_sects/2, 812 " or bigger than %dk %llu %d\n", chunk_sects/2,
820 (unsigned long long)bio->bi_sector, bio->bi_size >> 10); 813 (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
821 814
822 bio_io_error(bio, bio->bi_size); 815 bio_io_error(bio);
823 return 0; 816 return 0;
824 } 817 }
825 818
@@ -1155,15 +1148,12 @@ abort:
1155} 1148}
1156 1149
1157 1150
1158static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error) 1151static void end_sync_read(struct bio *bio, int error)
1159{ 1152{
1160 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private); 1153 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
1161 conf_t *conf = mddev_to_conf(r10_bio->mddev); 1154 conf_t *conf = mddev_to_conf(r10_bio->mddev);
1162 int i,d; 1155 int i,d;
1163 1156
1164 if (bio->bi_size)
1165 return 1;
1166
1167 for (i=0; i<conf->copies; i++) 1157 for (i=0; i<conf->copies; i++)
1168 if (r10_bio->devs[i].bio == bio) 1158 if (r10_bio->devs[i].bio == bio)
1169 break; 1159 break;
@@ -1192,10 +1182,9 @@ static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
1192 reschedule_retry(r10_bio); 1182 reschedule_retry(r10_bio);
1193 } 1183 }
1194 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev); 1184 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
1195 return 0;
1196} 1185}
1197 1186
1198static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error) 1187static void end_sync_write(struct bio *bio, int error)
1199{ 1188{
1200 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 1189 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1201 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private); 1190 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
@@ -1203,9 +1192,6 @@ static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
1203 conf_t *conf = mddev_to_conf(mddev); 1192 conf_t *conf = mddev_to_conf(mddev);
1204 int i,d; 1193 int i,d;
1205 1194
1206 if (bio->bi_size)
1207 return 1;
1208
1209 for (i = 0; i < conf->copies; i++) 1195 for (i = 0; i < conf->copies; i++)
1210 if (r10_bio->devs[i].bio == bio) 1196 if (r10_bio->devs[i].bio == bio)
1211 break; 1197 break;
@@ -1228,7 +1214,6 @@ static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
1228 } 1214 }
1229 } 1215 }
1230 rdev_dec_pending(conf->mirrors[d].rdev, mddev); 1216 rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1231 return 0;
1232} 1217}
1233 1218
1234/* 1219/*
@@ -1374,7 +1359,7 @@ static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
1374 if (test_bit(R10BIO_Uptodate, &r10_bio->state)) 1359 if (test_bit(R10BIO_Uptodate, &r10_bio->state))
1375 generic_make_request(wbio); 1360 generic_make_request(wbio);
1376 else 1361 else
1377 bio_endio(wbio, wbio->bi_size, -EIO); 1362 bio_endio(wbio, -EIO);
1378} 1363}
1379 1364
1380 1365
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f96dea975fa5..caaca9e178bc 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -108,12 +108,11 @@ static void return_io(struct bio *return_bi)
108{ 108{
109 struct bio *bi = return_bi; 109 struct bio *bi = return_bi;
110 while (bi) { 110 while (bi) {
111 int bytes = bi->bi_size;
112 111
113 return_bi = bi->bi_next; 112 return_bi = bi->bi_next;
114 bi->bi_next = NULL; 113 bi->bi_next = NULL;
115 bi->bi_size = 0; 114 bi->bi_size = 0;
116 bi->bi_end_io(bi, bytes, 115 bi->bi_end_io(bi,
117 test_bit(BIO_UPTODATE, &bi->bi_flags) 116 test_bit(BIO_UPTODATE, &bi->bi_flags)
118 ? 0 : -EIO); 117 ? 0 : -EIO);
119 bi = return_bi; 118 bi = return_bi;
@@ -382,10 +381,10 @@ static unsigned long get_stripe_work(struct stripe_head *sh)
382 return pending; 381 return pending;
383} 382}
384 383
385static int 384static void
386raid5_end_read_request(struct bio *bi, unsigned int bytes_done, int error); 385raid5_end_read_request(struct bio *bi, int error);
387static int 386static void
388raid5_end_write_request (struct bio *bi, unsigned int bytes_done, int error); 387raid5_end_write_request(struct bio *bi, int error);
389 388
390static void ops_run_io(struct stripe_head *sh) 389static void ops_run_io(struct stripe_head *sh)
391{ 390{
@@ -1110,8 +1109,7 @@ static void shrink_stripes(raid5_conf_t *conf)
1110 conf->slab_cache = NULL; 1109 conf->slab_cache = NULL;
1111} 1110}
1112 1111
1113static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done, 1112static void raid5_end_read_request(struct bio * bi, int error)
1114 int error)
1115{ 1113{
1116 struct stripe_head *sh = bi->bi_private; 1114 struct stripe_head *sh = bi->bi_private;
1117 raid5_conf_t *conf = sh->raid_conf; 1115 raid5_conf_t *conf = sh->raid_conf;
@@ -1120,8 +1118,6 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
1120 char b[BDEVNAME_SIZE]; 1118 char b[BDEVNAME_SIZE];
1121 mdk_rdev_t *rdev; 1119 mdk_rdev_t *rdev;
1122 1120
1123 if (bi->bi_size)
1124 return 1;
1125 1121
1126 for (i=0 ; i<disks; i++) 1122 for (i=0 ; i<disks; i++)
1127 if (bi == &sh->dev[i].req) 1123 if (bi == &sh->dev[i].req)
@@ -1132,7 +1128,7 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
1132 uptodate); 1128 uptodate);
1133 if (i == disks) { 1129 if (i == disks) {
1134 BUG(); 1130 BUG();
1135 return 0; 1131 return;
1136 } 1132 }
1137 1133
1138 if (uptodate) { 1134 if (uptodate) {
@@ -1185,20 +1181,15 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
1185 clear_bit(R5_LOCKED, &sh->dev[i].flags); 1181 clear_bit(R5_LOCKED, &sh->dev[i].flags);
1186 set_bit(STRIPE_HANDLE, &sh->state); 1182 set_bit(STRIPE_HANDLE, &sh->state);
1187 release_stripe(sh); 1183 release_stripe(sh);
1188 return 0;
1189} 1184}
1190 1185
1191static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done, 1186static void raid5_end_write_request (struct bio *bi, int error)
1192 int error)
1193{ 1187{
1194 struct stripe_head *sh = bi->bi_private; 1188 struct stripe_head *sh = bi->bi_private;
1195 raid5_conf_t *conf = sh->raid_conf; 1189 raid5_conf_t *conf = sh->raid_conf;
1196 int disks = sh->disks, i; 1190 int disks = sh->disks, i;
1197 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 1191 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
1198 1192
1199 if (bi->bi_size)
1200 return 1;
1201
1202 for (i=0 ; i<disks; i++) 1193 for (i=0 ; i<disks; i++)
1203 if (bi == &sh->dev[i].req) 1194 if (bi == &sh->dev[i].req)
1204 break; 1195 break;
@@ -1208,7 +1199,7 @@ static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
1208 uptodate); 1199 uptodate);
1209 if (i == disks) { 1200 if (i == disks) {
1210 BUG(); 1201 BUG();
1211 return 0; 1202 return;
1212 } 1203 }
1213 1204
1214 if (!uptodate) 1205 if (!uptodate)
@@ -1219,7 +1210,6 @@ static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
1219 clear_bit(R5_LOCKED, &sh->dev[i].flags); 1210 clear_bit(R5_LOCKED, &sh->dev[i].flags);
1220 set_bit(STRIPE_HANDLE, &sh->state); 1211 set_bit(STRIPE_HANDLE, &sh->state);
1221 release_stripe(sh); 1212 release_stripe(sh);
1222 return 0;
1223} 1213}
1224 1214
1225 1215
@@ -3340,7 +3330,7 @@ static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
3340 * first). 3330 * first).
3341 * If the read failed.. 3331 * If the read failed..
3342 */ 3332 */
3343static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error) 3333static void raid5_align_endio(struct bio *bi, int error)
3344{ 3334{
3345 struct bio* raid_bi = bi->bi_private; 3335 struct bio* raid_bi = bi->bi_private;
3346 mddev_t *mddev; 3336 mddev_t *mddev;
@@ -3348,8 +3338,6 @@ static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
3348 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 3338 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
3349 mdk_rdev_t *rdev; 3339 mdk_rdev_t *rdev;
3350 3340
3351 if (bi->bi_size)
3352 return 1;
3353 bio_put(bi); 3341 bio_put(bi);
3354 3342
3355 mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata; 3343 mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata;
@@ -3360,17 +3348,16 @@ static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
3360 rdev_dec_pending(rdev, conf->mddev); 3348 rdev_dec_pending(rdev, conf->mddev);
3361 3349
3362 if (!error && uptodate) { 3350 if (!error && uptodate) {
3363 bio_endio(raid_bi, bytes, 0); 3351 bio_endio(raid_bi, 0);
3364 if (atomic_dec_and_test(&conf->active_aligned_reads)) 3352 if (atomic_dec_and_test(&conf->active_aligned_reads))
3365 wake_up(&conf->wait_for_stripe); 3353 wake_up(&conf->wait_for_stripe);
3366 return 0; 3354 return;
3367 } 3355 }
3368 3356
3369 3357
3370 pr_debug("raid5_align_endio : io error...handing IO for a retry\n"); 3358 pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
3371 3359
3372 add_bio_to_retry(raid_bi, conf); 3360 add_bio_to_retry(raid_bi, conf);
3373 return 0;
3374} 3361}
3375 3362
3376static int bio_fits_rdev(struct bio *bi) 3363static int bio_fits_rdev(struct bio *bi)
@@ -3476,7 +3463,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
3476 int remaining; 3463 int remaining;
3477 3464
3478 if (unlikely(bio_barrier(bi))) { 3465 if (unlikely(bio_barrier(bi))) {
3479 bio_endio(bi, bi->bi_size, -EOPNOTSUPP); 3466 bio_endio(bi, -EOPNOTSUPP);
3480 return 0; 3467 return 0;
3481 } 3468 }
3482 3469
@@ -3592,12 +3579,11 @@ static int make_request(struct request_queue *q, struct bio * bi)
3592 remaining = --bi->bi_phys_segments; 3579 remaining = --bi->bi_phys_segments;
3593 spin_unlock_irq(&conf->device_lock); 3580 spin_unlock_irq(&conf->device_lock);
3594 if (remaining == 0) { 3581 if (remaining == 0) {
3595 int bytes = bi->bi_size;
3596 3582
3597 if ( rw == WRITE ) 3583 if ( rw == WRITE )
3598 md_write_end(mddev); 3584 md_write_end(mddev);
3599 bi->bi_size = 0; 3585
3600 bi->bi_end_io(bi, bytes, 3586 bi->bi_end_io(bi,
3601 test_bit(BIO_UPTODATE, &bi->bi_flags) 3587 test_bit(BIO_UPTODATE, &bi->bi_flags)
3602 ? 0 : -EIO); 3588 ? 0 : -EIO);
3603 } 3589 }
@@ -3875,10 +3861,8 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
3875 remaining = --raid_bio->bi_phys_segments; 3861 remaining = --raid_bio->bi_phys_segments;
3876 spin_unlock_irq(&conf->device_lock); 3862 spin_unlock_irq(&conf->device_lock);
3877 if (remaining == 0) { 3863 if (remaining == 0) {
3878 int bytes = raid_bio->bi_size;
3879 3864
3880 raid_bio->bi_size = 0; 3865 raid_bio->bi_end_io(raid_bio,
3881 raid_bio->bi_end_io(raid_bio, bytes,
3882 test_bit(BIO_UPTODATE, &raid_bio->bi_flags) 3866 test_bit(BIO_UPTODATE, &raid_bio->bi_flags)
3883 ? 0 : -EIO); 3867 ? 0 : -EIO);
3884 } 3868 }
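
The raid5 hunks at the end show the caller's side of the change: code that completes a bio by hand no longer caches bi_size in a local, zeroes bi_size and passes the cached value back; it simply invokes the end_io callback with the error once the last outstanding piece finishes. A compact sketch of the new completion step, as in retry_aligned_read above (master is an illustrative name for the original bio):

	spin_lock_irq(&conf->device_lock);
	remaining = --master->bi_phys_segments;
	spin_unlock_irq(&conf->device_lock);
	if (remaining == 0)
		master->bi_end_io(master,
			bio_flagged(master, BIO_UPTODATE) ? 0 : -EIO);
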