about | summary | refs | log | tree | commit | diff | stats
path: root/drivers/md
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/md')
-rw-r--r--drivers/md/dm.c14
-rw-r--r--drivers/md/faulty.c14
-rw-r--r--drivers/md/linear.c17
-rw-r--r--drivers/md/md.c12
-rw-r--r--drivers/md/md.h2
-rw-r--r--drivers/md/multipath.c8
-rw-r--r--drivers/md/raid0.c22
-rw-r--r--drivers/md/raid1.c8
-rw-r--r--drivers/md/raid10.c19
-rw-r--r--drivers/md/raid5.c8
10 files changed, 53 insertions, 71 deletions
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 78b20868bcbc..7b986e77b75e 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1388,7 +1388,7 @@ out:
  * The request function that just remaps the bio built up by
  * dm_merge_bvec.
  */
-static int _dm_request(struct request_queue *q, struct bio *bio)
+static void _dm_request(struct request_queue *q, struct bio *bio)
 {
 	int rw = bio_data_dir(bio);
 	struct mapped_device *md = q->queuedata;
@@ -1409,12 +1409,12 @@ static int _dm_request(struct request_queue *q, struct bio *bio)
 			queue_io(md, bio);
 		else
 			bio_io_error(bio);
-		return 0;
+		return;
 	}

 	__split_and_process_bio(md, bio);
 	up_read(&md->io_lock);
-	return 0;
+	return;
 }

 static int dm_request_based(struct mapped_device *md)
@@ -1422,14 +1422,14 @@ static int dm_request_based(struct mapped_device *md)
 	return blk_queue_stackable(md->queue);
 }

-static int dm_request(struct request_queue *q, struct bio *bio)
+static void dm_request(struct request_queue *q, struct bio *bio)
 {
 	struct mapped_device *md = q->queuedata;

 	if (dm_request_based(md))
-		return blk_queue_bio(q, bio);
-
-	return _dm_request(q, bio);
+		blk_queue_bio(q, bio);
+	else
+		_dm_request(q, bio);
 }

 void dm_dispatch_request(struct request *rq)
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 23078dabb6df..5ef304d4341c 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -169,7 +169,7 @@ static void add_sector(conf_t *conf, sector_t start, int mode)
169 conf->nfaults = n+1; 169 conf->nfaults = n+1;
170} 170}
171 171
172static int make_request(mddev_t *mddev, struct bio *bio) 172static void make_request(mddev_t *mddev, struct bio *bio)
173{ 173{
174 conf_t *conf = mddev->private; 174 conf_t *conf = mddev->private;
175 int failit = 0; 175 int failit = 0;
@@ -181,7 +181,7 @@ static int make_request(mddev_t *mddev, struct bio *bio)
181 * just fail immediately 181 * just fail immediately
182 */ 182 */
183 bio_endio(bio, -EIO); 183 bio_endio(bio, -EIO);
184 return 0; 184 return;
185 } 185 }
186 186
187 if (check_sector(conf, bio->bi_sector, bio->bi_sector+(bio->bi_size>>9), 187 if (check_sector(conf, bio->bi_sector, bio->bi_sector+(bio->bi_size>>9),
@@ -211,15 +211,15 @@ static int make_request(mddev_t *mddev, struct bio *bio)
 	}
 	if (failit) {
 		struct bio *b = bio_clone_mddev(bio, GFP_NOIO, mddev);
+
 		b->bi_bdev = conf->rdev->bdev;
 		b->bi_private = bio;
 		b->bi_end_io = faulty_fail;
-		generic_make_request(b);
-		return 0;
-	} else {
+		bio = b;
+	} else
 		bio->bi_bdev = conf->rdev->bdev;
-		return 1;
-	}
+
+	generic_make_request(bio);
 }
224 224
225static void status(struct seq_file *seq, mddev_t *mddev) 225static void status(struct seq_file *seq, mddev_t *mddev)
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 6cd2c313e800..c6ee491d98e7 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -264,14 +264,14 @@ static int linear_stop (mddev_t *mddev)
264 return 0; 264 return 0;
265} 265}
266 266
267static int linear_make_request (mddev_t *mddev, struct bio *bio) 267static void linear_make_request (mddev_t *mddev, struct bio *bio)
268{ 268{
269 dev_info_t *tmp_dev; 269 dev_info_t *tmp_dev;
270 sector_t start_sector; 270 sector_t start_sector;
271 271
272 if (unlikely(bio->bi_rw & REQ_FLUSH)) { 272 if (unlikely(bio->bi_rw & REQ_FLUSH)) {
273 md_flush_request(mddev, bio); 273 md_flush_request(mddev, bio);
274 return 0; 274 return;
275 } 275 }
276 276
277 rcu_read_lock(); 277 rcu_read_lock();
@@ -293,7 +293,7 @@ static int linear_make_request (mddev_t *mddev, struct bio *bio)
293 (unsigned long long)start_sector); 293 (unsigned long long)start_sector);
294 rcu_read_unlock(); 294 rcu_read_unlock();
295 bio_io_error(bio); 295 bio_io_error(bio);
296 return 0; 296 return;
297 } 297 }
298 if (unlikely(bio->bi_sector + (bio->bi_size >> 9) > 298 if (unlikely(bio->bi_sector + (bio->bi_size >> 9) >
299 tmp_dev->end_sector)) { 299 tmp_dev->end_sector)) {
@@ -307,20 +307,17 @@ static int linear_make_request (mddev_t *mddev, struct bio *bio)

 		bp = bio_split(bio, end_sector - bio->bi_sector);

-		if (linear_make_request(mddev, &bp->bio1))
-			generic_make_request(&bp->bio1);
-		if (linear_make_request(mddev, &bp->bio2))
-			generic_make_request(&bp->bio2);
+		linear_make_request(mddev, &bp->bio1);
+		linear_make_request(mddev, &bp->bio2);
 		bio_pair_release(bp);
-		return 0;
+		return;
 	}

 	bio->bi_bdev = tmp_dev->rdev->bdev;
 	bio->bi_sector = bio->bi_sector - start_sector
 		+ tmp_dev->rdev->data_offset;
 	rcu_read_unlock();
-
-	return 1;
+	generic_make_request(bio);
 }
325 322
326static void linear_status (struct seq_file *seq, mddev_t *mddev) 323static void linear_status (struct seq_file *seq, mddev_t *mddev)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8e221a20f5d9..5c2178562c96 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -330,18 +330,17 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
330 * call has finished, the bio has been linked into some internal structure 330 * call has finished, the bio has been linked into some internal structure
331 * and so is visible to ->quiesce(), so we don't need the refcount any more. 331 * and so is visible to ->quiesce(), so we don't need the refcount any more.
332 */ 332 */
333static int md_make_request(struct request_queue *q, struct bio *bio) 333static void md_make_request(struct request_queue *q, struct bio *bio)
334{ 334{
335 const int rw = bio_data_dir(bio); 335 const int rw = bio_data_dir(bio);
336 mddev_t *mddev = q->queuedata; 336 mddev_t *mddev = q->queuedata;
337 int rv;
338 int cpu; 337 int cpu;
339 unsigned int sectors; 338 unsigned int sectors;
340 339
341 if (mddev == NULL || mddev->pers == NULL 340 if (mddev == NULL || mddev->pers == NULL
342 || !mddev->ready) { 341 || !mddev->ready) {
343 bio_io_error(bio); 342 bio_io_error(bio);
344 return 0; 343 return;
345 } 344 }
346 smp_rmb(); /* Ensure implications of 'active' are visible */ 345 smp_rmb(); /* Ensure implications of 'active' are visible */
347 rcu_read_lock(); 346 rcu_read_lock();
@@ -366,7 +365,7 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
366 * go away inside make_request 365 * go away inside make_request
367 */ 366 */
368 sectors = bio_sectors(bio); 367 sectors = bio_sectors(bio);
369 rv = mddev->pers->make_request(mddev, bio); 368 mddev->pers->make_request(mddev, bio);
370 369
371 cpu = part_stat_lock(); 370 cpu = part_stat_lock();
372 part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); 371 part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
@@ -375,8 +374,6 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
375 374
376 if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended) 375 if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
377 wake_up(&mddev->sb_wait); 376 wake_up(&mddev->sb_wait);
378
379 return rv;
380} 377}
381 378
382/* mddev_suspend makes sure no new requests are submitted 379/* mddev_suspend makes sure no new requests are submitted
@@ -475,8 +472,7 @@ static void md_submit_flush_data(struct work_struct *ws)
475 bio_endio(bio, 0); 472 bio_endio(bio, 0);
476 else { 473 else {
477 bio->bi_rw &= ~REQ_FLUSH; 474 bio->bi_rw &= ~REQ_FLUSH;
478 if (mddev->pers->make_request(mddev, bio)) 475 mddev->pers->make_request(mddev, bio);
479 generic_make_request(bio);
480 } 476 }
481 477
482 mddev->flush_bio = NULL; 478 mddev->flush_bio = NULL;
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 1e586bb4452e..bd47847cf7ca 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -424,7 +424,7 @@ struct mdk_personality
 	int level;
 	struct list_head list;
 	struct module *owner;
-	int (*make_request)(mddev_t *mddev, struct bio *bio);
+	void (*make_request)(mddev_t *mddev, struct bio *bio);
 	int (*run)(mddev_t *mddev);
 	int (*stop)(mddev_t *mddev);
 	void (*status)(struct seq_file *seq, mddev_t *mddev);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 3535c23af288..407cb5691425 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -106,7 +106,7 @@ static void multipath_end_request(struct bio *bio, int error)
106 rdev_dec_pending(rdev, conf->mddev); 106 rdev_dec_pending(rdev, conf->mddev);
107} 107}
108 108
109static int multipath_make_request(mddev_t *mddev, struct bio * bio) 109static void multipath_make_request(mddev_t *mddev, struct bio * bio)
110{ 110{
111 multipath_conf_t *conf = mddev->private; 111 multipath_conf_t *conf = mddev->private;
112 struct multipath_bh * mp_bh; 112 struct multipath_bh * mp_bh;
@@ -114,7 +114,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio)
114 114
115 if (unlikely(bio->bi_rw & REQ_FLUSH)) { 115 if (unlikely(bio->bi_rw & REQ_FLUSH)) {
116 md_flush_request(mddev, bio); 116 md_flush_request(mddev, bio);
117 return 0; 117 return;
118 } 118 }
119 119
120 mp_bh = mempool_alloc(conf->pool, GFP_NOIO); 120 mp_bh = mempool_alloc(conf->pool, GFP_NOIO);
@@ -126,7 +126,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio)
126 if (mp_bh->path < 0) { 126 if (mp_bh->path < 0) {
127 bio_endio(bio, -EIO); 127 bio_endio(bio, -EIO);
128 mempool_free(mp_bh, conf->pool); 128 mempool_free(mp_bh, conf->pool);
129 return 0; 129 return;
130 } 130 }
131 multipath = conf->multipaths + mp_bh->path; 131 multipath = conf->multipaths + mp_bh->path;
132 132
@@ -137,7 +137,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio)
137 mp_bh->bio.bi_end_io = multipath_end_request; 137 mp_bh->bio.bi_end_io = multipath_end_request;
138 mp_bh->bio.bi_private = mp_bh; 138 mp_bh->bio.bi_private = mp_bh;
139 generic_make_request(&mp_bh->bio); 139 generic_make_request(&mp_bh->bio);
140 return 0; 140 return;
141} 141}
142 142
143static void multipath_status (struct seq_file *seq, mddev_t *mddev) 143static void multipath_status (struct seq_file *seq, mddev_t *mddev)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index e86bf3682e1e..4066615d61af 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -466,7 +466,7 @@ static inline int is_io_in_chunk_boundary(mddev_t *mddev,
466 } 466 }
467} 467}
468 468
469static int raid0_make_request(mddev_t *mddev, struct bio *bio) 469static void raid0_make_request(mddev_t *mddev, struct bio *bio)
470{ 470{
471 unsigned int chunk_sects; 471 unsigned int chunk_sects;
472 sector_t sector_offset; 472 sector_t sector_offset;
@@ -475,7 +475,7 @@ static int raid0_make_request(mddev_t *mddev, struct bio *bio)
475 475
476 if (unlikely(bio->bi_rw & REQ_FLUSH)) { 476 if (unlikely(bio->bi_rw & REQ_FLUSH)) {
477 md_flush_request(mddev, bio); 477 md_flush_request(mddev, bio);
478 return 0; 478 return;
479 } 479 }
480 480
481 chunk_sects = mddev->chunk_sectors; 481 chunk_sects = mddev->chunk_sectors;
@@ -495,13 +495,10 @@ static int raid0_make_request(mddev_t *mddev, struct bio *bio)
495 else 495 else
496 bp = bio_split(bio, chunk_sects - 496 bp = bio_split(bio, chunk_sects -
497 sector_div(sector, chunk_sects)); 497 sector_div(sector, chunk_sects));
498 if (raid0_make_request(mddev, &bp->bio1)) 498 raid0_make_request(mddev, &bp->bio1);
499 generic_make_request(&bp->bio1); 499 raid0_make_request(mddev, &bp->bio2);
500 if (raid0_make_request(mddev, &bp->bio2))
501 generic_make_request(&bp->bio2);
502
503 bio_pair_release(bp); 500 bio_pair_release(bp);
504 return 0; 501 return;
505 } 502 }
506 503
507 sector_offset = bio->bi_sector; 504 sector_offset = bio->bi_sector;
@@ -511,10 +508,9 @@ static int raid0_make_request(mddev_t *mddev, struct bio *bio)
511 bio->bi_bdev = tmp_dev->bdev; 508 bio->bi_bdev = tmp_dev->bdev;
512 bio->bi_sector = sector_offset + zone->dev_start + 509 bio->bi_sector = sector_offset + zone->dev_start +
513 tmp_dev->data_offset; 510 tmp_dev->data_offset;
514 /* 511
515 * Let the main block layer submit the IO and resolve recursion: 512 generic_make_request(bio);
516 */ 513 return;
517 return 1;
518 514
519bad_map: 515bad_map:
520 printk("md/raid0:%s: make_request bug: can't convert block across chunks" 516 printk("md/raid0:%s: make_request bug: can't convert block across chunks"
@@ -523,7 +519,7 @@ bad_map:
523 (unsigned long long)bio->bi_sector, bio->bi_size >> 10); 519 (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
524 520
525 bio_io_error(bio); 521 bio_io_error(bio);
526 return 0; 522 return;
527} 523}
528 524
529static void raid0_status(struct seq_file *seq, mddev_t *mddev) 525static void raid0_status(struct seq_file *seq, mddev_t *mddev)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 32323f0afd89..97f2a5f977b1 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -785,7 +785,7 @@ do_sync_io:
785 PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size); 785 PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
786} 786}
787 787
788static int make_request(mddev_t *mddev, struct bio * bio) 788static void make_request(mddev_t *mddev, struct bio * bio)
789{ 789{
790 conf_t *conf = mddev->private; 790 conf_t *conf = mddev->private;
791 mirror_info_t *mirror; 791 mirror_info_t *mirror;
@@ -870,7 +870,7 @@ read_again:
870 if (rdisk < 0) { 870 if (rdisk < 0) {
871 /* couldn't find anywhere to read from */ 871 /* couldn't find anywhere to read from */
872 raid_end_bio_io(r1_bio); 872 raid_end_bio_io(r1_bio);
873 return 0; 873 return;
874 } 874 }
875 mirror = conf->mirrors + rdisk; 875 mirror = conf->mirrors + rdisk;
876 876
@@ -928,7 +928,7 @@ read_again:
928 goto read_again; 928 goto read_again;
929 } else 929 } else
930 generic_make_request(read_bio); 930 generic_make_request(read_bio);
931 return 0; 931 return;
932 } 932 }
933 933
934 /* 934 /*
@@ -1119,8 +1119,6 @@ read_again:
1119 1119
1120 if (do_sync || !bitmap || !plugged) 1120 if (do_sync || !bitmap || !plugged)
1121 md_wakeup_thread(mddev->thread); 1121 md_wakeup_thread(mddev->thread);
1122
1123 return 0;
1124} 1122}
1125 1123
1126static void status(struct seq_file *seq, mddev_t *mddev) 1124static void status(struct seq_file *seq, mddev_t *mddev)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 8b29cd4f01c8..04b625e1cb60 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -825,7 +825,7 @@ static void unfreeze_array(conf_t *conf)
825 spin_unlock_irq(&conf->resync_lock); 825 spin_unlock_irq(&conf->resync_lock);
826} 826}
827 827
828static int make_request(mddev_t *mddev, struct bio * bio) 828static void make_request(mddev_t *mddev, struct bio * bio)
829{ 829{
830 conf_t *conf = mddev->private; 830 conf_t *conf = mddev->private;
831 mirror_info_t *mirror; 831 mirror_info_t *mirror;
@@ -844,7 +844,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
844 844
845 if (unlikely(bio->bi_rw & REQ_FLUSH)) { 845 if (unlikely(bio->bi_rw & REQ_FLUSH)) {
846 md_flush_request(mddev, bio); 846 md_flush_request(mddev, bio);
847 return 0; 847 return;
848 } 848 }
849 849
850 /* If this request crosses a chunk boundary, we need to 850 /* If this request crosses a chunk boundary, we need to
@@ -876,10 +876,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
876 conf->nr_waiting++; 876 conf->nr_waiting++;
877 spin_unlock_irq(&conf->resync_lock); 877 spin_unlock_irq(&conf->resync_lock);
878 878
879 if (make_request(mddev, &bp->bio1)) 879 make_request(mddev, &bp->bio1);
880 generic_make_request(&bp->bio1); 880 make_request(mddev, &bp->bio2);
881 if (make_request(mddev, &bp->bio2))
882 generic_make_request(&bp->bio2);
883 881
884 spin_lock_irq(&conf->resync_lock); 882 spin_lock_irq(&conf->resync_lock);
885 conf->nr_waiting--; 883 conf->nr_waiting--;
@@ -887,14 +885,14 @@ static int make_request(mddev_t *mddev, struct bio * bio)
887 spin_unlock_irq(&conf->resync_lock); 885 spin_unlock_irq(&conf->resync_lock);
888 886
889 bio_pair_release(bp); 887 bio_pair_release(bp);
890 return 0; 888 return;
891 bad_map: 889 bad_map:
892 printk("md/raid10:%s: make_request bug: can't convert block across chunks" 890 printk("md/raid10:%s: make_request bug: can't convert block across chunks"
893 " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2, 891 " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
894 (unsigned long long)bio->bi_sector, bio->bi_size >> 10); 892 (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
895 893
896 bio_io_error(bio); 894 bio_io_error(bio);
897 return 0; 895 return;
898 } 896 }
899 897
900 md_write_start(mddev, bio); 898 md_write_start(mddev, bio);
@@ -937,7 +935,7 @@ read_again:
937 slot = r10_bio->read_slot; 935 slot = r10_bio->read_slot;
938 if (disk < 0) { 936 if (disk < 0) {
939 raid_end_bio_io(r10_bio); 937 raid_end_bio_io(r10_bio);
940 return 0; 938 return;
941 } 939 }
942 mirror = conf->mirrors + disk; 940 mirror = conf->mirrors + disk;
943 941
@@ -985,7 +983,7 @@ read_again:
985 goto read_again; 983 goto read_again;
986 } else 984 } else
987 generic_make_request(read_bio); 985 generic_make_request(read_bio);
988 return 0; 986 return;
989 } 987 }
990 988
991 /* 989 /*
@@ -1157,7 +1155,6 @@ retry_write:
1157 1155
1158 if (do_sync || !mddev->bitmap || !plugged) 1156 if (do_sync || !mddev->bitmap || !plugged)
1159 md_wakeup_thread(mddev->thread); 1157 md_wakeup_thread(mddev->thread);
1160 return 0;
1161} 1158}
1162 1159
1163static void status(struct seq_file *seq, mddev_t *mddev) 1160static void status(struct seq_file *seq, mddev_t *mddev)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index dbae459fb02d..96b7f6a1b6f2 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3695,7 +3695,7 @@ static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf)
3695 return sh; 3695 return sh;
3696} 3696}
3697 3697
3698static int make_request(mddev_t *mddev, struct bio * bi) 3698static void make_request(mddev_t *mddev, struct bio * bi)
3699{ 3699{
3700 raid5_conf_t *conf = mddev->private; 3700 raid5_conf_t *conf = mddev->private;
3701 int dd_idx; 3701 int dd_idx;
@@ -3708,7 +3708,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
3708 3708
3709 if (unlikely(bi->bi_rw & REQ_FLUSH)) { 3709 if (unlikely(bi->bi_rw & REQ_FLUSH)) {
3710 md_flush_request(mddev, bi); 3710 md_flush_request(mddev, bi);
3711 return 0; 3711 return;
3712 } 3712 }
3713 3713
3714 md_write_start(mddev, bi); 3714 md_write_start(mddev, bi);
@@ -3716,7 +3716,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
3716 if (rw == READ && 3716 if (rw == READ &&
3717 mddev->reshape_position == MaxSector && 3717 mddev->reshape_position == MaxSector &&
3718 chunk_aligned_read(mddev,bi)) 3718 chunk_aligned_read(mddev,bi))
3719 return 0; 3719 return;
3720 3720
3721 logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 3721 logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
3722 last_sector = bi->bi_sector + (bi->bi_size>>9); 3722 last_sector = bi->bi_sector + (bi->bi_size>>9);
@@ -3851,8 +3851,6 @@ static int make_request(mddev_t *mddev, struct bio * bi)
3851 3851
3852 bio_endio(bi, 0); 3852 bio_endio(bi, 0);
3853 } 3853 }
3854
3855 return 0;
3856} 3854}
3857 3855
3858static sector_t raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks); 3856static sector_t raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks);