author     Linus Torvalds <torvalds@linux-foundation.org>   2011-11-04 20:06:58 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2011-11-04 20:06:58 -0400
commit     b4fdcb02f1e39c27058a885905bd0277370ba441 (patch)
tree       fd4cfd1994f21f44afe5e7904681fb5ac09f81b8 /drivers/md
parent     044595d4e448305fbaec472eb7d22636d24e7d8c (diff)
parent     6dd9ad7df2019b1e33a372a501907db293ebcd0d (diff)
Merge branch 'for-3.2/core' of git://git.kernel.dk/linux-block
* 'for-3.2/core' of git://git.kernel.dk/linux-block: (29 commits)
  block: don't call blk_drain_queue() if elevator is not up
  blk-throttle: use queue_is_locked() instead of lockdep_is_held()
  blk-throttle: Take blkcg->lock while traversing blkcg->policy_list
  blk-throttle: Free up policy node associated with deleted rule
  block: warn if tag is greater than real_max_depth.
  block: make gendisk hold a reference to its queue
  blk-flush: move the queue kick into
  blk-flush: fix invalid BUG_ON in blk_insert_flush
  block: Remove the control of complete cpu from bio.
  block: fix a typo in the blk-cgroup.h file
  block: initialize the bounce pool if high memory may be added later
  block: fix request_queue lifetime handling by making blk_queue_cleanup() properly shutdown
  block: drop @tsk from attempt_plug_merge() and explain sync rules
  block: make get_request[_wait]() fail if queue is dead
  block: reorganize throtl_get_tg() and blk_throtl_bio()
  block: reorganize queue draining
  block: drop unnecessary blk_get/put_queue() in scsi_cmd_ioctl() and blk_get_tg()
  block: pass around REQ_* flags instead of broken down booleans during request alloc/free
  block: move blk_throtl prototypes to block/blk.h
  block: fix genhd refcounting in blkio_policy_parse_and_set()
  ...

Fix up trivial conflicts due to "mddev_t" -> "struct mddev" conversion and
making the request functions be of type "void" instead of "int" in
 - drivers/md/{faulty.c,linear.c,md.c,md.h,multipath.c,raid0.c,raid1.c,raid10.c,raid5.c}
 - drivers/staging/zram/zram_drv.c
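Every drivers/md hunk below follows the same conversion the conflict note mentions: ->make_request() used to return an int, where a non-zero value asked the caller to resubmit the remapped bio, and now it returns void and the driver submits the remapped bio itself via generic_make_request(). The sketch below is an illustration only, not code from this diff; the mydev_* and pick_target_bdev() names are hypothetical, and it is a kernel-style fragment rather than a standalone build.

/* Minimal sketch of the ->make_request() signature conversion in this merge.
 * mydev_* and pick_target_bdev() are made-up names; the real drivers follow. */

/* Before: a non-zero return value asked the caller to resubmit the
 * (now remapped) bio with generic_make_request(). */
static int mydev_make_request_old(struct mddev *mddev, struct bio *bio)
{
        bio->bi_bdev = pick_target_bdev(mddev, bio);    /* remap the bio */
        return 1;                       /* caller resubmits the bio */
}

/* After: the function is void; the driver remaps and submits in one place,
 * and error paths complete the bio themselves (bio_io_error()) and return. */
static void mydev_make_request_new(struct mddev *mddev, struct bio *bio)
{
        bio->bi_bdev = pick_target_bdev(mddev, bio);    /* remap the bio */
        generic_make_request(bio);      /* driver submits directly */
}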
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm.c         25
-rw-r--r--  drivers/md/faulty.c     14
-rw-r--r--  drivers/md/linear.c     17
-rw-r--r--  drivers/md/md.c         12
-rw-r--r--  drivers/md/md.h          2
-rw-r--r--  drivers/md/multipath.c   8
-rw-r--r--  drivers/md/raid0.c      22
-rw-r--r--  drivers/md/raid1.c       9
-rw-r--r--  drivers/md/raid10.c     19
-rw-r--r--  drivers/md/raid5.c       8
10 files changed, 53 insertions, 83 deletions
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6b6616a41baa..4720f68f817e 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -192,9 +192,6 @@ struct mapped_device {
         /* forced geometry settings */
         struct hd_geometry geometry;
 
-        /* For saving the address of __make_request for request based dm */
-        make_request_fn *saved_make_request_fn;
-
         /* sysfs handle */
         struct kobject kobj;
 
@@ -1403,7 +1400,7 @@ out:
  * The request function that just remaps the bio built up by
  * dm_merge_bvec.
  */
-static int _dm_request(struct request_queue *q, struct bio *bio)
+static void _dm_request(struct request_queue *q, struct bio *bio)
 {
         int rw = bio_data_dir(bio);
         struct mapped_device *md = q->queuedata;
@@ -1424,19 +1421,12 @@ static int _dm_request(struct request_queue *q, struct bio *bio)
                         queue_io(md, bio);
                 else
                         bio_io_error(bio);
-                return 0;
+                return;
         }
 
         __split_and_process_bio(md, bio);
         up_read(&md->io_lock);
-        return 0;
-}
-
-static int dm_make_request(struct request_queue *q, struct bio *bio)
-{
-        struct mapped_device *md = q->queuedata;
-
-        return md->saved_make_request_fn(q, bio); /* call __make_request() */
+        return;
 }
 
 static int dm_request_based(struct mapped_device *md)
@@ -1444,14 +1434,14 @@ static int dm_request_based(struct mapped_device *md)
         return blk_queue_stackable(md->queue);
 }
 
-static int dm_request(struct request_queue *q, struct bio *bio)
+static void dm_request(struct request_queue *q, struct bio *bio)
 {
         struct mapped_device *md = q->queuedata;
 
         if (dm_request_based(md))
-                return dm_make_request(q, bio);
-
-        return _dm_request(q, bio);
+                blk_queue_bio(q, bio);
+        else
+                _dm_request(q, bio);
 }
 
 void dm_dispatch_request(struct request *rq)
@@ -2191,7 +2181,6 @@ static int dm_init_request_based_queue(struct mapped_device *md)
                 return 0;
 
         md->queue = q;
-        md->saved_make_request_fn = md->queue->make_request_fn;
         dm_init_md_queue(md);
         blk_queue_softirq_done(md->queue, dm_softirq_done);
         blk_queue_prep_rq(md->queue, dm_prep_fn);
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 60816b132c2e..918fb8ac6607 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -169,7 +169,7 @@ static void add_sector(struct faulty_conf *conf, sector_t start, int mode)
         conf->nfaults = n+1;
 }
 
-static int make_request(struct mddev *mddev, struct bio *bio)
+static void make_request(struct mddev *mddev, struct bio *bio)
 {
         struct faulty_conf *conf = mddev->private;
         int failit = 0;
@@ -181,7 +181,7 @@ static int make_request(struct mddev *mddev, struct bio *bio)
                  * just fail immediately
                  */
                 bio_endio(bio, -EIO);
-                return 0;
+                return;
         }
 
         if (check_sector(conf, bio->bi_sector, bio->bi_sector+(bio->bi_size>>9),
@@ -211,15 +211,15 @@ static int make_request(struct mddev *mddev, struct bio *bio)
         }
         if (failit) {
                 struct bio *b = bio_clone_mddev(bio, GFP_NOIO, mddev);
+
                 b->bi_bdev = conf->rdev->bdev;
                 b->bi_private = bio;
                 b->bi_end_io = faulty_fail;
-                generic_make_request(b);
-                return 0;
-        } else {
+                bio = b;
+        } else
                 bio->bi_bdev = conf->rdev->bdev;
-                return 1;
-        }
+
+        generic_make_request(bio);
 }
 
 static void status(struct seq_file *seq, struct mddev *mddev)
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 10c5844460cb..a82035867519 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -264,14 +264,14 @@ static int linear_stop (struct mddev *mddev)
         return 0;
 }
 
-static int linear_make_request (struct mddev *mddev, struct bio *bio)
+static void linear_make_request(struct mddev *mddev, struct bio *bio)
 {
         struct dev_info *tmp_dev;
         sector_t start_sector;
 
         if (unlikely(bio->bi_rw & REQ_FLUSH)) {
                 md_flush_request(mddev, bio);
-                return 0;
+                return;
         }
 
         rcu_read_lock();
@@ -293,7 +293,7 @@ static int linear_make_request (struct mddev *mddev, struct bio *bio)
                        (unsigned long long)start_sector);
                 rcu_read_unlock();
                 bio_io_error(bio);
-                return 0;
+                return;
         }
         if (unlikely(bio->bi_sector + (bio->bi_size >> 9) >
                      tmp_dev->end_sector)) {
@@ -307,20 +307,17 @@ static int linear_make_request (struct mddev *mddev, struct bio *bio)
 
                 bp = bio_split(bio, end_sector - bio->bi_sector);
 
-                if (linear_make_request(mddev, &bp->bio1))
-                        generic_make_request(&bp->bio1);
-                if (linear_make_request(mddev, &bp->bio2))
-                        generic_make_request(&bp->bio2);
+                linear_make_request(mddev, &bp->bio1);
+                linear_make_request(mddev, &bp->bio2);
                 bio_pair_release(bp);
-                return 0;
+                return;
         }
 
         bio->bi_bdev = tmp_dev->rdev->bdev;
         bio->bi_sector = bio->bi_sector - start_sector
                 + tmp_dev->rdev->data_offset;
         rcu_read_unlock();
-
-        return 1;
+        generic_make_request(bio);
 }
 
 static void linear_status (struct seq_file *seq, struct mddev *mddev)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 266e82ebaf11..2acb32827fde 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -332,18 +332,17 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
  * call has finished, the bio has been linked into some internal structure
  * and so is visible to ->quiesce(), so we don't need the refcount any more.
  */
-static int md_make_request(struct request_queue *q, struct bio *bio)
+static void md_make_request(struct request_queue *q, struct bio *bio)
 {
         const int rw = bio_data_dir(bio);
         struct mddev *mddev = q->queuedata;
-        int rv;
         int cpu;
         unsigned int sectors;
 
         if (mddev == NULL || mddev->pers == NULL
             || !mddev->ready) {
                 bio_io_error(bio);
-                return 0;
+                return;
         }
         smp_rmb(); /* Ensure implications of 'active' are visible */
         rcu_read_lock();
@@ -368,7 +367,7 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
          * go away inside make_request
          */
         sectors = bio_sectors(bio);
-        rv = mddev->pers->make_request(mddev, bio);
+        mddev->pers->make_request(mddev, bio);
 
         cpu = part_stat_lock();
         part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
@@ -377,8 +376,6 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
 
         if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
                 wake_up(&mddev->sb_wait);
-
-        return rv;
 }
 
 /* mddev_suspend makes sure no new requests are submitted
@@ -477,8 +474,7 @@ static void md_submit_flush_data(struct work_struct *ws)
                 bio_endio(bio, 0);
         else {
                 bio->bi_rw &= ~REQ_FLUSH;
-                if (mddev->pers->make_request(mddev, bio))
-                        generic_make_request(bio);
+                mddev->pers->make_request(mddev, bio);
         }
 
         mddev->flush_bio = NULL;
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 51c1d91557e0..cf742d9306ec 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -419,7 +419,7 @@ struct md_personality
         int level;
         struct list_head list;
         struct module *owner;
-        int (*make_request)(struct mddev *mddev, struct bio *bio);
+        void (*make_request)(struct mddev *mddev, struct bio *bio);
         int (*run)(struct mddev *mddev);
         int (*stop)(struct mddev *mddev);
         void (*status)(struct seq_file *seq, struct mddev *mddev);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index d32c785e17d4..ad20a28fbf2a 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -106,7 +106,7 @@ static void multipath_end_request(struct bio *bio, int error)
         rdev_dec_pending(rdev, conf->mddev);
 }
 
-static int multipath_make_request(struct mddev *mddev, struct bio * bio)
+static void multipath_make_request(struct mddev *mddev, struct bio * bio)
 {
         struct mpconf *conf = mddev->private;
         struct multipath_bh * mp_bh;
@@ -114,7 +114,7 @@ static int multipath_make_request(struct mddev *mddev, struct bio * bio)
 
         if (unlikely(bio->bi_rw & REQ_FLUSH)) {
                 md_flush_request(mddev, bio);
-                return 0;
+                return;
         }
 
         mp_bh = mempool_alloc(conf->pool, GFP_NOIO);
@@ -126,7 +126,7 @@ static int multipath_make_request(struct mddev *mddev, struct bio * bio)
         if (mp_bh->path < 0) {
                 bio_endio(bio, -EIO);
                 mempool_free(mp_bh, conf->pool);
-                return 0;
+                return;
         }
         multipath = conf->multipaths + mp_bh->path;
 
@@ -137,7 +137,7 @@ static int multipath_make_request(struct mddev *mddev, struct bio * bio)
         mp_bh->bio.bi_end_io = multipath_end_request;
         mp_bh->bio.bi_private = mp_bh;
         generic_make_request(&mp_bh->bio);
-        return 0;
+        return;
 }
 
 static void multipath_status (struct seq_file *seq, struct mddev *mddev)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 0eb08a4df759..27e19e2b51d4 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -468,7 +468,7 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev,
         }
 }
 
-static int raid0_make_request(struct mddev *mddev, struct bio *bio)
+static void raid0_make_request(struct mddev *mddev, struct bio *bio)
 {
         unsigned int chunk_sects;
         sector_t sector_offset;
@@ -477,7 +477,7 @@ static int raid0_make_request(struct mddev *mddev, struct bio *bio)
 
         if (unlikely(bio->bi_rw & REQ_FLUSH)) {
                 md_flush_request(mddev, bio);
-                return 0;
+                return;
         }
 
         chunk_sects = mddev->chunk_sectors;
@@ -497,13 +497,10 @@ static int raid0_make_request(struct mddev *mddev, struct bio *bio)
                 else
                         bp = bio_split(bio, chunk_sects -
                                        sector_div(sector, chunk_sects));
-                if (raid0_make_request(mddev, &bp->bio1))
-                        generic_make_request(&bp->bio1);
-                if (raid0_make_request(mddev, &bp->bio2))
-                        generic_make_request(&bp->bio2);
-
+                raid0_make_request(mddev, &bp->bio1);
+                raid0_make_request(mddev, &bp->bio2);
                 bio_pair_release(bp);
-                return 0;
+                return;
         }
 
         sector_offset = bio->bi_sector;
@@ -513,10 +510,9 @@ static int raid0_make_request(struct mddev *mddev, struct bio *bio)
         bio->bi_bdev = tmp_dev->bdev;
         bio->bi_sector = sector_offset + zone->dev_start +
                 tmp_dev->data_offset;
-        /*
-         * Let the main block layer submit the IO and resolve recursion:
-         */
-        return 1;
+
+        generic_make_request(bio);
+        return;
 
 bad_map:
         printk("md/raid0:%s: make_request bug: can't convert block across chunks"
@@ -525,7 +521,7 @@ bad_map:
                (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
 
         bio_io_error(bio);
-        return 0;
+        return;
 }
 
 static void raid0_status(struct seq_file *seq, struct mddev *mddev)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 4602fc57c961..cae874646d9e 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -807,7 +807,7 @@ do_sync_io:
         pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
 }
 
-static int make_request(struct mddev *mddev, struct bio * bio)
+static void make_request(struct mddev *mddev, struct bio * bio)
 {
         struct r1conf *conf = mddev->private;
         struct mirror_info *mirror;
@@ -892,7 +892,7 @@ read_again:
                 if (rdisk < 0) {
                         /* couldn't find anywhere to read from */
                         raid_end_bio_io(r1_bio);
-                        return 0;
+                        return;
                 }
                 mirror = conf->mirrors + rdisk;
 
@@ -950,7 +950,7 @@ read_again:
                         goto read_again;
                 } else
                         generic_make_request(read_bio);
-                return 0;
+                return;
         }
 
         /*
@@ -1151,8 +1151,6 @@ read_again:
 
         if (do_sync || !bitmap || !plugged)
                 md_wakeup_thread(mddev->thread);
-
-        return 0;
 }
 
 static void status(struct seq_file *seq, struct mddev *mddev)
@@ -2193,7 +2191,6 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
                 bio->bi_next = NULL;
                 bio->bi_flags &= ~(BIO_POOL_MASK-1);
                 bio->bi_flags |= 1 << BIO_UPTODATE;
-                bio->bi_comp_cpu = -1;
                 bio->bi_rw = READ;
                 bio->bi_vcnt = 0;
                 bio->bi_idx = 0;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c025a8276dc1..dde6dd4b47ec 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -842,7 +842,7 @@ static void unfreeze_array(struct r10conf *conf)
         spin_unlock_irq(&conf->resync_lock);
 }
 
-static int make_request(struct mddev *mddev, struct bio * bio)
+static void make_request(struct mddev *mddev, struct bio * bio)
 {
         struct r10conf *conf = mddev->private;
         struct mirror_info *mirror;
@@ -861,7 +861,7 @@ static int make_request(struct mddev *mddev, struct bio * bio)
 
         if (unlikely(bio->bi_rw & REQ_FLUSH)) {
                 md_flush_request(mddev, bio);
-                return 0;
+                return;
         }
 
         /* If this request crosses a chunk boundary, we need to
@@ -893,10 +893,8 @@ static int make_request(struct mddev *mddev, struct bio * bio)
                 conf->nr_waiting++;
                 spin_unlock_irq(&conf->resync_lock);
 
-                if (make_request(mddev, &bp->bio1))
-                        generic_make_request(&bp->bio1);
-                if (make_request(mddev, &bp->bio2))
-                        generic_make_request(&bp->bio2);
+                make_request(mddev, &bp->bio1);
+                make_request(mddev, &bp->bio2);
 
                 spin_lock_irq(&conf->resync_lock);
                 conf->nr_waiting--;
@@ -904,14 +902,14 @@ static int make_request(struct mddev *mddev, struct bio * bio)
                 spin_unlock_irq(&conf->resync_lock);
 
                 bio_pair_release(bp);
-                return 0;
+                return;
         bad_map:
                 printk("md/raid10:%s: make_request bug: can't convert block across chunks"
                        " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
                        (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
 
                 bio_io_error(bio);
-                return 0;
+                return;
         }
 
         md_write_start(mddev, bio);
@@ -954,7 +952,7 @@ read_again:
                 slot = r10_bio->read_slot;
                 if (disk < 0) {
                         raid_end_bio_io(r10_bio);
-                        return 0;
+                        return;
                 }
                 mirror = conf->mirrors + disk;
 
@@ -1002,7 +1000,7 @@ read_again:
                         goto read_again;
                 } else
                         generic_make_request(read_bio);
-                return 0;
+                return;
         }
 
         /*
@@ -1176,7 +1174,6 @@ retry_write:
 
         if (do_sync || !mddev->bitmap || !plugged)
                 md_wakeup_thread(mddev->thread);
-        return 0;
 }
 
 static void status(struct seq_file *seq, struct mddev *mddev)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f6fe053a5bed..bb1b46143fb6 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3688,7 +3688,7 @@ static struct stripe_head *__get_priority_stripe(struct r5conf *conf)
         return sh;
 }
 
-static int make_request(struct mddev *mddev, struct bio * bi)
+static void make_request(struct mddev *mddev, struct bio * bi)
 {
         struct r5conf *conf = mddev->private;
         int dd_idx;
@@ -3701,7 +3701,7 @@ static int make_request(struct mddev *mddev, struct bio * bi)
 
         if (unlikely(bi->bi_rw & REQ_FLUSH)) {
                 md_flush_request(mddev, bi);
-                return 0;
+                return;
         }
 
         md_write_start(mddev, bi);
@@ -3709,7 +3709,7 @@ static int make_request(struct mddev *mddev, struct bio * bi)
         if (rw == READ &&
              mddev->reshape_position == MaxSector &&
              chunk_aligned_read(mddev,bi))
-                return 0;
+                return;
 
         logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
         last_sector = bi->bi_sector + (bi->bi_size>>9);
@@ -3844,8 +3844,6 @@ static int make_request(struct mddev *mddev, struct bio * bi)
 
                 bio_endio(bi, 0);
         }
-
-        return 0;
 }
 
 static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks);