-rw-r--r--  drivers/md/faulty.c      5
-rw-r--r--  drivers/md/linear.c      7
-rw-r--r--  drivers/md/md.c         22
-rw-r--r--  drivers/md/md.h          4
-rw-r--r--  drivers/md/multipath.c   8
-rw-r--r--  drivers/md/raid0.c       7
-rw-r--r--  drivers/md/raid1.c      11
-rw-r--r--  drivers/md/raid10.c     10
-rw-r--r--  drivers/md/raid5.c      17
9 files changed, 56 insertions, 35 deletions
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index b0536cfd8e17..06a64d5d8c6c 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -170,7 +170,7 @@ static void add_sector(struct faulty_conf *conf, sector_t start, int mode)
 	conf->nfaults = n+1;
 }
 
-static void faulty_make_request(struct mddev *mddev, struct bio *bio)
+static bool faulty_make_request(struct mddev *mddev, struct bio *bio)
 {
 	struct faulty_conf *conf = mddev->private;
 	int failit = 0;
@@ -182,7 +182,7 @@ static void faulty_make_request(struct mddev *mddev, struct bio *bio)
 			 * just fail immediately
 			 */
 			bio_io_error(bio);
-			return;
+			return true;
 		}
 
 		if (check_sector(conf, bio->bi_iter.bi_sector,
@@ -224,6 +224,7 @@ static void faulty_make_request(struct mddev *mddev, struct bio *bio)
 	bio->bi_bdev = conf->rdev->bdev;
 
 	generic_make_request(bio);
+	return true;
 }
 
 static void faulty_status(struct seq_file *seq, struct mddev *mddev)
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index df6f2c98eca7..5f1eb9189542 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -245,7 +245,7 @@ static void linear_free(struct mddev *mddev, void *priv)
 	kfree(conf);
 }
 
-static void linear_make_request(struct mddev *mddev, struct bio *bio)
+static bool linear_make_request(struct mddev *mddev, struct bio *bio)
 {
 	char b[BDEVNAME_SIZE];
 	struct dev_info *tmp_dev;
@@ -254,7 +254,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
 
 	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
 		md_flush_request(mddev, bio);
-		return;
+		return true;
 	}
 
 	tmp_dev = which_dev(mddev, bio_sector);
@@ -292,7 +292,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
 		mddev_check_write_zeroes(mddev, bio);
 		generic_make_request(bio);
 	}
-	return;
+	return true;
 
 out_of_bounds:
 	pr_err("md/linear:%s: make_request: Sector %llu out of bounds on dev %s: %llu sectors, offset %llu\n",
@@ -302,6 +302,7 @@ out_of_bounds:
 	       (unsigned long long)tmp_dev->rdev->sectors,
 	       (unsigned long long)start_sector);
 	bio_io_error(bio);
+	return true;
 }
 
 static void linear_status (struct seq_file *seq, struct mddev *mddev)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 87edc342ccb3..d7847014821a 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -277,7 +277,7 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
 		bio_endio(bio);
 		return BLK_QC_T_NONE;
 	}
-	smp_rmb(); /* Ensure implications of 'active' are visible */
+check_suspended:
 	rcu_read_lock();
 	if (mddev->suspended) {
 		DEFINE_WAIT(__wait);
@@ -302,7 +302,11 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
 	sectors = bio_sectors(bio);
 	/* bio could be mergeable after passing to underlayer */
 	bio->bi_opf &= ~REQ_NOMERGE;
-	mddev->pers->make_request(mddev, bio);
+	if (!mddev->pers->make_request(mddev, bio)) {
+		atomic_dec(&mddev->active_io);
+		wake_up(&mddev->sb_wait);
+		goto check_suspended;
+	}
 
 	cpu = part_stat_lock();
 	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
@@ -327,6 +331,7 @@ void mddev_suspend(struct mddev *mddev)
 	if (mddev->suspended++)
 		return;
 	synchronize_rcu();
+	wake_up(&mddev->sb_wait);
 	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
 	mddev->pers->quiesce(mddev, 1);
 
@@ -7950,12 +7955,14 @@ EXPORT_SYMBOL(md_done_sync);
  * If we need to update some array metadata (e.g. 'active' flag
  * in superblock) before writing, schedule a superblock update
  * and wait for it to complete.
+ * A return value of 'false' means that the write wasn't recorded
+ * and cannot proceed as the array is being suspended.
  */
-void md_write_start(struct mddev *mddev, struct bio *bi)
+bool md_write_start(struct mddev *mddev, struct bio *bi)
 {
 	int did_change = 0;
 	if (bio_data_dir(bi) != WRITE)
-		return;
+		return true;
 
 	BUG_ON(mddev->ro == 1);
 	if (mddev->ro == 2) {
@@ -7987,7 +7994,12 @@ void md_write_start(struct mddev *mddev, struct bio *bi)
 	if (did_change)
 		sysfs_notify_dirent_safe(mddev->sysfs_state);
 	wait_event(mddev->sb_wait,
-		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
+		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) && !mddev->suspended);
+	if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
+		percpu_ref_put(&mddev->writes_pending);
+		return false;
+	}
+	return true;
 }
 EXPORT_SYMBOL(md_write_start);
 
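To make the new calling convention concrete: a personality's write path now checks md_write_start() and backs out when it fails, letting the md core drop its active_io reference and resubmit the bio once the suspend completes. The following is a minimal, hypothetical hook (illustrative only; the name example_make_request and the bare-bones flow are not from the patch, and real personalities also handle flushes, bio splitting and their own accounting):

static bool example_make_request(struct mddev *mddev, struct bio *bio)
{
	if (bio_data_dir(bio) != WRITE) {
		/* reads are unaffected by the new contract */
		generic_make_request(bio);
		return true;
	}

	/* May block for a pending superblock update; returns false if the
	 * array started suspending, in which case the bio must not be queued.
	 */
	if (!md_write_start(mddev, bio))
		return false;	/* md core will retry after the suspend */

	generic_make_request(bio);
	/* A real driver keeps the write accounted (md_write_inc()/md_write_end())
	 * until the I/O actually completes, typically from its end_io path.
	 */
	return true;
}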
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 0fa1de42c42b..63d342d560b8 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -510,7 +510,7 @@ struct md_personality
 	int level;
 	struct list_head list;
 	struct module *owner;
-	void (*make_request)(struct mddev *mddev, struct bio *bio);
+	bool (*make_request)(struct mddev *mddev, struct bio *bio);
 	int (*run)(struct mddev *mddev);
 	void (*free)(struct mddev *mddev, void *priv);
 	void (*status)(struct seq_file *seq, struct mddev *mddev);
@@ -649,7 +649,7 @@ extern void md_wakeup_thread(struct md_thread *thread);
 extern void md_check_recovery(struct mddev *mddev);
 extern void md_reap_sync_thread(struct mddev *mddev);
 extern int mddev_init_writes_pending(struct mddev *mddev);
-extern void md_write_start(struct mddev *mddev, struct bio *bi);
+extern bool md_write_start(struct mddev *mddev, struct bio *bi);
 extern void md_write_inc(struct mddev *mddev, struct bio *bi);
 extern void md_write_end(struct mddev *mddev);
 extern void md_done_sync(struct mddev *mddev, int blocks, int ok);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index e95d521d93e9..c8d985ba008d 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -106,7 +106,7 @@ static void multipath_end_request(struct bio *bio)
 	rdev_dec_pending(rdev, conf->mddev);
 }
 
-static void multipath_make_request(struct mddev *mddev, struct bio * bio)
+static bool multipath_make_request(struct mddev *mddev, struct bio * bio)
 {
 	struct mpconf *conf = mddev->private;
 	struct multipath_bh * mp_bh;
@@ -114,7 +114,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
 
 	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
 		md_flush_request(mddev, bio);
-		return;
+		return true;
 	}
 
 	mp_bh = mempool_alloc(conf->pool, GFP_NOIO);
@@ -126,7 +126,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
 	if (mp_bh->path < 0) {
 		bio_io_error(bio);
 		mempool_free(mp_bh, conf->pool);
-		return;
+		return true;
 	}
 	multipath = conf->multipaths + mp_bh->path;
 
@@ -141,7 +141,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
 	mddev_check_writesame(mddev, &mp_bh->bio);
 	mddev_check_write_zeroes(mddev, &mp_bh->bio);
 	generic_make_request(&mp_bh->bio);
-	return;
+	return true;
 }
 
 static void multipath_status(struct seq_file *seq, struct mddev *mddev)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index d6c0bc76e837..94d9ae9b0fd0 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -548,7 +548,7 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
 	bio_endio(bio);
 }
 
-static void raid0_make_request(struct mddev *mddev, struct bio *bio)
+static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
 {
 	struct strip_zone *zone;
 	struct md_rdev *tmp_dev;
@@ -559,12 +559,12 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
 
 	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
 		md_flush_request(mddev, bio);
-		return;
+		return true;
 	}
 
 	if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) {
 		raid0_handle_discard(mddev, bio);
-		return;
+		return true;
 	}
 
 	bio_sector = bio->bi_iter.bi_sector;
@@ -599,6 +599,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
 	mddev_check_writesame(mddev, bio);
 	mddev_check_write_zeroes(mddev, bio);
 	generic_make_request(bio);
+	return true;
 }
 
 static void raid0_status(struct seq_file *seq, struct mddev *mddev)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index e1a7e3d4c5e4..c71739b87ab7 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1321,7 +1321,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 	 * Continue immediately if no resync is active currently.
 	 */
 
-	md_write_start(mddev, bio); /* wait on superblock update early */
 
 	if ((bio_end_sector(bio) > mddev->suspend_lo &&
 	    bio->bi_iter.bi_sector < mddev->suspend_hi) ||
@@ -1550,13 +1549,13 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 	wake_up(&conf->wait_barrier);
 }
 
-static void raid1_make_request(struct mddev *mddev, struct bio *bio)
+static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
 {
 	sector_t sectors;
 
 	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
 		md_flush_request(mddev, bio);
-		return;
+		return true;
 	}
 
 	/*
@@ -1571,8 +1570,12 @@ static void raid1_make_request(struct mddev *mddev, struct bio *bio)
 
 	if (bio_data_dir(bio) == READ)
 		raid1_read_request(mddev, bio, sectors, NULL);
-	else
+	else {
+		if (!md_write_start(mddev,bio))
+			return false;
 		raid1_write_request(mddev, bio, sectors);
+	}
+	return true;
 }
 
 static void raid1_status(struct seq_file *seq, struct mddev *mddev)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 797ed60abd5e..52acffa7a06a 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1303,8 +1303,6 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
 	sector_t sectors;
 	int max_sectors;
 
-	md_write_start(mddev, bio);
-
 	/*
 	 * Register the new request and wait if the reconstruction
 	 * thread has put up a bar for new requests.
@@ -1525,7 +1523,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
 		raid10_write_request(mddev, bio, r10_bio);
 }
 
-static void raid10_make_request(struct mddev *mddev, struct bio *bio)
+static bool raid10_make_request(struct mddev *mddev, struct bio *bio)
 {
 	struct r10conf *conf = mddev->private;
 	sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
@@ -1534,9 +1532,12 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
 
 	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
 		md_flush_request(mddev, bio);
-		return;
+		return true;
 	}
 
+	if (!md_write_start(mddev, bio))
+		return false;
+
 	/*
 	 * If this request crosses a chunk boundary, we need to split
 	 * it.
@@ -1553,6 +1554,7 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
 
 	/* In case raid10d snuck in to freeze_array */
 	wake_up(&conf->wait_barrier);
+	return true;
 }
 
 static void raid10_status(struct seq_file *seq, struct mddev *mddev)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index ec0f951ae19f..b218a42fd702 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5479,7 +5479,6 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
 	last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9);
 
 	bi->bi_next = NULL;
-	md_write_start(mddev, bi);
 
 	stripe_sectors = conf->chunk_sectors *
 		(conf->raid_disks - conf->max_degraded);
@@ -5549,11 +5548,10 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
 		release_stripe_plug(mddev, sh);
 	}
 
-	md_write_end(mddev);
 	bio_endio(bi);
 }
 
-static void raid5_make_request(struct mddev *mddev, struct bio * bi)
+static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
 {
 	struct r5conf *conf = mddev->private;
 	int dd_idx;
@@ -5569,10 +5567,10 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
 		int ret = r5l_handle_flush_request(conf->log, bi);
 
 		if (ret == 0)
-			return;
+			return true;
 		if (ret == -ENODEV) {
 			md_flush_request(mddev, bi);
-			return;
+			return true;
 		}
 		/* ret == -EAGAIN, fallback */
 		/*
@@ -5582,6 +5580,8 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
 		do_flush = bi->bi_opf & REQ_PREFLUSH;
 	}
 
+	if (!md_write_start(mddev, bi))
+		return false;
 	/*
 	 * If array is degraded, better not do chunk aligned read because
 	 * later we might have to read it again in order to reconstruct
@@ -5591,18 +5591,18 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
 	    mddev->reshape_position == MaxSector) {
 		bi = chunk_aligned_read(mddev, bi);
 		if (!bi)
-			return;
+			return true;
 	}
 
 	if (unlikely(bio_op(bi) == REQ_OP_DISCARD)) {
 		make_discard_request(mddev, bi);
-		return;
+		md_write_end(mddev);
+		return true;
 	}
 
 	logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
 	last_sector = bio_end_sector(bi);
 	bi->bi_next = NULL;
-	md_write_start(mddev, bi);
 
 	prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
 	for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
@@ -5740,6 +5740,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
 	if (rw == WRITE)
 		md_write_end(mddev);
 	bio_endio(bi);
+	return true;
 }
 
 static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks);