Diffstat (limited to 'drivers/md/raid1.c')
 drivers/md/raid1.c | 26 +++++++++++++++-----------
 1 file changed, 15 insertions(+), 11 deletions(-)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a9c7981ddd24..cacd008d6864 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -517,8 +517,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
 		int bad_sectors;
 
 		int disk = start_disk + i;
-		if (disk >= conf->raid_disks)
-			disk -= conf->raid_disks;
+		if (disk >= conf->raid_disks * 2)
+			disk -= conf->raid_disks * 2;
 
 		rdev = rcu_dereference(conf->mirrors[disk].rdev);
 		if (r1_bio->bios[disk] == IO_BLOCKED
@@ -883,7 +883,6 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
 	const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
 	struct md_rdev *blocked_rdev;
-	int plugged;
 	int first_clone;
 	int sectors_handled;
 	int max_sectors;
@@ -1034,7 +1033,6 @@ read_again:
 	 * the bad blocks. Each set of writes gets it's own r1bio
 	 * with a set of bios attached.
 	 */
-	plugged = mddev_check_plugged(mddev);
 
 	disks = conf->raid_disks * 2;
  retry_write:
@@ -1191,6 +1189,8 @@ read_again:
 		bio_list_add(&conf->pending_bio_list, mbio);
 		conf->pending_count++;
 		spin_unlock_irqrestore(&conf->device_lock, flags);
+		if (!mddev_check_plugged(mddev))
+			md_wakeup_thread(mddev->thread);
 	}
 	/* Mustn't call r1_bio_write_done before this next test,
 	 * as it could result in the bio being freed.
@@ -1213,9 +1213,6 @@ read_again:
 
 	/* In case raid1d snuck in to freeze_array */
 	wake_up(&conf->wait_barrier);
-
-	if (do_sync || !bitmap || !plugged)
-		md_wakeup_thread(mddev->thread);
 }
 
 static void status(struct seq_file *seq, struct mddev *mddev)
@@ -1821,8 +1818,14 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
 
 	if (atomic_dec_and_test(&r1_bio->remaining)) {
 		/* if we're here, all write(s) have completed, so clean up */
-		md_done_sync(mddev, r1_bio->sectors, 1);
-		put_buf(r1_bio);
+		int s = r1_bio->sectors;
+		if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
+		    test_bit(R1BIO_WriteError, &r1_bio->state))
+			reschedule_retry(r1_bio);
+		else {
+			put_buf(r1_bio);
+			md_done_sync(mddev, s, 1);
+		}
 	}
 }
 
@@ -2488,9 +2491,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
 	 */
 	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
 		atomic_set(&r1_bio->remaining, read_targets);
-		for (i = 0; i < conf->raid_disks * 2; i++) {
+		for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
 			bio = r1_bio->bios[i];
 			if (bio->bi_end_io == end_sync_read) {
+				read_targets--;
 				md_sync_acct(bio->bi_bdev, nr_sectors);
 				generic_make_request(bio);
 			}
@@ -2621,7 +2625,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
 			goto abort;
 		}
 	err = -ENOMEM;
-	conf->thread = md_register_thread(raid1d, mddev, NULL);
+	conf->thread = md_register_thread(raid1d, mddev, "raid1");
 	if (!conf->thread) {
 		printk(KERN_ERR
 		       "md/raid1:%s: couldn't allocate thread\n",