diff options
Diffstat (limited to 'drivers/md/raid1.c')
| -rw-r--r-- | drivers/md/raid1.c | 30 |
1 file changed, 19 insertions, 11 deletions
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 835de7168cd3..cacd008d6864 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
| @@ -517,8 +517,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect | |||
| 517 | int bad_sectors; | 517 | int bad_sectors; |
| 518 | 518 | ||
| 519 | int disk = start_disk + i; | 519 | int disk = start_disk + i; |
| 520 | if (disk >= conf->raid_disks) | 520 | if (disk >= conf->raid_disks * 2) |
| 521 | disk -= conf->raid_disks; | 521 | disk -= conf->raid_disks * 2; |
| 522 | 522 | ||
| 523 | rdev = rcu_dereference(conf->mirrors[disk].rdev); | 523 | rdev = rcu_dereference(conf->mirrors[disk].rdev); |
| 524 | if (r1_bio->bios[disk] == IO_BLOCKED | 524 | if (r1_bio->bios[disk] == IO_BLOCKED |
| @@ -883,7 +883,6 @@ static void make_request(struct mddev *mddev, struct bio * bio) | |||
| 883 | const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); | 883 | const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); |
| 884 | const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA)); | 884 | const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA)); |
| 885 | struct md_rdev *blocked_rdev; | 885 | struct md_rdev *blocked_rdev; |
| 886 | int plugged; | ||
| 887 | int first_clone; | 886 | int first_clone; |
| 888 | int sectors_handled; | 887 | int sectors_handled; |
| 889 | int max_sectors; | 888 | int max_sectors; |
| @@ -1034,7 +1033,6 @@ read_again: | |||
| 1034 | * the bad blocks. Each set of writes gets it's own r1bio | 1033 | * the bad blocks. Each set of writes gets it's own r1bio |
| 1035 | * with a set of bios attached. | 1034 | * with a set of bios attached. |
| 1036 | */ | 1035 | */ |
| 1037 | plugged = mddev_check_plugged(mddev); | ||
| 1038 | 1036 | ||
| 1039 | disks = conf->raid_disks * 2; | 1037 | disks = conf->raid_disks * 2; |
| 1040 | retry_write: | 1038 | retry_write: |
| @@ -1191,6 +1189,8 @@ read_again: | |||
| 1191 | bio_list_add(&conf->pending_bio_list, mbio); | 1189 | bio_list_add(&conf->pending_bio_list, mbio); |
| 1192 | conf->pending_count++; | 1190 | conf->pending_count++; |
| 1193 | spin_unlock_irqrestore(&conf->device_lock, flags); | 1191 | spin_unlock_irqrestore(&conf->device_lock, flags); |
| 1192 | if (!mddev_check_plugged(mddev)) | ||
| 1193 | md_wakeup_thread(mddev->thread); | ||
| 1194 | } | 1194 | } |
| 1195 | /* Mustn't call r1_bio_write_done before this next test, | 1195 | /* Mustn't call r1_bio_write_done before this next test, |
| 1196 | * as it could result in the bio being freed. | 1196 | * as it could result in the bio being freed. |
| @@ -1213,9 +1213,6 @@ read_again: | |||
| 1213 | 1213 | ||
| 1214 | /* In case raid1d snuck in to freeze_array */ | 1214 | /* In case raid1d snuck in to freeze_array */ |
| 1215 | wake_up(&conf->wait_barrier); | 1215 | wake_up(&conf->wait_barrier); |
| 1216 | |||
| 1217 | if (do_sync || !bitmap || !plugged) | ||
| 1218 | md_wakeup_thread(mddev->thread); | ||
| 1219 | } | 1216 | } |
| 1220 | 1217 | ||
| 1221 | static void status(struct seq_file *seq, struct mddev *mddev) | 1218 | static void status(struct seq_file *seq, struct mddev *mddev) |
| @@ -1821,8 +1818,14 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio) | |||
| 1821 | 1818 | ||
| 1822 | if (atomic_dec_and_test(&r1_bio->remaining)) { | 1819 | if (atomic_dec_and_test(&r1_bio->remaining)) { |
| 1823 | /* if we're here, all write(s) have completed, so clean up */ | 1820 | /* if we're here, all write(s) have completed, so clean up */ |
| 1824 | md_done_sync(mddev, r1_bio->sectors, 1); | 1821 | int s = r1_bio->sectors; |
| 1825 | put_buf(r1_bio); | 1822 | if (test_bit(R1BIO_MadeGood, &r1_bio->state) || |
| 1823 | test_bit(R1BIO_WriteError, &r1_bio->state)) | ||
| 1824 | reschedule_retry(r1_bio); | ||
| 1825 | else { | ||
| 1826 | put_buf(r1_bio); | ||
| 1827 | md_done_sync(mddev, s, 1); | ||
| 1828 | } | ||
| 1826 | } | 1829 | } |
| 1827 | } | 1830 | } |
| 1828 | 1831 | ||
| @@ -2488,9 +2491,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp | |||
| 2488 | */ | 2491 | */ |
| 2489 | if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { | 2492 | if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { |
| 2490 | atomic_set(&r1_bio->remaining, read_targets); | 2493 | atomic_set(&r1_bio->remaining, read_targets); |
| 2491 | for (i = 0; i < conf->raid_disks * 2; i++) { | 2494 | for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) { |
| 2492 | bio = r1_bio->bios[i]; | 2495 | bio = r1_bio->bios[i]; |
| 2493 | if (bio->bi_end_io == end_sync_read) { | 2496 | if (bio->bi_end_io == end_sync_read) { |
| 2497 | read_targets--; | ||
| 2494 | md_sync_acct(bio->bi_bdev, nr_sectors); | 2498 | md_sync_acct(bio->bi_bdev, nr_sectors); |
| 2495 | generic_make_request(bio); | 2499 | generic_make_request(bio); |
| 2496 | } | 2500 | } |
| @@ -2550,6 +2554,7 @@ static struct r1conf *setup_conf(struct mddev *mddev) | |||
| 2550 | err = -EINVAL; | 2554 | err = -EINVAL; |
| 2551 | spin_lock_init(&conf->device_lock); | 2555 | spin_lock_init(&conf->device_lock); |
| 2552 | rdev_for_each(rdev, mddev) { | 2556 | rdev_for_each(rdev, mddev) { |
| 2557 | struct request_queue *q; | ||
| 2553 | int disk_idx = rdev->raid_disk; | 2558 | int disk_idx = rdev->raid_disk; |
| 2554 | if (disk_idx >= mddev->raid_disks | 2559 | if (disk_idx >= mddev->raid_disks |
| 2555 | || disk_idx < 0) | 2560 | || disk_idx < 0) |
| @@ -2562,6 +2567,9 @@ static struct r1conf *setup_conf(struct mddev *mddev) | |||
| 2562 | if (disk->rdev) | 2567 | if (disk->rdev) |
| 2563 | goto abort; | 2568 | goto abort; |
| 2564 | disk->rdev = rdev; | 2569 | disk->rdev = rdev; |
| 2570 | q = bdev_get_queue(rdev->bdev); | ||
| 2571 | if (q->merge_bvec_fn) | ||
| 2572 | mddev->merge_check_needed = 1; | ||
| 2565 | 2573 | ||
| 2566 | disk->head_position = 0; | 2574 | disk->head_position = 0; |
| 2567 | } | 2575 | } |
| @@ -2617,7 +2625,7 @@ static struct r1conf *setup_conf(struct mddev *mddev) | |||
| 2617 | goto abort; | 2625 | goto abort; |
| 2618 | } | 2626 | } |
| 2619 | err = -ENOMEM; | 2627 | err = -ENOMEM; |
| 2620 | conf->thread = md_register_thread(raid1d, mddev, NULL); | 2628 | conf->thread = md_register_thread(raid1d, mddev, "raid1"); |
| 2621 | if (!conf->thread) { | 2629 | if (!conf->thread) { |
| 2622 | printk(KERN_ERR | 2630 | printk(KERN_ERR |
| 2623 | "md/raid1:%s: couldn't allocate thread\n", | 2631 | "md/raid1:%s: couldn't allocate thread\n", |
