Diffstat (limited to 'drivers/md/raid5.c')

 drivers/md/raid5.c | 48 ++++++++++++++++++++++++++++++++++++------------
 1 file changed, 36 insertions(+), 12 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e8c8157b02fc..2d6f1a51359c 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3159,7 +3159,8 @@ static void raid5_activate_delayed(raid5_conf_t *conf)
 				atomic_inc(&conf->preread_active_stripes);
 			list_add_tail(&sh->lru, &conf->handle_list);
 		}
-	}
+	} else
+		blk_plug_device(conf->mddev->queue);
 }
 
 static void activate_bit_delay(raid5_conf_t *conf)
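When preread_active_stripes is at or above IO_THRESHOLD and no delayed stripe can be activated, raid5_activate_delayed() now plugs the device queue; the delayed stripes are then released from the unplug path rather than being polled for in raid5d() (that polling block is deleted in a later hunk). A minimal userspace sketch of the plug/unplug batching idea follows; struct queue, submit() and unplug() are hypothetical stand-ins, not the block-layer API.

	#include <stdbool.h>
	#include <stdio.h>

	#define MAX_DELAYED 8

	struct queue {
		bool plugged;
		int delayed[MAX_DELAYED];
		int ndelayed;
	};

	static void dispatch(int item)
	{
		printf("dispatch %d\n", item);
	}

	/* While the queue is plugged, park work instead of dispatching it. */
	static void submit(struct queue *q, int item)
	{
		if (q->plugged && q->ndelayed < MAX_DELAYED)
			q->delayed[q->ndelayed++] = item;
		else
			dispatch(item);
	}

	/* Unplugging releases the whole parked batch at once. */
	static void unplug(struct queue *q)
	{
		q->plugged = false;
		for (int i = 0; i < q->ndelayed; i++)
			dispatch(q->delayed[i]);
		q->ndelayed = 0;
	}

	int main(void)
	{
		struct queue q = { .plugged = true };

		submit(&q, 1);
		submit(&q, 2);
		unplug(&q);	/* "1" and "2" dispatch together here */
		return 0;
	}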
@@ -3549,7 +3550,8 @@ static int make_request(struct request_queue *q, struct bio * bi)
 				goto retry;
 			}
 			finish_wait(&conf->wait_for_overlap, &w);
-			handle_stripe(sh, NULL);
+			set_bit(STRIPE_HANDLE, &sh->state);
+			clear_bit(STRIPE_DELAYED, &sh->state);
 			release_stripe(sh);
 		} else {
 			/* cannot get stripe for read-ahead, just give-up */
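Instead of handling the stripe inline, make_request() now sets STRIPE_HANDLE (and clears STRIPE_DELAYED) so that the final release_stripe() hands the stripe to the raid5d thread. Roughly, in a hypothetical stripped-down model of that set-flag-and-release deferral (the bool fields stand in for the stripe state bits):

	#include <stdbool.h>
	#include <stdio.h>

	struct stripe {
		int count;	/* reference count */
		bool handle;	/* STRIPE_HANDLE: daemon should process this */
		bool delayed;	/* STRIPE_DELAYED: hold back for batching */
	};

	/* Dropping the last reference hands flagged stripes to the daemon. */
	static void release_stripe(struct stripe *sh)
	{
		if (--sh->count == 0 && sh->handle) {
			if (sh->delayed)
				printf("-> delayed_list\n");
			else
				printf("-> handle_list (daemon will run it)\n");
		}
	}

	int main(void)
	{
		struct stripe sh = { .count = 1 };

		/* What the hunk above does instead of handle_stripe(): */
		sh.handle = true;	/* set_bit(STRIPE_HANDLE, ...) */
		sh.delayed = false;	/* clear_bit(STRIPE_DELAYED, ...) */
		release_stripe(&sh);
		return 0;
	}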
@@ -3698,6 +3700,25 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped)
 		release_stripe(sh);
 		first_sector += STRIPE_SECTORS;
 	}
+	/* If this takes us to the resync_max point where we have to pause,
+	 * then we need to write out the superblock.
+	 */
+	sector_nr += conf->chunk_size>>9;
+	if (sector_nr >= mddev->resync_max) {
+		/* Cannot proceed until we've updated the superblock... */
+		wait_event(conf->wait_for_overlap,
+			   atomic_read(&conf->reshape_stripes) == 0);
+		mddev->reshape_position = conf->expand_progress;
+		set_bit(MD_CHANGE_DEVS, &mddev->flags);
+		md_wakeup_thread(mddev->thread);
+		wait_event(mddev->sb_wait,
+			   !test_bit(MD_CHANGE_DEVS, &mddev->flags)
+			   || kthread_should_stop());
+		spin_lock_irq(&conf->device_lock);
+		conf->expand_lo = mddev->reshape_position;
+		spin_unlock_irq(&conf->device_lock);
+		wake_up(&conf->wait_for_overlap);
+	}
 	return conf->chunk_size>>9;
 }
 
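The new block checkpoints the reshape when sector_nr reaches mddev->resync_max: wait until no reshape stripes are in flight, record conf->expand_progress as mddev->reshape_position, ask the md thread to write the superblock (MD_CHANGE_DEVS), wait for that write, and only then advance conf->expand_lo and wake waiters. A condensed single-threaded sketch of that drain/persist/advance ordering (stub functions and made-up names; the real code synchronizes with wait queues and a spinlock):

	#include <stdio.h>

	struct reshape {
		unsigned long long progress;	/* conf->expand_progress */
		unsigned long long safe_lo;	/* conf->expand_lo */
	};

	static void drain_inflight(void)
	{
		/* stands in for wait_event(..., reshape_stripes == 0) */
	}

	static void write_superblock(unsigned long long pos)
	{
		/* stands in for MD_CHANGE_DEVS + md_wakeup_thread + sb_wait */
		printf("superblock: reshape_position=%llu\n", pos);
	}

	/* Drain, persist progress, then advance the checkpointed bound. */
	static void checkpoint(struct reshape *r)
	{
		drain_inflight();
		write_superblock(r->progress);
		r->safe_lo = r->progress;
	}

	int main(void)
	{
		struct reshape r = { .progress = 4096, .safe_lo = 0 };

		checkpoint(&r);
		printf("expand_lo advanced to %llu\n", r.safe_lo);
		return 0;
	}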
@@ -3734,6 +3755,12 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped)
 	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
 		return reshape_request(mddev, sector_nr, skipped);
 
+	/* No need to check resync_max as we never do more than one
+	 * stripe, and as resync_max will always be on a chunk boundary,
+	 * if the check in md_do_sync didn't fire, there is no chance
+	 * of overstepping resync_max here
+	 */
+
 	/* if there is too many failed drives and we are trying
 	 * to resync, then assert that we are finished, because there is
 	 * nothing we can do.
@@ -3753,6 +3780,9 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped)
 		return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
 	}
 
+
+	bitmap_cond_end_sync(mddev->bitmap, sector_nr);
+
 	pd_idx = stripe_to_pdidx(sector_nr, conf, raid_disks);
 	sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 1);
 	if (sh == NULL) {
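The bitmap_cond_end_sync() call gives the bitmap code a periodic chance to mark regions well behind the resync cursor as in-sync, so the common per-stripe path stays cheap. A rough userspace sketch of that do-the-expensive-flush-only-every-so-often pattern; all names and the interval are made up, and the real helper also waits for pending writes before folding in the completed region:

	#include <stdio.h>

	struct sync_bitmap {
		unsigned long long flushed_to;	/* last position folded in */
	};

	/* Only flush when the cursor has moved far enough past the last flush. */
	static void cond_end_sync(struct sync_bitmap *b, unsigned long long sector)
	{
		const unsigned long long interval = 2048;	/* sectors, arbitrary */

		if (sector >= b->flushed_to + interval) {
			printf("mark [%llu, %llu) in-sync\n", b->flushed_to, sector);
			b->flushed_to = sector;
		}
	}

	int main(void)
	{
		struct sync_bitmap b = { 0 };

		for (unsigned long long s = 0; s <= 8192; s += 512)
			cond_end_sync(&b, s);
		return 0;
	}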
@@ -3864,7 +3894,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
  * During the scan, completed stripes are saved for us by the interrupt
  * handler, so that they will not have to wait for our next wakeup.
  */
-static void raid5d (mddev_t *mddev)
+static void raid5d(mddev_t *mddev)
 {
 	struct stripe_head *sh;
 	raid5_conf_t *conf = mddev_to_conf(mddev);
@@ -3889,12 +3919,6 @@ static void raid5d (mddev_t *mddev)
 			activate_bit_delay(conf);
 		}
 
-		if (list_empty(&conf->handle_list) &&
-		    atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD &&
-		    !blk_queue_plugged(mddev->queue) &&
-		    !list_empty(&conf->delayed_list))
-			raid5_activate_delayed(conf);
-
 		while ((bio = remove_bio_from_retry(conf))) {
 			int ok;
 			spin_unlock_irq(&conf->device_lock);
@@ -4108,7 +4132,7 @@ static int run(mddev_t *mddev)
 
 	pr_debug("raid5: run(%s) called.\n", mdname(mddev));
 
-	ITERATE_RDEV(mddev,rdev,tmp) {
+	rdev_for_each(rdev, tmp, mddev) {
 		raid_disk = rdev->raid_disk;
 		if (raid_disk >= conf->raid_disks
 		    || raid_disk < 0)
@@ -4521,7 +4545,7 @@ static int raid5_start_reshape(mddev_t *mddev)
 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
 		return -EBUSY;
 
-	ITERATE_RDEV(mddev, rdev, rtmp)
+	rdev_for_each(rdev, rtmp, mddev)
 		if (rdev->raid_disk < 0 &&
 		    !test_bit(Faulty, &rdev->flags))
 			spares++;
@@ -4543,7 +4567,7 @@ static int raid5_start_reshape(mddev_t *mddev)
 	/* Add some new drives, as many as will fit.
 	 * We know there are enough to make the newly sized array work.
 	 */
-	ITERATE_RDEV(mddev, rdev, rtmp)
+	rdev_for_each(rdev, rtmp, mddev)
 		if (rdev->raid_disk < 0 &&
 		    !test_bit(Faulty, &rdev->flags)) {
 			if (raid5_add_disk(mddev, rdev)) {
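The last three hunks are a spelling conversion: ITERATE_RDEV(mddev, rdev, tmp) becomes rdev_for_each(rdev, tmp, mddev), the same deletion-safe walk over the array's component devices under a more conventional name and argument order. For reference, the sketch below shows the general shape of such a "safe" iteration macro, which keeps a lookahead pointer so the body may unlink the current entry; the node type and macro name here are hypothetical, not the kernel's list API:

	#include <stdio.h>

	struct node {
		int val;
		struct node *next;
	};

	/* Lookahead pointer lets the body remove the current node safely. */
	#define for_each_node_safe(pos, tmp, head)			\
		for ((pos) = (head), (tmp) = (pos) ? (pos)->next : NULL; \
		     (pos) != NULL;					\
		     (pos) = (tmp), (tmp) = (pos) ? (pos)->next : NULL)

	int main(void)
	{
		struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
		struct node *pos, *tmp;

		for_each_node_safe(pos, tmp, &a)
			printf("%d\n", pos->val);
		return 0;
	}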