 drivers/md/md.c | 36 ++++++++++++++++++++++++++----------
 1 file changed, 26 insertions(+), 10 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index ad5d783ad416..b97ac9861ab2 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2695,7 +2695,8 @@ static ssize_t new_offset_store(struct md_rdev *rdev,
 	if (kstrtoull(buf, 10, &new_offset) < 0)
 		return -EINVAL;
 
-	if (mddev->sync_thread)
+	if (mddev->sync_thread ||
+	    test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
 		return -EBUSY;
 	if (new_offset == rdev->data_offset)
 		/* reset is always permitted */
@@ -3272,6 +3273,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
 	 */
 
 	if (mddev->sync_thread ||
+	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
 	    mddev->reshape_position != MaxSector ||
 	    mddev->sysfs_active)
 		return -EBUSY;
@@ -4026,6 +4028,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
 	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 
 	if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
+		flush_workqueue(md_misc_wq);
 		if (mddev->sync_thread) {
 			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
 			md_reap_sync_thread(mddev);
@@ -5044,6 +5047,7 @@ static void md_clean(struct mddev *mddev)
 static void __md_stop_writes(struct mddev *mddev)
 {
 	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+	flush_workqueue(md_misc_wq);
 	if (mddev->sync_thread) {
 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
 		md_reap_sync_thread(mddev);
@@ -5104,19 +5108,22 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
 		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 		md_wakeup_thread(mddev->thread);
 	}
-	if (mddev->sync_thread) {
+	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+	if (mddev->sync_thread)
 		/* Thread might be blocked waiting for metadata update
 		 * which will now never happen */
 		wake_up_process(mddev->sync_thread->tsk);
-	}
+
 	mddev_unlock(mddev);
-	wait_event(resync_wait, mddev->sync_thread == NULL);
+	wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
+					  &mddev->recovery));
 	mddev_lock_nointr(mddev);
 
 	mutex_lock(&mddev->open_mutex);
 	if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
 	    mddev->sync_thread ||
+	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
 	    (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
 		printk("md: %s still in use.\n",mdname(mddev));
 		if (did_freeze) {
@@ -5162,20 +5169,24 @@ static int do_md_stop(struct mddev *mddev, int mode,
 		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 		md_wakeup_thread(mddev->thread);
 	}
-	if (mddev->sync_thread) {
+	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+	if (mddev->sync_thread)
 		/* Thread might be blocked waiting for metadata update
 		 * which will now never happen */
 		wake_up_process(mddev->sync_thread->tsk);
-	}
+
 	mddev_unlock(mddev);
-	wait_event(resync_wait, mddev->sync_thread == NULL);
+	wait_event(resync_wait, (mddev->sync_thread == NULL &&
+				 !test_bit(MD_RECOVERY_RUNNING,
+					   &mddev->recovery)));
 	mddev_lock_nointr(mddev);
 
 	mutex_lock(&mddev->open_mutex);
 	if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
 	    mddev->sysfs_active ||
 	    mddev->sync_thread ||
+	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
 	    (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
 		printk("md: %s still in use.\n",mdname(mddev));
 		mutex_unlock(&mddev->open_mutex);
@@ -5950,7 +5961,8 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
 	 * of each device.  If num_sectors is zero, we find the largest size
 	 * that fits.
 	 */
-	if (mddev->sync_thread)
+	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+	    mddev->sync_thread)
 		return -EBUSY;
 	if (mddev->ro)
 		return -EROFS;
@@ -5981,7 +5993,9 @@ static int update_raid_disks(struct mddev *mddev, int raid_disks)
 	if (raid_disks <= 0 ||
 	    (mddev->max_disks && raid_disks >= mddev->max_disks))
 		return -EINVAL;
-	if (mddev->sync_thread || mddev->reshape_position != MaxSector)
+	if (mddev->sync_thread ||
+	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+	    mddev->reshape_position != MaxSector)
 		return -EBUSY;
 
 	rdev_for_each(rdev, mddev) {
@@ -7593,6 +7607,7 @@ static void md_start_sync(struct work_struct *ws)
 		clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
 		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
 		clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+		wake_up(&resync_wait);
 		if (test_and_clear_bit(MD_RECOVERY_RECOVER,
 				       &mddev->recovery))
 			if (mddev->sysfs_action)
@@ -7761,6 +7776,7 @@ void md_check_recovery(struct mddev *mddev)
 	not_running:
 		if (!mddev->sync_thread) {
 			clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+			wake_up(&resync_wait);
 			if (test_and_clear_bit(MD_RECOVERY_RECOVER,
 					       &mddev->recovery))
 				if (mddev->sysfs_action)
@@ -7779,7 +7795,6 @@ void md_reap_sync_thread(struct mddev *mddev)
 
 	/* resync has finished, collect result */
 	md_unregister_thread(&mddev->sync_thread);
-	wake_up(&resync_wait);
 	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
 	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
 		/* success...*/
@@ -7807,6 +7822,7 @@ void md_reap_sync_thread(struct mddev *mddev)
 	clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
 	clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
 	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+	wake_up(&resync_wait);
 	/* flag recovery needed just to double check */
 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 	sysfs_notify_dirent_safe(mddev->sysfs_action);
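
The shape of the change, as visible in the hunks above: callers that used to test or wait on mddev->sync_thread now also test MD_RECOVERY_RUNNING and wait on resync_wait until that bit clears, and every path that clears the bit (including the ones where no sync thread was ever started) now calls wake_up(&resync_wait). The userspace program below is an illustrative analogy only, not kernel code and not part of this patch; the names sync_worker, recovery_running and stop_requested are hypothetical stand-ins for the sync thread, MD_RECOVERY_RUNNING and MD_RECOVERY_INTR.

/* Illustrative analogy of "wait for the running flag, not the thread
 * pointer": the worker clears a flag and wakes waiters; the stopper
 * waits for the flag to be clear. Build with: cc -pthread demo.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t resync_wait = PTHREAD_COND_INITIALIZER; /* ~ resync_wait */
static bool recovery_running; /* ~ MD_RECOVERY_RUNNING */
static bool stop_requested;   /* ~ MD_RECOVERY_INTR */

static void *sync_worker(void *arg)
{
	(void)arg;
	for (;;) {
		pthread_mutex_lock(&lock);
		if (stop_requested) {
			/* clear the "running" flag and wake any waiters */
			recovery_running = false;
			pthread_cond_broadcast(&resync_wait);
			pthread_mutex_unlock(&lock);
			return NULL;
		}
		pthread_mutex_unlock(&lock);
		usleep(10000); /* pretend to do a chunk of resync work */
	}
}

int main(void)
{
	pthread_t tid;

	recovery_running = true;
	pthread_create(&tid, NULL, sync_worker, NULL);

	/* "stop": request interruption, then wait for the flag to clear,
	 * not for the thread object itself to go away */
	pthread_mutex_lock(&lock);
	stop_requested = true;
	while (recovery_running)
		pthread_cond_wait(&resync_wait, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(tid, NULL);
	printf("recovery no longer running; safe to reconfigure\n");
	return 0;
}

Waiting on the flag rather than on the thread pointer matters because MD_RECOVERY_RUNNING can be set, and later cleared, while mddev->sync_thread is still NULL (for example the not_running path in md_check_recovery above), so a pointer-based wait could return before recovery activity has actually stopped.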