aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorNeilBrown <neilb@suse.de>2012-10-10 23:25:57 -0400
committerNeilBrown <neilb@suse.de>2012-10-10 23:25:57 -0400
commit72f36d5972a166197036c1281963f6863c429bf2 (patch)
tree9bf42eb456ea85126489dad2283ed0343d91172d /drivers
parente56108d65f8705170d238858616728359542aebb (diff)
md: refine reporting of resync/reshape delays.
If 'resync_max' is set to 0 (as is often done when starting a reshape, so that mdadm can remain in control during a sensitive period), and if the reshape request is initially delayed because another array using the same devices is resyncing or reshaping etc, then user-space cannot easily tell when the delay changes from being due to a conflicting reshape, to being due to resync_max = 0. So introduce a new state: (curr_resync == 3) to reflect this, make sure it is visible both via /proc/mdstat and via the "sync_completed" sysfs attribute, and ensure that the event transition from one delay state to the other is properly notified. Signed-off-by: NeilBrown <neilb@suse.de>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/md/md.c25
1 file changed, 18 insertions, 7 deletions
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8d93f867adbf..3c6eaab0b6ce 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -4400,6 +4400,10 @@ sync_completed_show(struct mddev *mddev, char *page)
4400 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4400 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4401 return sprintf(page, "none\n"); 4401 return sprintf(page, "none\n");
4402 4402
4403 if (mddev->curr_resync == 1 ||
4404 mddev->curr_resync == 2)
4405 return sprintf(page, "delayed\n");
4406
4403 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || 4407 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
4404 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 4408 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
4405 max_sectors = mddev->resync_max_sectors; 4409 max_sectors = mddev->resync_max_sectors;
@@ -6807,7 +6811,11 @@ static void status_resync(struct seq_file *seq, struct mddev * mddev)
6807 int scale; 6811 int scale;
6808 unsigned int per_milli; 6812 unsigned int per_milli;
6809 6813
6810 resync = mddev->curr_resync - atomic_read(&mddev->recovery_active); 6814 if (mddev->curr_resync <= 3)
6815 resync = 0;
6816 else
6817 resync = mddev->curr_resync
6818 - atomic_read(&mddev->recovery_active);
6811 6819
6812 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || 6820 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
6813 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 6821 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
@@ -7033,7 +7041,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
7033 if (mddev->curr_resync > 2) { 7041 if (mddev->curr_resync > 2) {
7034 status_resync(seq, mddev); 7042 status_resync(seq, mddev);
7035 seq_printf(seq, "\n "); 7043 seq_printf(seq, "\n ");
7036 } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2) 7044 } else if (mddev->curr_resync >= 1)
7037 seq_printf(seq, "\tresync=DELAYED\n "); 7045 seq_printf(seq, "\tresync=DELAYED\n ");
7038 else if (mddev->recovery_cp < MaxSector) 7046 else if (mddev->recovery_cp < MaxSector)
7039 seq_printf(seq, "\tresync=PENDING\n "); 7047 seq_printf(seq, "\tresync=PENDING\n ");
@@ -7423,8 +7431,11 @@ void md_do_sync(struct md_thread *thread)
7423 "md: resuming %s of %s from checkpoint.\n", 7431 "md: resuming %s of %s from checkpoint.\n",
7424 desc, mdname(mddev)); 7432 desc, mdname(mddev));
7425 mddev->curr_resync = j; 7433 mddev->curr_resync = j;
7426 } 7434 } else
7435 mddev->curr_resync = 3; /* no longer delayed */
7427 mddev->curr_resync_completed = j; 7436 mddev->curr_resync_completed = j;
7437 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
7438 md_new_event(mddev);
7428 7439
7429 blk_start_plug(&plug); 7440 blk_start_plug(&plug);
7430 while (j < max_sectors) { 7441 while (j < max_sectors) {
@@ -7477,7 +7488,8 @@ void md_do_sync(struct md_thread *thread)
7477 break; 7488 break;
7478 7489
7479 j += sectors; 7490 j += sectors;
7480 if (j>1) mddev->curr_resync = j; 7491 if (j > 2)
7492 mddev->curr_resync = j;
7481 mddev->curr_mark_cnt = io_sectors; 7493 mddev->curr_mark_cnt = io_sectors;
7482 if (last_check == 0) 7494 if (last_check == 0)
7483 /* this is the earliest that rebuild will be 7495 /* this is the earliest that rebuild will be
@@ -7599,8 +7611,6 @@ static int remove_and_add_spares(struct mddev *mddev)
7599 int spares = 0; 7611 int spares = 0;
7600 int removed = 0; 7612 int removed = 0;
7601 7613
7602 mddev->curr_resync_completed = 0;
7603
7604 rdev_for_each(rdev, mddev) 7614 rdev_for_each(rdev, mddev)
7605 if (rdev->raid_disk >= 0 && 7615 if (rdev->raid_disk >= 0 &&
7606 !test_bit(Blocked, &rdev->flags) && 7616 !test_bit(Blocked, &rdev->flags) &&
@@ -7791,6 +7801,7 @@ void md_check_recovery(struct mddev *mddev)
7791 /* Set RUNNING before clearing NEEDED to avoid 7801 /* Set RUNNING before clearing NEEDED to avoid
7792 * any transients in the value of "sync_action". 7802 * any transients in the value of "sync_action".
7793 */ 7803 */
7804 mddev->curr_resync_completed = 0;
7794 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 7805 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
7795 /* Clear some bits that don't mean anything, but 7806 /* Clear some bits that don't mean anything, but
7796 * might be left set 7807 * might be left set
@@ -7804,7 +7815,7 @@ void md_check_recovery(struct mddev *mddev)
7804 /* no recovery is running. 7815 /* no recovery is running.
7805 * remove any failed drives, then 7816 * remove any failed drives, then
7806 * add spares if possible. 7817 * add spares if possible.
7807 * Spare are also removed and re-added, to allow 7818 * Spares are also removed and re-added, to allow
7808 * the personality to fail the re-add. 7819 * the personality to fail the re-add.
7809 */ 7820 */
7810 7821