aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/md/md.c
diff options
context:
space:
mode:
authorNeilBrown <neilb@suse.de>2008-05-23 16:04:39 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2008-05-24 12:56:10 -0400
commitdfc7064500061677720fa26352963c772d3ebe6b (patch)
treea8ca495bccf98837c6762ffba54a8009c9772259 /drivers/md/md.c
parent90b08710e41a07d4ff0fb8940dcce3a552991a56 (diff)
md: restart recovery cleanly after device failure.
When we get any IO error during a recovery (rebuilding a spare), we abort the recovery and restart it. For RAID6 (and multi-drive RAID1) it may not be best to restart at the beginning: when multiple failures can be tolerated, the recovery may be able to continue and re-doing all that has already been done doesn't make sense. We already have the infrastructure to record where a recovery is up to and restart from there, but it is not being used properly. This is because: - We sometimes abort with MD_RECOVERY_ERR rather than just MD_RECOVERY_INTR, which causes the recovery not to be checkpointed. - We remove spares and then re-add them, which loses important state information. The distinction between MD_RECOVERY_ERR and MD_RECOVERY_INTR really isn't needed. If there is an error, the relevant drive will be marked as Faulty, and that is enough to ensure correct handling of the error. So we first remove MD_RECOVERY_ERR, changing some of the uses of it to MD_RECOVERY_INTR. Then we cause the attempt to remove a non-faulty device from an array to fail (unless recovery is impossible as the array is too degraded). Then when remove_and_add_spares attempts to remove the devices on which recovery can continue, it will fail, they will remain in place, and recovery will continue on them as desired. Issue: If we are halfway through rebuilding a spare and another drive fails, and a new spare is immediately available, do we want to: 1/ complete the current rebuild, then go back and rebuild the new spare or 2/ restart the rebuild from the start and rebuild both devices in parallel. Both options can be argued for. The code currently takes option 2 as a/ this requires least code change b/ this results in a minimally-degraded array in minimal time. Cc: "Eivind Sarto" <ivan@kasenna.com> Signed-off-by: Neil Brown <neilb@suse.de> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/md/md.c')
-rw-r--r--drivers/md/md.c22
1 files changed, 11 insertions, 11 deletions
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 295be1a68806..51c19f86ff99 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5434,7 +5434,7 @@ void md_done_sync(mddev_t *mddev, int blocks, int ok)
5434 atomic_sub(blocks, &mddev->recovery_active); 5434 atomic_sub(blocks, &mddev->recovery_active);
5435 wake_up(&mddev->recovery_wait); 5435 wake_up(&mddev->recovery_wait);
5436 if (!ok) { 5436 if (!ok) {
5437 set_bit(MD_RECOVERY_ERR, &mddev->recovery); 5437 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5438 md_wakeup_thread(mddev->thread); 5438 md_wakeup_thread(mddev->thread);
5439 // stop recovery, signal do_sync .... 5439 // stop recovery, signal do_sync ....
5440 } 5440 }
@@ -5690,7 +5690,7 @@ void md_do_sync(mddev_t *mddev)
5690 sectors = mddev->pers->sync_request(mddev, j, &skipped, 5690 sectors = mddev->pers->sync_request(mddev, j, &skipped,
5691 currspeed < speed_min(mddev)); 5691 currspeed < speed_min(mddev));
5692 if (sectors == 0) { 5692 if (sectors == 0) {
5693 set_bit(MD_RECOVERY_ERR, &mddev->recovery); 5693 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5694 goto out; 5694 goto out;
5695 } 5695 }
5696 5696
@@ -5713,8 +5713,7 @@ void md_do_sync(mddev_t *mddev)
5713 5713
5714 last_check = io_sectors; 5714 last_check = io_sectors;
5715 5715
5716 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery) || 5716 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
5717 test_bit(MD_RECOVERY_ERR, &mddev->recovery))
5718 break; 5717 break;
5719 5718
5720 repeat: 5719 repeat:
@@ -5768,8 +5767,7 @@ void md_do_sync(mddev_t *mddev)
5768 /* tell personality that we are finished */ 5767 /* tell personality that we are finished */
5769 mddev->pers->sync_request(mddev, max_sectors, &skipped, 1); 5768 mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
5770 5769
5771 if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) && 5770 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
5772 !test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
5773 mddev->curr_resync > 2) { 5771 mddev->curr_resync > 2) {
5774 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 5772 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
5775 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 5773 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
@@ -5838,7 +5836,10 @@ static int remove_and_add_spares(mddev_t *mddev)
5838 } 5836 }
5839 5837
5840 if (mddev->degraded) { 5838 if (mddev->degraded) {
5841 rdev_for_each(rdev, rtmp, mddev) 5839 rdev_for_each(rdev, rtmp, mddev) {
5840 if (rdev->raid_disk >= 0 &&
5841 !test_bit(In_sync, &rdev->flags))
5842 spares++;
5842 if (rdev->raid_disk < 0 5843 if (rdev->raid_disk < 0
5843 && !test_bit(Faulty, &rdev->flags)) { 5844 && !test_bit(Faulty, &rdev->flags)) {
5844 rdev->recovery_offset = 0; 5845 rdev->recovery_offset = 0;
@@ -5856,6 +5857,7 @@ static int remove_and_add_spares(mddev_t *mddev)
5856 } else 5857 } else
5857 break; 5858 break;
5858 } 5859 }
5860 }
5859 } 5861 }
5860 return spares; 5862 return spares;
5861} 5863}
@@ -5869,7 +5871,7 @@ static int remove_and_add_spares(mddev_t *mddev)
5869 * to do that as needed. 5871 * to do that as needed.
5870 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in 5872 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
5871 * "->recovery" and create a thread at ->sync_thread. 5873 * "->recovery" and create a thread at ->sync_thread.
5872 * When the thread finishes it sets MD_RECOVERY_DONE (and might set MD_RECOVERY_ERR) 5874 * When the thread finishes it sets MD_RECOVERY_DONE
5873 * and wakeups up this thread which will reap the thread and finish up. 5875 * and wakeups up this thread which will reap the thread and finish up.
5874 * This thread also removes any faulty devices (with nr_pending == 0). 5876 * This thread also removes any faulty devices (with nr_pending == 0).
5875 * 5877 *
@@ -5944,8 +5946,7 @@ void md_check_recovery(mddev_t *mddev)
5944 /* resync has finished, collect result */ 5946 /* resync has finished, collect result */
5945 md_unregister_thread(mddev->sync_thread); 5947 md_unregister_thread(mddev->sync_thread);
5946 mddev->sync_thread = NULL; 5948 mddev->sync_thread = NULL;
5947 if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) && 5949 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
5948 !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
5949 /* success...*/ 5950 /* success...*/
5950 /* activate any spares */ 5951 /* activate any spares */
5951 mddev->pers->spare_active(mddev); 5952 mddev->pers->spare_active(mddev);
@@ -5969,7 +5970,6 @@ void md_check_recovery(mddev_t *mddev)
5969 * might be left set 5970 * might be left set
5970 */ 5971 */
5971 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5972 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5972 clear_bit(MD_RECOVERY_ERR, &mddev->recovery);
5973 clear_bit(MD_RECOVERY_INTR, &mddev->recovery); 5973 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
5974 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); 5974 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
5975 5975