Diffstat (limited to 'drivers/md/md.c')
-rw-r--r--   drivers/md/md.c   81
1 file changed, 62 insertions(+), 19 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 83eb78b00137..7cf512a34ccf 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -74,6 +74,8 @@ static DEFINE_SPINLOCK(pers_lock);
 
 static void md_print_devices(void);
 
+static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
+
 #define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
 
 /*
@@ -274,6 +276,7 @@ static mddev_t * mddev_find(dev_t unit)
         atomic_set(&new->active, 1);
         spin_lock_init(&new->write_lock);
         init_waitqueue_head(&new->sb_wait);
+        init_waitqueue_head(&new->recovery_wait);
         new->reshape_position = MaxSector;
         new->resync_max = MaxSector;
         new->level = LEVEL_NONE;
@@ -3013,6 +3016,36 @@ degraded_show(mddev_t *mddev, char *page)
 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
 
 static ssize_t
+sync_force_parallel_show(mddev_t *mddev, char *page)
+{
+        return sprintf(page, "%d\n", mddev->parallel_resync);
+}
+
+static ssize_t
+sync_force_parallel_store(mddev_t *mddev, const char *buf, size_t len)
+{
+        long n;
+
+        if (strict_strtol(buf, 10, &n))
+                return -EINVAL;
+
+        if (n != 0 && n != 1)
+                return -EINVAL;
+
+        mddev->parallel_resync = n;
+
+        if (mddev->sync_thread)
+                wake_up(&resync_wait);
+
+        return len;
+}
+
+/* force parallel resync, even with shared block devices */
+static struct md_sysfs_entry md_sync_force_parallel =
+__ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
+       sync_force_parallel_show, sync_force_parallel_store);
+
+static ssize_t
 sync_speed_show(mddev_t *mddev, char *page)
 {
         unsigned long resync, dt, db;
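The new attribute is exposed through the per-array md sysfs directory, so userspace can toggle it at runtime. A minimal userspace sketch, assuming the conventional /sys/block/mdX/md/ location for md attributes (the path and this helper program are not part of the patch; the store handler above accepts only "0" or "1"):

/* Hypothetical example: force parallel resync on md0. */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/block/md0/md/sync_force_parallel", "w");

        if (!f)
                return 1;       /* attribute missing or no permission */
        fputs("1\n", f);        /* "0" would restore serialized resync */
        return fclose(f) ? 1 : 0;
}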
@@ -3187,6 +3220,7 @@ static struct attribute *md_redundancy_attrs[] = {
         &md_sync_min.attr,
         &md_sync_max.attr,
         &md_sync_speed.attr,
+        &md_sync_force_parallel.attr,
         &md_sync_completed.attr,
         &md_max_sync.attr,
         &md_suspend_lo.attr,
@@ -3691,6 +3725,8 @@ static int do_md_stop(mddev_t * mddev, int mode)
 
                 module_put(mddev->pers->owner);
                 mddev->pers = NULL;
+                /* tell userspace to handle 'inactive' */
+                sysfs_notify(&mddev->kobj, NULL, "array_state");
 
                 set_capacity(disk, 0);
                 mddev->changed = 1;
@@ -3987,8 +4023,8 @@ static int get_bitmap_file(mddev_t * mddev, void __user * arg)
         if (!buf)
                 goto out;
 
-        ptr = file_path(mddev->bitmap->file, buf, sizeof(file->pathname));
-        if (!ptr)
+        ptr = d_path(&mddev->bitmap->file->f_path, buf, sizeof(file->pathname));
+        if (IS_ERR(ptr))
                 goto out;
 
         strcpy(file->pathname, ptr);
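The changed error check matters because d_path() reports failure through ERR_PTR() rather than NULL, so the old `if (!ptr)` test would miss errors such as -ENAMETOOLONG. A minimal sketch of the checking pattern used above (illustrative only; `some_file` is a hypothetical struct file pointer, not taken from md.c):

        char buf[256];
        char *p = d_path(&some_file->f_path, buf, sizeof(buf));

        if (IS_ERR(p))
                return PTR_ERR(p);      /* e.g. -ENAMETOOLONG if buf is too small */
        /* on success, p points into buf at the start of the rendered path */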
@@ -5399,7 +5435,7 @@ void md_done_sync(mddev_t *mddev, int blocks, int ok)
         atomic_sub(blocks, &mddev->recovery_active);
         wake_up(&mddev->recovery_wait);
         if (!ok) {
-                set_bit(MD_RECOVERY_ERR, &mddev->recovery);
+                set_bit(MD_RECOVERY_INTR, &mddev->recovery);
                 md_wakeup_thread(mddev->thread);
                 // stop recovery, signal do_sync ....
         }
@@ -5435,8 +5471,11 @@ void md_write_start(mddev_t *mddev, struct bio *bi)
                         md_wakeup_thread(mddev->thread);
                 }
                 spin_unlock_irq(&mddev->write_lock);
+                sysfs_notify(&mddev->kobj, NULL, "array_state");
         }
-        wait_event(mddev->sb_wait, mddev->flags==0);
+        wait_event(mddev->sb_wait,
+                   !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
+                   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
 }
 
 void md_write_end(mddev_t *mddev)
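The open-coded `mddev->flags == 0` test is replaced by checks on the two specific change bits, so a writer only waits for the clean/pending superblock transition rather than for every flag bit to clear. A small helper-style sketch of the same condition (the helper itself is hypothetical; the patch inlines the tests as shown above and again in md_allow_write below):

/* Hypothetical helper expressing the wait condition used in the hunk above. */
static inline int md_sb_change_recorded(mddev_t *mddev)
{
        return !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
               !test_bit(MD_CHANGE_PENDING, &mddev->flags);
}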
@@ -5471,13 +5510,17 @@ void md_allow_write(mddev_t *mddev)
                 mddev->safemode = 1;
                 spin_unlock_irq(&mddev->write_lock);
                 md_update_sb(mddev, 0);
+
+                sysfs_notify(&mddev->kobj, NULL, "array_state");
+                /* wait for the dirty state to be recorded in the metadata */
+                wait_event(mddev->sb_wait,
+                           !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
+                           !test_bit(MD_CHANGE_PENDING, &mddev->flags));
         } else
                 spin_unlock_irq(&mddev->write_lock);
 }
 EXPORT_SYMBOL_GPL(md_allow_write);
 
-static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
-
 #define SYNC_MARKS      10
 #define SYNC_MARK_STEP  (3*HZ)
 void md_do_sync(mddev_t *mddev)
@@ -5541,8 +5584,9 @@ void md_do_sync(mddev_t *mddev)
         for_each_mddev(mddev2, tmp) {
                 if (mddev2 == mddev)
                         continue;
-                if (mddev2->curr_resync &&
-                    match_mddev_units(mddev,mddev2)) {
+                if (!mddev->parallel_resync
+                &&  mddev2->curr_resync
+                &&  match_mddev_units(mddev, mddev2)) {
                         DEFINE_WAIT(wq);
                         if (mddev < mddev2 && mddev->curr_resync == 2) {
                                 /* arbitrarily yield */
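The extra test means the usual rule of serializing resync between arrays that share a physical device is skipped entirely when parallel_resync is set. A hypothetical predicate restating when md_do_sync would still wait for another array (a paraphrase of the condition in the hunk above, not a function that exists in md.c):

/* Hypothetical restatement of the condition added above. */
static int resync_must_wait(mddev_t *mddev, mddev_t *other)
{
        if (mddev->parallel_resync)
                return 0;                       /* user forced parallel resync */
        return other->curr_resync &&            /* other array is resyncing ... */
               match_mddev_units(mddev, other); /* ... and shares a device */
}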
@@ -5622,7 +5666,6 @@ void md_do_sync(mddev_t *mddev)
                  window/2,(unsigned long long) max_sectors/2);
 
         atomic_set(&mddev->recovery_active, 0);
-        init_waitqueue_head(&mddev->recovery_wait);
         last_check = 0;
 
         if (j>2) {
@@ -5647,7 +5690,7 @@ void md_do_sync(mddev_t *mddev)
                 sectors = mddev->pers->sync_request(mddev, j, &skipped,
                                                   currspeed < speed_min(mddev));
                 if (sectors == 0) {
-                        set_bit(MD_RECOVERY_ERR, &mddev->recovery);
+                        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
                         goto out;
                 }
 
@@ -5670,8 +5713,7 @@ void md_do_sync(mddev_t *mddev)
 
                 last_check = io_sectors;
 
-                if (test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
-                    test_bit(MD_RECOVERY_ERR, &mddev->recovery))
+                if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
                         break;
 
         repeat:
@@ -5725,8 +5767,7 @@ void md_do_sync(mddev_t *mddev)
         /* tell personality that we are finished */
         mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
 
-        if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
-            !test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
+        if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
             mddev->curr_resync > 2) {
                 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
                         if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
@@ -5795,7 +5836,10 @@ static int remove_and_add_spares(mddev_t *mddev)
         }
 
         if (mddev->degraded) {
-                rdev_for_each(rdev, rtmp, mddev)
+                rdev_for_each(rdev, rtmp, mddev) {
+                        if (rdev->raid_disk >= 0 &&
+                            !test_bit(In_sync, &rdev->flags))
+                                spares++;
                         if (rdev->raid_disk < 0
                             && !test_bit(Faulty, &rdev->flags)) {
                                 rdev->recovery_offset = 0;
@@ -5813,6 +5857,7 @@ static int remove_and_add_spares(mddev_t *mddev)
                         } else
                                 break;
                 }
+                }
         }
         return spares;
 }
@@ -5826,7 +5871,7 @@ static int remove_and_add_spares(mddev_t *mddev)
  * to do that as needed.
  * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
  * "->recovery" and create a thread at ->sync_thread.
- * When the thread finishes it sets MD_RECOVERY_DONE (and might set MD_RECOVERY_ERR)
+ * When the thread finishes it sets MD_RECOVERY_DONE
  * and wakeups up this thread which will reap the thread and finish up.
  * This thread also removes any faulty devices (with nr_pending == 0).
  *
@@ -5901,8 +5946,7 @@ void md_check_recovery(mddev_t *mddev)
                         /* resync has finished, collect result */
                         md_unregister_thread(mddev->sync_thread);
                         mddev->sync_thread = NULL;
-                        if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
-                            !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
+                        if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
                                 /* success...*/
                                 /* activate any spares */
                                 mddev->pers->spare_active(mddev);
@@ -5926,7 +5970,6 @@ void md_check_recovery(mddev_t *mddev)
                  * might be left set
                  */
                 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
-                clear_bit(MD_RECOVERY_ERR, &mddev->recovery);
                 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
                 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
 