author		Jonathan Corbet <corbet@lwn.net>	2008-07-14 17:29:34 -0400
committer	Jonathan Corbet <corbet@lwn.net>	2008-07-14 17:29:34 -0400
commit		2fceef397f9880b212a74c418290ce69e7ac00eb (patch)
tree		d9cc09ab992825ef7fede4a688103503e3caf655 /drivers/md/md.c
parent		feae1ef116ed381625d3731c5ae4f4ebcb3fa302 (diff)
parent		bce7f793daec3e65ec5c5705d2457b81fe7b5725 (diff)

Merge commit 'v2.6.26' into bkl-removal
Diffstat (limited to 'drivers/md/md.c')
 drivers/md/md.c | 87 +++++++++++++++++++++++++++++++-----------------
 1 file changed, 66 insertions(+), 21 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 83eb78b00137..2580ac1b9b0f 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -74,6 +74,8 @@ static DEFINE_SPINLOCK(pers_lock);
 
 static void md_print_devices(void);
 
+static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
+
 #define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
 
 /*
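
Note: resync_wait moves up to file scope here because the new sync_force_parallel_store() handler (added in a later hunk) needs to wake sleepers on it; its old declaration next to md_do_sync() is removed in the md_allow_write() hunk below. For reference, a minimal sketch of the wait-queue pattern involved, using made-up demo_* names:

	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(demo_wait);	/* statically initialized head */
	static int demo_done;

	static void demo_sleeper(void)
	{
		/* sleeps until demo_done is nonzero; the condition is
		 * re-tested each time demo_wait is woken */
		wait_event(demo_wait, demo_done != 0);
	}

	static void demo_waker(void)
	{
		demo_done = 1;
		wake_up(&demo_wait);	/* make sleepers re-check */
	}
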
@@ -274,6 +276,7 @@ static mddev_t * mddev_find(dev_t unit)
 	atomic_set(&new->active, 1);
 	spin_lock_init(&new->write_lock);
 	init_waitqueue_head(&new->sb_wait);
+	init_waitqueue_head(&new->recovery_wait);
 	new->reshape_position = MaxSector;
 	new->resync_max = MaxSector;
 	new->level = LEVEL_NONE;
@@ -3013,6 +3016,36 @@ degraded_show(mddev_t *mddev, char *page)
 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
 
 static ssize_t
+sync_force_parallel_show(mddev_t *mddev, char *page)
+{
+	return sprintf(page, "%d\n", mddev->parallel_resync);
+}
+
+static ssize_t
+sync_force_parallel_store(mddev_t *mddev, const char *buf, size_t len)
+{
+	long n;
+
+	if (strict_strtol(buf, 10, &n))
+		return -EINVAL;
+
+	if (n != 0 && n != 1)
+		return -EINVAL;
+
+	mddev->parallel_resync = n;
+
+	if (mddev->sync_thread)
+		wake_up(&resync_wait);
+
+	return len;
+}
+
+/* force parallel resync, even with shared block devices */
+static struct md_sysfs_entry md_sync_force_parallel =
+__ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
+       sync_force_parallel_show, sync_force_parallel_store);
+
+static ssize_t
 sync_speed_show(mddev_t *mddev, char *page)
 {
 	unsigned long resync, dt, db;
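
Note: the pair above follows md's usual sysfs show/store pattern; strict_strtol() rejects anything but a clean base-10 integer, and only 0 and 1 pass the range check. Assuming an array named md0 with the new attribute exposed, userspace could flip the knob with something like this hypothetical helper:

	#include <stdio.h>

	int main(void)
	{
		/* path assumes md0; the attribute is mode 0644 (S_IRUGO|S_IWUSR) */
		FILE *f = fopen("/sys/block/md0/md/sync_force_parallel", "w");

		if (!f) {
			perror("sync_force_parallel");
			return 1;
		}
		fputs("1\n", f);	/* anything other than "0" or "1" gets -EINVAL */
		fclose(f);
		return 0;
	}

Writing 1 lets resyncs of arrays that share physical devices run in parallel; writing 0 restores the default serialization.
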
@@ -3187,6 +3220,7 @@ static struct attribute *md_redundancy_attrs[] = {
 	&md_sync_min.attr,
 	&md_sync_max.attr,
 	&md_sync_speed.attr,
+	&md_sync_force_parallel.attr,
 	&md_sync_completed.attr,
 	&md_max_sync.attr,
 	&md_suspend_lo.attr,
@@ -3691,6 +3725,8 @@ static int do_md_stop(mddev_t * mddev, int mode)
 
 		module_put(mddev->pers->owner);
 		mddev->pers = NULL;
+		/* tell userspace to handle 'inactive' */
+		sysfs_notify(&mddev->kobj, NULL, "array_state");
 
 		set_capacity(disk, 0);
 		mddev->changed = 1;
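
Note: sysfs_notify() wakes any poll()/select() waiters on the named attribute, so a monitor sees the array go 'inactive' without re-reading in a loop. A sketch of the consuming side (userspace; the md0 path is an assumption):

	#include <fcntl.h>
	#include <poll.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[64];
		ssize_t n;
		int fd = open("/sys/block/md0/md/array_state", O_RDONLY);

		if (fd < 0)
			return 1;
		pread(fd, buf, sizeof(buf) - 1, 0);	/* sysfs poll wants a read first */

		struct pollfd pfd = { .fd = fd, .events = POLLPRI };
		poll(&pfd, 1, -1);	/* returns once the kernel calls sysfs_notify() */

		n = pread(fd, buf, sizeof(buf) - 1, 0);	/* fetch the new state */
		if (n > 0) {
			buf[n] = '\0';
			printf("array_state: %s", buf);
		}
		close(fd);
		return 0;
	}
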
@@ -3861,8 +3897,10 @@ static void autorun_devices(int part)
 
 		md_probe(dev, NULL, NULL);
 		mddev = mddev_find(dev);
-		if (!mddev) {
-			printk(KERN_ERR
+		if (!mddev || !mddev->gendisk) {
+			if (mddev)
+				mddev_put(mddev);
+			printk(KERN_ERR
 				"md: cannot allocate memory for md drive.\n");
 			break;
 		}
@@ -3987,8 +4025,8 @@ static int get_bitmap_file(mddev_t * mddev, void __user * arg)
 	if (!buf)
 		goto out;
 
-	ptr = file_path(mddev->bitmap->file, buf, sizeof(file->pathname));
-	if (!ptr)
+	ptr = d_path(&mddev->bitmap->file->f_path, buf, sizeof(file->pathname));
+	if (IS_ERR(ptr))
 		goto out;
 
 	strcpy(file->pathname, ptr);
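
Note: the old file_path() wrapper returned NULL on failure, whereas d_path() encodes the error in the returned pointer itself, hence the switch from a NULL test to IS_ERR(). The ERR_PTR convention in miniature (demo_* names are invented):

	#include <linux/err.h>
	#include <linux/types.h>

	/* a made-up lookup that fails the way d_path() does */
	static char *demo_path(char *buf, size_t len)
	{
		if (len < 2)
			return ERR_PTR(-ENAMETOOLONG);	/* errno hidden in the pointer */
		buf[0] = '/';
		buf[1] = '\0';
		return buf;
	}

	static int demo_caller(void)
	{
		char buf[64];
		char *p = demo_path(buf, sizeof(buf));

		if (IS_ERR(p))
			return PTR_ERR(p);	/* decode back to a negative errno */
		return 0;
	}
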
@@ -5399,7 +5437,7 @@ void md_done_sync(mddev_t *mddev, int blocks, int ok)
 	atomic_sub(blocks, &mddev->recovery_active);
 	wake_up(&mddev->recovery_wait);
 	if (!ok) {
-		set_bit(MD_RECOVERY_ERR, &mddev->recovery);
+		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
 		md_wakeup_thread(mddev->thread);
 		// stop recovery, signal do_sync ....
 	}
@@ -5435,8 +5473,11 @@ void md_write_start(mddev_t *mddev, struct bio *bi)
 			md_wakeup_thread(mddev->thread);
 		}
 		spin_unlock_irq(&mddev->write_lock);
+		sysfs_notify(&mddev->kobj, NULL, "array_state");
 	}
-	wait_event(mddev->sb_wait, mddev->flags==0);
+	wait_event(mddev->sb_wait,
+		   !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
+		   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
 }
 
 void md_write_end(mddev_t *mddev)
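
Note: the old code waited for mddev->flags to reach zero outright, which can sleep forever if some unrelated flag bit stays set; the new condition names exactly the two superblock-change bits that the superblock writer clears. The difference, reduced to plain C with made-up bit positions:

	#include <stdio.h>

	int main(void)
	{
		unsigned long flags = 0;

		flags |= 1UL << 2;			/* an unrelated bit stays set */

		int old_cond = (flags == 0);		/* 0: would sleep forever */
		int new_cond = !(flags & (1UL << 0)) &&	/* "CLEAN" bit clear */
			       !(flags & (1UL << 1));	/* "PENDING" bit clear */

		printf("old=%d new=%d\n", old_cond, new_cond);	/* old=0 new=1 */
		return 0;
	}
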
@@ -5471,13 +5512,17 @@ void md_allow_write(mddev_t *mddev)
 			mddev->safemode = 1;
 		spin_unlock_irq(&mddev->write_lock);
 		md_update_sb(mddev, 0);
+
+		sysfs_notify(&mddev->kobj, NULL, "array_state");
+		/* wait for the dirty state to be recorded in the metadata */
+		wait_event(mddev->sb_wait,
+			   !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
+			   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
 	} else
 		spin_unlock_irq(&mddev->write_lock);
 }
 EXPORT_SYMBOL_GPL(md_allow_write);
 
-static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
-
 #define SYNC_MARKS	10
 #define	SYNC_MARK_STEP	(3*HZ)
 void md_do_sync(mddev_t *mddev)
@@ -5541,8 +5586,9 @@ void md_do_sync(mddev_t *mddev)
 		for_each_mddev(mddev2, tmp) {
 			if (mddev2 == mddev)
 				continue;
-			if (mddev2->curr_resync &&
-			    match_mddev_units(mddev,mddev2)) {
+			if (!mddev->parallel_resync
+			&&  mddev2->curr_resync
+			&&  match_mddev_units(mddev, mddev2)) {
 				DEFINE_WAIT(wq);
 				if (mddev < mddev2 && mddev->curr_resync == 2) {
 					/* arbitrarily yield */
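
Note: md serializes resync across arrays that share physical devices; the new mddev->parallel_resync test lets the sysfs knob added above opt out of that. When two resyncs do conflict, comparing the mddev pointers gives both sides a consistent answer about who yields. The idea in isolation (illustrative only; the kernel relies on the address order being stable, not on strictly portable C):

	#include <stdio.h>

	struct array { int id; };

	/* exactly one of two conflicting sides should back off */
	static int should_yield(const struct array *a, const struct array *b)
	{
		return a < b;	/* addresses impose a stable global order */
	}

	int main(void)
	{
		struct array x = {0}, y = {1};

		printf("x yields=%d, y yields=%d\n",
		       should_yield(&x, &y), should_yield(&y, &x));
		return 0;
	}
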
@@ -5622,7 +5668,6 @@ void md_do_sync(mddev_t *mddev)
 		 window/2,(unsigned long long) max_sectors/2);
 
 	atomic_set(&mddev->recovery_active, 0);
-	init_waitqueue_head(&mddev->recovery_wait);
 	last_check = 0;
 
 	if (j>2) {
@@ -5647,7 +5692,7 @@ void md_do_sync(mddev_t *mddev)
 		sectors = mddev->pers->sync_request(mddev, j, &skipped,
 						  currspeed < speed_min(mddev));
 		if (sectors == 0) {
-			set_bit(MD_RECOVERY_ERR, &mddev->recovery);
+			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
 			goto out;
 		}
 
@@ -5670,8 +5715,7 @@ void md_do_sync(mddev_t *mddev)
 
 		last_check = io_sectors;
 
-		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
-		    test_bit(MD_RECOVERY_ERR, &mddev->recovery))
+		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
 			break;
 
 	repeat:
@@ -5725,8 +5769,7 @@ void md_do_sync(mddev_t *mddev)
 	/* tell personality that we are finished */
 	mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
 
-	if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
-	    !test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
+	if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
 	    mddev->curr_resync > 2) {
 		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
 			if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
@@ -5795,7 +5838,10 @@ static int remove_and_add_spares(mddev_t *mddev)
 	}
 
 	if (mddev->degraded) {
-		rdev_for_each(rdev, rtmp, mddev)
+		rdev_for_each(rdev, rtmp, mddev) {
+			if (rdev->raid_disk >= 0 &&
+			    !test_bit(In_sync, &rdev->flags))
+				spares++;
 			if (rdev->raid_disk < 0
 			    && !test_bit(Faulty, &rdev->flags)) {
 				rdev->recovery_offset = 0;
@@ -5813,6 +5859,7 @@ static int remove_and_add_spares(mddev_t *mddev)
 			} else
 				break;
 			}
+		}
 	}
 	return spares;
 }
@@ -5826,7 +5873,7 @@ static int remove_and_add_spares(mddev_t *mddev)
  * to do that as needed.
  * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
  * "->recovery" and create a thread at ->sync_thread.
- * When the thread finishes it sets MD_RECOVERY_DONE (and might set MD_RECOVERY_ERR)
+ * When the thread finishes it sets MD_RECOVERY_DONE
  * and wakeups up this thread which will reap the thread and finish up.
  * This thread also removes any faulty devices (with nr_pending == 0).
  *
@@ -5901,8 +5948,7 @@ void md_check_recovery(mddev_t *mddev)
 			/* resync has finished, collect result */
 			md_unregister_thread(mddev->sync_thread);
 			mddev->sync_thread = NULL;
-			if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
-			    !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
+			if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
 				/* success...*/
 				/* activate any spares */
 				mddev->pers->spare_active(mddev);
@@ -5926,7 +5972,6 @@ void md_check_recovery(mddev_t *mddev)
 		 * might be left set
 		 */
 		clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
-		clear_bit(MD_RECOVERY_ERR, &mddev->recovery);
 		clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
 		clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
 