path: root/drivers/md/md.c
author		Ingo Molnar <mingo@elte.hu>	2009-05-07 05:17:13 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-05-07 05:17:34 -0400
commit		44347d947f628060b92449702071bfe1d31dfb75 (patch)
tree		c6ed74610d5b3295df4296659f80f5feb94b28cc /drivers/md/md.c
parent		d94fc523f3c35bd8013f04827e94756cbc0212f4 (diff)
parent		413f81eba35d6ede9289b0c8a920c013a84fac71 (diff)
Merge branch 'linus' into tracing/core
Merge reason: tracing/core was on a .30-rc1 base and was missing out on a handful of tracing fixes present in .30-rc5-almost.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'drivers/md/md.c')
-rw-r--r--	drivers/md/md.c	41
1 file changed, 28 insertions(+), 13 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index ed5727c089a9..612343fdde94 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2017,6 +2017,8 @@ repeat:
 	clear_bit(MD_CHANGE_PENDING, &mddev->flags);
 	spin_unlock_irq(&mddev->write_lock);
 	wake_up(&mddev->sb_wait);
+	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
+		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
 
 }
 
@@ -2086,6 +2088,7 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
 	 * -writemostly - clears write_mostly
 	 * blocked - sets the Blocked flag
 	 * -blocked - clears the Blocked flag
+	 * insync - sets Insync providing device isn't active
 	 */
 	int err = -EINVAL;
 	if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
@@ -2118,6 +2121,9 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
 		md_wakeup_thread(rdev->mddev->thread);
 
 		err = 0;
+	} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
+		set_bit(In_sync, &rdev->flags);
+		err = 0;
 	}
 	if (!err && rdev->sysfs_state)
 		sysfs_notify_dirent(rdev->sysfs_state);
@@ -2190,7 +2196,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
 	} else if (rdev->mddev->pers) {
 		mdk_rdev_t *rdev2;
 		/* Activating a spare .. or possibly reactivating
-		 * if we every get bitmaps working here.
+		 * if we ever get bitmaps working here.
 		 */
 
 		if (rdev->raid_disk != -1)
@@ -3482,12 +3488,15 @@ sync_completed_show(mddev_t *mddev, char *page)
 {
 	unsigned long max_sectors, resync;
 
+	if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
+		return sprintf(page, "none\n");
+
 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
 		max_sectors = mddev->resync_max_sectors;
 	else
 		max_sectors = mddev->dev_sectors;
 
-	resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
+	resync = mddev->curr_resync_completed;
 	return sprintf(page, "%lu / %lu\n", resync, max_sectors);
 }
 
@@ -6334,18 +6343,13 @@ void md_do_sync(mddev_t *mddev)
 		sector_t sectors;
 
 		skipped = 0;
-		if (j >= mddev->resync_max) {
-			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
-			wait_event(mddev->recovery_wait,
-				   mddev->resync_max > j
-				   || kthread_should_stop());
-		}
-		if (kthread_should_stop())
-			goto interrupted;
 
-		if (mddev->curr_resync > mddev->curr_resync_completed &&
+		if ((mddev->curr_resync > mddev->curr_resync_completed &&
 		    (mddev->curr_resync - mddev->curr_resync_completed)
-		    > (max_sectors >> 4)) {
+		    > (max_sectors >> 4)) ||
+		    (j - mddev->curr_resync_completed)*2
+		    >= mddev->resync_max - mddev->curr_resync_completed
+			) {
 			/* time to update curr_resync_completed */
 			blk_unplug(mddev->queue);
 			wait_event(mddev->recovery_wait,
@@ -6353,7 +6357,17 @@ void md_do_sync(mddev_t *mddev)
 			mddev->curr_resync_completed =
 				mddev->curr_resync;
 			set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
 		}
+
+		if (j >= mddev->resync_max)
+			wait_event(mddev->recovery_wait,
+				   mddev->resync_max > j
+				   || kthread_should_stop());
+
+		if (kthread_should_stop())
+			goto interrupted;
+
 		sectors = mddev->pers->sync_request(mddev, j, &skipped,
 						  currspeed < speed_min(mddev));
 		if (sectors == 0) {
@@ -6461,6 +6475,7 @@ void md_do_sync(mddev_t *mddev)
 
  skip:
 	mddev->curr_resync = 0;
+	mddev->curr_resync_completed = 0;
 	mddev->resync_min = 0;
 	mddev->resync_max = MaxSector;
 	sysfs_notify(&mddev->kobj, NULL, "sync_completed");
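
Note (not part of the commit): the hunks above touch two user-visible pieces of the md sysfs interface, the per-array sync_completed attribute (which now reports "none" when no recovery is running and is notified as resync checkpoints advance) and the per-device state attribute (which now accepts "insync" for a device that is not active in the array). Below is a minimal userspace sketch of how these files might be exercised; the paths /sys/block/md0/md/sync_completed and /sys/block/md0/md/dev-sda1/state are illustrative assumptions, not something defined by this patch.

/* Illustrative sketch only; the array and member-device paths are assumed. */
#include <stdio.h>

int main(void)
{
	char buf[64];
	FILE *f;

	/* Progress reads as "none" or "<sectors done> / <total sectors>". */
	f = fopen("/sys/block/md0/md/sync_completed", "r");
	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("sync_completed: %s", buf);
		fclose(f);
	}

	/* Mark a non-active member device as in-sync via the new "insync" keyword. */
	f = fopen("/sys/block/md0/md/dev-sda1/state", "w");
	if (f) {
		fputs("insync", f);
		fclose(f);
	}
	return 0;
}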