about summary refs log tree commit diff stats
path: root/drivers/md/md.c
diff options
context:
space:
mode:
authorNeilBrown <neilb@suse.de>2009-03-30 23:27:02 -0400
committerNeilBrown <neilb@suse.de>2009-03-30 23:27:02 -0400
commiteea1bf384e05b5ab747f8530c4fba9e9e6907fff (patch)
treeed4c05f8bd7e5805092e9c6a2c257ca71dc0fe2d /drivers/md/md.c
parent99adcd9d67aaf04e28f5ae96df280f236bde4b66 (diff)
md: Fix is_mddev_idle test (again).
There are two problems with is_mddev_idle. 1/ sync_io is 'atomic_t' and hence 'int'. curr_events and all the rest are 'long'. So if sync_io were to wrap on a 64bit host, the value of curr_events would go very negative suddenly, and take a very long time to return to positive. So do all calculations as 'int'. That gives us plenty of precision for what we need. 2/ To initialise rdev->last_events we simply call is_mddev_idle, on the assumption that it will make sure that last_events is in a suitable range. It used to do this, but now it does not. So now we need to be more explicit about initialisation. Signed-off-by: NeilBrown <neilb@suse.de>
Diffstat (limited to 'drivers/md/md.c')
-rw-r--r--  drivers/md/md.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 03b4cd0a6344..a99c50e217c0 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5716,19 +5716,19 @@ int unregister_md_personality(struct mdk_personality *p)
 	return 0;
 }
 
-static int is_mddev_idle(mddev_t *mddev)
+static int is_mddev_idle(mddev_t *mddev, int init)
 {
 	mdk_rdev_t * rdev;
 	int idle;
-	long curr_events;
+	int curr_events;
 
 	idle = 1;
 	rcu_read_lock();
 	rdev_for_each_rcu(rdev, mddev) {
 		struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
-		curr_events = part_stat_read(&disk->part0, sectors[0]) +
-			      part_stat_read(&disk->part0, sectors[1]) -
+		curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
+			      (int)part_stat_read(&disk->part0, sectors[1]) -
 			      atomic_read(&disk->sync_io);
 		/* sync IO will cause sync_io to increase before the disk_stats
 		 * as sync_io is counted when a request starts, and
 		 * disk_stats is counted when it completes.
@@ -5751,7 +5751,7 @@ static int is_mddev_idle(mddev_t *mddev)
 		 * always make curr_events less than last_events.
 		 *
 		 */
-		if (curr_events - rdev->last_events > 4096) {
+		if (init || curr_events - rdev->last_events > 64) {
 			rdev->last_events = curr_events;
 			idle = 0;
 		}
@@ -5994,7 +5994,7 @@ void md_do_sync(mddev_t *mddev)
 	       "(but not more than %d KB/sec) for %s.\n",
 	       speed_max(mddev), desc);
 
-	is_mddev_idle(mddev); /* this also initializes IO event counters */
+	is_mddev_idle(mddev, 1); /* this initializes IO event counters */
 
 	io_sectors = 0;
 	for (m = 0; m < SYNC_MARKS; m++) {
@@ -6096,7 +6096,7 @@ void md_do_sync(mddev_t *mddev)
 
 		if (currspeed > speed_min(mddev)) {
 			if ((currspeed > speed_max(mddev)) ||
-			    !is_mddev_idle(mddev)) {
+			    !is_mddev_idle(mddev, 0)) {
 				msleep(500);
 				goto repeat;
 			}