author		NeilBrown <neilb@suse.de>	2009-06-30 23:15:35 -0400
committer	NeilBrown <neilb@suse.de>	2009-06-30 23:15:35 -0400
commit		e62e58a5ffdc98ac28d8dbd070c857620d541f99
tree		0ec3471f4e66e3a376ac8cb2da79d6123e7aa2cf
parent		a5c308d4d1659b1f4833b863394e3e24cdbdfc6e
md: use interruptible wait when duration is controlled by userspace.
User space can set various limits on an md array so that resync waits when it gets to a certain point, or so that I/O is blocked for a short while. When md is waiting against one of these limits, it should use an interruptible wait so as not to add to the load average, and so as not to trigger a warning if the wait goes on for too long.

Signed-off-by: NeilBrown <neilb@suse.de>
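The core of the change is switching from wait_event(), which sleeps in TASK_UNINTERRUPTIBLE and therefore counts toward the load average and the hung-task watchdog, to wait_event_interruptible() after flushing any stray signals. A minimal sketch of that pattern follows; it is not taken from this patch, and the wait queue wq and the helper userspace_limit_reached() are hypothetical stand-ins:

	/* Sketch only: wait on a condition that userspace may leave unmet
	 * indefinitely.  A TASK_INTERRUPTIBLE sleep does not inflate the
	 * load average or trip the hung-task warning.
	 */
	flush_signals(current);			/* discard any pending signals first */
	wait_event_interruptible(wq,
				 !userspace_limit_reached() ||
				 kthread_should_stop());

The return value of wait_event_interruptible() (nonzero if a signal arrived) can be ignored here because the caller re-checks the condition in a loop, as the md.c hunk below does.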
-rw-r--r--	drivers/md/md.c		14
-rw-r--r--	drivers/md/raid5.c	15
2 files changed, 21 insertions(+), 8 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 65fe35b5e34..0f4a70c43ff 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -6336,10 +6336,16 @@ void md_do_sync(mddev_t *mddev)
 			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
 		}
 
-		if (j >= mddev->resync_max)
-			wait_event(mddev->recovery_wait,
-				   mddev->resync_max > j
-				   || kthread_should_stop());
+		while (j >= mddev->resync_max && !kthread_should_stop()) {
+			/* As this condition is controlled by user-space,
+			 * we can block indefinitely, so use '_interruptible'
+			 * to avoid triggering warnings.
+			 */
+			flush_signals(current); /* just in case */
+			wait_event_interruptible(mddev->recovery_wait,
+						 mddev->resync_max > j
+						 || kthread_should_stop());
+		}
 
 		if (kthread_should_stop())
 			goto interrupted;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 1f444ae07f8..37835538b58 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3699,14 +3699,21 @@ static int make_request(struct request_queue *q, struct bio * bi)
 					goto retry;
 				}
 			}
-			/* FIXME what if we get a false positive because these
-			 * are being updated.
-			 */
+
 			if (bio_data_dir(bi) == WRITE &&
 			    logical_sector >= mddev->suspend_lo &&
 			    logical_sector < mddev->suspend_hi) {
 				release_stripe(sh);
-				schedule();
+				/* As the suspend_* range is controlled by
+				 * userspace, we want an interruptible
+				 * wait.
+				 */
+				flush_signals(current);
+				prepare_to_wait(&conf->wait_for_overlap,
+						&w, TASK_INTERRUPTIBLE);
+				if (logical_sector >= mddev->suspend_lo &&
+				    logical_sector < mddev->suspend_hi)
+					schedule();
+				goto retry;
 			}
 
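The raid5.c hunk open-codes the same idea with prepare_to_wait()/schedule() rather than wait_event_interruptible(), because make_request() already manages its own wait entry and re-checks the suspend range before sleeping. A minimal sketch of that canonical pattern, assuming a hypothetical wait queue wq and condition helper must_wait(); unlike the patch, which reuses the function's existing wait entry and finishes the wait on the retry path, the sketch pairs prepare_to_wait() with finish_wait() inline:

	DEFINE_WAIT(w);

	flush_signals(current);				/* avoid an immediate wakeup from old signals */
	prepare_to_wait(&wq, &w, TASK_INTERRUPTIBLE);	/* queue ourselves, interruptible sleep */
	if (must_wait())				/* re-check the condition after queueing */
		schedule();
	finish_wait(&wq, &w);				/* dequeue and return to TASK_RUNNING */

Re-checking the condition between prepare_to_wait() and schedule() closes the race where the waker changes the condition just before the task is on the wait queue.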