author     Bernd Schubert <bs@q-leap.de>                       2008-05-23 16:04:38 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>      2008-05-24 12:56:10 -0400
commit     90b08710e41a07d4ff0fb8940dcce3a552991a56 (patch)
tree       8e45d1c6d9b4020099fd36781065bf8b8fdb76e4 /drivers
parent     4f54b0e9485644a3c5fca2ae43bcbe7376825747 (diff)
md: allow parallel resync of md-devices.
In some configurations, a raid6 resync can be limited by CPU speed (calculating P and Q and moving data) rather than by device speed. In these cases there is nothing to be gained by serialising resync of arrays that share a device, and doing the resync in parallel can provide a benefit. So add a sysfs tunable to flag an array as being allowed to resync in parallel with other arrays that use (a different part of) the same device.

Signed-off-by: Bernd Schubert <bs@q-leap.de>
Signed-off-by: Neil Brown <neilb@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
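As a rough illustration (not part of the patch), the userspace sketch below flips the new flag on for one array. The path /sys/block/md0/md/sync_force_parallel is an assumption based on the usual location of md sysfs attributes and the attribute name added further down in the diff; echoing 1 into that file from a shell does the same thing.

/*
 * Sketch only: enable parallel resync on an assumed array md0 by
 * writing "1" to the sync_force_parallel attribute added by this patch.
 */
#include <stdio.h>

int main(void)
{
	const char *attr = "/sys/block/md0/md/sync_force_parallel";
	FILE *f = fopen(attr, "w");

	if (!f) {
		perror(attr);
		return 1;
	}
	/* the store handler accepts only "0" or "1" */
	fputs("1\n", f);
	fclose(f);
	return 0;
}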
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/md/md.c   40
1 file changed, 36 insertions(+), 4 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index e57213dacd25..295be1a68806 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -74,6 +74,8 @@ static DEFINE_SPINLOCK(pers_lock);
 
 static void md_print_devices(void);
 
+static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
+
 #define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
 
 /*
@@ -3013,6 +3015,36 @@ degraded_show(mddev_t *mddev, char *page)
 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
 
 static ssize_t
+sync_force_parallel_show(mddev_t *mddev, char *page)
+{
+	return sprintf(page, "%d\n", mddev->parallel_resync);
+}
+
+static ssize_t
+sync_force_parallel_store(mddev_t *mddev, const char *buf, size_t len)
+{
+	long n;
+
+	if (strict_strtol(buf, 10, &n))
+		return -EINVAL;
+
+	if (n != 0 && n != 1)
+		return -EINVAL;
+
+	mddev->parallel_resync = n;
+
+	if (mddev->sync_thread)
+		wake_up(&resync_wait);
+
+	return len;
+}
+
+/* force parallel resync, even with shared block devices */
+static struct md_sysfs_entry md_sync_force_parallel =
+__ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
+       sync_force_parallel_show, sync_force_parallel_store);
+
+static ssize_t
 sync_speed_show(mddev_t *mddev, char *page)
 {
 	unsigned long resync, dt, db;
@@ -3187,6 +3219,7 @@ static struct attribute *md_redundancy_attrs[] = {
 	&md_sync_min.attr,
 	&md_sync_max.attr,
 	&md_sync_speed.attr,
+	&md_sync_force_parallel.attr,
 	&md_sync_completed.attr,
 	&md_max_sync.attr,
 	&md_suspend_lo.attr,
@@ -5487,8 +5520,6 @@ void md_allow_write(mddev_t *mddev)
 }
 EXPORT_SYMBOL_GPL(md_allow_write);
 
-static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
-
 #define SYNC_MARKS	10
 #define SYNC_MARK_STEP	(3*HZ)
 void md_do_sync(mddev_t *mddev)
@@ -5552,8 +5583,9 @@ void md_do_sync(mddev_t *mddev)
 	for_each_mddev(mddev2, tmp) {
 		if (mddev2 == mddev)
 			continue;
-		if (mddev2->curr_resync &&
-		    match_mddev_units(mddev,mddev2)) {
+		if (!mddev->parallel_resync
+		&&  mddev2->curr_resync
+		&&  match_mddev_units(mddev, mddev2)) {
 			DEFINE_WAIT(wq);
 			if (mddev < mddev2 && mddev->curr_resync == 2) {
 				/* arbitrarily yield */