aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/md/md.c
diff options
context:
space:
mode:
authorRobert Becker <Rob.Becker@riverbed.com>2009-12-13 20:49:58 -0500
committerNeilBrown <neilb@suse.de>2009-12-13 20:51:41 -0500
commit1e50915fe0bbf7a46db0fa7e1e604d3fc95f057d (patch)
tree7a722ad6f56c61a6173493f1cd44d809c8b1bd8d /drivers/md/md.c
parent67b8dc4b06b0e97df55fd76e209f34f9a52e820e (diff)
raid: improve MD/raid10 handling of correctable read errors.
We've noticed severe lasting performance degradation of our raid arrays when we have drives that yield large amounts of media errors. The raid10 module will queue each failed read for retry, and will also attempt to call fix_read_error() to perform the read recovery. Read recovery is performed while the array is frozen, so repeated recovery attempts can degrade the performance of the array for extended periods of time. With this patch I propose adding a per md device max number of corrected read attempts. Each rdev will maintain a count of read correction attempts in the rdev->read_errors field (not used currently for raid10). When we enter fix_read_error() we'll check to see when the last read error occurred, and divide the read error count by 2 for every hour since the last read error. If at that point our read error count exceeds the read error threshold, we'll fail the raid device. In addition, in this patch I add sysfs nodes (get/set) for the per md max_read_errors attribute, the rdev->read_errors attribute, and added some printk's to indicate when fix_read_error fails to repair an rdev. For testing I used debugfs->fail_make_request to inject IO errors to the rdev while doing IO to the raid array. Signed-off-by: Robert Becker <Rob.Becker@riverbed.com> Signed-off-by: NeilBrown <neilb@suse.de>
Diffstat (limited to 'drivers/md/md.c')
-rw-r--r--drivers/md/md.c34
1 files changed, 34 insertions, 0 deletions
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 859edbf8c9b0..f1b905a20133 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -68,6 +68,12 @@ static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
 #define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
 
 /*
+ * Default number of read corrections we'll attempt on an rdev
+ * before ejecting it from the array. We divide the read error
+ * count by 2 for every hour elapsed between read errors.
+ */
+#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
+/*
  * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
  * is 1000 KB/sec, so the extra system load does not show up that much.
  * Increase it if you want to have more _guaranteed_ speed. Note that
@@ -2653,6 +2659,8 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
 	rdev->flags = 0;
 	rdev->data_offset = 0;
 	rdev->sb_events = 0;
+	rdev->last_read_error.tv_sec = 0;
+	rdev->last_read_error.tv_nsec = 0;
 	atomic_set(&rdev->nr_pending, 0);
 	atomic_set(&rdev->read_errors, 0);
 	atomic_set(&rdev->corrected_errors, 0);
@@ -3290,6 +3298,29 @@ static struct md_sysfs_entry md_array_state =
 __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
 
 static ssize_t
+max_corrected_read_errors_show(mddev_t *mddev, char *page) {
+	return sprintf(page, "%d\n",
+		       atomic_read(&mddev->max_corr_read_errors));
+}
+
+static ssize_t
+max_corrected_read_errors_store(mddev_t *mddev, const char *buf, size_t len)
+{
+	char *e;
+	unsigned long n = simple_strtoul(buf, &e, 10);
+
+	if (*buf && (*e == 0 || *e == '\n')) {
+		atomic_set(&mddev->max_corr_read_errors, n);
+		return len;
+	}
+	return -EINVAL;
+}
+
+static struct md_sysfs_entry max_corr_read_errors =
+__ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
+       max_corrected_read_errors_store);
+
+static ssize_t
 null_show(mddev_t *mddev, char *page)
 {
 	return -EINVAL;
@@ -3914,6 +3945,7 @@ static struct attribute *md_default_attrs[] = {
 	&md_array_state.attr,
 	&md_reshape_position.attr,
 	&md_array_size.attr,
+	&max_corr_read_errors.attr,
 	NULL,
 };
 
@@ -4333,6 +4365,8 @@ static int do_md_run(mddev_t * mddev)
 	mddev->ro = 0;
 
 	atomic_set(&mddev->writes_pending,0);
+	atomic_set(&mddev->max_corr_read_errors,
+		   MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
 	mddev->safemode = 0;
 	mddev->safemode_timer.function = md_safemode_timeout;
 	mddev->safemode_timer.data = (unsigned long) mddev;