diff options
author | Song Liu <songliubraving@fb.com> | 2017-01-24 13:45:30 -0500 |
---|---|---|
committer | Shaohua Li <shli@fb.com> | 2017-01-24 14:26:06 -0500 |
commit | 2e38a37f23c98d7fad87ff022670060b8a0e2bf5 (patch) | |
tree | 0cc010718f63769debe1e45f7ff20bffc6db0b88 | |
parent | 07e83364845e1e1c7e189a01206a9d7d33831568 (diff) |
md/r5cache: disable write back for degraded array
Write-back cache in degraded mode introduces corner cases to the array.
Although we try to cover all these corner cases, it is safer to simply
disable the write-back cache when the array is in degraded mode.
In this patch, we disable the writeback cache in degraded mode:
1. On device failure, if the array enters degraded mode, raid5_error()
will submit async job r5c_disable_writeback_async to disable
writeback;
2. In r5c_journal_mode_store(), it is invalid to enable writeback in
degraded mode;
3. In r5c_try_caching_write(), stripes with s->failed>0 will be handled
in write-through mode.
Signed-off-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Shaohua Li <shli@fb.com>
-rw-r--r-- | drivers/md/raid5-cache.c | 46 | ||||
-rw-r--r-- | drivers/md/raid5.c | 15 | ||||
-rw-r--r-- | drivers/md/raid5.h | 2 |
3 files changed, 56 insertions, 7 deletions
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 3d7dda85494c..302dea3296ba 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c | |||
@@ -162,6 +162,8 @@ struct r5l_log { | |||
162 | 162 | ||
163 | /* to submit async io_units, to fulfill ordering of flush */ | 163 | /* to submit async io_units, to fulfill ordering of flush */ |
164 | struct work_struct deferred_io_work; | 164 | struct work_struct deferred_io_work; |
165 | /* to disable write back in degraded mode */ | ||
166 | struct work_struct disable_writeback_work; | ||
165 | }; | 167 | }; |
166 | 168 | ||
167 | /* | 169 | /* |
@@ -611,6 +613,21 @@ static void r5l_submit_io_async(struct work_struct *work) | |||
611 | r5l_do_submit_io(log, io); | 613 | r5l_do_submit_io(log, io); |
612 | } | 614 | } |
613 | 615 | ||
616 | static void r5c_disable_writeback_async(struct work_struct *work) | ||
617 | { | ||
618 | struct r5l_log *log = container_of(work, struct r5l_log, | ||
619 | disable_writeback_work); | ||
620 | struct mddev *mddev = log->rdev->mddev; | ||
621 | |||
622 | if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) | ||
623 | return; | ||
624 | pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n", | ||
625 | mdname(mddev)); | ||
626 | mddev_suspend(mddev); | ||
627 | log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH; | ||
628 | mddev_resume(mddev); | ||
629 | } | ||
630 | |||
614 | static void r5l_submit_current_io(struct r5l_log *log) | 631 | static void r5l_submit_current_io(struct r5l_log *log) |
615 | { | 632 | { |
616 | struct r5l_io_unit *io = log->current_io; | 633 | struct r5l_io_unit *io = log->current_io; |
@@ -2269,6 +2286,10 @@ static ssize_t r5c_journal_mode_store(struct mddev *mddev, | |||
2269 | val > R5C_JOURNAL_MODE_WRITE_BACK) | 2286 | val > R5C_JOURNAL_MODE_WRITE_BACK) |
2270 | return -EINVAL; | 2287 | return -EINVAL; |
2271 | 2288 | ||
2289 | if (raid5_calc_degraded(conf) > 0 && | ||
2290 | val == R5C_JOURNAL_MODE_WRITE_BACK) | ||
2291 | return -EINVAL; | ||
2292 | |||
2272 | mddev_suspend(mddev); | 2293 | mddev_suspend(mddev); |
2273 | conf->log->r5c_journal_mode = val; | 2294 | conf->log->r5c_journal_mode = val; |
2274 | mddev_resume(mddev); | 2295 | mddev_resume(mddev); |
@@ -2323,6 +2344,16 @@ int r5c_try_caching_write(struct r5conf *conf, | |||
2323 | set_bit(STRIPE_R5C_CACHING, &sh->state); | 2344 | set_bit(STRIPE_R5C_CACHING, &sh->state); |
2324 | } | 2345 | } |
2325 | 2346 | ||
2347 | /* | ||
2348 | * When run in degraded mode, the array is set to write-through mode. | ||
2349 | * This check helps drain pending write safely in the transition to | ||
2350 | * write-through mode. | ||
2351 | */ | ||
2352 | if (s->failed) { | ||
2353 | r5c_make_stripe_write_out(sh); | ||
2354 | return -EAGAIN; | ||
2355 | } | ||
2356 | |||
2326 | for (i = disks; i--; ) { | 2357 | for (i = disks; i--; ) { |
2327 | dev = &sh->dev[i]; | 2358 | dev = &sh->dev[i]; |
2328 | /* if non-overwrite, use writing-out phase */ | 2359 | /* if non-overwrite, use writing-out phase */ |
@@ -2579,6 +2610,19 @@ ioerr: | |||
2579 | return ret; | 2610 | return ret; |
2580 | } | 2611 | } |
2581 | 2612 | ||
2613 | void r5c_update_on_rdev_error(struct mddev *mddev) | ||
2614 | { | ||
2615 | struct r5conf *conf = mddev->private; | ||
2616 | struct r5l_log *log = conf->log; | ||
2617 | |||
2618 | if (!log) | ||
2619 | return; | ||
2620 | |||
2621 | if (raid5_calc_degraded(conf) > 0 && | ||
2622 | conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) | ||
2623 | schedule_work(&log->disable_writeback_work); | ||
2624 | } | ||
2625 | |||
2582 | int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev) | 2626 | int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev) |
2583 | { | 2627 | { |
2584 | struct request_queue *q = bdev_get_queue(rdev->bdev); | 2628 | struct request_queue *q = bdev_get_queue(rdev->bdev); |
@@ -2651,6 +2695,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev) | |||
2651 | spin_lock_init(&log->no_space_stripes_lock); | 2695 | spin_lock_init(&log->no_space_stripes_lock); |
2652 | 2696 | ||
2653 | INIT_WORK(&log->deferred_io_work, r5l_submit_io_async); | 2697 | INIT_WORK(&log->deferred_io_work, r5l_submit_io_async); |
2698 | INIT_WORK(&log->disable_writeback_work, r5c_disable_writeback_async); | ||
2654 | 2699 | ||
2655 | log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH; | 2700 | log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH; |
2656 | INIT_LIST_HEAD(&log->stripe_in_journal_list); | 2701 | INIT_LIST_HEAD(&log->stripe_in_journal_list); |
@@ -2683,6 +2728,7 @@ io_kc: | |||
2683 | 2728 | ||
2684 | void r5l_exit_log(struct r5l_log *log) | 2729 | void r5l_exit_log(struct r5l_log *log) |
2685 | { | 2730 | { |
2731 | flush_work(&log->disable_writeback_work); | ||
2686 | md_unregister_thread(&log->reclaim_thread); | 2732 | md_unregister_thread(&log->reclaim_thread); |
2687 | mempool_destroy(log->meta_pool); | 2733 | mempool_destroy(log->meta_pool); |
2688 | bioset_free(log->bs); | 2734 | bioset_free(log->bs); |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index dc83da69ca7c..3c7e106c12a2 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -556,7 +556,7 @@ static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector, | |||
556 | * of the two sections, and some non-in_sync devices may | 556 | * of the two sections, and some non-in_sync devices may |
557 | * be insync in the section most affected by failed devices. | 557 | * be insync in the section most affected by failed devices. |
558 | */ | 558 | */ |
559 | static int calc_degraded(struct r5conf *conf) | 559 | int raid5_calc_degraded(struct r5conf *conf) |
560 | { | 560 | { |
561 | int degraded, degraded2; | 561 | int degraded, degraded2; |
562 | int i; | 562 | int i; |
@@ -619,7 +619,7 @@ static int has_failed(struct r5conf *conf) | |||
619 | if (conf->mddev->reshape_position == MaxSector) | 619 | if (conf->mddev->reshape_position == MaxSector) |
620 | return conf->mddev->degraded > conf->max_degraded; | 620 | return conf->mddev->degraded > conf->max_degraded; |
621 | 621 | ||
622 | degraded = calc_degraded(conf); | 622 | degraded = raid5_calc_degraded(conf); |
623 | if (degraded > conf->max_degraded) | 623 | if (degraded > conf->max_degraded) |
624 | return 1; | 624 | return 1; |
625 | return 0; | 625 | return 0; |
@@ -2555,7 +2555,7 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev) | |||
2555 | 2555 | ||
2556 | spin_lock_irqsave(&conf->device_lock, flags); | 2556 | spin_lock_irqsave(&conf->device_lock, flags); |
2557 | clear_bit(In_sync, &rdev->flags); | 2557 | clear_bit(In_sync, &rdev->flags); |
2558 | mddev->degraded = calc_degraded(conf); | 2558 | mddev->degraded = raid5_calc_degraded(conf); |
2559 | spin_unlock_irqrestore(&conf->device_lock, flags); | 2559 | spin_unlock_irqrestore(&conf->device_lock, flags); |
2560 | set_bit(MD_RECOVERY_INTR, &mddev->recovery); | 2560 | set_bit(MD_RECOVERY_INTR, &mddev->recovery); |
2561 | 2561 | ||
@@ -2569,6 +2569,7 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev) | |||
2569 | bdevname(rdev->bdev, b), | 2569 | bdevname(rdev->bdev, b), |
2570 | mdname(mddev), | 2570 | mdname(mddev), |
2571 | conf->raid_disks - mddev->degraded); | 2571 | conf->raid_disks - mddev->degraded); |
2572 | r5c_update_on_rdev_error(mddev); | ||
2572 | } | 2573 | } |
2573 | 2574 | ||
2574 | /* | 2575 | /* |
@@ -7091,7 +7092,7 @@ static int raid5_run(struct mddev *mddev) | |||
7091 | /* | 7092 | /* |
7092 | * 0 for a fully functional array, 1 or 2 for a degraded array. | 7093 | * 0 for a fully functional array, 1 or 2 for a degraded array. |
7093 | */ | 7094 | */ |
7094 | mddev->degraded = calc_degraded(conf); | 7095 | mddev->degraded = raid5_calc_degraded(conf); |
7095 | 7096 | ||
7096 | if (has_failed(conf)) { | 7097 | if (has_failed(conf)) { |
7097 | pr_crit("md/raid:%s: not enough operational devices (%d/%d failed)\n", | 7098 | pr_crit("md/raid:%s: not enough operational devices (%d/%d failed)\n", |
@@ -7338,7 +7339,7 @@ static int raid5_spare_active(struct mddev *mddev) | |||
7338 | } | 7339 | } |
7339 | } | 7340 | } |
7340 | spin_lock_irqsave(&conf->device_lock, flags); | 7341 | spin_lock_irqsave(&conf->device_lock, flags); |
7341 | mddev->degraded = calc_degraded(conf); | 7342 | mddev->degraded = raid5_calc_degraded(conf); |
7342 | spin_unlock_irqrestore(&conf->device_lock, flags); | 7343 | spin_unlock_irqrestore(&conf->device_lock, flags); |
7343 | print_raid5_conf(conf); | 7344 | print_raid5_conf(conf); |
7344 | return count; | 7345 | return count; |
@@ -7698,7 +7699,7 @@ static int raid5_start_reshape(struct mddev *mddev) | |||
7698 | * pre and post number of devices. | 7699 | * pre and post number of devices. |
7699 | */ | 7700 | */ |
7700 | spin_lock_irqsave(&conf->device_lock, flags); | 7701 | spin_lock_irqsave(&conf->device_lock, flags); |
7701 | mddev->degraded = calc_degraded(conf); | 7702 | mddev->degraded = raid5_calc_degraded(conf); |
7702 | spin_unlock_irqrestore(&conf->device_lock, flags); | 7703 | spin_unlock_irqrestore(&conf->device_lock, flags); |
7703 | } | 7704 | } |
7704 | mddev->raid_disks = conf->raid_disks; | 7705 | mddev->raid_disks = conf->raid_disks; |
@@ -7786,7 +7787,7 @@ static void raid5_finish_reshape(struct mddev *mddev) | |||
7786 | } else { | 7787 | } else { |
7787 | int d; | 7788 | int d; |
7788 | spin_lock_irq(&conf->device_lock); | 7789 | spin_lock_irq(&conf->device_lock); |
7789 | mddev->degraded = calc_degraded(conf); | 7790 | mddev->degraded = raid5_calc_degraded(conf); |
7790 | spin_unlock_irq(&conf->device_lock); | 7791 | spin_unlock_irq(&conf->device_lock); |
7791 | for (d = conf->raid_disks ; | 7792 | for (d = conf->raid_disks ; |
7792 | d < conf->raid_disks - mddev->delta_disks; | 7793 | d < conf->raid_disks - mddev->delta_disks; |
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 461df197d157..1440fa26e296 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h | |||
@@ -758,6 +758,7 @@ extern sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector, | |||
758 | extern struct stripe_head * | 758 | extern struct stripe_head * |
759 | raid5_get_active_stripe(struct r5conf *conf, sector_t sector, | 759 | raid5_get_active_stripe(struct r5conf *conf, sector_t sector, |
760 | int previous, int noblock, int noquiesce); | 760 | int previous, int noblock, int noquiesce); |
761 | extern int raid5_calc_degraded(struct r5conf *conf); | ||
761 | extern int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev); | 762 | extern int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev); |
762 | extern void r5l_exit_log(struct r5l_log *log); | 763 | extern void r5l_exit_log(struct r5l_log *log); |
763 | extern int r5l_write_stripe(struct r5l_log *log, struct stripe_head *head_sh); | 764 | extern int r5l_write_stripe(struct r5l_log *log, struct stripe_head *head_sh); |
@@ -786,4 +787,5 @@ extern void r5c_flush_cache(struct r5conf *conf, int num); | |||
786 | extern void r5c_check_stripe_cache_usage(struct r5conf *conf); | 787 | extern void r5c_check_stripe_cache_usage(struct r5conf *conf); |
787 | extern void r5c_check_cached_full_stripe(struct r5conf *conf); | 788 | extern void r5c_check_cached_full_stripe(struct r5conf *conf); |
788 | extern struct md_sysfs_entry r5c_journal_mode; | 789 | extern struct md_sysfs_entry r5c_journal_mode; |
790 | extern void r5c_update_on_rdev_error(struct mddev *mddev); | ||
789 | #endif | 791 | #endif |