author	Shaohua Li <shli@fb.com>	2015-10-09 00:54:06 -0400
committer	NeilBrown <neilb@suse.com>	2015-10-31 22:48:29 -0400
commit	4b482044d24f3db2e11607d0a18f64b3a326972d
tree	aaa53c2f74e98baec8af07194ffa6005df095b0e
parent	9efdca16e0182eca489a519f576019fd9c0c1b25
raid5-cache: add trim support for log
Since the superblock is updated infrequently, we do a simple trim of the log disk (a synchronous trim).

Signed-off-by: Shaohua Li <shli@fb.com>
Signed-off-by: NeilBrown <neilb@suse.com>
 drivers/md/raid5-cache.c | 63 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 62 insertions(+), 1 deletion(-)
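The patch trims the log region freed by advancing the checkpoint. Because the log is circular, a checkpoint that has wrapped needs two discards: the tail of the log area, then its head up to the new checkpoint. blkdev_issue_discard() blocks until the discard completes, which is the "synchronous trim" mentioned above. The standalone sketch below illustrates only that range arithmetic; the struct and function names are hypothetical stand-ins for the fields used in the patch, not kernel API.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the log geometry fields used in the patch. */
struct log_geom {
	uint64_t data_offset;     /* start of the log area on the device (sectors) */
	uint64_t device_size;     /* size of the log area (sectors) */
	uint64_t last_checkpoint; /* old checkpoint position within the log */
};

/* Print the sector range(s) that would be discarded when the checkpoint
 * moves to 'end', mirroring the wrap-around handling in
 * r5l_write_super_and_discard_space(). */
static void show_discard_ranges(const struct log_geom *g, uint64_t end)
{
	if (g->last_checkpoint < end) {
		/* No wrap: one contiguous range from old to new checkpoint. */
		printf("discard [%llu, +%llu)\n",
		       (unsigned long long)(g->data_offset + g->last_checkpoint),
		       (unsigned long long)(end - g->last_checkpoint));
	} else {
		/* Wrapped: trim the tail of the log, then its head up to 'end'. */
		printf("discard [%llu, +%llu)\n",
		       (unsigned long long)(g->data_offset + g->last_checkpoint),
		       (unsigned long long)(g->device_size - g->last_checkpoint));
		printf("discard [%llu, +%llu)\n",
		       (unsigned long long)g->data_offset,
		       (unsigned long long)end);
	}
}

int main(void)
{
	struct log_geom g = { .data_offset = 2048, .device_size = 8192,
			      .last_checkpoint = 6000 };

	show_discard_ranges(&g, 7000); /* checkpoint moved forward, no wrap */
	show_discard_ranges(&g, 1000); /* checkpoint wrapped past the end of the log */
	return 0;
}

Running it prints one range for the non-wrapped case and two for the wrapped one, matching the two branches of the if/else in the new function.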
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index d3b5441b4c11..7071c7598f5d 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -85,6 +85,7 @@ struct r5l_log {
 	spinlock_t no_space_stripes_lock;
 
 	bool need_cache_flush;
+	bool in_teardown;
 };
 
 /*
@@ -644,6 +645,60 @@ void r5l_flush_stripe_to_raid(struct r5l_log *log)
 }
 
 static void r5l_write_super(struct r5l_log *log, sector_t cp);
+static void r5l_write_super_and_discard_space(struct r5l_log *log,
+	sector_t end)
+{
+	struct block_device *bdev = log->rdev->bdev;
+	struct mddev *mddev;
+
+	r5l_write_super(log, end);
+
+	if (!blk_queue_discard(bdev_get_queue(bdev)))
+		return;
+
+	mddev = log->rdev->mddev;
+	/*
+	 * This is to avoid a deadlock: r5l_quiesce holds reconfig_mutex and
+	 * waits for this thread to finish. This thread waits for
+	 * MD_CHANGE_PENDING to clear, which is supposed to happen in
+	 * md_check_recovery(). md_check_recovery() tries to take
+	 * reconfig_mutex, but since r5l_quiesce already holds it,
+	 * md_check_recovery() fails and MD_CHANGE_PENDING never gets
+	 * cleared. The in_teardown check works around this issue.
+	 */
+	if (!log->in_teardown) {
+		set_bit(MD_CHANGE_DEVS, &mddev->flags);
+		set_bit(MD_CHANGE_PENDING, &mddev->flags);
+		md_wakeup_thread(mddev->thread);
+		wait_event(mddev->sb_wait,
+			!test_bit(MD_CHANGE_PENDING, &mddev->flags) ||
+			log->in_teardown);
+		/*
+		 * r5l_quiesce could run after the in_teardown check and take
+		 * the mutex first, so the superblock might get updated twice.
+		 */
+		if (log->in_teardown)
+			md_update_sb(mddev, 1);
+	} else {
+		WARN_ON(!mddev_is_locked(mddev));
+		md_update_sb(mddev, 1);
+	}
+
+	if (log->last_checkpoint < end) {
+		blkdev_issue_discard(bdev,
+				log->last_checkpoint + log->rdev->data_offset,
+				end - log->last_checkpoint, GFP_NOIO, 0);
+	} else {
+		blkdev_issue_discard(bdev,
+				log->last_checkpoint + log->rdev->data_offset,
+				log->device_size - log->last_checkpoint,
+				GFP_NOIO, 0);
+		blkdev_issue_discard(bdev, log->rdev->data_offset, end,
+				GFP_NOIO, 0);
+	}
+}
+
+
 static void r5l_do_reclaim(struct r5l_log *log)
 {
 	sector_t reclaim_target = xchg(&log->reclaim_target, 0);
@@ -685,7 +740,7 @@ static void r5l_do_reclaim(struct r5l_log *log)
 	 * here, because the log area might be reused soon and we don't want to
 	 * confuse recovery
 	 */
-	r5l_write_super(log, next_checkpoint);
+	r5l_write_super_and_discard_space(log, next_checkpoint);
 
 	mutex_lock(&log->io_mutex);
 	log->last_checkpoint = next_checkpoint;
@@ -721,9 +776,11 @@ static void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
 
 void r5l_quiesce(struct r5l_log *log, int state)
 {
+	struct mddev *mddev;
 	if (!log || state == 2)
 		return;
 	if (state == 0) {
+		log->in_teardown = 0;
 		log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
 					log->rdev->mddev, "reclaim");
 	} else if (state == 1) {
@@ -731,6 +788,10 @@ void r5l_quiesce(struct r5l_log *log, int state)
 		 * at this point all stripes are finished, so io_unit is at
 		 * least in STRIPE_END state
 		 */
+		log->in_teardown = 1;
+		/* make sure r5l_write_super_and_discard_space exits */
+		mddev = log->rdev->mddev;
+		wake_up(&mddev->sb_wait);
 		r5l_wake_reclaim(log, -1L);
 		md_unregister_thread(&log->reclaim_thread);
 		r5l_do_reclaim(log);
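The in_teardown flag plus the explicit wake_up(&mddev->sb_wait) in r5l_quiesce() is a "wait with an escape hatch": the reclaim path sleeps until the pending superblock change clears or teardown starts, so quiesce is never stuck waiting on a thread that is itself waiting on quiesce. Below is a minimal userspace sketch of that pattern only, using a pthread condition variable as a stand-in for the kernel wait queue; all names are hypothetical and not the md code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical model of the wait in r5l_write_super_and_discard_space():
 * the reclaim side sleeps until either the "change pending" flag clears
 * or teardown begins, so the quiesce side can always wake it up. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t sb_wait = PTHREAD_COND_INITIALIZER;
static bool change_pending = true; /* stands in for MD_CHANGE_PENDING */
static bool in_teardown;           /* stands in for log->in_teardown */

static void *reclaim_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	/* Sleep until the flag clears OR teardown is signalled. */
	while (change_pending && !in_teardown)
		pthread_cond_wait(&sb_wait, &lock);
	printf("reclaim: woke up (%s)\n",
	       in_teardown ? "teardown" : "pending cleared");
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, reclaim_thread, NULL);
	sleep(1);

	/* Quiesce path: mark teardown and wake the waiter, as r5l_quiesce()
	 * does with log->in_teardown = 1 and wake_up(&mddev->sb_wait). */
	pthread_mutex_lock(&lock);
	in_teardown = true;
	pthread_cond_broadcast(&sb_wait);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	return 0;
}

Build with cc -pthread; the waiter returns as soon as the teardown flag is set and broadcast, mirroring how the wait_event() in the patch exits once log->in_teardown is set.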