aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/md
diff options
context:
space:
mode:
authorGuoqing Jiang <gqjiang@suse.com>2018-10-18 04:37:44 -0400
committerShaohua Li <shli@fb.com>2018-10-18 12:38:06 -0400
commitaefb2e5fc2be590e6bef8985f3d175c3d38b0b77 (patch)
treeca88b03b17faa56f2cd3fcdc4384aa680962846b /drivers/md
parent5ebaf80bc8d5826edcc2d1cea26a7d5a4b8f01dd (diff)
md-cluster/raid10: call update_size in md_reap_sync_thread
We need to change the capacity in all nodes after one node finishes reshape. And as we did before, we can't change the capacity directly in md_do_sync; instead, the capacity should only be changed in update_size or upon receiving a CHANGE_CAPACITY msg. So the master node calls update_size after it completes reshape in md_reap_sync_thread, but we need to skip ops->update_size if MD_CLOSING is set since the reshape could not be finished. Reviewed-by: NeilBrown <neilb@suse.com> Signed-off-by: Guoqing Jiang <gqjiang@suse.com> Signed-off-by: Shaohua Li <shli@fb.com>
Diffstat (limited to 'drivers/md')
-rw-r--r--drivers/md/md.c21
1 file changed, 18 insertions, 3 deletions
diff --git a/drivers/md/md.c b/drivers/md/md.c
index e07096c4ff20..e28f5db0a882 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -8623,8 +8623,10 @@ void md_do_sync(struct md_thread *thread)
8623 mddev_lock_nointr(mddev); 8623 mddev_lock_nointr(mddev);
8624 md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0)); 8624 md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
8625 mddev_unlock(mddev); 8625 mddev_unlock(mddev);
8626 set_capacity(mddev->gendisk, mddev->array_sectors); 8626 if (!mddev_is_clustered(mddev)) {
8627 revalidate_disk(mddev->gendisk); 8627 set_capacity(mddev->gendisk, mddev->array_sectors);
8628 revalidate_disk(mddev->gendisk);
8629 }
8628 } 8630 }
8629 8631
8630 spin_lock(&mddev->lock); 8632 spin_lock(&mddev->lock);
@@ -8968,6 +8970,8 @@ EXPORT_SYMBOL(md_check_recovery);
8968void md_reap_sync_thread(struct mddev *mddev) 8970void md_reap_sync_thread(struct mddev *mddev)
8969{ 8971{
8970 struct md_rdev *rdev; 8972 struct md_rdev *rdev;
8973 sector_t old_dev_sectors = mddev->dev_sectors;
8974 bool is_reshaped = false;
8971 8975
8972 /* resync has finished, collect result */ 8976 /* resync has finished, collect result */
8973 md_unregister_thread(&mddev->sync_thread); 8977 md_unregister_thread(&mddev->sync_thread);
@@ -8982,8 +8986,11 @@ void md_reap_sync_thread(struct mddev *mddev)
8982 } 8986 }
8983 } 8987 }
8984 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 8988 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8985 mddev->pers->finish_reshape) 8989 mddev->pers->finish_reshape) {
8986 mddev->pers->finish_reshape(mddev); 8990 mddev->pers->finish_reshape(mddev);
8991 if (mddev_is_clustered(mddev))
8992 is_reshaped = true;
8993 }
8987 8994
8988 /* If array is no-longer degraded, then any saved_raid_disk 8995 /* If array is no-longer degraded, then any saved_raid_disk
8989 * information must be scrapped. 8996 * information must be scrapped.
@@ -9004,6 +9011,14 @@ void md_reap_sync_thread(struct mddev *mddev)
9004 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 9011 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
9005 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 9012 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
9006 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 9013 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
9014 /*
9015 * We call md_cluster_ops->update_size here because sync_size could
9016 * be changed by md_update_sb, and MD_RECOVERY_RESHAPE is cleared,
9017 * so it is time to update size across cluster.
9018 */
9019 if (mddev_is_clustered(mddev) && is_reshaped
9020 && !test_bit(MD_CLOSING, &mddev->flags))
9021 md_cluster_ops->update_size(mddev, old_dev_sectors);
9007 wake_up(&resync_wait); 9022 wake_up(&resync_wait);
9008 /* flag recovery needed just to double check */ 9023 /* flag recovery needed just to double check */
9009 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 9024 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);