Diffstat (limited to 'drivers')
-rw-r--r--  drivers/md/md.c     11
-rw-r--r--  drivers/md/md.h      2
-rw-r--r--  drivers/md/raid5.c  50
3 files changed, 34 insertions, 29 deletions
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 923d1250b9a9..c50931352b23 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5073,14 +5073,6 @@ void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors)
 }
 EXPORT_SYMBOL(md_set_array_sectors);
 
-void md_set_array_sectors_lock(mddev_t *mddev, sector_t array_sectors)
-{
-        mddev_lock(mddev);
-        md_set_array_sectors(mddev, array_sectors);
-        mddev_unlock(mddev);
-}
-EXPORT_SYMBOL(md_set_array_sectors_lock);
-
 static int update_size(mddev_t *mddev, sector_t num_sectors)
 {
         mdk_rdev_t *rdev;
@@ -6641,6 +6633,9 @@ void md_check_recovery(mddev_t *mddev)
                                         sysfs_notify(&mddev->kobj, NULL,
                                                      "degraded");
                         }
+                        if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+                            mddev->pers->finish_reshape)
+                                mddev->pers->finish_reshape(mddev);
                         md_update_sb(mddev, 1);
 
                         /* if array is no-longer degraded, then any saved_raid_disk
diff --git a/drivers/md/md.h b/drivers/md/md.h
index d13e34f842e2..e9b7f54c24d6 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -317,6 +317,7 @@ struct mdk_personality
         sector_t (*size) (mddev_t *mddev, sector_t sectors, int raid_disks);
         int (*check_reshape) (mddev_t *mddev);
         int (*start_reshape) (mddev_t *mddev);
+        void (*finish_reshape) (mddev_t *mddev);
         int (*reconfig) (mddev_t *mddev, int layout, int chunk_size);
         /* quiesce moves between quiescence states
          * 0 - fully active
@@ -433,4 +434,3 @@ extern void md_new_event(mddev_t *mddev);
 extern int md_allow_write(mddev_t *mddev);
 extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
 extern void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors);
-extern void md_set_array_sectors_lock(mddev_t *mddev, sector_t array_sectors);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 5694eb8941b6..a0f22dd33234 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3850,6 +3850,7 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
         if (sector_nr >= max_sector) {
                 /* just being told to finish up .. nothing much to do */
                 unplug_slaves(mddev);
+
                 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
                         end_reshape(conf);
                         return 0;
@@ -4836,43 +4837,49 @@ static int raid5_start_reshape(mddev_t *mddev)
 
 static void end_reshape(raid5_conf_t *conf)
 {
-        struct block_device *bdev;
 
         if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
-                mddev_t *mddev = conf->mddev;
-
-                md_set_array_sectors_lock(mddev, raid5_size(mddev, 0,
-                                                            conf->raid_disks));
-                set_capacity(mddev->gendisk, mddev->array_sectors);
-                mddev->changed = 1;
-                conf->previous_raid_disks = conf->raid_disks;
 
-                bdev = bdget_disk(conf->mddev->gendisk, 0);
-                if (bdev) {
-                        mutex_lock(&bdev->bd_inode->i_mutex);
-                        i_size_write(bdev->bd_inode,
-                                     (loff_t)conf->mddev->array_sectors << 9);
-                        mutex_unlock(&bdev->bd_inode->i_mutex);
-                        bdput(bdev);
-                }
                 spin_lock_irq(&conf->device_lock);
+                conf->previous_raid_disks = conf->raid_disks;
                 conf->expand_progress = MaxSector;
                 spin_unlock_irq(&conf->device_lock);
-                conf->mddev->reshape_position = MaxSector;
 
                 /* read-ahead size must cover two whole stripes, which is
                  * 2 * (datadisks) * chunksize where 'n' is the number of raid devices
                  */
                 {
-                        int data_disks = conf->previous_raid_disks - conf->max_degraded;
-                        int stripe = data_disks *
-                                (conf->mddev->chunk_size / PAGE_SIZE);
+                        int data_disks = conf->raid_disks - conf->max_degraded;
+                        int stripe = data_disks * (conf->chunk_size
+                                                   / PAGE_SIZE);
                         if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
                                 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
                 }
         }
 }
 
+static void raid5_finish_reshape(mddev_t *mddev)
+{
+        struct block_device *bdev;
+
+        if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
+
+                md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
+                set_capacity(mddev->gendisk, mddev->array_sectors);
+                mddev->changed = 1;
+                mddev->reshape_position = MaxSector;
+
+                bdev = bdget_disk(mddev->gendisk, 0);
+                if (bdev) {
+                        mutex_lock(&bdev->bd_inode->i_mutex);
+                        i_size_write(bdev->bd_inode,
+                                     (loff_t)mddev->array_sectors << 9);
+                        mutex_unlock(&bdev->bd_inode->i_mutex);
+                        bdput(bdev);
+                }
+        }
+}
+
 static void raid5_quiesce(mddev_t *mddev, int state)
 {
         raid5_conf_t *conf = mddev_to_conf(mddev);
@@ -5098,6 +5105,7 @@ static struct mdk_personality raid6_personality =
 #ifdef CONFIG_MD_RAID5_RESHAPE
         .check_reshape  = raid5_check_reshape,
         .start_reshape  = raid5_start_reshape,
+        .finish_reshape = raid5_finish_reshape,
 #endif
         .quiesce        = raid5_quiesce,
         .takeover       = raid6_takeover,
@@ -5121,6 +5129,7 @@ static struct mdk_personality raid5_personality =
 #ifdef CONFIG_MD_RAID5_RESHAPE
         .check_reshape  = raid5_check_reshape,
         .start_reshape  = raid5_start_reshape,
+        .finish_reshape = raid5_finish_reshape,
 #endif
         .quiesce        = raid5_quiesce,
         .takeover       = raid5_takeover,
@@ -5146,6 +5155,7 @@ static struct mdk_personality raid4_personality =
 #ifdef CONFIG_MD_RAID5_RESHAPE
         .check_reshape  = raid5_check_reshape,
         .start_reshape  = raid5_start_reshape,
+        .finish_reshape = raid5_finish_reshape,
 #endif
         .quiesce        = raid5_quiesce,
 };