Diffstat (limited to 'drivers')
-rw-r--r--  drivers/md/md.c     1
-rw-r--r--  drivers/md/raid5.c  5
2 files changed, 5 insertions(+), 1 deletion(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 6e853c61d87e..7d6f7f18a920 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3170,6 +3170,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
 	mddev->layout = mddev->new_layout;
 	mddev->chunk_sectors = mddev->new_chunk_sectors;
 	mddev->delta_disks = 0;
+	mddev->degraded = 0;
 	if (mddev->pers->sync_request == NULL) {
 		/* this is now an array without redundancy, so
 		 * it must always be in_sync
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f301e6ae220c..fd500112f13e 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5151,7 +5151,6 @@ static int run(mddev_t *mddev)
 
 	mddev->queue->backing_dev_info.congested_data = mddev;
 	mddev->queue->backing_dev_info.congested_fn = raid5_congested;
-	mddev->queue->queue_lock = &conf->device_lock;
 
 	chunk_size = mddev->chunk_sectors << 9;
 	blk_queue_io_min(mddev->queue, chunk_size);
@@ -5679,6 +5678,7 @@ static void raid5_quiesce(mddev_t *mddev, int state)
 static void *raid45_takeover_raid0(mddev_t *mddev, int level)
 {
 	struct raid0_private_data *raid0_priv = mddev->private;
+	unsigned long long sectors;
 
 	/* for raid0 takeover only one zone is supported */
 	if (raid0_priv->nr_strip_zones > 1) {
@@ -5687,6 +5687,9 @@ static void *raid45_takeover_raid0(mddev_t *mddev, int level)
 		return ERR_PTR(-EINVAL);
 	}
 
+	sectors = raid0_priv->strip_zone[0].zone_end;
+	sector_div(sectors, raid0_priv->strip_zone[0].nb_dev);
+	mddev->dev_sectors = sectors;
 	mddev->new_level = level;
 	mddev->new_layout = ALGORITHM_PARITY_N;
 	mddev->new_chunk_sectors = mddev->chunk_sectors;
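
For illustration only, a minimal userspace sketch of the arithmetic the takeover hunk above introduces: the single raid0 zone's total size (zone_end, in sectors) divided by the number of member devices gives the per-device size stored in mddev->dev_sectors. The zone_end and nb_dev values below are made-up stand-ins, and plain 64-bit division stands in for the kernel's sector_div() helper.

#include <stdio.h>

int main(void)
{
	/* stand-in values: total size of the one raid0 zone (512-byte
	 * sectors) and the number of member devices in that zone */
	unsigned long long zone_end = 7814037168ULL;
	unsigned int nb_dev = 4;

	/* the patch uses sector_div(), which divides in place and returns
	 * the remainder; ordinary division shows the same result here */
	unsigned long long dev_sectors = zone_end / nb_dev;

	printf("per-device sectors for takeover: %llu\n", dev_sectors);
	return 0;
}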