author		Linus Torvalds <torvalds@linux-foundation.org>	2011-04-20 20:40:02 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-04-20 20:40:02 -0400
commit		d09571059b89cf1a079afe88ecb64386f591b061 (patch)
tree		588f36e1bee031f1e4f7168e79ec42a00711e20b /drivers
parent		73aa86825f45cf8efccf20128779416db7c278b2 (diff)
parent		28a8397852011e323d16a1eecd4787d72b7b9a9e (diff)
Merge branch 'for-linus' of git://neil.brown.name/md
* 'for-linus' of git://neil.brown.name/md:
md: Update documentation for sync_min and sync_max entries
md: Cleanup after raid45->raid0 takeover
md: Fix dev_sectors on takeover from raid0 to raid4/5
md/raid5: remove setting of ->queue_lock
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/md/md.c		1
-rw-r--r--	drivers/md/raid5.c	5
2 files changed, 5 insertions(+), 1 deletion(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 6e853c61d87e..7d6f7f18a920 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3170,6 +3170,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
 	mddev->layout = mddev->new_layout;
 	mddev->chunk_sectors = mddev->new_chunk_sectors;
 	mddev->delta_disks = 0;
+	mddev->degraded = 0;
 	if (mddev->pers->sync_request == NULL) {
 		/* this is now an array without redundancy, so
 		 * it must always be in_sync
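Aside: a minimal standalone sketch (hypothetical struct and helper names, not kernel code) of the cleanup the added line performs. After a takeover to a personality with no sync_request method (e.g. raid0), a degraded count left over from the old raid4/5 personality would be stale, and the surrounding comment notes such an array must always be treated as in_sync:

	/* Standalone illustration only -- fake_mddev and takeover_cleanup are
	 * invented for this sketch and do not exist in the kernel. */
	struct fake_mddev {
		int degraded;           /* failed-device count from the old personality */
		int in_sync;
		void *sync_request;     /* NULL for non-redundant levels such as raid0 */
	};

	static void takeover_cleanup(struct fake_mddev *mddev)
	{
		mddev->degraded = 0;            /* mirrors the line added in level_store() */
		if (mddev->sync_request == NULL)
			mddev->in_sync = 1;     /* no redundancy, so always in_sync */
	}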
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f301e6ae220c..fd500112f13e 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5151,7 +5151,6 @@ static int run(mddev_t *mddev)
 
 	mddev->queue->backing_dev_info.congested_data = mddev;
 	mddev->queue->backing_dev_info.congested_fn = raid5_congested;
-	mddev->queue->queue_lock = &conf->device_lock;
 
 	chunk_size = mddev->chunk_sectors << 9;
 	blk_queue_io_min(mddev->queue, chunk_size);
@@ -5679,6 +5678,7 @@ static void raid5_quiesce(mddev_t *mddev, int state)
 static void *raid45_takeover_raid0(mddev_t *mddev, int level)
 {
 	struct raid0_private_data *raid0_priv = mddev->private;
+	unsigned long long sectors;
 
 	/* for raid0 takeover only one zone is supported */
 	if (raid0_priv->nr_strip_zones > 1) {
@@ -5687,6 +5687,9 @@ static void *raid45_takeover_raid0(mddev_t *mddev, int level)
 		return ERR_PTR(-EINVAL);
 	}
 
+	sectors = raid0_priv->strip_zone[0].zone_end;
+	sector_div(sectors, raid0_priv->strip_zone[0].nb_dev);
+	mddev->dev_sectors = sectors;
 	mddev->new_level = level;
 	mddev->new_layout = ALGORITHM_PARITY_N;
 	mddev->new_chunk_sectors = mddev->chunk_sectors;
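For reference, a standalone sketch (ordinary user-space C with hypothetical numbers, not kernel code) of the dev_sectors calculation added above: raid0 records zone_end as the total sector count across all member devices in the zone, so the per-device size that raid4/5 expects is zone_end divided by nb_dev; the kernel performs that 64-bit division with the sector_div() helper.

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical values: a two-device raid0 whose single zone
		 * spans 3907028992 sectors in total. */
		unsigned long long zone_end = 3907028992ULL;
		unsigned int nb_dev = 2;

		/* Mirrors: sectors = zone_end; sector_div(sectors, nb_dev); */
		unsigned long long dev_sectors = zone_end / nb_dev;

		printf("dev_sectors = %llu\n", dev_sectors); /* prints 1953514496 */
		return 0;
	}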