 drivers/md/md.c    |   14 ++++++++++++++
 drivers/md/raid5.c |   26 ++++++++++++++++++++++++++
 2 files changed, 40 insertions(+), 0 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 2a64cba9ea72..22c630b7ba6c 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3017,6 +3017,20 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
 		mddev->to_remove = &md_redundancy_group;
 	}
 
+	if (mddev->pers->sync_request == NULL &&
+	    mddev->external) {
+		/* We are converting from a no-redundancy array
+		 * to a redundancy array and metadata is managed
+		 * externally so we need to be sure that writes
+		 * won't block due to a need to transition
+		 *      clean->dirty
+		 * until external management is started.
+		 */
+		mddev->in_sync = 0;
+		mddev->safemode_delay = 0;
+		mddev->safemode = 0;
+	}
+
 	module_put(mddev->pers->owner);
 	/* Invalidate devices that are now superfluous */
 	list_for_each_entry(rdev, &mddev->disks, same_set)
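
This hunk sits in level_store(), the handler that runs when userspace writes a new personality name to the array's md/level sysfs attribute (e.g. via mdadm --grow). After a raid0 array has been converted to a personality with a sync_request method, an ordinary write normally forces a clean->dirty metadata transition before it may proceed; with externally managed metadata that transition must be acknowledged by the userspace metadata manager, which may not be running yet during the takeover, so the patch forces the array out of "clean" tracking (in_sync = 0, safemode off) until it is. Below is a loose paraphrase of the blocking behaviour in md_write_start() from the same era; the helper name and the simplified wait condition are mine, so treat it as an illustration of why the fields are cleared, not as the real function body.

/*
 * Loose paraphrase of md_write_start() in drivers/md/md.c (simplified
 * and renamed) - shows the wait that the patch is trying to avoid.
 */
static void md_write_start_sketch(mddev_t *mddev)
{
	if (mddev->in_sync) {
		/* array was clean: request a clean->dirty metadata update */
		mddev->in_sync = 0;
		set_bit(MD_CHANGE_CLEAN, &mddev->flags);
		md_wakeup_thread(mddev->thread);
	}
	/*
	 * Block the write until the dirty state has been recorded.  For
	 * external metadata that only happens once the userspace manager
	 * is running, so pre-clearing in_sync keeps writes from stalling
	 * here during the takeover window.
	 */
	wait_event(mddev->sb_wait, !test_bit(MD_CHANGE_CLEAN, &mddev->flags));
}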
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 10af3715b1fc..bb28fd6b44fe 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -52,6 +52,7 @@
 #include <linux/cpu.h>
 #include "md.h"
 #include "raid5.h"
+#include "raid0.h"
 #include "bitmap.h"
 
 /*
@@ -5619,6 +5620,21 @@ static void raid5_quiesce(mddev_t *mddev, int state)
 }
 
 
+static void *raid5_takeover_raid0(mddev_t *mddev)
+{
+
+	mddev->new_level = 5;
+	mddev->new_layout = ALGORITHM_PARITY_N;
+	mddev->new_chunk_sectors = mddev->chunk_sectors;
+	mddev->raid_disks += 1;
+	mddev->delta_disks = 1;
+	/* make sure it will be not marked as dirty */
+	mddev->recovery_cp = MaxSector;
+
+	return setup_conf(mddev);
+}
+
+
 static void *raid5_takeover_raid1(mddev_t *mddev)
 {
 	int chunksect;
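
raid5_takeover_raid0() only rewrites the geometry: ALGORITHM_PARITY_N keeps the data chunks in exactly the order raid0 striped them and puts parity on one fixed (last) disk, so no existing data has to move; raid_disks and delta_disks grow by one for that parity device, and recovery_cp = MaxSector keeps the new array from being treated as needing a full resync. The actual configuration for the new geometry is then built by setup_conf(). A small user-space illustration of why PARITY_N preserves the raid0 placement (hypothetical helper, not kernel code):

#include <stdio.h>

/*
 * Hypothetical illustration (not from the patch): under the raid5
 * ALGORITHM_PARITY_N layout the parity chunk always sits on the last
 * disk, so the disk holding data chunk N is the same one that held it
 * when the array was a raid0.
 */
static int parity_n_data_disk(long long chunk, int raid_disks)
{
	int data_disks = raid_disks - 1;	/* last disk is parity-only */

	return (int)(chunk % data_disks);	/* same as raid0 round-robin */
}

int main(void)
{
	int raid0_disks = 3;			/* example: 3-disk raid0 */
	long long chunk;

	for (chunk = 0; chunk < 6; chunk++)
		printf("data chunk %lld -> disk %d (as raid0: disk %lld)\n",
		       chunk, parity_n_data_disk(chunk, raid0_disks + 1),
		       chunk % raid0_disks);
	return 0;
}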
@@ -5748,6 +5764,16 @@ static void *raid5_takeover(mddev_t *mddev)
 	 *  raid4 - trivial - just use a raid4 layout.
 	 *  raid6 - Providing it is a *_6 layout
 	 */
+	if (mddev->level == 0) {
+		/* for raid0 takeover only one zone is supported */
+		struct raid0_private_data *raid0_priv
+			= mddev->private;
+		if (raid0_priv->nr_strip_zones > 1) {
+			printk(KERN_ERR "md: cannot takeover raid 0 with more than one zone.\n");
+			return ERR_PTR(-EINVAL);
+		}
+		return raid5_takeover_raid0(mddev);
+	}
 
 	if (mddev->level == 1)
 		return raid5_takeover_raid1(mddev);
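
The single-zone restriction exists because raid0 groups its members into "strip zones": a second zone appears as soon as the member devices are not all the same size, and data in the extra zone is striped over fewer disks. Such a layout cannot be reinterpreted as one uniform raid5 stripe, so the takeover is refused when nr_strip_zones > 1. For reference, roughly the structure that the new "raid0.h" include makes visible (paraphrased from drivers/md/raid0.h of that kernel era, trimmed and with types simplified so it stands alone; field names may differ slightly from the exact header):

typedef unsigned long long sector_t;	/* simplified stand-in */

struct strip_zone {
	sector_t zone_end;	/* where this zone ends, in array sectors */
	sector_t dev_start;	/* offset of the zone within each member */
	int	 nb_dev;	/* number of member devices in this zone */
};

struct raid0_private_data {
	struct strip_zone *strip_zone;
	void **devlist;		/* mdk_rdev_t ** in the kernel proper */
	int nr_strip_zones;	/* > 1 once member sizes differ */
};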