diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-12-14 14:01:00 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-12-14 14:01:00 -0500 |
commit | 775a2e29c3bbcf853432f47d3caa9ff8808807ad (patch) | |
tree | 614a7481b68dc9b0b628f392ab9bbdc53bbfe447 /drivers/md/dm-raid.c | |
parent | 2a4c32edd39b7de166e723b1991abcde4db3a701 (diff) | |
parent | ef548c551e72dbbdcc6d9ed7c7b3b01083fea8e2 (diff) |
Merge tag 'dm-4.10-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull device mapper updates from Mike Snitzer:
- various fixes and improvements to request-based DM and DM multipath
- some locking improvements in DM bufio
- add Kconfig option to disable the DM block manager's extra locking
which mainly serves as a developer tool
- a few bug fixes to DM's persistent-data
- a couple changes to prepare for multipage biovec support in the block
layer
- various improvements and cleanups in the DM core, DM cache, DM raid
and DM crypt
- add ability to have DM crypt use keys from the kernel key retention
service
- add a new "error_writes" feature to the DM flakey target; reads are
left unchanged in this mode
* tag 'dm-4.10-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm: (40 commits)
dm flakey: introduce "error_writes" feature
dm cache policy smq: use hash_32() instead of hash_32_generic()
dm crypt: reject key strings containing whitespace chars
dm space map: always set ev if sm_ll_mutate() succeeds
dm space map metadata: skip useless memcpy in metadata_ll_init_index()
dm space map metadata: fix 'struct sm_metadata' leak on failed create
Documentation: dm raid: define data_offset status field
dm raid: fix discard support regression
dm raid: don't allow "write behind" with raid4/5/6
dm mpath: use hw_handler_params if attached hw_handler is same as requested
dm crypt: add ability to use keys from the kernel key retention service
dm array: remove a dead assignment in populate_ablock_with_values()
dm ioctl: use offsetof() instead of open-coding it
dm rq: simplify use_blk_mq initialization
dm: use blk_set_queue_dying() in __dm_destroy()
dm bufio: drop the lock when doing GFP_NOIO allocation
dm bufio: don't take the lock in dm_bufio_shrink_count
dm bufio: avoid sleeping while holding the dm_bufio lock
dm table: simplify dm_table_determine_type()
dm table: an 'all_blk_mq' table must be loaded for a blk-mq DM device
...
Diffstat (limited to 'drivers/md/dm-raid.c')
-rw-r--r-- | drivers/md/dm-raid.c | 82 |
1 file changed, 42 insertions, 40 deletions
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 953159d9a825..b8f978e551d7 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c | |||
@@ -160,7 +160,6 @@ struct raid_dev { | |||
160 | CTR_FLAG_DAEMON_SLEEP | \ | 160 | CTR_FLAG_DAEMON_SLEEP | \ |
161 | CTR_FLAG_MIN_RECOVERY_RATE | \ | 161 | CTR_FLAG_MIN_RECOVERY_RATE | \ |
162 | CTR_FLAG_MAX_RECOVERY_RATE | \ | 162 | CTR_FLAG_MAX_RECOVERY_RATE | \ |
163 | CTR_FLAG_MAX_WRITE_BEHIND | \ | ||
164 | CTR_FLAG_STRIPE_CACHE | \ | 163 | CTR_FLAG_STRIPE_CACHE | \ |
165 | CTR_FLAG_REGION_SIZE | \ | 164 | CTR_FLAG_REGION_SIZE | \ |
166 | CTR_FLAG_DELTA_DISKS | \ | 165 | CTR_FLAG_DELTA_DISKS | \ |
@@ -171,7 +170,6 @@ struct raid_dev { | |||
171 | CTR_FLAG_DAEMON_SLEEP | \ | 170 | CTR_FLAG_DAEMON_SLEEP | \ |
172 | CTR_FLAG_MIN_RECOVERY_RATE | \ | 171 | CTR_FLAG_MIN_RECOVERY_RATE | \ |
173 | CTR_FLAG_MAX_RECOVERY_RATE | \ | 172 | CTR_FLAG_MAX_RECOVERY_RATE | \ |
174 | CTR_FLAG_MAX_WRITE_BEHIND | \ | ||
175 | CTR_FLAG_STRIPE_CACHE | \ | 173 | CTR_FLAG_STRIPE_CACHE | \ |
176 | CTR_FLAG_REGION_SIZE | \ | 174 | CTR_FLAG_REGION_SIZE | \ |
177 | CTR_FLAG_DELTA_DISKS | \ | 175 | CTR_FLAG_DELTA_DISKS | \ |
@@ -2050,16 +2048,17 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev) | |||
2050 | 2048 | ||
2051 | mddev->reshape_position = MaxSector; | 2049 | mddev->reshape_position = MaxSector; |
2052 | 2050 | ||
2051 | mddev->raid_disks = le32_to_cpu(sb->num_devices); | ||
2052 | mddev->level = le32_to_cpu(sb->level); | ||
2053 | mddev->layout = le32_to_cpu(sb->layout); | ||
2054 | mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors); | ||
2055 | |||
2053 | /* | 2056 | /* |
2054 | * Reshaping is supported, e.g. reshape_position is valid | 2057 | * Reshaping is supported, e.g. reshape_position is valid |
2055 | * in superblock and superblock content is authoritative. | 2058 | * in superblock and superblock content is authoritative. |
2056 | */ | 2059 | */ |
2057 | if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) { | 2060 | if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) { |
2058 | /* Superblock is authoritative wrt given raid set layout! */ | 2061 | /* Superblock is authoritative wrt given raid set layout! */ |
2059 | mddev->raid_disks = le32_to_cpu(sb->num_devices); | ||
2060 | mddev->level = le32_to_cpu(sb->level); | ||
2061 | mddev->layout = le32_to_cpu(sb->layout); | ||
2062 | mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors); | ||
2063 | mddev->new_level = le32_to_cpu(sb->new_level); | 2062 | mddev->new_level = le32_to_cpu(sb->new_level); |
2064 | mddev->new_layout = le32_to_cpu(sb->new_layout); | 2063 | mddev->new_layout = le32_to_cpu(sb->new_layout); |
2065 | mddev->new_chunk_sectors = le32_to_cpu(sb->new_stripe_sectors); | 2064 | mddev->new_chunk_sectors = le32_to_cpu(sb->new_stripe_sectors); |
@@ -2087,38 +2086,44 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev) | |||
2087 | /* | 2086 | /* |
2088 | * No takeover/reshaping, because we don't have the extended v1.9.0 metadata | 2087 | * No takeover/reshaping, because we don't have the extended v1.9.0 metadata |
2089 | */ | 2088 | */ |
2090 | if (le32_to_cpu(sb->level) != mddev->new_level) { | 2089 | struct raid_type *rt_cur = get_raid_type_by_ll(mddev->level, mddev->layout); |
2091 | DMERR("Reshaping/takeover raid sets not yet supported. (raid level/stripes/size change)"); | 2090 | struct raid_type *rt_new = get_raid_type_by_ll(mddev->new_level, mddev->new_layout); |
2092 | return -EINVAL; | ||
2093 | } | ||
2094 | if (le32_to_cpu(sb->layout) != mddev->new_layout) { | ||
2095 | DMERR("Reshaping raid sets not yet supported. (raid layout change)"); | ||
2096 | DMERR(" 0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout); | ||
2097 | DMERR(" Old layout: %s w/ %d copies", | ||
2098 | raid10_md_layout_to_format(le32_to_cpu(sb->layout)), | ||
2099 | raid10_md_layout_to_copies(le32_to_cpu(sb->layout))); | ||
2100 | DMERR(" New layout: %s w/ %d copies", | ||
2101 | raid10_md_layout_to_format(mddev->layout), | ||
2102 | raid10_md_layout_to_copies(mddev->layout)); | ||
2103 | return -EINVAL; | ||
2104 | } | ||
2105 | if (le32_to_cpu(sb->stripe_sectors) != mddev->new_chunk_sectors) { | ||
2106 | DMERR("Reshaping raid sets not yet supported. (stripe sectors change)"); | ||
2107 | return -EINVAL; | ||
2108 | } | ||
2109 | 2091 | ||
2110 | /* We can only change the number of devices in raid1 with old (i.e. pre 1.0.7) metadata */ | 2092 | if (rs_takeover_requested(rs)) { |
2111 | if (!rt_is_raid1(rs->raid_type) && | 2093 | if (rt_cur && rt_new) |
2112 | (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) { | 2094 | DMERR("Takeover raid sets from %s to %s not yet supported by metadata. (raid level change)", |
2113 | DMERR("Reshaping raid sets not yet supported. (device count change from %u to %u)", | 2095 | rt_cur->name, rt_new->name); |
2114 | sb->num_devices, mddev->raid_disks); | 2096 | else |
2097 | DMERR("Takeover raid sets not yet supported by metadata. (raid level change)"); | ||
2098 | return -EINVAL; | ||
2099 | } else if (rs_reshape_requested(rs)) { | ||
2100 | DMERR("Reshaping raid sets not yet supported by metadata. (raid layout change keeping level)"); | ||
2101 | if (mddev->layout != mddev->new_layout) { | ||
2102 | if (rt_cur && rt_new) | ||
2103 | DMERR(" current layout %s vs new layout %s", | ||
2104 | rt_cur->name, rt_new->name); | ||
2105 | else | ||
2106 | DMERR(" current layout 0x%X vs new layout 0x%X", | ||
2107 | le32_to_cpu(sb->layout), mddev->new_layout); | ||
2108 | } | ||
2109 | if (mddev->chunk_sectors != mddev->new_chunk_sectors) | ||
2110 | DMERR(" current stripe sectors %u vs new stripe sectors %u", | ||
2111 | mddev->chunk_sectors, mddev->new_chunk_sectors); | ||
2112 | if (rs->delta_disks) | ||
2113 | DMERR(" current %u disks vs new %u disks", | ||
2114 | mddev->raid_disks, mddev->raid_disks + rs->delta_disks); | ||
2115 | if (rs_is_raid10(rs)) { | ||
2116 | DMERR(" Old layout: %s w/ %u copies", | ||
2117 | raid10_md_layout_to_format(mddev->layout), | ||
2118 | raid10_md_layout_to_copies(mddev->layout)); | ||
2119 | DMERR(" New layout: %s w/ %u copies", | ||
2120 | raid10_md_layout_to_format(mddev->new_layout), | ||
2121 | raid10_md_layout_to_copies(mddev->new_layout)); | ||
2122 | } | ||
2115 | return -EINVAL; | 2123 | return -EINVAL; |
2116 | } | 2124 | } |
2117 | 2125 | ||
2118 | DMINFO("Discovered old metadata format; upgrading to extended metadata format"); | 2126 | DMINFO("Discovered old metadata format; upgrading to extended metadata format"); |
2119 | |||
2120 | /* Table line is checked vs. authoritative superblock */ | ||
2121 | rs_set_new(rs); | ||
2122 | } | 2127 | } |
2123 | 2128 | ||
2124 | if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) | 2129 | if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) |
@@ -2211,7 +2216,7 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev) | |||
2211 | continue; | 2216 | continue; |
2212 | 2217 | ||
2213 | if (role != r->raid_disk) { | 2218 | if (role != r->raid_disk) { |
2214 | if (__is_raid10_near(mddev->layout)) { | 2219 | if (rs_is_raid10(rs) && __is_raid10_near(mddev->layout)) { |
2215 | if (mddev->raid_disks % __raid10_near_copies(mddev->layout) || | 2220 | if (mddev->raid_disks % __raid10_near_copies(mddev->layout) || |
2216 | rs->raid_disks % rs->raid10_copies) { | 2221 | rs->raid_disks % rs->raid10_copies) { |
2217 | rs->ti->error = | 2222 | rs->ti->error = |
@@ -2994,6 +2999,9 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
2994 | } | 2999 | } |
2995 | } | 3000 | } |
2996 | 3001 | ||
3002 | /* Disable/enable discard support on raid set. */ | ||
3003 | configure_discard_support(rs); | ||
3004 | |||
2997 | mddev_unlock(&rs->md); | 3005 | mddev_unlock(&rs->md); |
2998 | return 0; | 3006 | return 0; |
2999 | 3007 | ||
@@ -3580,12 +3588,6 @@ static int raid_preresume(struct dm_target *ti) | |||
3580 | if (test_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags)) | 3588 | if (test_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags)) |
3581 | rs_update_sbs(rs); | 3589 | rs_update_sbs(rs); |
3582 | 3590 | ||
3583 | /* | ||
3584 | * Disable/enable discard support on raid set after any | ||
3585 | * conversion, because devices can have been added | ||
3586 | */ | ||
3587 | configure_discard_support(rs); | ||
3588 | |||
3589 | /* Load the bitmap from disk unless raid0 */ | 3591 | /* Load the bitmap from disk unless raid0 */ |
3590 | r = __load_dirty_region_bitmap(rs); | 3592 | r = __load_dirty_region_bitmap(rs); |
3591 | if (r) | 3593 | if (r) |