author     Heinz Mauelshagen <heinzm@redhat.com>   2016-05-19 12:49:26 -0400
committer  Mike Snitzer <snitzer@redhat.com>       2016-06-13 14:40:24 -0400
commit     702108d194e3649f69afcd2661282a0157c71e54
tree       56375c6cf68542bced7ae7086fc0326c76f74ba1 /drivers/md/dm-raid.c
parent     92c83d79b07ec1c53e0c74b8a7988799e00856db
dm raid: cleanup / provide infrastructure
Provide the necessary infrastructure to handle ctr flags and their
names, and clean up setting ti->error (a brief userspace sketch of the
resulting pattern follows the sign-offs):
- comment constructor flags
- introduce constructor flag manipulation
- introduce ti_error_*() functions to simplify
setting the error message (use in other targets?)
- introduce array to hold ctr flag <-> flag name mapping
- introduce argument-name-by-flag lookup functions for that array
- use those functions throughout the ctr call path
Signed-off-by: Heinz Mauelshagen <heinzm@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
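
To make the effect of the ti_error_*() helpers concrete, here is a minimal
userspace sketch of the pattern (not code from this patch: the struct
dm_target stub and the hard-coded EINVAL value are assumptions of the
sketch; the kernel versions operate on the real target and errno
definitions):

#include <stdio.h>

#define EINVAL 22	/* stand-in for the kernel's <uapi/asm-generic/errno-base.h> value */

/* Reduced stand-in for the kernel's struct dm_target (an assumption). */
struct dm_target {
	const char *error;
};

/* Mirror of the patch's ti_error_ret(): set ti->error, hand back @r. */
static int ti_error_ret(struct dm_target *ti, const char *errmsg, int r)
{
	ti->error = errmsg;	/* the kernel version casts away const here */
	return r;
}

/* Mirror of ti_error_einval(): the common -EINVAL case. */
static int ti_error_einval(struct dm_target *ti, const char *errmsg)
{
	return ti_error_ret(ti, errmsg, -EINVAL);
}

/* A constructor-style check now collapses to a single return statement. */
static int check_devs(struct dm_target *ti, int raid_devs, int parity_devs)
{
	if (raid_devs <= parity_devs)
		return ti_error_einval(ti, "Insufficient number of devices");

	return 0;
}

int main(void)
{
	struct dm_target ti = { .error = NULL };

	if (check_devs(&ti, 1, 2) < 0)
		printf("ctr failed: %s\n", ti.error);

	return 0;
}

Because the helper returns the error code itself, each failing check shrinks
from a four-line block to one return statement, which is exactly the
transformation the hunks below apply throughout the ctr call path.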
Diffstat (limited to 'drivers/md/dm-raid.c')
-rw-r--r--  drivers/md/dm-raid.c  424
1 file changed, 228 insertions(+), 196 deletions(-)
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 01aa511ebe44..ab7aa7d83364 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2010-2011 Neil Brown
- * Copyright (C) 2010-2015 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2010-2016 Red Hat, Inc. All rights reserved.
  *
  * This file is released under the GPL.
  */
@@ -47,18 +47,22 @@ struct raid_dev {
 
 /*
  * Flags for rs->ctr_flags field.
+ *
+ * 1 = no flag value
+ * 2 = flag with value
 */
-#define CTR_FLAG_SYNC              0x1
-#define CTR_FLAG_NOSYNC            0x2
-#define CTR_FLAG_REBUILD           0x4
-#define CTR_FLAG_DAEMON_SLEEP      0x8
-#define CTR_FLAG_MIN_RECOVERY_RATE 0x10
-#define CTR_FLAG_MAX_RECOVERY_RATE 0x20
-#define CTR_FLAG_MAX_WRITE_BEHIND  0x40
-#define CTR_FLAG_STRIPE_CACHE      0x80
-#define CTR_FLAG_REGION_SIZE       0x100
-#define CTR_FLAG_RAID10_COPIES     0x200
-#define CTR_FLAG_RAID10_FORMAT     0x400
+#define CTR_FLAG_SYNC              0x1   /* 1 */ /* Not with raid0! */
+#define CTR_FLAG_NOSYNC            0x2   /* 1 */ /* Not with raid0! */
+#define CTR_FLAG_REBUILD           0x4   /* 2 */ /* Not with raid0! */
+#define CTR_FLAG_DAEMON_SLEEP      0x8   /* 2 */ /* Not with raid0! */
+#define CTR_FLAG_MIN_RECOVERY_RATE 0x10  /* 2 */ /* Not with raid0! */
+#define CTR_FLAG_MAX_RECOVERY_RATE 0x20  /* 2 */ /* Not with raid0! */
+#define CTR_FLAG_MAX_WRITE_BEHIND  0x40  /* 2 */ /* Only with raid1! */
+#define CTR_FLAG_WRITE_MOSTLY      0x80  /* 2 */ /* Only with raid1! */
+#define CTR_FLAG_STRIPE_CACHE      0x100 /* 2 */ /* Only with raid4/5/6! */
+#define CTR_FLAG_REGION_SIZE       0x200 /* 2 */ /* Not with raid0! */
+#define CTR_FLAG_RAID10_COPIES     0x400 /* 2 */ /* Only with raid10 */
+#define CTR_FLAG_RAID10_FORMAT     0x800 /* 2 */ /* Only with raid10 */
 
 struct raid_set {
 	struct dm_target *ti;
@@ -101,6 +105,83 @@ static bool _in_range(long v, long min, long max)
 	return v >= min && v <= max;
 }
 
+/* ctr flag bit manipulation... */
+/* Set single @flag in @flags */
+static void _set_flag(uint32_t flag, uint32_t *flags)
+{
+	WARN_ON_ONCE(hweight32(flag) != 1);
+	*flags |= flag;
+}
+
+/* Test single @flag in @flags */
+static bool _test_flag(uint32_t flag, uint32_t flags)
+{
+	WARN_ON_ONCE(hweight32(flag) != 1);
+	return (flag & flags) ? true : false;
+}
+
+/* Return true if single @flag is set in @*flags, else set it and return false */
+static bool _test_and_set_flag(uint32_t flag, uint32_t *flags)
+{
+	if (_test_flag(flag, *flags))
+		return true;
+
+	_set_flag(flag, flags);
+	return false;
+}
+/* ...ctr and runtime flag bit manipulation */
+
+/* All table line arguments are defined here */
+static struct arg_name_flag {
+	const uint32_t flag;
+	const char *name;
+} _arg_name_flags[] = {
+	{ CTR_FLAG_SYNC, "sync"},
+	{ CTR_FLAG_NOSYNC, "nosync"},
+	{ CTR_FLAG_REBUILD, "rebuild"},
+	{ CTR_FLAG_DAEMON_SLEEP, "daemon_sleep"},
+	{ CTR_FLAG_MIN_RECOVERY_RATE, "min_recovery_rate"},
+	{ CTR_FLAG_MAX_RECOVERY_RATE, "max_recovery_rate"},
+	{ CTR_FLAG_MAX_WRITE_BEHIND, "max_write_behind"},
+	{ CTR_FLAG_WRITE_MOSTLY, "writemostly"},
+	{ CTR_FLAG_STRIPE_CACHE, "stripe_cache"},
+	{ CTR_FLAG_REGION_SIZE, "region_size"},
+	{ CTR_FLAG_RAID10_COPIES, "raid10_copies"},
+	{ CTR_FLAG_RAID10_FORMAT, "raid10_format"},
+};
+
+/* Return argument name string for given @flag */
+static const char *_argname_by_flag(const uint32_t flag)
+{
+	if (hweight32(flag) == 1) {
+		struct arg_name_flag *anf = _arg_name_flags + ARRAY_SIZE(_arg_name_flags);
+
+		while (anf-- > _arg_name_flags)
+			if (_test_flag(flag, anf->flag))
+				return anf->name;
+
+	} else
+		DMERR("%s called with more than one flag!", __func__);
+
+	return NULL;
+}
+
+/*
+ * Convenience functions to set ti->error to @errmsg and
+ * return @r in order to shorten code in a lot of places
+ */
+static int ti_error_ret(struct dm_target *ti, const char *errmsg, int r)
+{
+	ti->error = (char *) errmsg;
+	return r;
+}
+
+static int ti_error_einval(struct dm_target *ti, const char *errmsg)
+{
+	return ti_error_ret(ti, errmsg, -EINVAL);
+}
+/* END: convenience functions to set ti->error to @errmsg... */
+
 static char *raid10_md_layout_to_format(int layout)
 {
 	/*
@@ -157,16 +238,12 @@ static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *ra
 	unsigned i;
 	struct raid_set *rs;
 
-	if (raid_devs <= raid_type->parity_devs) {
-		ti->error = "Insufficient number of devices";
-		return ERR_PTR(-EINVAL);
-	}
+	if (raid_devs <= raid_type->parity_devs)
+		return ERR_PTR(ti_error_einval(ti, "Insufficient number of devices"));
 
 	rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL);
-	if (!rs) {
-		ti->error = "Cannot allocate raid context";
-		return ERR_PTR(-ENOMEM);
-	}
+	if (!rs)
+		return ERR_PTR(ti_error_ret(ti, "Cannot allocate raid context", -ENOMEM));
 
 	mddev_init(&rs->md);
 
@@ -226,7 +303,7 @@ static void context_free(struct raid_set *rs)
  * This code parses those words.  If there is a failure,
  * the caller must use context_free to unwind the operations.
  */
-static int parse_dev_parms(struct raid_set *rs, struct dm_arg_set *as)
+static int parse_dev_params(struct raid_set *rs, struct dm_arg_set *as)
 {
 	int i;
 	int rebuild = 0;
@@ -260,13 +337,12 @@ static int parse_dev_parms(struct raid_set *rs, struct dm_arg_set *as)
 			r = dm_get_device(rs->ti, arg,
 					  dm_table_get_mode(rs->ti->table),
 					  &rs->dev[i].meta_dev);
-			rs->ti->error = "RAID metadata device lookup failure";
 			if (r)
-				return r;
+				return ti_error_ret(rs->ti, "RAID metadata device lookup failure", r);
 
 			rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL);
 			if (!rs->dev[i].rdev.sb_page)
-				return -ENOMEM;
+				return ti_error_ret(rs->ti, "Failed to allocate superblock page", -ENOMEM);
 		}
 
 		arg = dm_shift_arg(as);
@@ -275,14 +351,11 @@ static int parse_dev_parms(struct raid_set *rs, struct dm_arg_set *as)
 
 		if (!strcmp(arg, "-")) {
 			if (!test_bit(In_sync, &rs->dev[i].rdev.flags) &&
-			    (!rs->dev[i].rdev.recovery_offset)) {
-				rs->ti->error = "Drive designated for rebuild not specified";
-				return -EINVAL;
-			}
+			    (!rs->dev[i].rdev.recovery_offset))
+				return ti_error_einval(rs->ti, "Drive designated for rebuild not specified");
 
-			rs->ti->error = "No data device supplied with metadata device";
 			if (rs->dev[i].meta_dev)
-				return -EINVAL;
+				return ti_error_einval(rs->ti, "No data device supplied with metadata device");
 
 			continue;
 		}
@@ -290,10 +363,8 @@ static int parse_dev_parms(struct raid_set *rs, struct dm_arg_set *as)
 		r = dm_get_device(rs->ti, arg,
 				  dm_table_get_mode(rs->ti->table),
 				  &rs->dev[i].data_dev);
-		if (r) {
-			rs->ti->error = "RAID device lookup failure";
-			return r;
-		}
+		if (r)
+			return ti_error_ret(rs->ti, "RAID device lookup failure", r);
 
 		if (rs->dev[i].meta_dev) {
 			metadata_available = 1;
@@ -322,8 +393,7 @@ static int parse_dev_parms(struct raid_set *rs, struct dm_arg_set *as)
 		 * User could specify 'nosync' option if desperate.
 		 */
 		DMERR("Unable to rebuild drive while array is not in-sync");
-		rs->ti->error = "RAID device lookup failure";
-		return -EINVAL;
+		return ti_error_einval(rs->ti, "Unable to rebuild drive while array is not in-sync");
 	}
 
 	return 0;
@@ -360,27 +430,20 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
 		/*
 		 * Validate user-supplied value.
 		 */
-		if (region_size > rs->ti->len) {
-			rs->ti->error = "Supplied region size is too large";
-			return -EINVAL;
-		}
+		if (region_size > rs->ti->len)
+			return ti_error_einval(rs->ti, "Supplied region size is too large");
 
 		if (region_size < min_region_size) {
 			DMERR("Supplied region_size (%lu sectors) below minimum (%lu)",
 			      region_size, min_region_size);
-			rs->ti->error = "Supplied region size is too small";
-			return -EINVAL;
+			return ti_error_einval(rs->ti, "Supplied region size is too small");
 		}
 
-		if (!is_power_of_2(region_size)) {
-			rs->ti->error = "Region size is not a power of 2";
-			return -EINVAL;
-		}
+		if (!is_power_of_2(region_size))
+			return ti_error_einval(rs->ti, "Region size is not a power of 2");
 
-		if (region_size < rs->md.chunk_sectors) {
-			rs->ti->error = "Region size is smaller than the chunk size";
-			return -EINVAL;
-		}
+		if (region_size < rs->md.chunk_sectors)
+			return ti_error_einval(rs->ti, "Region size is smaller than the chunk size");
 	}
 
 	/*
@@ -522,14 +585,13 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
 	sector_t sectors_per_dev = rs->ti->len;
 	sector_t max_io_len;
 	const char *arg, *key;
+	struct raid_dev *rd;
 
 	arg = dm_shift_arg(as);
 	num_raid_params--; /* Account for chunk_size argument */
 
-	if (kstrtouint(arg, 10, &value) < 0) {
-		rs->ti->error = "Bad numerical argument given for chunk_size";
-		return -EINVAL;
-	}
+	if (kstrtouint(arg, 10, &value) < 0)
+		return ti_error_einval(rs->ti, "Bad numerical argument given for chunk_size");
 
 	/*
 	 * First, parse the in-order required arguments
@@ -539,13 +601,10 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
 		if (value)
 			DMERR("Ignoring chunk size parameter for RAID 1");
 		value = 0;
-	} else if (!is_power_of_2(value)) {
-		rs->ti->error = "Chunk size must be a power of 2";
-		return -EINVAL;
-	} else if (value < 8) {
-		rs->ti->error = "Chunk size value is too small";
-		return -EINVAL;
-	}
+	} else if (!is_power_of_2(value))
+		return ti_error_einval(rs->ti, "Chunk size must be a power of 2");
+	else if (value < 8)
+		return ti_error_einval(rs->ti, "Chunk size value is too small");
 
 	rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;
 
@@ -576,144 +635,134 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
 	 */
 	for (i = 0; i < num_raid_params; i++) {
 		arg = dm_shift_arg(as);
-		if (!arg) {
-			rs->ti->error = "Not enough raid parameters given";
-			return -EINVAL;
-		}
+		if (!arg)
+			return ti_error_einval(rs->ti, "Not enough raid parameters given");
 
 		if (!strcasecmp(arg, "nosync")) {
 			rs->md.recovery_cp = MaxSector;
-			rs->ctr_flags |= CTR_FLAG_NOSYNC;
+			_set_flag(CTR_FLAG_NOSYNC, &rs->ctr_flags);
 			continue;
 		}
 		if (!strcasecmp(arg, "sync")) {
 			rs->md.recovery_cp = 0;
-			rs->ctr_flags |= CTR_FLAG_SYNC;
+			_set_flag(CTR_FLAG_SYNC, &rs->ctr_flags);
 			continue;
 		}
 
-		/* The rest of the optional arguments come in key/value pairs */
-		if ((i + 1) >= num_raid_params) {
-			rs->ti->error = "Wrong number of raid parameters given";
-			return -EINVAL;
-		}
-
 		key = arg;
 		arg = dm_shift_arg(as);
 		i++; /* Account for the argument pairs */
+		if (!arg)
+			return ti_error_einval(rs->ti, "Wrong number of raid parameters given");
 
-		/* Parameters that take a string value are checked here. */
-		if (!strcasecmp(key, "raid10_format")) {
-			if (rs->raid_type->level != 10) {
-				rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type";
-				return -EINVAL;
-			}
+		/*
+		 * Parameters that take a string value are checked here.
+		 */
+
+		if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_RAID10_FORMAT))) {
+			if (_test_and_set_flag(CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags))
+				return ti_error_einval(rs->ti, "Only one raid10_format argument pair allowed");
+			if (rs->raid_type->level != 10)
+				return ti_error_einval(rs->ti, "'raid10_format' is an invalid parameter for this RAID type");
 			if (strcmp("near", arg) &&
 			    strcmp("far", arg) &&
-			    strcmp("offset", arg)) {
-				rs->ti->error = "Invalid 'raid10_format' value given";
-				return -EINVAL;
-			}
+			    strcmp("offset", arg))
+				return ti_error_einval(rs->ti, "Invalid 'raid10_format' value given");
+
 			raid10_format = (char *) arg;
-			rs->ctr_flags |= CTR_FLAG_RAID10_FORMAT;
 			continue;
 		}
 
-		if (kstrtouint(arg, 10, &value) < 0) {
-			rs->ti->error = "Bad numerical argument given in raid params";
-			return -EINVAL;
-		}
+		if (kstrtouint(arg, 10, &value) < 0)
+			return ti_error_einval(rs->ti, "Bad numerical argument given in raid params");
+
+		if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_REBUILD))) {
+			/*
+			 * "rebuild" is being passed in by userspace to provide
+			 * indexes of replaced devices and to set up additional
+			 * devices on raid level takeover.
+			 */
+			if (!_in_range(value, 0, rs->md.raid_disks - 1))
+				return ti_error_einval(rs->ti, "Invalid rebuild index given");
+
+			rd = rs->dev + value;
+			clear_bit(In_sync, &rd->rdev.flags);
+			clear_bit(Faulty, &rd->rdev.flags);
+			rd->rdev.recovery_offset = 0;
+			_set_flag(CTR_FLAG_REBUILD, &rs->ctr_flags);
+		} else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_WRITE_MOSTLY))) {
+			if (rs->raid_type->level != 1)
+				return ti_error_einval(rs->ti, "write_mostly option is only valid for RAID1");
+
+			if (!_in_range(value, 0, rs->md.raid_disks - 1))
+				return ti_error_einval(rs->ti, "Invalid write_mostly index given");
 
-		/* Parameters that take a numeric value are checked here */
-		if (!strcasecmp(key, "rebuild")) {
-			if (value >= rs->md.raid_disks) {
-				rs->ti->error = "Invalid rebuild index given";
-				return -EINVAL;
-			}
-			clear_bit(In_sync, &rs->dev[value].rdev.flags);
-			rs->dev[value].rdev.recovery_offset = 0;
-			rs->ctr_flags |= CTR_FLAG_REBUILD;
-		} else if (!strcasecmp(key, "write_mostly")) {
-			if (rs->raid_type->level != 1) {
-				rs->ti->error = "write_mostly option is only valid for RAID1";
-				return -EINVAL;
-			}
-			if (value >= rs->md.raid_disks) {
-				rs->ti->error = "Invalid write_mostly drive index given";
-				return -EINVAL;
-			}
 			set_bit(WriteMostly, &rs->dev[value].rdev.flags);
-		} else if (!strcasecmp(key, "max_write_behind")) {
-			if (rs->raid_type->level != 1) {
-				rs->ti->error = "max_write_behind option is only valid for RAID1";
-				return -EINVAL;
-			}
-			rs->ctr_flags |= CTR_FLAG_MAX_WRITE_BEHIND;
+			_set_flag(CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags);
+		} else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_MAX_WRITE_BEHIND))) {
+			if (rs->raid_type->level != 1)
+				return ti_error_einval(rs->ti, "max_write_behind option is only valid for RAID1");
+
+			if (_test_and_set_flag(CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags))
+				return ti_error_einval(rs->ti, "Only one max_write_behind argument pair allowed");
 
 			/*
 			 * In device-mapper, we specify things in sectors, but
 			 * MD records this value in kB
 			 */
 			value /= 2;
-			if (value > COUNTER_MAX) {
-				rs->ti->error = "Max write-behind limit out of range";
-				return -EINVAL;
-			}
+			if (value > COUNTER_MAX)
+				return ti_error_einval(rs->ti, "Max write-behind limit out of range");
+
 			rs->md.bitmap_info.max_write_behind = value;
-		} else if (!strcasecmp(key, "daemon_sleep")) {
-			rs->ctr_flags |= CTR_FLAG_DAEMON_SLEEP;
-			if (!value || (value > MAX_SCHEDULE_TIMEOUT)) {
-				rs->ti->error = "daemon sleep period out of range";
-				return -EINVAL;
-			}
+		} else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_DAEMON_SLEEP))) {
+			if (_test_and_set_flag(CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags))
+				return ti_error_einval(rs->ti, "Only one daemon_sleep argument pair allowed");
+			if (!value || (value > MAX_SCHEDULE_TIMEOUT))
+				return ti_error_einval(rs->ti, "daemon sleep period out of range");
 			rs->md.bitmap_info.daemon_sleep = value;
-		} else if (!strcasecmp(key, "stripe_cache")) {
-			rs->ctr_flags |= CTR_FLAG_STRIPE_CACHE;
-
+		} else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_STRIPE_CACHE))) {
+			if (_test_and_set_flag(CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags))
+				return ti_error_einval(rs->ti, "Only one stripe_cache argument pair allowed");
 			/*
 			 * In device-mapper, we specify things in sectors, but
 			 * MD records this value in kB
 			 */
 			value /= 2;
 
-			if ((rs->raid_type->level != 5) &&
-			    (rs->raid_type->level != 6)) {
-				rs->ti->error = "Inappropriate argument: stripe_cache";
-				return -EINVAL;
-			}
-			if (raid5_set_cache_size(&rs->md, (int)value)) {
-				rs->ti->error = "Bad stripe_cache size";
-				return -EINVAL;
-			}
-		} else if (!strcasecmp(key, "min_recovery_rate")) {
-			rs->ctr_flags |= CTR_FLAG_MIN_RECOVERY_RATE;
-			if (value > INT_MAX) {
-				rs->ti->error = "min_recovery_rate out of range";
-				return -EINVAL;
-			}
+			if (!_in_range(rs->raid_type->level, 4, 6))
+				return ti_error_einval(rs->ti, "Inappropriate argument: stripe_cache");
+			if (raid5_set_cache_size(&rs->md, (int)value))
+				return ti_error_einval(rs->ti, "Bad stripe_cache size");
+
+		} else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_MIN_RECOVERY_RATE))) {
+			if (_test_and_set_flag(CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags))
+				return ti_error_einval(rs->ti, "Only one min_recovery_rate argument pair allowed");
+			if (value > INT_MAX)
+				return ti_error_einval(rs->ti, "min_recovery_rate out of range");
 			rs->md.sync_speed_min = (int)value;
-		} else if (!strcasecmp(key, "max_recovery_rate")) {
-			rs->ctr_flags |= CTR_FLAG_MAX_RECOVERY_RATE;
-			if (value > INT_MAX) {
-				rs->ti->error = "max_recovery_rate out of range";
-				return -EINVAL;
-			}
+		} else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_MAX_RECOVERY_RATE))) {
+			if (_test_and_set_flag(CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags))
+				return ti_error_einval(rs->ti, "Only one max_recovery_rate argument pair allowed");
+			if (value > INT_MAX)
+				return ti_error_einval(rs->ti, "max_recovery_rate out of range");
 			rs->md.sync_speed_max = (int)value;
-		} else if (!strcasecmp(key, "region_size")) {
-			rs->ctr_flags |= CTR_FLAG_REGION_SIZE;
+		} else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_REGION_SIZE))) {
+			if (_test_and_set_flag(CTR_FLAG_REGION_SIZE, &rs->ctr_flags))
+				return ti_error_einval(rs->ti, "Only one region_size argument pair allowed");
+
 			region_size = value;
-		} else if (!strcasecmp(key, "raid10_copies") &&
-			   (rs->raid_type->level == 10)) {
-			if ((value < 2) || (value > 0xFF)) {
-				rs->ti->error = "Bad value for 'raid10_copies'";
-				return -EINVAL;
-			}
-			rs->ctr_flags |= CTR_FLAG_RAID10_COPIES;
+		} else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_RAID10_COPIES))) {
+			if (_test_and_set_flag(CTR_FLAG_RAID10_COPIES, &rs->ctr_flags))
+				return ti_error_einval(rs->ti, "Only one raid10_copies argument pair allowed");
+
+			if (!_in_range(value, 2, rs->md.raid_disks))
+				return ti_error_einval(rs->ti, "Bad value for 'raid10_copies'");
+
 			raid10_copies = value;
 		} else {
 			DMERR("Unable to parse RAID parameter: %s", key);
-			rs->ti->error = "Unable to parse RAID parameters";
-			return -EINVAL;
+			return ti_error_einval(rs->ti, "Unable to parse RAID parameters");
 		}
 	}
 
@@ -729,19 +778,15 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
 		return -EINVAL;
 
 	if (rs->raid_type->level == 10) {
-		if (raid10_copies > rs->md.raid_disks) {
-			rs->ti->error = "Not enough devices to satisfy specification";
-			return -EINVAL;
-		}
+		if (raid10_copies > rs->md.raid_disks)
+			return ti_error_einval(rs->ti, "Not enough devices to satisfy specification");
 
 		/*
 		 * If the format is not "near", we only support
 		 * two copies at the moment.
 		 */
-		if (strcmp("near", raid10_format) && (raid10_copies > 2)) {
-			rs->ti->error = "Too many copies for given RAID10 format.";
-			return -EINVAL;
-		}
+		if (strcmp("near", raid10_format) && (raid10_copies > 2))
+			return ti_error_einval(rs->ti, "Too many copies for given RAID10 format.");
 
 		/* (Len * #mirrors) / #devices */
 		sectors_per_dev = rs->ti->len * raid10_copies;
@@ -752,10 +797,9 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
 		rs->md.new_layout = rs->md.layout;
 	} else if ((!rs->raid_type->level || rs->raid_type->level > 1) &&
 		   sector_div(sectors_per_dev,
-			      (rs->md.raid_disks - rs->raid_type->parity_devs))) {
-		rs->ti->error = "Target length not divisible by number of data devices";
-		return -EINVAL;
-	}
+			      (rs->md.raid_disks - rs->raid_type->parity_devs)))
+		return ti_error_einval(rs->ti, "Target length not divisible by number of data devices");
+
 	rs->md.dev_sectors = sectors_per_dev;
 
 	/* Assume there are no metadata devices until the drives are parsed */
@@ -1035,11 +1079,9 @@ static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev)
 		if (!test_bit(FirstUse, &r->flags) && (r->raid_disk >= 0)) {
 			role = le32_to_cpu(sb2->array_position);
 			if (role != r->raid_disk) {
-				if (rs->raid_type->level != 1) {
-					rs->ti->error = "Cannot change device "
-							"positions in RAID array";
-					return -EINVAL;
-				}
+				if (rs->raid_type->level != 1)
+					return ti_error_einval(rs->ti, "Cannot change device "
+							       "positions in RAID array");
 				DMINFO("RAID1 device #%d now at position #%d",
 				       role, r->raid_disk);
 			}
@@ -1170,18 +1212,15 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
 	if (!freshest)
 		return 0;
 
-	if (validate_raid_redundancy(rs)) {
-		rs->ti->error = "Insufficient redundancy to activate array";
-		return -EINVAL;
-	}
+	if (validate_raid_redundancy(rs))
+		return ti_error_einval(rs->ti, "Insufficient redundancy to activate array");
 
 	/*
 	 * Validation of the freshest device provides the source of
 	 * validation for the remaining devices.
 	 */
-	ti->error = "Unable to assemble array: Invalid superblocks";
 	if (super_validate(rs, freshest))
-		return -EINVAL;
+		return ti_error_einval(rs->ti, "Unable to assemble array: Invalid superblocks");
 
 	rdev_for_each(rdev, mddev)
 		if ((rdev != freshest) && super_validate(rs, rdev))
@@ -1265,16 +1304,12 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
 
 	/* Must have <raid_type> */
 	arg = dm_shift_arg(&as);
-	if (!arg) {
-		ti->error = "No arguments";
-		return -EINVAL;
-	}
+	if (!arg)
+		return ti_error_einval(rs->ti, "No arguments");
 
 	rt = get_raid_type(arg);
-	if (!rt) {
-		ti->error = "Unrecognised raid_type";
-		return -EINVAL;
-	}
+	if (!rt)
+		return ti_error_einval(rs->ti, "Unrecognised raid_type");
 
 	/* Must have <#raid_params> */
 	if (dm_read_arg_group(_args, &as, &num_raid_params, &ti->error))
@@ -1287,10 +1322,8 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	if (dm_read_arg(_args + 1, &as_nrd, &num_raid_devs, &ti->error))
 		return -EINVAL;
 
-	if (!_in_range(num_raid_devs, 1, MAX_RAID_DEVICES)) {
-		ti->error = "Invalid number of supplied raid devices";
-		return -EINVAL;
-	}
+	if (!_in_range(num_raid_devs, 1, MAX_RAID_DEVICES))
+		return ti_error_einval(rs->ti, "Invalid number of supplied raid devices");
 
 	rs = context_alloc(ti, rt, num_raid_devs);
 	if (IS_ERR(rs))
@@ -1300,7 +1333,7 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	if (r)
 		goto bad;
 
-	r = parse_dev_parms(rs, &as);
+	r = parse_dev_params(rs, &as);
 	if (r)
 		goto bad;
 
@@ -1330,8 +1363,7 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	}
 
 	if (ti->len != rs->md.array_sectors) {
-		ti->error = "Array size does not match requested target length";
-		r = -EINVAL;
+		r = ti_error_einval(ti, "Array size does not match requested target length");
 		goto size_mismatch;
 	}
 	rs->callbacks.congested_fn = raid_is_congested;
@@ -1751,7 +1783,7 @@ static void raid_resume(struct dm_target *ti)
 
 static struct target_type raid_target = {
 	.name = "raid",
-	.version = {1, 8, 0},
+	.version = {1, 8, 1},
 	.module = THIS_MODULE,
 	.ctr = raid_ctr,
 	.dtr = raid_dtr,
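
For reference, the duplicate-argument rejection that _test_and_set_flag()
makes possible in the hunks above can be exercised with a small userspace
sketch (an assumption-laden reduction: __builtin_popcount() stands in for
the kernel's hweight32(), and the WARN_ON_ONCE() single-bit check is folded
into the test):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CTR_FLAG_SYNC	0x1
#define CTR_FLAG_NOSYNC	0x2

/* Mirror of _test_flag(); __builtin_popcount() replaces hweight32(). */
static bool _test_flag(uint32_t flag, uint32_t flags)
{
	return __builtin_popcount(flag) == 1 && (flag & flags);
}

/* Mirror of _test_and_set_flag(): true means "already seen, reject it". */
static bool _test_and_set_flag(uint32_t flag, uint32_t *flags)
{
	if (_test_flag(flag, *flags))
		return true;

	*flags |= flag;
	return false;
}

int main(void)
{
	uint32_t ctr_flags = 0;

	/* First "nosync" argument pair is accepted (prints 0)... */
	printf("%d\n", _test_and_set_flag(CTR_FLAG_NOSYNC, &ctr_flags));
	/* ...a repeated one is detected as a duplicate (prints 1). */
	printf("%d\n", _test_and_set_flag(CTR_FLAG_NOSYNC, &ctr_flags));

	return 0;
}

This single test-and-set step is what backs the various "Only one ...
argument pair allowed" rejections added to parse_raid_params() above.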