path: root/drivers/md/dm-raid.c
author	Mike Snitzer <snitzer@redhat.com>	2016-05-31 14:26:52 -0400
committer	Mike Snitzer <snitzer@redhat.com>	2016-06-14 17:24:57 -0400
commit	bd83a4c4f838d0115a5754a80e1bd1fdae82ab6f (patch)
tree	f8f29ec09f4eb6a018953f140ea95cea63227ffc /drivers/md/dm-raid.c
parent	43157840fddb01653b2446e7ee51e910a9fc584e (diff)
dm raid: remove ti_error_* wrappers
These ti_error_* wrappers added very little. No other DM target has ever gone to such lengths to wrap setting ti->error.

Also fixes some NULL dereferences via rs->ti->error.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
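In short, every call site that previously funneled error reporting through a wrapper call is expanded into an explicit assignment plus return. A condensed before/after illustration of the pattern, lifted from the validate_region_size() hunk below (shown standalone for clarity, not as a compilable excerpt of the driver):

	/* Before: wrapper stores the message and returns -EINVAL */
	if (!is_power_of_2(region_size))
		return ti_error_einval(rs->ti, "Region size is not a power of 2");

	/* After: open-coded, matching every other DM target */
	if (!is_power_of_2(region_size)) {
		rs->ti->error = "Region size is not a power of 2";
		return -EINVAL;
	}

The NULL dereference fix falls out of the same rewrite: in raid_ctr(), the old code reached through rs->ti before rs had been allocated by context_alloc(); the open-coded version assigns ti->error directly (see the raid_ctr() hunks at the end of the diff).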
Diffstat (limited to 'drivers/md/dm-raid.c')
-rw-r--r--	drivers/md/dm-raid.c	401
1 file changed, 249 insertions(+), 152 deletions(-)
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 50d2901fd9f4..06a4d170e724 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -401,22 +401,6 @@ static bool rt_is_raid456(struct raid_type *rt)
 }
 /* END: raid level bools */
 
-/*
- * Convenience functions to set ti->error to @errmsg and
- * return @r in order to shorten code in a lot of places
- */
-static int ti_error_ret(struct dm_target *ti, const char *errmsg, int r)
-{
-	ti->error = (char *) errmsg;
-	return r;
-}
-
-static int ti_error_einval(struct dm_target *ti, const char *errmsg)
-{
-	return ti_error_ret(ti, errmsg, -EINVAL);
-}
-/* END: convenience functions to set ti->error to @errmsg... */
-
 /* Return invalid ctr flags for the raid level of @rs */
 static uint32_t _invalid_flags(struct raid_set *rs)
 {
@@ -441,8 +425,10 @@ static uint32_t _invalid_flags(struct raid_set *rs)
  */
 static int rs_check_for_invalid_flags(struct raid_set *rs)
 {
-	if (_test_flags(rs->ctr_flags, _invalid_flags(rs)))
-		return ti_error_einval(rs->ti, "Invalid flag combined");
+	if (_test_flags(rs->ctr_flags, _invalid_flags(rs))) {
+		rs->ti->error = "Invalid flag combined";
+		return -EINVAL;
+	}
 
 	return 0;
 }
@@ -644,12 +630,16 @@ static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *ra
 	unsigned i;
 	struct raid_set *rs;
 
-	if (raid_devs <= raid_type->parity_devs)
-		return ERR_PTR(ti_error_einval(ti, "Insufficient number of devices"));
+	if (raid_devs <= raid_type->parity_devs) {
+		ti->error = "Insufficient number of devices";
+		return ERR_PTR(-EINVAL);
+	}
 
 	rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL);
-	if (!rs)
-		return ERR_PTR(ti_error_ret(ti, "Cannot allocate raid context", -ENOMEM));
+	if (!rs) {
+		ti->error = "Cannot allocate raid context";
+		return ERR_PTR(-ENOMEM);
+	}
 
 	mddev_init(&rs->md);
 
@@ -743,15 +733,18 @@ static int parse_dev_params(struct raid_set *rs, struct dm_arg_set *as)
 			return -EINVAL;
 
 		if (strcmp(arg, "-")) {
-			r = dm_get_device(rs->ti, arg,
-					  dm_table_get_mode(rs->ti->table),
-					  &rs->dev[i].meta_dev);
-			if (r)
-				return ti_error_ret(rs->ti, "RAID metadata device lookup failure", r);
+			r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
+					  &rs->dev[i].meta_dev);
+			if (r) {
+				rs->ti->error = "RAID metadata device lookup failure";
+				return r;
+			}
 
 			rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL);
-			if (!rs->dev[i].rdev.sb_page)
-				return ti_error_ret(rs->ti, "Failed to allocate superblock page", -ENOMEM);
+			if (!rs->dev[i].rdev.sb_page) {
+				rs->ti->error = "Failed to allocate superblock page";
+				return -ENOMEM;
+			}
 		}
 
 		arg = dm_shift_arg(as);
@@ -760,20 +753,25 @@ static int parse_dev_params(struct raid_set *rs, struct dm_arg_set *as)
 
 		if (!strcmp(arg, "-")) {
 			if (!test_bit(In_sync, &rs->dev[i].rdev.flags) &&
-			    (!rs->dev[i].rdev.recovery_offset))
-				return ti_error_einval(rs->ti, "Drive designated for rebuild not specified");
+			    (!rs->dev[i].rdev.recovery_offset)) {
+				rs->ti->error = "Drive designated for rebuild not specified";
+				return -EINVAL;
+			}
 
-			if (rs->dev[i].meta_dev)
-				return ti_error_einval(rs->ti, "No data device supplied with metadata device");
+			if (rs->dev[i].meta_dev) {
+				rs->ti->error = "No data device supplied with metadata device";
+				return -EINVAL;
+			}
 
 			continue;
 		}
 
-		r = dm_get_device(rs->ti, arg,
-				  dm_table_get_mode(rs->ti->table),
-				  &rs->dev[i].data_dev);
-		if (r)
-			return ti_error_ret(rs->ti, "RAID device lookup failure", r);
+		r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
+				  &rs->dev[i].data_dev);
+		if (r) {
+			rs->ti->error = "RAID device lookup failure";
+			return r;
+		}
 
 		if (rs->dev[i].meta_dev) {
 			metadata_available = 1;
@@ -801,8 +799,8 @@ static int parse_dev_params(struct raid_set *rs, struct dm_arg_set *as)
 		 *
 		 * User could specify 'nosync' option if desperate.
 		 */
-		DMERR("Unable to rebuild drive while array is not in-sync");
-		return ti_error_einval(rs->ti, "Unable to rebuild drive while array is not in-sync");
+		rs->ti->error = "Unable to rebuild drive while array is not in-sync";
+		return -EINVAL;
 	}
 
 	return 0;
@@ -839,20 +837,27 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
 		/*
 		 * Validate user-supplied value.
 		 */
-		if (region_size > rs->ti->len)
-			return ti_error_einval(rs->ti, "Supplied region size is too large");
+		if (region_size > rs->ti->len) {
+			rs->ti->error = "Supplied region size is too large";
+			return -EINVAL;
+		}
 
 		if (region_size < min_region_size) {
 			DMERR("Supplied region_size (%lu sectors) below minimum (%lu)",
 			      region_size, min_region_size);
-			return ti_error_einval(rs->ti, "Supplied region size is too small");
+			rs->ti->error = "Supplied region size is too small";
+			return -EINVAL;
 		}
 
-		if (!is_power_of_2(region_size))
-			return ti_error_einval(rs->ti, "Region size is not a power of 2");
+		if (!is_power_of_2(region_size)) {
+			rs->ti->error = "Region size is not a power of 2";
+			return -EINVAL;
+		}
 
-		if (region_size < rs->md.chunk_sectors)
-			return ti_error_einval(rs->ti, "Region size is smaller than the chunk size");
+		if (region_size < rs->md.chunk_sectors) {
+			rs->ti->error = "Region size is smaller than the chunk size";
+			return -EINVAL;
+		}
 	}
 
 	/*
@@ -1000,8 +1005,10 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
 	arg = dm_shift_arg(as);
 	num_raid_params--; /* Account for chunk_size argument */
 
-	if (kstrtouint(arg, 10, &value) < 0)
-		return ti_error_einval(rs->ti, "Bad numerical argument given for chunk_size");
+	if (kstrtouint(arg, 10, &value) < 0) {
+		rs->ti->error = "Bad numerical argument given for chunk_size";
+		return -EINVAL;
+	}
 
 	/*
 	 * First, parse the in-order required arguments
@@ -1011,10 +1018,13 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
 		if (value)
 			DMERR("Ignoring chunk size parameter for RAID 1");
 		value = 0;
-	} else if (!is_power_of_2(value))
-		return ti_error_einval(rs->ti, "Chunk size must be a power of 2");
-	else if (value < 8)
-		return ti_error_einval(rs->ti, "Chunk size value is too small");
+	} else if (!is_power_of_2(value)) {
+		rs->ti->error = "Chunk size must be a power of 2";
+		return -EINVAL;
+	} else if (value < 8) {
+		rs->ti->error = "Chunk size value is too small";
+		return -EINVAL;
+	}
 
 	rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;
 
@@ -1045,49 +1055,67 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
 	 */
 	for (i = 0; i < num_raid_params; i++) {
 		key = dm_shift_arg(as);
-		if (!key)
-			return ti_error_einval(rs->ti, "Not enough raid parameters given");
+		if (!key) {
+			rs->ti->error = "Not enough raid parameters given";
+			return -EINVAL;
+		}
 
 		if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_NOSYNC))) {
-			if (_test_and_set_flag(CTR_FLAG_NOSYNC, &rs->ctr_flags))
-				return ti_error_einval(rs->ti, "Only one 'nosync' argument allowed");
+			if (_test_and_set_flag(CTR_FLAG_NOSYNC, &rs->ctr_flags)) {
+				rs->ti->error = "Only one 'nosync' argument allowed";
+				return -EINVAL;
+			}
 			rs->md.recovery_cp = MaxSector;
 			continue;
 		}
 		if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_SYNC))) {
-			if (_test_and_set_flag(CTR_FLAG_SYNC, &rs->ctr_flags))
-				return ti_error_einval(rs->ti, "Only one 'sync' argument allowed");
+			if (_test_and_set_flag(CTR_FLAG_SYNC, &rs->ctr_flags)) {
+				rs->ti->error = "Only one 'sync' argument allowed";
+				return -EINVAL;
+			}
 			rs->md.recovery_cp = 0;
 			continue;
 		}
 		if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_RAID10_USE_NEAR_SETS))) {
-			if (_test_and_set_flag(CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags))
-				return ti_error_einval(rs->ti, "Only one 'raid10_use_new_sets' argument allowed");
+			if (_test_and_set_flag(CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) {
+				rs->ti->error = "Only one 'raid10_use_new_sets' argument allowed";
+				return -EINVAL;
+			}
 			continue;
 		}
 
 		arg = dm_shift_arg(as);
 		i++; /* Account for the argument pairs */
-		if (!arg)
-			return ti_error_einval(rs->ti, "Wrong number of raid parameters given");
+		if (!arg) {
+			rs->ti->error = "Wrong number of raid parameters given";
+			return -EINVAL;
+		}
 
 		/*
 		 * Parameters that take a string value are checked here.
 		 */
 
 		if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_RAID10_FORMAT))) {
-			if (_test_and_set_flag(CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags))
-				return ti_error_einval(rs->ti, "Only one 'raid10_format' argument pair allowed");
-			if (!rt_is_raid10(rt))
-				return ti_error_einval(rs->ti, "'raid10_format' is an invalid parameter for this RAID type");
+			if (_test_and_set_flag(CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags)) {
+				rs->ti->error = "Only one 'raid10_format' argument pair allowed";
+				return -EINVAL;
+			}
+			if (!rt_is_raid10(rt)) {
+				rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type";
+				return -EINVAL;
+			}
 			raid10_format = raid10_name_to_format(arg);
-			if (raid10_format < 0)
-				return ti_error_ret(rs->ti, "Invalid 'raid10_format' value given", raid10_format);
+			if (raid10_format < 0) {
+				rs->ti->error = "Invalid 'raid10_format' value given";
+				return raid10_format;
+			}
 			continue;
 		}
 
-		if (kstrtouint(arg, 10, &value) < 0)
-			return ti_error_einval(rs->ti, "Bad numerical argument given in raid params");
+		if (kstrtouint(arg, 10, &value) < 0) {
+			rs->ti->error = "Bad numerical argument given in raid params";
+			return -EINVAL;
+		}
 
 		if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_REBUILD))) {
 			/*
@@ -1095,11 +1123,15 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
 			 * indexes of replaced devices and to set up additional
 			 * devices on raid level takeover.
 			 */
-			if (!_in_range(value, 0, rs->raid_disks - 1))
-				return ti_error_einval(rs->ti, "Invalid rebuild index given");
+			if (!_in_range(value, 0, rs->raid_disks - 1)) {
+				rs->ti->error = "Invalid rebuild index given";
+				return -EINVAL;
+			}
 
-			if (test_and_set_bit(value, (void *) rs->rebuild_disks))
-				return ti_error_einval(rs->ti, "rebuild for this index already given");
+			if (test_and_set_bit(value, (void *) rs->rebuild_disks)) {
+				rs->ti->error = "rebuild for this index already given";
+				return -EINVAL;
+			}
 
 			rd = rs->dev + value;
 			clear_bit(In_sync, &rd->rdev.flags);
@@ -1107,98 +1139,139 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
 			rd->rdev.recovery_offset = 0;
 			_set_flag(CTR_FLAG_REBUILD, &rs->ctr_flags);
 		} else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_WRITE_MOSTLY))) {
-			if (!rt_is_raid1(rt))
-				return ti_error_einval(rs->ti, "write_mostly option is only valid for RAID1");
+			if (!rt_is_raid1(rt)) {
+				rs->ti->error = "write_mostly option is only valid for RAID1";
+				return -EINVAL;
+			}
 
-			if (!_in_range(value, 0, rs->md.raid_disks - 1))
-				return ti_error_einval(rs->ti, "Invalid write_mostly index given");
+			if (!_in_range(value, 0, rs->md.raid_disks - 1)) {
+				rs->ti->error = "Invalid write_mostly index given";
+				return -EINVAL;
+			}
 
 			set_bit(WriteMostly, &rs->dev[value].rdev.flags);
 			_set_flag(CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags);
 		} else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_MAX_WRITE_BEHIND))) {
-			if (!rt_is_raid1(rt))
-				return ti_error_einval(rs->ti, "max_write_behind option is only valid for RAID1");
+			if (!rt_is_raid1(rt)) {
+				rs->ti->error = "max_write_behind option is only valid for RAID1";
+				return -EINVAL;
+			}
 
-			if (_test_and_set_flag(CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags))
-				return ti_error_einval(rs->ti, "Only one max_write_behind argument pair allowed");
+			if (_test_and_set_flag(CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags)) {
+				rs->ti->error = "Only one max_write_behind argument pair allowed";
+				return -EINVAL;
+			}
 
 			/*
 			 * In device-mapper, we specify things in sectors, but
 			 * MD records this value in kB
 			 */
 			value /= 2;
-			if (value > COUNTER_MAX)
-				return ti_error_einval(rs->ti, "Max write-behind limit out of range");
+			if (value > COUNTER_MAX) {
+				rs->ti->error = "Max write-behind limit out of range";
+				return -EINVAL;
+			}
 
 			rs->md.bitmap_info.max_write_behind = value;
 		} else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_DAEMON_SLEEP))) {
-			if (_test_and_set_flag(CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags))
-				return ti_error_einval(rs->ti, "Only one daemon_sleep argument pair allowed");
-			if (!value || (value > MAX_SCHEDULE_TIMEOUT))
-				return ti_error_einval(rs->ti, "daemon sleep period out of range");
+			if (_test_and_set_flag(CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags)) {
+				rs->ti->error = "Only one daemon_sleep argument pair allowed";
+				return -EINVAL;
+			}
+			if (!value || (value > MAX_SCHEDULE_TIMEOUT)) {
+				rs->ti->error = "daemon sleep period out of range";
+				return -EINVAL;
+			}
 			rs->md.bitmap_info.daemon_sleep = value;
 		} else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_DATA_OFFSET))) {
 			/* Userspace passes new data_offset after having extended the the data image LV */
-			if (_test_and_set_flag(CTR_FLAG_DATA_OFFSET, &rs->ctr_flags))
-				return ti_error_einval(rs->ti, "Only one data_offset argument pair allowed");
-
+			if (_test_and_set_flag(CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) {
+				rs->ti->error = "Only one data_offset argument pair allowed";
+				return -EINVAL;
+			}
 			/* Ensure sensible data offset */
-			if (value < 0)
-				return ti_error_einval(rs->ti, "Bogus data_offset value");
-
+			if (value < 0) {
+				rs->ti->error = "Bogus data_offset value";
+				return -EINVAL;
+			}
 			rs->data_offset = value;
 		} else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_DELTA_DISKS))) {
 			/* Define the +/-# of disks to add to/remove from the given raid set */
-			if (_test_and_set_flag(CTR_FLAG_DELTA_DISKS, &rs->ctr_flags))
-				return ti_error_einval(rs->ti, "Only one delta_disks argument pair allowed");
-
+			if (_test_and_set_flag(CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) {
+				rs->ti->error = "Only one delta_disks argument pair allowed";
+				return -EINVAL;
+			}
 			/* Ensure MAX_RAID_DEVICES and raid type minimal_devs! */
-			if (!_in_range(abs(value), 1, MAX_RAID_DEVICES - rt->minimal_devs))
-				return ti_error_einval(rs->ti, "Too many delta_disk requested");
+			if (!_in_range(abs(value), 1, MAX_RAID_DEVICES - rt->minimal_devs)) {
+				rs->ti->error = "Too many delta_disk requested";
+				return -EINVAL;
+			}
 
 			rs->delta_disks = value;
 		} else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_STRIPE_CACHE))) {
-			if (_test_and_set_flag(CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags))
-				return ti_error_einval(rs->ti, "Only one stripe_cache argument pair allowed");
+			if (_test_and_set_flag(CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags)) {
+				rs->ti->error = "Only one stripe_cache argument pair allowed";
+				return -EINVAL;
+			}
+
 			/*
 			 * In device-mapper, we specify things in sectors, but
 			 * MD records this value in kB
 			 */
 			value /= 2;
 
-			if (!rt_is_raid456(rt))
-				return ti_error_einval(rs->ti, "Inappropriate argument: stripe_cache");
-			if (raid5_set_cache_size(&rs->md, (int)value))
-				return ti_error_einval(rs->ti, "Bad stripe_cache size");
+			if (!rt_is_raid456(rt)) {
+				rs->ti->error = "Inappropriate argument: stripe_cache";
+				return -EINVAL;
+			}
+			if (raid5_set_cache_size(&rs->md, (int)value)) {
+				rs->ti->error = "Bad stripe_cache size";
+				return -EINVAL;
+			}
 
 		} else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_MIN_RECOVERY_RATE))) {
-			if (_test_and_set_flag(CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags))
-				return ti_error_einval(rs->ti, "Only one min_recovery_rate argument pair allowed");
-			if (value > INT_MAX)
-				return ti_error_einval(rs->ti, "min_recovery_rate out of range");
+			if (_test_and_set_flag(CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags)) {
+				rs->ti->error = "Only one min_recovery_rate argument pair allowed";
+				return -EINVAL;
+			}
+			if (value > INT_MAX) {
+				rs->ti->error = "min_recovery_rate out of range";
+				return -EINVAL;
+			}
 			rs->md.sync_speed_min = (int)value;
 		} else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_MAX_RECOVERY_RATE))) {
-			if (_test_and_set_flag(CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags))
-				return ti_error_einval(rs->ti, "Only one max_recovery_rate argument pair allowed");
-			if (value > INT_MAX)
-				return ti_error_einval(rs->ti, "max_recovery_rate out of range");
+			if (_test_and_set_flag(CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags)) {
+				rs->ti->error = "Only one max_recovery_rate argument pair allowed";
+				return -EINVAL;
+			}
+			if (value > INT_MAX) {
+				rs->ti->error = "max_recovery_rate out of range";
+				return -EINVAL;
+			}
 			rs->md.sync_speed_max = (int)value;
 		} else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_REGION_SIZE))) {
-			if (_test_and_set_flag(CTR_FLAG_REGION_SIZE, &rs->ctr_flags))
-				return ti_error_einval(rs->ti, "Only one region_size argument pair allowed");
+			if (_test_and_set_flag(CTR_FLAG_REGION_SIZE, &rs->ctr_flags)) {
+				rs->ti->error = "Only one region_size argument pair allowed";
+				return -EINVAL;
+			}
 
 			region_size = value;
 		} else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_RAID10_COPIES))) {
-			if (_test_and_set_flag(CTR_FLAG_RAID10_COPIES, &rs->ctr_flags))
-				return ti_error_einval(rs->ti, "Only one raid10_copies argument pair allowed");
+			if (_test_and_set_flag(CTR_FLAG_RAID10_COPIES, &rs->ctr_flags)) {
+				rs->ti->error = "Only one raid10_copies argument pair allowed";
+				return -EINVAL;
+			}
 
-			if (!_in_range(value, 2, rs->md.raid_disks))
-				return ti_error_einval(rs->ti, "Bad value for 'raid10_copies'");
+			if (!_in_range(value, 2, rs->md.raid_disks)) {
+				rs->ti->error = "Bad value for 'raid10_copies'";
+				return -EINVAL;
+			}
 
 			raid10_copies = value;
 		} else {
 			DMERR("Unable to parse RAID parameter: %s", key);
-			return ti_error_einval(rs->ti, "Unable to parse RAID parameters");
+			rs->ti->error = "Unable to parse RAID parameter";
+			return -EINVAL;
 		}
 	}
 
@@ -1214,21 +1287,29 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
 		return -EINVAL;
 
 	if (rt_is_raid10(rt)) {
-		if (raid10_copies > rs->md.raid_disks)
-			return ti_error_einval(rs->ti, "Not enough devices to satisfy specification");
+		if (raid10_copies > rs->md.raid_disks) {
+			rs->ti->error = "Not enough devices to satisfy specification";
+			return -EINVAL;
+		}
 
 		rs->md.new_layout = raid10_format_to_md_layout(rs, raid10_format, raid10_copies);
-		if (rs->md.new_layout < 0)
-			return ti_error_ret(rs->ti, "Error getting raid10 format", rs->md.new_layout);
+		if (rs->md.new_layout < 0) {
+			rs->ti->error = "Error getting raid10 format";
+			return rs->md.new_layout;
+		}
 
 		rt = get_raid_type_by_ll(10, rs->md.new_layout);
-		if (!rt)
-			return ti_error_einval(rs->ti, "Failed to recognize new raid10 layout");
+		if (!rt) {
+			rs->ti->error = "Failed to recognize new raid10 layout";
+			return -EINVAL;
+		}
 
 		if ((rt->algorithm == ALGORITHM_RAID10_DEFAULT ||
 		     rt->algorithm == ALGORITHM_RAID10_NEAR) &&
-		    _test_flag(CTR_FLAG_RAID10_USE_NEAR_SETS, rs->ctr_flags))
-			return ti_error_einval(rs->ti, "RAID10 format 'near' and 'raid10_use_near_sets' are incompatible");
+		    _test_flag(CTR_FLAG_RAID10_USE_NEAR_SETS, rs->ctr_flags)) {
+			rs->ti->error = "RAID10 format 'near' and 'raid10_use_near_sets' are incompatible";
+			return -EINVAL;
+		}
 
 		/* (Len * #mirrors) / #devices */
 		sectors_per_dev = rs->ti->len * raid10_copies;
@@ -1237,9 +1318,10 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
 		rs->md.layout = raid10_format_to_md_layout(rs, raid10_format, raid10_copies);
 		rs->md.new_layout = rs->md.layout;
 	} else if (!rt_is_raid1(rt) &&
-		   sector_div(sectors_per_dev,
-			      (rs->md.raid_disks - rt->parity_devs)))
-		return ti_error_einval(rs->ti, "Target length not divisible by number of data devices");
+		   sector_div(sectors_per_dev, (rs->md.raid_disks - rt->parity_devs))) {
+		rs->ti->error = "Target length not divisible by number of data devices";
+		return -EINVAL;
+	}
 
 	rs->raid10_copies = raid10_copies;
 	rs->md.dev_sectors = sectors_per_dev;
@@ -1420,7 +1502,8 @@ static int rs_check_takeover(struct raid_set *rs)
 		break;
 	}
 
-	return ti_error_einval(rs->ti, "takeover not possible");
+	rs->ti->error = "takeover not possible";
+	return -EINVAL;
 }
 
 /* True if @rs requested to be taken over */
@@ -1870,19 +1953,22 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
 		if (role != r->raid_disk) {
 			if (_is_raid10_near(mddev->layout)) {
 				if (mddev->raid_disks % _raid10_near_copies(mddev->layout) ||
-				    rs->raid_disks % rs->raid10_copies)
-					return ti_error_einval(rs->ti, "Cannot change raid10 near "
-							       "set to odd # of devices!");
+				    rs->raid_disks % rs->raid10_copies) {
+					rs->ti->error =
+						"Cannot change raid10 near set to odd # of devices!";
+					return -EINVAL;
+				}
 
 				sb2->array_position = cpu_to_le32(r->raid_disk);
 
 			} else if (!(rs_is_raid10(rs) && rt_is_raid0(rs->raid_type)) &&
 				   !(rs_is_raid0(rs) && rt_is_raid10(rs->raid_type)) &&
-				   !rt_is_raid1(rs->raid_type))
-				return ti_error_einval(rs->ti, "Cannot change device positions in raid set");
+				   !rt_is_raid1(rs->raid_type)) {
+				rs->ti->error = "Cannot change device positions in raid set";
+				return -EINVAL;
+			}
 
-			DMINFO("raid device #%d now at position #%d",
-			       role, r->raid_disk);
+			DMINFO("raid device #%d now at position #%d", role, r->raid_disk);
 		}
 
 		/*
@@ -2024,15 +2110,19 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
 	if (!freshest)
 		return 0;
 
-	if (validate_raid_redundancy(rs))
-		return ti_error_einval(rs->ti, "Insufficient redundancy to activate array");
+	if (validate_raid_redundancy(rs)) {
+		rs->ti->error = "Insufficient redundancy to activate array";
+		return -EINVAL;
+	}
 
 	/*
 	 * Validation of the freshest device provides the source of
 	 * validation for the remaining devices.
 	 */
-	if (super_validate(rs, freshest))
-		return ti_error_einval(rs->ti, "Unable to assemble array: Invalid superblocks");
+	if (super_validate(rs, freshest)) {
+		rs->ti->error = "Unable to assemble array: Invalid superblocks";
+		return -EINVAL;
+	}
 
 	rdev_for_each(rdev, mddev)
 		if ((rdev != freshest) && super_validate(rs, rdev))
@@ -2176,12 +2266,16 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
 
 	/* Must have <raid_type> */
 	arg = dm_shift_arg(&as);
-	if (!arg)
-		return ti_error_einval(rs->ti, "No arguments");
+	if (!arg) {
+		ti->error = "No arguments";
+		return -EINVAL;
+	}
 
 	rt = get_raid_type(arg);
-	if (!rt)
-		return ti_error_einval(rs->ti, "Unrecognised raid_type");
+	if (!rt) {
+		ti->error = "Unrecognised raid_type";
+		return -EINVAL;
+	}
 
 	/* Must have <#raid_params> */
 	if (dm_read_arg_group(_args, &as, &num_raid_params, &ti->error))
@@ -2194,8 +2288,10 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	if (dm_read_arg(_args + 1, &as_nrd, &num_raid_devs, &ti->error))
 		return -EINVAL;
 
-	if (!_in_range(num_raid_devs, 1, MAX_RAID_DEVICES))
-		return ti_error_einval(rs->ti, "Invalid number of supplied raid devices");
+	if (!_in_range(num_raid_devs, 1, MAX_RAID_DEVICES)) {
+		ti->error = "Invalid number of supplied raid devices";
+		return -EINVAL;
+	}
 
 	rs = context_alloc(ti, rt, num_raid_devs);
 	if (IS_ERR(rs))
@@ -2265,7 +2361,8 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	}
 
 	if (ti->len != rs->md.array_sectors) {
-		r = ti_error_einval(ti, "Array size does not match requested target length");
+		ti->error = "Array size does not match requested target length";
+		r = -EINVAL;
 		goto size_mismatch;
 	}
 	rs->callbacks.congested_fn = raid_is_congested;