path: root/drivers/md/dm-raid.c
author	Mike Snitzer <snitzer@redhat.com>	2016-06-02 12:27:46 -0400
committer	Mike Snitzer <snitzer@redhat.com>	2016-06-14 17:25:00 -0400
commit	4286325b4b0dc9d67e829e91c5377e070adaffec (patch)
tree	796ea4f62ea14b778e77afe5bb942488488e719e /drivers/md/dm-raid.c
parent	bb91a63fcc58d5a992fe5e92c6ff1e7f4d20664e (diff)
dm raid: remove all the bitops wrappers
Removes obfuscation that is of little value.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
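As a rough illustration of the conversion this patch performs (not part of the patch itself), the standalone C sketch below contrasts the removed wrapper style with plain bitops on an unsigned long flags word. The test_bit()/test_and_set_bit() helpers here are simplified, non-atomic userspace stand-ins for the kernel primitives the driver now calls directly; the flag names mirror the __CTR_FLAG_*/CTR_FLAG_* pairs introduced by the diff.

/*
 * Illustrative userspace sketch only -- not part of the patch.  The two
 * helpers are simplified, non-atomic stand-ins for the kernel's
 * test_bit()/test_and_set_bit(), which operate on unsigned long words;
 * that is why ctr_flags changes from uint32_t to unsigned long.
 */
#include <stdbool.h>
#include <stdio.h>

#define __CTR_FLAG_SYNC		0	/* bit number, as in the new scheme */
#define __CTR_FLAG_NOSYNC	1
#define CTR_FLAG_SYNC		(1UL << __CTR_FLAG_SYNC)	/* mask form */
#define CTR_FLAG_NOSYNC		(1UL << __CTR_FLAG_NOSYNC)

static bool test_bit(int nr, const unsigned long *addr)
{
	return *addr & (1UL << nr);
}

static bool test_and_set_bit(int nr, unsigned long *addr)
{
	bool was_set = test_bit(nr, addr);

	*addr |= 1UL << nr;
	return was_set;
}

int main(void)
{
	unsigned long ctr_flags = 0;

	/* Before: _test_and_set_flag(CTR_FLAG_SYNC, &ctr_flags) */
	/* After:  standard bitops keyed by the bit number       */
	if (test_and_set_bit(__CTR_FLAG_SYNC, &ctr_flags))
		printf("Only one 'sync' argument allowed\n");

	/* The mask macros remain for tests spanning several flags */
	if (ctr_flags & (CTR_FLAG_SYNC | CTR_FLAG_NOSYNC))
		printf("a sync-related flag is set\n");

	return 0;
}

In the diff below, single-flag updates and tests go through the bit numbers (__CTR_FLAG_*), while the derived CTR_FLAG_* masks remain for combined tests such as rs_check_for_invalid_flags() and the table-line output in raid_status().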
Diffstat (limited to 'drivers/md/dm-raid.c')
-rw-r--r--	drivers/md/dm-raid.c	214
1 file changed, 89 insertions, 125 deletions
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 039db81c9d53..32c3bae69aae 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -46,27 +46,46 @@ struct raid_dev {
 };
 
 /*
- * Flags for rs->ctr_flags field.
+ * Bits for establishing rs->ctr_flags
  *
  * 1 = no flag value
  * 2 = flag with value
  */
-#define CTR_FLAG_SYNC			0x1	/* 1 */ /* Not with raid0! */
-#define CTR_FLAG_NOSYNC			0x2	/* 1 */ /* Not with raid0! */
-#define CTR_FLAG_REBUILD		0x4	/* 2 */ /* Not with raid0! */
-#define CTR_FLAG_DAEMON_SLEEP		0x8	/* 2 */ /* Not with raid0! */
-#define CTR_FLAG_MIN_RECOVERY_RATE	0x10	/* 2 */ /* Not with raid0! */
-#define CTR_FLAG_MAX_RECOVERY_RATE	0x20	/* 2 */ /* Not with raid0! */
-#define CTR_FLAG_MAX_WRITE_BEHIND	0x40	/* 2 */ /* Only with raid1! */
-#define CTR_FLAG_WRITE_MOSTLY		0x80	/* 2 */ /* Only with raid1! */
-#define CTR_FLAG_STRIPE_CACHE		0x100	/* 2 */ /* Only with raid4/5/6! */
-#define CTR_FLAG_REGION_SIZE		0x200	/* 2 */ /* Not with raid0! */
-#define CTR_FLAG_RAID10_COPIES		0x400	/* 2 */ /* Only with raid10 */
-#define CTR_FLAG_RAID10_FORMAT		0x800	/* 2 */ /* Only with raid10 */
+#define __CTR_FLAG_SYNC			0	/* 1 */ /* Not with raid0! */
+#define __CTR_FLAG_NOSYNC		1	/* 1 */ /* Not with raid0! */
+#define __CTR_FLAG_REBUILD		2	/* 2 */ /* Not with raid0! */
+#define __CTR_FLAG_DAEMON_SLEEP		3	/* 2 */ /* Not with raid0! */
+#define __CTR_FLAG_MIN_RECOVERY_RATE	4	/* 2 */ /* Not with raid0! */
+#define __CTR_FLAG_MAX_RECOVERY_RATE	5	/* 2 */ /* Not with raid0! */
+#define __CTR_FLAG_MAX_WRITE_BEHIND	6	/* 2 */ /* Only with raid1! */
+#define __CTR_FLAG_WRITE_MOSTLY		7	/* 2 */ /* Only with raid1! */
+#define __CTR_FLAG_STRIPE_CACHE		8	/* 2 */ /* Only with raid4/5/6! */
+#define __CTR_FLAG_REGION_SIZE		9	/* 2 */ /* Not with raid0! */
+#define __CTR_FLAG_RAID10_COPIES	10	/* 2 */ /* Only with raid10 */
+#define __CTR_FLAG_RAID10_FORMAT	11	/* 2 */ /* Only with raid10 */
 /* New for v1.9.0 */
-#define CTR_FLAG_DELTA_DISKS		0x1000	/* 2 */ /* Only with reshapable raid4/5/6/10! */
-#define CTR_FLAG_DATA_OFFSET		0x2000	/* 2 */ /* Only with reshapable raid4/5/6/10! */
-#define CTR_FLAG_RAID10_USE_NEAR_SETS	0x4000	/* 2 */ /* Only with raid10! */
+#define __CTR_FLAG_DELTA_DISKS		12	/* 2 */ /* Only with reshapable raid4/5/6/10! */
+#define __CTR_FLAG_DATA_OFFSET		13	/* 2 */ /* Only with reshapable raid4/5/6/10! */
+#define __CTR_FLAG_RAID10_USE_NEAR_SETS	14	/* 2 */ /* Only with raid10! */
+
+/*
+ * Flags for rs->ctr_flags field.
+ */
+#define CTR_FLAG_SYNC			(1 << __CTR_FLAG_SYNC)
+#define CTR_FLAG_NOSYNC			(1 << __CTR_FLAG_NOSYNC)
+#define CTR_FLAG_REBUILD		(1 << __CTR_FLAG_REBUILD)
+#define CTR_FLAG_DAEMON_SLEEP		(1 << __CTR_FLAG_DAEMON_SLEEP)
+#define CTR_FLAG_MIN_RECOVERY_RATE	(1 << __CTR_FLAG_MIN_RECOVERY_RATE)
+#define CTR_FLAG_MAX_RECOVERY_RATE	(1 << __CTR_FLAG_MAX_RECOVERY_RATE)
+#define CTR_FLAG_MAX_WRITE_BEHIND	(1 << __CTR_FLAG_MAX_WRITE_BEHIND)
+#define CTR_FLAG_WRITE_MOSTLY		(1 << __CTR_FLAG_WRITE_MOSTLY)
+#define CTR_FLAG_STRIPE_CACHE		(1 << __CTR_FLAG_STRIPE_CACHE)
+#define CTR_FLAG_REGION_SIZE		(1 << __CTR_FLAG_REGION_SIZE)
+#define CTR_FLAG_RAID10_COPIES		(1 << __CTR_FLAG_RAID10_COPIES)
+#define CTR_FLAG_RAID10_FORMAT		(1 << __CTR_FLAG_RAID10_FORMAT)
+#define CTR_FLAG_DELTA_DISKS		(1 << __CTR_FLAG_DELTA_DISKS)
+#define CTR_FLAG_DATA_OFFSET		(1 << __CTR_FLAG_DATA_OFFSET)
+#define CTR_FLAG_RAID10_USE_NEAR_SETS	(1 << __CTR_FLAG_RAID10_USE_NEAR_SETS)
 
 /*
  * Definitions of various constructor flags to
@@ -158,8 +177,8 @@ struct raid_set {
 	struct dm_target *ti;
 
 	uint32_t bitmap_loaded;
-	uint32_t ctr_flags;
-	uint32_t runtime_flags;
+	unsigned long ctr_flags;
+	unsigned long runtime_flags;
 
 	uint64_t rebuild_disks[DISKS_ARRAY_ELEMS];
 
@@ -249,65 +268,9 @@ static bool __within_range(long v, long min, long max)
 	return v >= min && v <= max;
 }
 
-/* ctr flag bit manipulation... */
-/* Set single @flag in @flags */
-static void _set_flag(uint32_t flag, uint32_t *flags)
-{
-	WARN_ON_ONCE(hweight32(flag) != 1);
-	*flags |= flag;
-}
-
-/* Clear single @flag in @flags */
-static void _clear_flag(uint32_t flag, uint32_t *flags)
-{
-	WARN_ON_ONCE(hweight32(flag) != 1);
-	*flags &= ~flag;
-}
-
-/* Test single @flag in @flags */
-static bool _test_flag(uint32_t flag, uint32_t flags)
-{
-	WARN_ON_ONCE(hweight32(flag) != 1);
-	return (flag & flags) ? true : false;
-}
-
-/* Test multiple @flags in @all_flags */
-static bool _test_flags(uint32_t flags, uint32_t all_flags)
-{
-	return (flags & all_flags) ? true : false;
-}
-
-/* Clear (multiple) @flags in @all_flags */
-static void _clear_flags(uint32_t flags, uint32_t *all_flags)
-{
-	*all_flags &= ~flags;
-}
-
-/* Return true if single @flag is set in @*flags, else set it and return false */
-static bool _test_and_set_flag(uint32_t flag, uint32_t *flags)
-{
-	if (_test_flag(flag, *flags))
-		return true;
-
-	_set_flag(flag, flags);
-	return false;
-}
-
-/* Return true if single @flag is set in @*flags and clear it, else return false */
-static bool _test_and_clear_flag(uint32_t flag, uint32_t *flags)
-{
-	if (_test_flag(flag, *flags)) {
-		_clear_flag(flag, flags);
-		return true;
-	}
-
-	return false;
-}
-/* ...ctr and runtime flag bit manipulation */
-
 /* All table line arguments are defined here */
 static struct arg_name_flag {
-	const uint32_t flag;
+	const unsigned long flag;
 	const char *name;
 } _arg_name_flags[] = {
 	{ CTR_FLAG_SYNC, "sync"},
@@ -334,7 +297,7 @@ static const char *dm_raid_arg_name_by_flag(const uint32_t flag)
 		struct arg_name_flag *anf = _arg_name_flags + ARRAY_SIZE(_arg_name_flags);
 
 		while (anf-- > _arg_name_flags)
-			if (_test_flag(flag, anf->flag))
+			if (flag & anf->flag)
 				return anf->name;
 
 	} else
@@ -425,8 +388,8 @@ static uint32_t _invalid_flags(struct raid_set *rs)
  */
 static int rs_check_for_invalid_flags(struct raid_set *rs)
 {
-	if (_test_flags(rs->ctr_flags, _invalid_flags(rs))) {
-		rs->ti->error = "Invalid flag combined";
+	if (rs->ctr_flags & _invalid_flags(rs)) {
+		rs->ti->error = "Invalid flags combination";
 		return -EINVAL;
 	}
 
@@ -533,13 +496,13 @@ static int raid10_format_to_md_layout(struct raid_set *rs,
 	else if (algorithm == ALGORITHM_RAID10_OFFSET) {
 		f = copies;
 		r = RAID10_OFFSET;
-		if (!_test_flag(CTR_FLAG_RAID10_USE_NEAR_SETS, rs->ctr_flags))
+		if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags))
 			r |= RAID10_USE_FAR_SETS;
 
 	} else if (algorithm == ALGORITHM_RAID10_FAR) {
 		f = copies;
 		r = !RAID10_OFFSET;
-		if (!_test_flag(CTR_FLAG_RAID10_USE_NEAR_SETS, rs->ctr_flags))
+		if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags))
 			r |= RAID10_USE_FAR_SETS;
 
 	} else
@@ -1061,7 +1024,7 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
 		}
 
 		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_NOSYNC))) {
-			if (_test_and_set_flag(CTR_FLAG_NOSYNC, &rs->ctr_flags)) {
+			if (test_and_set_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) {
 				rs->ti->error = "Only one 'nosync' argument allowed";
 				return -EINVAL;
 			}
@@ -1069,7 +1032,7 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
 			continue;
 		}
 		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_SYNC))) {
-			if (_test_and_set_flag(CTR_FLAG_SYNC, &rs->ctr_flags)) {
+			if (test_and_set_bit(__CTR_FLAG_SYNC, &rs->ctr_flags)) {
 				rs->ti->error = "Only one 'sync' argument allowed";
 				return -EINVAL;
 			}
@@ -1077,7 +1040,7 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
 			continue;
 		}
 		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_USE_NEAR_SETS))) {
-			if (_test_and_set_flag(CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) {
+			if (test_and_set_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) {
 				rs->ti->error = "Only one 'raid10_use_new_sets' argument allowed";
 				return -EINVAL;
 			}
@@ -1096,7 +1059,7 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
 		 */
 
 		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_FORMAT))) {
-			if (_test_and_set_flag(CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags)) {
+			if (test_and_set_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags)) {
 				rs->ti->error = "Only one 'raid10_format' argument pair allowed";
 				return -EINVAL;
 			}
@@ -1137,7 +1100,7 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
 			clear_bit(In_sync, &rd->rdev.flags);
 			clear_bit(Faulty, &rd->rdev.flags);
 			rd->rdev.recovery_offset = 0;
-			_set_flag(CTR_FLAG_REBUILD, &rs->ctr_flags);
+			set_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags);
 		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_WRITE_MOSTLY))) {
 			if (!rt_is_raid1(rt)) {
 				rs->ti->error = "write_mostly option is only valid for RAID1";
@@ -1150,14 +1113,14 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
 			}
 
 			set_bit(WriteMostly, &rs->dev[value].rdev.flags);
-			_set_flag(CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags);
+			set_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags);
 		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MAX_WRITE_BEHIND))) {
 			if (!rt_is_raid1(rt)) {
 				rs->ti->error = "max_write_behind option is only valid for RAID1";
 				return -EINVAL;
 			}
 
-			if (_test_and_set_flag(CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags)) {
+			if (test_and_set_bit(__CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags)) {
 				rs->ti->error = "Only one max_write_behind argument pair allowed";
 				return -EINVAL;
 			}
@@ -1174,7 +1137,7 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
 
 			rs->md.bitmap_info.max_write_behind = value;
 		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DAEMON_SLEEP))) {
-			if (_test_and_set_flag(CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags)) {
+			if (test_and_set_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags)) {
 				rs->ti->error = "Only one daemon_sleep argument pair allowed";
 				return -EINVAL;
 			}
@@ -1185,7 +1148,7 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
 			rs->md.bitmap_info.daemon_sleep = value;
 		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DATA_OFFSET))) {
 			/* Userspace passes new data_offset after having extended the the data image LV */
-			if (_test_and_set_flag(CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) {
+			if (test_and_set_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) {
 				rs->ti->error = "Only one data_offset argument pair allowed";
 				return -EINVAL;
 			}
@@ -1197,7 +1160,7 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
 			rs->data_offset = value;
 		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DELTA_DISKS))) {
 			/* Define the +/-# of disks to add to/remove from the given raid set */
-			if (_test_and_set_flag(CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) {
+			if (test_and_set_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) {
 				rs->ti->error = "Only one delta_disks argument pair allowed";
 				return -EINVAL;
 			}
@@ -1209,7 +1172,7 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
 
 			rs->delta_disks = value;
 		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_STRIPE_CACHE))) {
-			if (_test_and_set_flag(CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags)) {
+			if (test_and_set_bit(__CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags)) {
 				rs->ti->error = "Only one stripe_cache argument pair allowed";
 				return -EINVAL;
 			}
@@ -1230,7 +1193,7 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
 			}
 
 		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MIN_RECOVERY_RATE))) {
-			if (_test_and_set_flag(CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags)) {
+			if (test_and_set_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags)) {
 				rs->ti->error = "Only one min_recovery_rate argument pair allowed";
 				return -EINVAL;
 			}
@@ -1240,7 +1203,7 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
 			}
 			rs->md.sync_speed_min = (int)value;
 		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MAX_RECOVERY_RATE))) {
-			if (_test_and_set_flag(CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags)) {
+			if (test_and_set_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags)) {
 				rs->ti->error = "Only one max_recovery_rate argument pair allowed";
 				return -EINVAL;
 			}
@@ -1250,14 +1213,14 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
 			}
 			rs->md.sync_speed_max = (int)value;
 		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_REGION_SIZE))) {
-			if (_test_and_set_flag(CTR_FLAG_REGION_SIZE, &rs->ctr_flags)) {
+			if (test_and_set_bit(__CTR_FLAG_REGION_SIZE, &rs->ctr_flags)) {
 				rs->ti->error = "Only one region_size argument pair allowed";
 				return -EINVAL;
 			}
 
 			region_size = value;
 		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_COPIES))) {
-			if (_test_and_set_flag(CTR_FLAG_RAID10_COPIES, &rs->ctr_flags)) {
+			if (test_and_set_bit(__CTR_FLAG_RAID10_COPIES, &rs->ctr_flags)) {
 				rs->ti->error = "Only one raid10_copies argument pair allowed";
 				return -EINVAL;
 			}
@@ -1306,7 +1269,7 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
 
 	if ((rt->algorithm == ALGORITHM_RAID10_DEFAULT ||
 	     rt->algorithm == ALGORITHM_RAID10_NEAR) &&
-	    _test_flag(CTR_FLAG_RAID10_USE_NEAR_SETS, rs->ctr_flags)) {
+	    test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) {
 		rs->ti->error = "RAID10 format 'near' and 'raid10_use_near_sets' are incompatible";
 		return -EINVAL;
 	}
@@ -1624,7 +1587,7 @@ static void sb_retrieve_failed_devices(struct dm_raid_superblock *sb, uint64_t *
 	failed_devices[0] = le64_to_cpu(sb->failed_devices);
 	memset(failed_devices + 1, 0, sizeof(sb->extended_failed_devices));
 
-	if (_test_flag(FEATURE_FLAG_SUPPORTS_V190, le32_to_cpu(sb->compat_features))) {
+	if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) {
 		int i = ARRAY_SIZE(sb->extended_failed_devices);
 
 		while (i--)
@@ -1702,9 +1665,10 @@ static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
 
 		if (mddev->delta_disks < 0 || mddev->reshape_backwards)
 			sb->flags |= cpu_to_le32(SB_FLAG_RESHAPE_BACKWARDS);
-	} else
-		/* Flag no reshape */
-		_clear_flags(cpu_to_le32(SB_FLAG_RESHAPE_ACTIVE|SB_FLAG_RESHAPE_BACKWARDS), &sb->flags);
+	} else {
+		/* Clear reshape flags */
+		sb->flags &= ~(cpu_to_le32(SB_FLAG_RESHAPE_ACTIVE|SB_FLAG_RESHAPE_BACKWARDS));
+	}
 
 	sb->array_sectors = cpu_to_le64(mddev->array_sectors);
 	sb->data_offset = cpu_to_le64(rdev->data_offset);
@@ -1799,7 +1763,7 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
 	 * Reshaping is supported, e.g. reshape_position is valid
 	 * in superblock and superblock content is authoritative.
 	 */
-	if (_test_flag(FEATURE_FLAG_SUPPORTS_V190, le32_to_cpu(sb->compat_features))) {
+	if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) {
 		/* Superblock is authoritative wrt given raid set layout! */
 		mddev->raid_disks = le32_to_cpu(sb->num_devices);
 		mddev->level = le32_to_cpu(sb->level);
@@ -1812,14 +1776,14 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
 		mddev->array_sectors = le64_to_cpu(sb->array_sectors);
 
 		/* raid was reshaping and got interrupted */
-		if (_test_flag(SB_FLAG_RESHAPE_ACTIVE, le32_to_cpu(sb->flags))) {
-			if (_test_flag(CTR_FLAG_DELTA_DISKS, rs->ctr_flags)) {
+		if (le32_to_cpu(sb->flags) & SB_FLAG_RESHAPE_ACTIVE) {
+			if (test_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) {
 				DMERR("Reshape requested but raid set is still reshaping");
 				return -EINVAL;
 			}
 
 			if (mddev->delta_disks < 0 ||
-			    (!mddev->delta_disks && _test_flag(SB_FLAG_RESHAPE_BACKWARDS, le32_to_cpu(sb->flags))))
+			    (!mddev->delta_disks && (le32_to_cpu(sb->flags) & SB_FLAG_RESHAPE_BACKWARDS)))
 				mddev->reshape_backwards = 1;
 			else
 				mddev->reshape_backwards = 0;
@@ -1864,7 +1828,7 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
 		rs_set_new(rs);
 	}
 
-	if (!_test_flag(CTR_FLAG_NOSYNC, rs->ctr_flags))
+	if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
 		mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);
 
 	/*
@@ -1902,7 +1866,7 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
 	if (new_devs == rs->raid_disks) {
 		DMINFO("Superblocks created for new raid set");
 		set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
-		_set_flag(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
+		set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
 		mddev->recovery_cp = 0;
 	} else if (new_devs && new_devs != rs->raid_disks && !rebuilds) {
 		DMERR("New device injected into existing raid set without "
@@ -2065,7 +2029,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
 		 * that the "sync" directive is disallowed during the
 		 * reshape.
 		 */
-		if (_test_flag(CTR_FLAG_SYNC, rs->ctr_flags))
+		if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
 			continue;
 
 		if (!rdev->meta_bdev)
@@ -2342,7 +2306,7 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
 			return r;
 
 		/* Tell preresume to update superblocks with new layout */
-		_set_flag(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
+		set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
 		rs_set_new(rs);
 	} else
 		rs_set_cur(rs);
@@ -2553,7 +2517,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
 		/* Access most recent mddev properties for status output */
 		smp_rmb();
 		/* Get sensible max sectors even if raid set not yet started */
-		resync_max_sectors = _test_flag(RT_FLAG_RS_PRERESUMED, rs->runtime_flags) ?
+		resync_max_sectors = test_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags) ?
 				      mddev->resync_max_sectors : mddev->dev_sectors;
 		progress = rs_get_progress(rs, resync_max_sectors, &array_in_sync);
 		resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ?
@@ -2624,29 +2588,29 @@ static void raid_status(struct dm_target *ti, status_type_t type,
 			  hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_ONE_ARG) * 2;
 		/* Emit table line */
 		DMEMIT("%s %u %u", rs->raid_type->name, raid_param_cnt, mddev->new_chunk_sectors);
-		if (_test_flag(CTR_FLAG_RAID10_FORMAT, rs->ctr_flags))
+		if (test_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags))
 			DMEMIT(" %s %s", dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_FORMAT),
 			       raid10_md_layout_to_format(mddev->layout));
-		if (_test_flag(CTR_FLAG_RAID10_COPIES, rs->ctr_flags))
+		if (test_bit(__CTR_FLAG_RAID10_COPIES, &rs->ctr_flags))
 			DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_COPIES),
 			       raid10_md_layout_to_copies(mddev->layout));
-		if (_test_flag(CTR_FLAG_NOSYNC, rs->ctr_flags))
+		if (test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
 			DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_NOSYNC));
-		if (_test_flag(CTR_FLAG_SYNC, rs->ctr_flags))
+		if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
 			DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_SYNC));
-		if (_test_flag(CTR_FLAG_REGION_SIZE, rs->ctr_flags))
+		if (test_bit(__CTR_FLAG_REGION_SIZE, &rs->ctr_flags))
 			DMEMIT(" %s %llu", dm_raid_arg_name_by_flag(CTR_FLAG_REGION_SIZE),
 			       (unsigned long long) to_sector(mddev->bitmap_info.chunksize));
-		if (_test_flag(CTR_FLAG_DATA_OFFSET, rs->ctr_flags))
+		if (test_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags))
 			DMEMIT(" %s %llu", dm_raid_arg_name_by_flag(CTR_FLAG_DATA_OFFSET),
 			       (unsigned long long) rs->data_offset);
-		if (_test_flag(CTR_FLAG_DAEMON_SLEEP, rs->ctr_flags))
+		if (test_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags))
 			DMEMIT(" %s %lu", dm_raid_arg_name_by_flag(CTR_FLAG_DAEMON_SLEEP),
 			       mddev->bitmap_info.daemon_sleep);
-		if (_test_flag(CTR_FLAG_DELTA_DISKS, rs->ctr_flags))
+		if (test_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags))
 			DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_DELTA_DISKS),
 			       mddev->delta_disks);
-		if (_test_flag(CTR_FLAG_STRIPE_CACHE, rs->ctr_flags))
+		if (test_bit(__CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags))
 			DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_STRIPE_CACHE),
 			       max_nr_stripes);
 		rdev_for_each(rdev, mddev)
@@ -2657,13 +2621,13 @@ static void raid_status(struct dm_target *ti, status_type_t type,
 			if (test_bit(WriteMostly, &rdev->flags))
 				DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_WRITE_MOSTLY),
 				       rdev->raid_disk);
-		if (_test_flag(CTR_FLAG_MAX_WRITE_BEHIND, rs->ctr_flags))
+		if (test_bit(__CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags))
 			DMEMIT(" %s %lu", dm_raid_arg_name_by_flag(CTR_FLAG_MAX_WRITE_BEHIND),
 			       mddev->bitmap_info.max_write_behind);
-		if (_test_flag(CTR_FLAG_MAX_RECOVERY_RATE, rs->ctr_flags))
+		if (test_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags))
 			DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_MAX_RECOVERY_RATE),
 			       mddev->sync_speed_max);
-		if (_test_flag(CTR_FLAG_MIN_RECOVERY_RATE, rs->ctr_flags))
+		if (test_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags))
 			DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_MIN_RECOVERY_RATE),
 			       mddev->sync_speed_min);
 		DMEMIT(" %d", rs->raid_disks);
@@ -2835,7 +2799,7 @@ static int _bitmap_load(struct raid_set *rs)
 
 	/* Try loading the bitmap unless "raid0", which does not have one */
 	if (!rs_is_raid0(rs) &&
-	    !_test_and_set_flag(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags)) {
+	    !test_and_set_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags)) {
 		r = bitmap_load(&rs->md);
 		if (r)
 			DMERR("Failed to load bitmap");
@@ -2850,7 +2814,7 @@ static int raid_preresume(struct dm_target *ti)
 	struct mddev *mddev = &rs->md;
 
 	/* This is a resume after a suspend of the set -> it's already started */
-	if (_test_and_set_flag(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags))
+	if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags))
 		return 0;
 
 	/*
@@ -2865,7 +2829,7 @@ static int raid_preresume(struct dm_target *ti)
 	 * Have to switch to readwrite and back in order to
 	 * allow for the superblock updates.
 	 */
-	if (_test_and_clear_flag(RT_FLAG_UPDATE_SBS, &rs->runtime_flags)) {
+	if (test_and_clear_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags)) {
 		set_bit(MD_CHANGE_DEVS, &mddev->flags);
 		mddev->ro = 0;
 		md_update_sb(mddev, 1);
@@ -2887,7 +2851,7 @@ static void raid_resume(struct dm_target *ti)
 	struct raid_set *rs = ti->private;
 	struct mddev *mddev = &rs->md;
 
-	if (_test_and_set_flag(RT_FLAG_RS_RESUMED, &rs->runtime_flags)) {
+	if (test_and_set_bit(RT_FLAG_RS_RESUMED, &rs->runtime_flags)) {
 		/*
 		 * A secondary resume while the device is active.
 		 * Take this opportunity to check whether any failed