Diffstat (limited to 'drivers/md/dm-raid.c')

 drivers/md/dm-raid.c | 621 ++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 571 insertions(+), 50 deletions(-)
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index e5d8904fc8f6..a002dd85db1e 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -8,19 +8,19 @@
 #include <linux/slab.h>
 
 #include "md.h"
+#include "raid1.h"
 #include "raid5.h"
-#include "dm.h"
 #include "bitmap.h"
 
+#include <linux/device-mapper.h>
+
 #define DM_MSG_PREFIX "raid"
 
 /*
- * If the MD doesn't support MD_SYNC_STATE_FORCED yet, then
- * make it so the flag doesn't set anything.
+ * The following flags are used by dm-raid.c to set up the array state.
+ * They must be cleared before md_run is called.
  */
-#ifndef MD_SYNC_STATE_FORCED
-#define MD_SYNC_STATE_FORCED 0
-#endif
+#define FirstUse 10		/* rdev flag */
 
 struct raid_dev {
 	/*
@@ -43,14 +43,15 @@ struct raid_dev {
 /*
  * Flags for rs->print_flags field.
  */
-#define DMPF_DAEMON_SLEEP      0x1
-#define DMPF_MAX_WRITE_BEHIND  0x2
-#define DMPF_SYNC              0x4
-#define DMPF_NOSYNC            0x8
-#define DMPF_STRIPE_CACHE      0x10
-#define DMPF_MIN_RECOVERY_RATE 0x20
-#define DMPF_MAX_RECOVERY_RATE 0x40
-
+#define DMPF_SYNC              0x1
+#define DMPF_NOSYNC            0x2
+#define DMPF_REBUILD           0x4
+#define DMPF_DAEMON_SLEEP      0x8
+#define DMPF_MIN_RECOVERY_RATE 0x10
+#define DMPF_MAX_RECOVERY_RATE 0x20
+#define DMPF_MAX_WRITE_BEHIND  0x40
+#define DMPF_STRIPE_CACHE      0x80
+#define DMPF_REGION_SIZE       0X100
 struct raid_set {
 	struct dm_target *ti;
 
@@ -72,6 +73,7 @@ static struct raid_type {
 	const unsigned level;		/* RAID level. */
 	const unsigned algorithm;	/* RAID algorithm. */
 } raid_types[] = {
+	{"raid1",    "RAID1 (mirroring)",		0, 2, 1, 0 /* NONE */},
 	{"raid4",    "RAID4 (dedicated parity disk)",	1, 2, 5, ALGORITHM_PARITY_0},
 	{"raid5_la", "RAID5 (left asymmetric)",		1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC},
 	{"raid5_ra", "RAID5 (right asymmetric)",	1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC},
@@ -105,7 +107,8 @@ static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *ra
 	}
 
 	sectors_per_dev = ti->len;
-	if (sector_div(sectors_per_dev, (raid_devs - raid_type->parity_devs))) {
+	if ((raid_type->level > 1) &&
+	    sector_div(sectors_per_dev, (raid_devs - raid_type->parity_devs))) {
 		ti->error = "Target length not divisible by number of data devices";
 		return ERR_PTR(-EINVAL);
 	}
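
For context: the kernel's sector_div() divides its sector_t argument in place and returns the remainder, so the test above both computes sectors_per_dev and rejects target lengths that do not split evenly across the data devices. The new (raid_type->level > 1) guard skips the check for RAID1, where every mirror leg carries the full target length. A rough user-space model of the check (invented numbers; uint64_t standing in for sector_t):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's sector_div(): divide in place, return remainder. */
static uint32_t sector_div_model(uint64_t *n, uint32_t base)
{
	uint32_t rem = *n % base;
	*n /= base;
	return rem;
}

int main(void)
{
	uint64_t sectors_per_dev = 1000;		/* hypothetical ti->len */
	unsigned raid_devs = 4, parity_devs = 1;	/* e.g. a 4-disk raid5 */

	if (sector_div_model(&sectors_per_dev, raid_devs - parity_devs))
		printf("rejected: length not divisible by data devices\n");
	else
		printf("%llu sectors per device\n",
		       (unsigned long long)sectors_per_dev);
	return 0;
}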
@@ -147,9 +150,16 @@ static void context_free(struct raid_set *rs)
 {
 	int i;
 
-	for (i = 0; i < rs->md.raid_disks; i++)
+	for (i = 0; i < rs->md.raid_disks; i++) {
+		if (rs->dev[i].meta_dev)
+			dm_put_device(rs->ti, rs->dev[i].meta_dev);
+		if (rs->dev[i].rdev.sb_page)
+			put_page(rs->dev[i].rdev.sb_page);
+		rs->dev[i].rdev.sb_page = NULL;
+		rs->dev[i].rdev.sb_loaded = 0;
 		if (rs->dev[i].data_dev)
 			dm_put_device(rs->ti, rs->dev[i].data_dev);
+	}
 
 	kfree(rs);
 }
@@ -159,7 +169,16 @@ static void context_free(struct raid_set *rs)
  *  <meta_dev>: meta device name or '-' if missing
  *  <data_dev>: data device name or '-' if missing
  *
- * This code parses those words.
+ * The following are permitted:
+ *    - -
+ *    - <data_dev>
+ *    <meta_dev> <data_dev>
+ *
+ * The following is not allowed:
+ *    <meta_dev> -
+ *
+ * This code parses those words.  If there is a failure,
+ * the caller must use context_free to unwind the operations.
  */
 static int dev_parms(struct raid_set *rs, char **argv)
 {
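
In other words, each device slot may be written '- -' (no devices), '- <data_dev>' (data with no metadata device), or '<meta_dev> <data_dev>'; a metadata device without its data device is rejected below by the "No data device supplied with metadata device" check. For a hypothetical two-leg raid1 where only the second leg carries a metadata device, the device portion of the table line would read (device names invented for illustration): 2 - /dev/sdb1 /dev/sdc1 /dev/sdc2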
@@ -182,8 +201,16 @@ static int dev_parms(struct raid_set *rs, char **argv)
 		rs->dev[i].rdev.mddev = &rs->md;
 
 		if (strcmp(argv[0], "-")) {
-			rs->ti->error = "Metadata devices not supported";
-			return -EINVAL;
+			ret = dm_get_device(rs->ti, argv[0],
+					    dm_table_get_mode(rs->ti->table),
+					    &rs->dev[i].meta_dev);
+			rs->ti->error = "RAID metadata device lookup failure";
+			if (ret)
+				return ret;
+
+			rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL);
+			if (!rs->dev[i].rdev.sb_page)
+				return -ENOMEM;
 		}
 
 		if (!strcmp(argv[1], "-")) {
@@ -193,6 +220,10 @@ static int dev_parms(struct raid_set *rs, char **argv)
 				return -EINVAL;
 			}
 
+			rs->ti->error = "No data device supplied with metadata device";
+			if (rs->dev[i].meta_dev)
+				return -EINVAL;
+
 			continue;
 		}
 
@@ -204,6 +235,10 @@ static int dev_parms(struct raid_set *rs, char **argv)
 			return ret;
 		}
 
+		if (rs->dev[i].meta_dev) {
+			metadata_available = 1;
+			rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev;
+		}
 		rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
 		list_add(&rs->dev[i].rdev.same_set, &rs->md.disks);
 		if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
@@ -235,33 +270,109 @@ static int dev_parms(struct raid_set *rs, char **argv)
 }
 
 /*
+ * validate_region_size
+ * @rs
+ * @region_size: region size in sectors.  If 0, pick a size (4MiB default).
+ *
+ * Set rs->md.bitmap_info.chunksize (which really refers to 'region size').
+ * Ensure that (ti->len/region_size < 2^21) - required by MD bitmap.
+ *
+ * Returns: 0 on success, -EINVAL on failure.
+ */
+static int validate_region_size(struct raid_set *rs, unsigned long region_size)
+{
+	unsigned long min_region_size = rs->ti->len / (1 << 21);
+
+	if (!region_size) {
+		/*
+		 * Choose a reasonable default.  All figures in sectors.
+		 */
+		if (min_region_size > (1 << 13)) {
+			region_size = min_region_size;
+			DMINFO("Choosing default region size of %lu sectors",
+			       region_size);
+		} else {
+			DMINFO("Choosing default region size of 4MiB");
+			region_size = 1 << 13; /* sectors */
+		}
+	} else {
+		/*
+		 * Validate user-supplied value.
+		 */
+		if (region_size > rs->ti->len) {
+			rs->ti->error = "Supplied region size is too large";
+			return -EINVAL;
+		}
+
+		if (region_size < min_region_size) {
+			DMERR("Supplied region_size (%lu sectors) below minimum (%lu)",
+			      region_size, min_region_size);
+			rs->ti->error = "Supplied region size is too small";
+			return -EINVAL;
+		}
+
+		if (!is_power_of_2(region_size)) {
+			rs->ti->error = "Region size is not a power of 2";
+			return -EINVAL;
+		}
+
+		if (region_size < rs->md.chunk_sectors) {
+			rs->ti->error = "Region size is smaller than the chunk size";
+			return -EINVAL;
+		}
+	}
+
+	/*
+	 * Convert sectors to bytes.
+	 */
+	rs->md.bitmap_info.chunksize = (region_size << 9);
+
+	return 0;
+}
+
+/*
  * Possible arguments are...
- * RAID456:
  *	<chunk_size> [optional_args]
  *
- * Optional args:
- *    [[no]sync]			Force or prevent recovery of the entire array
+ * Argument definitions
+ *    <chunk_size>			The number of sectors per disk that
+ *					will form the "stripe"
+ *    [[no]sync]			Force or prevent recovery of the
+ *					entire array
 *    [rebuild <idx>]			Rebuild the drive indicated by the index
- *    [daemon_sleep <ms>]		Time between bitmap daemon work to clear bits
+ *    [daemon_sleep <ms>]		Time between bitmap daemon work to
+ *					clear bits
 *    [min_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
 *    [max_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
+ *    [write_mostly <idx>]		Indicate a write mostly drive via index
 *    [max_write_behind <sectors>]	See '--write-behind=' (man mdadm)
 *    [stripe_cache <sectors>]		Stripe cache size for higher RAIDs
+ *    [region_size <sectors>]		Defines granularity of bitmap
  */
 static int parse_raid_params(struct raid_set *rs, char **argv,
 			     unsigned num_raid_params)
 {
 	unsigned i, rebuild_cnt = 0;
-	unsigned long value;
+	unsigned long value, region_size = 0;
 	char *key;
 
 	/*
 	 * First, parse the in-order required arguments
+	 * "chunk_size" is the only argument of this type.
 	 */
-	if ((strict_strtoul(argv[0], 10, &value) < 0) ||
-	    !is_power_of_2(value) || (value < 8)) {
+	if ((strict_strtoul(argv[0], 10, &value) < 0)) {
 		rs->ti->error = "Bad chunk size";
 		return -EINVAL;
+	} else if (rs->raid_type->level == 1) {
+		if (value)
+			DMERR("Ignoring chunk size parameter for RAID 1");
+		value = 0;
+	} else if (!is_power_of_2(value)) {
+		rs->ti->error = "Chunk size must be a power of 2";
+		return -EINVAL;
+	} else if (value < 8) {
+		rs->ti->error = "Chunk size value is too small";
+		return -EINVAL;
 	}
 
 	rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;
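
To make validate_region_size() concrete: the MD bitmap limit requires ti->len / region_size < 2^21, so min_region_size is the target length scaled down by 2^21, and the 4MiB (2^13-sector) default suffices for any target up to 2^34 sectors (8TiB); beyond that, the default scales up with the target. A small user-space model of the defaulting branch (hypothetical target size; kernel helpers replaced with plain C):

#include <stdio.h>

int main(void)
{
	unsigned long long ti_len = 1ULL << 31;	/* a 1TiB target, in sectors */
	unsigned long min_region_size = ti_len / (1 << 21);	/* 1024 */
	unsigned long region_size;

	/* Mirrors the default-selection branch of validate_region_size(). */
	if (min_region_size > (1 << 13))
		region_size = min_region_size;	/* huge target: scale up */
	else
		region_size = 1 << 13;		/* default: 8192 sectors = 4MiB */

	printf("region_size %lu sectors -> %llu regions\n",
	       region_size, ti_len / region_size);	/* 8192 -> 262144 < 2^21 */
	return 0;
}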
@@ -269,22 +380,39 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 	num_raid_params--;
 
 	/*
-	 * Second, parse the unordered optional arguments
+	 * We set each individual device as In_sync with a completed
+	 * 'recovery_offset'.  If there has been a device failure or
+	 * replacement then one of the following cases applies:
+	 *
+	 * 1) User specifies 'rebuild'.
+	 *    - Device is reset when param is read.
+	 * 2) A new device is supplied.
+	 *    - No matching superblock found, resets device.
+	 * 3) Device failure was transient and returns on reload.
+	 *    - Failure noticed, resets device for bitmap replay.
+	 * 4) Device hadn't completed recovery after previous failure.
+	 *    - Superblock is read and overrides recovery_offset.
+	 *
+	 * What is found in the superblocks of the devices is always
+	 * authoritative, unless 'rebuild' or '[no]sync' was specified.
 	 */
-	for (i = 0; i < rs->md.raid_disks; i++)
+	for (i = 0; i < rs->md.raid_disks; i++) {
 		set_bit(In_sync, &rs->dev[i].rdev.flags);
+		rs->dev[i].rdev.recovery_offset = MaxSector;
+	}
 
+	/*
+	 * Second, parse the unordered optional arguments
+	 */
 	for (i = 0; i < num_raid_params; i++) {
-		if (!strcmp(argv[i], "nosync")) {
+		if (!strcasecmp(argv[i], "nosync")) {
 			rs->md.recovery_cp = MaxSector;
 			rs->print_flags |= DMPF_NOSYNC;
-			rs->md.flags |= MD_SYNC_STATE_FORCED;
 			continue;
 		}
-		if (!strcmp(argv[i], "sync")) {
+		if (!strcasecmp(argv[i], "sync")) {
 			rs->md.recovery_cp = 0;
 			rs->print_flags |= DMPF_SYNC;
-			rs->md.flags |= MD_SYNC_STATE_FORCED;
 			continue;
 		}
 
@@ -300,9 +428,13 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 			return -EINVAL;
 		}
 
-		if (!strcmp(key, "rebuild")) {
-			if (++rebuild_cnt > rs->raid_type->parity_devs) {
-				rs->ti->error = "Too many rebuild drives given";
+		if (!strcasecmp(key, "rebuild")) {
+			rebuild_cnt++;
+			if (((rs->raid_type->level != 1) &&
+			     (rebuild_cnt > rs->raid_type->parity_devs)) ||
+			    ((rs->raid_type->level == 1) &&
+			     (rebuild_cnt > (rs->md.raid_disks - 1)))) {
+				rs->ti->error = "Too many rebuild devices specified for given RAID type";
 				return -EINVAL;
 			}
 			if (value > rs->md.raid_disks) {
@@ -311,7 +443,22 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 			}
 			clear_bit(In_sync, &rs->dev[value].rdev.flags);
 			rs->dev[value].rdev.recovery_offset = 0;
-		} else if (!strcmp(key, "max_write_behind")) {
+			rs->print_flags |= DMPF_REBUILD;
+		} else if (!strcasecmp(key, "write_mostly")) {
+			if (rs->raid_type->level != 1) {
+				rs->ti->error = "write_mostly option is only valid for RAID1";
+				return -EINVAL;
+			}
+			if (value > rs->md.raid_disks) {
+				rs->ti->error = "Invalid write_mostly drive index given";
+				return -EINVAL;
+			}
+			set_bit(WriteMostly, &rs->dev[value].rdev.flags);
+		} else if (!strcasecmp(key, "max_write_behind")) {
+			if (rs->raid_type->level != 1) {
+				rs->ti->error = "max_write_behind option is only valid for RAID1";
+				return -EINVAL;
+			}
 			rs->print_flags |= DMPF_MAX_WRITE_BEHIND;
 
 			/*
@@ -324,14 +471,14 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 				return -EINVAL;
 			}
 			rs->md.bitmap_info.max_write_behind = value;
-		} else if (!strcmp(key, "daemon_sleep")) {
+		} else if (!strcasecmp(key, "daemon_sleep")) {
 			rs->print_flags |= DMPF_DAEMON_SLEEP;
 			if (!value || (value > MAX_SCHEDULE_TIMEOUT)) {
 				rs->ti->error = "daemon sleep period out of range";
 				return -EINVAL;
 			}
 			rs->md.bitmap_info.daemon_sleep = value;
-		} else if (!strcmp(key, "stripe_cache")) {
+		} else if (!strcasecmp(key, "stripe_cache")) {
 			rs->print_flags |= DMPF_STRIPE_CACHE;
 
 			/*
@@ -348,20 +495,23 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 				rs->ti->error = "Bad stripe_cache size";
 				return -EINVAL;
 			}
-		} else if (!strcmp(key, "min_recovery_rate")) {
+		} else if (!strcasecmp(key, "min_recovery_rate")) {
 			rs->print_flags |= DMPF_MIN_RECOVERY_RATE;
 			if (value > INT_MAX) {
 				rs->ti->error = "min_recovery_rate out of range";
 				return -EINVAL;
 			}
 			rs->md.sync_speed_min = (int)value;
-		} else if (!strcmp(key, "max_recovery_rate")) {
+		} else if (!strcasecmp(key, "max_recovery_rate")) {
 			rs->print_flags |= DMPF_MAX_RECOVERY_RATE;
 			if (value > INT_MAX) {
 				rs->ti->error = "max_recovery_rate out of range";
 				return -EINVAL;
 			}
 			rs->md.sync_speed_max = (int)value;
+		} else if (!strcasecmp(key, "region_size")) {
+			rs->print_flags |= DMPF_REGION_SIZE;
+			region_size = value;
 		} else {
 			DMERR("Unable to parse RAID parameter: %s", key);
 			rs->ti->error = "Unable to parse RAID parameters";
@@ -369,6 +519,19 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 		}
 	}
 
+	if (validate_region_size(rs, region_size))
+		return -EINVAL;
+
+	if (rs->md.chunk_sectors)
+		rs->ti->split_io = rs->md.chunk_sectors;
+	else
+		rs->ti->split_io = region_size;
+
+	if (rs->md.chunk_sectors)
+		rs->ti->split_io = rs->md.chunk_sectors;
+	else
+		rs->ti->split_io = region_size;
+
 	/* Assume there are no metadata devices until the drives are parsed */
 	rs->md.persistent = 0;
 	rs->md.external = 1;
@@ -387,17 +550,351 @@ static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
 {
 	struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
 
+	if (rs->raid_type->level == 1)
+		return md_raid1_congested(&rs->md, bits);
+
 	return md_raid5_congested(&rs->md, bits);
 }
 
 /*
+ * This structure is never routinely used by userspace, unlike md superblocks.
+ * Devices with this superblock should only ever be accessed via device-mapper.
+ */
+#define DM_RAID_MAGIC 0x64526D44
+struct dm_raid_superblock {
+	__le32 magic;		/* "DmRd" */
+	__le32 features;	/* Used to indicate possible future changes */
+
+	__le32 num_devices;	/* Number of devices in this array. (Max 64) */
+	__le32 array_position;	/* The position of this drive in the array */
+
+	__le64 events;		/* Incremented by md when superblock updated */
+	__le64 failed_devices;	/* Bit field of devices to indicate failures */
+
+	/*
+	 * This offset tracks the progress of the repair or replacement of
+	 * an individual drive.
+	 */
+	__le64 disk_recovery_offset;
+
+	/*
+	 * This offset tracks the progress of the initial array
+	 * synchronisation/parity calculation.
+	 */
+	__le64 array_resync_offset;
+
+	/*
+	 * RAID characteristics
+	 */
+	__le32 level;
+	__le32 layout;
+	__le32 stripe_sectors;
+
+	__u8 pad[452];		/* Round struct to 512 bytes. */
+				/* Always set to 0 when writing. */
+} __packed;
+
+static int read_disk_sb(mdk_rdev_t *rdev, int size)
+{
+	BUG_ON(!rdev->sb_page);
+
+	if (rdev->sb_loaded)
+		return 0;
+
+	if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, 1)) {
+		DMERR("Failed to read device superblock");
+		return -EINVAL;
+	}
+
+	rdev->sb_loaded = 1;
+
+	return 0;
+}
+
+static void super_sync(mddev_t *mddev, mdk_rdev_t *rdev)
+{
+	mdk_rdev_t *r, *t;
+	uint64_t failed_devices;
+	struct dm_raid_superblock *sb;
+
+	sb = page_address(rdev->sb_page);
+	failed_devices = le64_to_cpu(sb->failed_devices);
+
+	rdev_for_each(r, t, mddev)
+		if ((r->raid_disk >= 0) && test_bit(Faulty, &r->flags))
+			failed_devices |= (1ULL << r->raid_disk);
+
+	memset(sb, 0, sizeof(*sb));
+
+	sb->magic = cpu_to_le32(DM_RAID_MAGIC);
+	sb->features = cpu_to_le32(0);	/* No features yet */
+
+	sb->num_devices = cpu_to_le32(mddev->raid_disks);
+	sb->array_position = cpu_to_le32(rdev->raid_disk);
+
+	sb->events = cpu_to_le64(mddev->events);
+	sb->failed_devices = cpu_to_le64(failed_devices);
+
+	sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset);
+	sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp);
+
+	sb->level = cpu_to_le32(mddev->level);
+	sb->layout = cpu_to_le32(mddev->layout);
+	sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);
+}
+
+/*
+ * super_load
+ *
+ * This function creates a superblock if one is not found on the device
+ * and will decide which superblock to use if there's a choice.
+ *
+ * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise
+ */
+static int super_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev)
+{
+	int ret;
+	struct dm_raid_superblock *sb;
+	struct dm_raid_superblock *refsb;
+	uint64_t events_sb, events_refsb;
+
+	rdev->sb_start = 0;
+	rdev->sb_size = sizeof(*sb);
+
+	ret = read_disk_sb(rdev, rdev->sb_size);
+	if (ret)
+		return ret;
+
+	sb = page_address(rdev->sb_page);
+	if (sb->magic != cpu_to_le32(DM_RAID_MAGIC)) {
+		super_sync(rdev->mddev, rdev);
+
+		set_bit(FirstUse, &rdev->flags);
+
+		/* Force writing of superblocks to disk */
+		set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags);
+
+		/* Any superblock is better than none, choose that if given */
+		return refdev ? 0 : 1;
+	}
+
+	if (!refdev)
+		return 1;
+
+	events_sb = le64_to_cpu(sb->events);
+
+	refsb = page_address(refdev->sb_page);
+	events_refsb = le64_to_cpu(refsb->events);
+
+	return (events_sb > events_refsb) ? 1 : 0;
+}
+
+static int super_init_validation(mddev_t *mddev, mdk_rdev_t *rdev)
+{
+	int role;
+	struct raid_set *rs = container_of(mddev, struct raid_set, md);
+	uint64_t events_sb;
+	uint64_t failed_devices;
+	struct dm_raid_superblock *sb;
+	uint32_t new_devs = 0;
+	uint32_t rebuilds = 0;
+	mdk_rdev_t *r, *t;
+	struct dm_raid_superblock *sb2;
+
+	sb = page_address(rdev->sb_page);
+	events_sb = le64_to_cpu(sb->events);
+	failed_devices = le64_to_cpu(sb->failed_devices);
+
+	/*
+	 * Initialise to 1 if this is a new superblock.
+	 */
+	mddev->events = events_sb ? : 1;
+
+	/*
+	 * Reshaping is not currently allowed
+	 */
+	if ((le32_to_cpu(sb->level) != mddev->level) ||
+	    (le32_to_cpu(sb->layout) != mddev->layout) ||
+	    (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors)) {
+		DMERR("Reshaping arrays not yet supported.");
+		return -EINVAL;
+	}
+
+	/* We can only change the number of devices in RAID1 right now */
+	if ((rs->raid_type->level != 1) &&
+	    (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) {
+		DMERR("Reshaping arrays not yet supported.");
+		return -EINVAL;
+	}
+
+	if (!(rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC)))
+		mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);
+
+	/*
+	 * During load, we set FirstUse if a new superblock was written.
+	 * There are two reasons we might not have a superblock:
+	 * 1) The array is brand new - in which case, all of the
+	 *    devices must have their In_sync bit set.  Also,
+	 *    recovery_cp must be 0, unless forced.
+	 * 2) This is a new device being added to an old array
+	 *    and the new device needs to be rebuilt - in which
+	 *    case the In_sync bit will /not/ be set and
+	 *    recovery_cp must be MaxSector.
+	 */
+	rdev_for_each(r, t, mddev) {
+		if (!test_bit(In_sync, &r->flags)) {
+			if (!test_bit(FirstUse, &r->flags))
+				DMERR("Superblock area of "
+				      "rebuild device %d should have been "
+				      "cleared.", r->raid_disk);
+			set_bit(FirstUse, &r->flags);
+			rebuilds++;
+		} else if (test_bit(FirstUse, &r->flags))
+			new_devs++;
+	}
+
+	if (!rebuilds) {
+		if (new_devs == mddev->raid_disks) {
+			DMINFO("Superblocks created for new array");
+			set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
+		} else if (new_devs) {
+			DMERR("New device injected "
+			      "into existing array without 'rebuild' "
+			      "parameter specified");
+			return -EINVAL;
+		}
+	} else if (new_devs) {
+		DMERR("'rebuild' devices cannot be "
+		      "injected into an array with other first-time devices");
+		return -EINVAL;
+	} else if (mddev->recovery_cp != MaxSector) {
+		DMERR("'rebuild' specified while array is not in-sync");
+		return -EINVAL;
+	}
+
+	/*
+	 * Now we set the Faulty bit for those devices that are
+	 * recorded in the superblock as failed.
+	 */
+	rdev_for_each(r, t, mddev) {
+		if (!r->sb_page)
+			continue;
+		sb2 = page_address(r->sb_page);
+		sb2->failed_devices = 0;
+
+		/*
+		 * Check for any device re-ordering.
+		 */
+		if (!test_bit(FirstUse, &r->flags) && (r->raid_disk >= 0)) {
+			role = le32_to_cpu(sb2->array_position);
+			if (role != r->raid_disk) {
+				if (rs->raid_type->level != 1) {
+					rs->ti->error = "Cannot change device "
+							"positions in RAID array";
+					return -EINVAL;
+				}
+				DMINFO("RAID1 device #%d now at position #%d",
+				       role, r->raid_disk);
+			}
+
+			/*
+			 * Partial recovery is performed on
+			 * returning failed devices.
+			 */
+			if (failed_devices & (1 << role))
+				set_bit(Faulty, &r->flags);
+		}
+	}
+
+	return 0;
+}
+
+static int super_validate(mddev_t *mddev, mdk_rdev_t *rdev)
+{
+	struct dm_raid_superblock *sb = page_address(rdev->sb_page);
+
+	/*
+	 * If mddev->events is not set, we know we have not yet initialized
+	 * the array.
+	 */
+	if (!mddev->events && super_init_validation(mddev, rdev))
+		return -EINVAL;
+
+	mddev->bitmap_info.offset = 4096 >> 9; /* Enable bitmap creation */
+	rdev->mddev->bitmap_info.default_offset = 4096 >> 9;
+	if (!test_bit(FirstUse, &rdev->flags)) {
+		rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
+		if (rdev->recovery_offset != MaxSector)
+			clear_bit(In_sync, &rdev->flags);
+	}
+
+	/*
+	 * If a device comes back, set it as not In_sync and no longer faulty.
+	 */
+	if (test_bit(Faulty, &rdev->flags)) {
+		clear_bit(Faulty, &rdev->flags);
+		clear_bit(In_sync, &rdev->flags);
+		rdev->saved_raid_disk = rdev->raid_disk;
+		rdev->recovery_offset = 0;
+	}
+
+	clear_bit(FirstUse, &rdev->flags);
+
+	return 0;
+}
+
+/*
+ * Analyse superblocks and select the freshest.
+ */
+static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
+{
+	int ret;
+	mdk_rdev_t *rdev, *freshest, *tmp;
+	mddev_t *mddev = &rs->md;
+
+	freshest = NULL;
+	rdev_for_each(rdev, tmp, mddev) {
+		if (!rdev->meta_bdev)
+			continue;
+
+		ret = super_load(rdev, freshest);
+
+		switch (ret) {
+		case 1:
+			freshest = rdev;
+			break;
+		case 0:
+			break;
+		default:
+			ti->error = "Failed to load superblock";
+			return ret;
+		}
+	}
+
+	if (!freshest)
+		return 0;
+
+	/*
+	 * Validation of the freshest device provides the source of
+	 * validation for the remaining devices.
+	 */
+	ti->error = "Unable to assemble array: Invalid superblocks";
+	if (super_validate(mddev, freshest))
+		return -EINVAL;
+
+	rdev_for_each(rdev, tmp, mddev)
+		if ((rdev != freshest) && super_validate(mddev, rdev))
+			return -EINVAL;
+
+	return 0;
+}
+
+/*
  * Construct a RAID4/5/6 mapping:
  * Args:
  *	<raid_type> <#raid_params> <raid_params> \
 *	<#raid_devs> { <meta_dev1> <dev1> .. <meta_devN> <devN> }
 *
- * ** metadata devices are not supported yet, use '-' instead **
- *
 * <raid_params> varies by <raid_type>.  See 'parse_raid_params' for
 * details on possible <raid_params>.
 */
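
A quick consistency check on the on-disk format above: the declared fields total 60 bytes (seven __le32 plus four __le64), and the 452-byte pad rounds the structure to exactly the one 512-byte sector that super_load() reads via rdev->sb_size = sizeof(*sb). A user-space mirror of the layout can assert this at compile time (uint32_t/uint64_t standing in for the kernel's __le32/__le64; same sizes, byte order aside):

#include <stdint.h>

struct dm_raid_superblock_model {
	uint32_t magic, features;
	uint32_t num_devices, array_position;
	uint64_t events, failed_devices;
	uint64_t disk_recovery_offset, array_resync_offset;
	uint32_t level, layout, stripe_sectors;
	uint8_t  pad[452];	/* 60 bytes of fields + 452 = 512 */
} __attribute__((packed));

/* Fails to compile if the structure ever drifts from one 512-byte sector. */
_Static_assert(sizeof(struct dm_raid_superblock_model) == 512,
	       "dm-raid superblock must be exactly 512 bytes");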
@@ -465,8 +962,12 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	if (ret)
 		goto bad;
 
+	rs->md.sync_super = super_sync;
+	ret = analyse_superblocks(ti, rs);
+	if (ret)
+		goto bad;
+
 	INIT_WORK(&rs->md.event_work, do_table_event);
-	ti->split_io = rs->md.chunk_sectors;
 	ti->private = rs;
 
 	mutex_lock(&rs->md.reconfig_mutex);
@@ -482,6 +983,7 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	rs->callbacks.congested_fn = raid_is_congested;
 	dm_table_add_target_callbacks(ti->table, &rs->callbacks);
 
+	mddev_suspend(&rs->md);
 	return 0;
 
 bad:
@@ -546,12 +1048,17 @@ static int raid_status(struct dm_target *ti, status_type_t type,
 		break;
 	case STATUSTYPE_TABLE:
 		/* The string you would use to construct this array */
-		for (i = 0; i < rs->md.raid_disks; i++)
-			if (rs->dev[i].data_dev &&
+		for (i = 0; i < rs->md.raid_disks; i++) {
+			if ((rs->print_flags & DMPF_REBUILD) &&
+			    rs->dev[i].data_dev &&
 			    !test_bit(In_sync, &rs->dev[i].rdev.flags))
-				raid_param_cnt++; /* for rebuilds */
+				raid_param_cnt += 2; /* for rebuilds */
+			if (rs->dev[i].data_dev &&
+			    test_bit(WriteMostly, &rs->dev[i].rdev.flags))
+				raid_param_cnt += 2;
+		}
 
-		raid_param_cnt += (hweight64(rs->print_flags) * 2);
+		raid_param_cnt += (hweight64(rs->print_flags & ~DMPF_REBUILD) * 2);
 		if (rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC))
 			raid_param_cnt--;
 
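
The arithmetic above works because, apart from bare 'sync'/'nosync', every print_flags bit corresponds to a '<key> <value>' pair in the table line, so hweight64() (a population count) times two gives their argument total; DMPF_REBUILD is masked out because rebuild, like write_mostly, is now emitted once per matching device and counted in the loop instead. A user-space sketch of the same accounting (flag values copied from the defines above; the example flags and the seed of 1 for the leading chunk_size argument are illustrative):

#include <stdint.h>
#include <stdio.h>

#define DMPF_SYNC	 0x1
#define DMPF_NOSYNC	 0x2
#define DMPF_REBUILD	 0x4
#define DMPF_REGION_SIZE 0x100

int main(void)
{
	uint64_t print_flags = DMPF_NOSYNC | DMPF_REGION_SIZE;
	unsigned raid_param_cnt = 1;	/* leading chunk_size argument */

	/* Each remaining flag contributes a "<key> <value>" pair... */
	raid_param_cnt += __builtin_popcountll(print_flags & ~DMPF_REBUILD) * 2;
	/* ...except sync/nosync, which is a bare keyword, not a pair. */
	if (print_flags & (DMPF_SYNC | DMPF_NOSYNC))
		raid_param_cnt--;

	/* "0 nosync region_size <n>" -> 4 parameters */
	printf("raid_param_cnt = %u\n", raid_param_cnt);
	return 0;
}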
@@ -565,7 +1072,8 @@ static int raid_status(struct dm_target *ti, status_type_t type,
 			DMEMIT(" nosync");
 
 		for (i = 0; i < rs->md.raid_disks; i++)
-			if (rs->dev[i].data_dev &&
+			if ((rs->print_flags & DMPF_REBUILD) &&
+			    rs->dev[i].data_dev &&
 			    !test_bit(In_sync, &rs->dev[i].rdev.flags))
 				DMEMIT(" rebuild %u", i);
 
@@ -579,6 +1087,11 @@ static int raid_status(struct dm_target *ti, status_type_t type,
 		if (rs->print_flags & DMPF_MAX_RECOVERY_RATE)
 			DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max);
 
+		for (i = 0; i < rs->md.raid_disks; i++)
+			if (rs->dev[i].data_dev &&
+			    test_bit(WriteMostly, &rs->dev[i].rdev.flags))
+				DMEMIT(" write_mostly %u", i);
+
 		if (rs->print_flags & DMPF_MAX_WRITE_BEHIND)
 			DMEMIT(" max_write_behind %lu",
 			       rs->md.bitmap_info.max_write_behind);
@@ -591,9 +1104,16 @@ static int raid_status(struct dm_target *ti, status_type_t type,
 			       conf ? conf->max_nr_stripes * 2 : 0);
 		}
 
+		if (rs->print_flags & DMPF_REGION_SIZE)
+			DMEMIT(" region_size %lu",
+			       rs->md.bitmap_info.chunksize >> 9);
+
 		DMEMIT(" %d", rs->md.raid_disks);
 		for (i = 0; i < rs->md.raid_disks; i++) {
-			DMEMIT(" -"); /* metadata device */
+			if (rs->dev[i].meta_dev)
+				DMEMIT(" %s", rs->dev[i].meta_dev->name);
+			else
+				DMEMIT(" -");
 
 			if (rs->dev[i].data_dev)
 				DMEMIT(" %s", rs->dev[i].data_dev->name);
@@ -650,12 +1170,13 @@ static void raid_resume(struct dm_target *ti)
 {
 	struct raid_set *rs = ti->private;
 
+	bitmap_load(&rs->md);
 	mddev_resume(&rs->md);
 }
 
 static struct target_type raid_target = {
 	.name = "raid",
-	.version = {1, 0, 0},
+	.version = {1, 1, 0},
 	.module = THIS_MODULE,
 	.ctr = raid_ctr,
 	.dtr = raid_dtr,
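
Putting the pieces together, the new target version can express a raid1 array with per-leg metadata areas and the new keywords in a single table line; a hypothetical two-leg example (sizes and device names invented for illustration):

0 1960893648 raid raid1 3 0 region_size 1024 2 /dev/sdb1 /dev/sdb2 /dev/sdc1 /dev/sdc2

Here 'raid1 3' introduces three raid parameters (the ignored chunk size '0', then 'region_size 1024'), and '2' begins the <meta_dev> <data_dev> pairs whose superblocks analyse_superblocks() creates on first use and treats as authoritative on subsequent loads.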