author    Heinz Mauelshagen <heinzm@redhat.com>    2015-04-29 08:03:00 -0400
committer Mike Snitzer <snitzer@redhat.com>        2015-05-29 14:19:00 -0400
commit    c76d53f43ec4f9b9f200f031d303f21bdf6927d0 (patch)
tree      8b74391602ff250e135a3a0e5b2b7c25a58f8526 /drivers/md/dm-raid.c
parent    0f4106b32f36165a4f40b6aad0372e02ff14cf34 (diff)
dm raid: a few cleanups
- ensure maximum device limit in superblock
- rename DMPF_* (print flags) to CTR_FLAG_* (constructor flags) and their respective struct raid_set member
- use strcasecmp() in raid10_format_to_md_layout() as in the constructor

Signed-off-by: Heinz Mauelshagen <heinzm@redhat.com>
Reviewed-by: Jonathan Brassow <jbrassow@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
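As a quick illustration of the strcasecmp() point above: the layout word returned by raid10_format_to_md_layout() packs the near-copy count into the low byte and the far-copy count into the second byte, and sets bit 17 (0x20000) for "far" or bits 16+17 (0x30000) for "offset". The following stand-alone user-space sketch is a hypothetical test harness, not part of the patch; it mirrors the function body from the hunk below (const char * instead of char *) and prints the encodings for two copies, including a mixed-case format string that only works once strcasecmp() is used.

/*
 * User-space sketch of raid10_format_to_md_layout() as shown in the
 * hunk below; the logic is copied from the patch, the harness is not.
 */
#include <stdio.h>
#include <strings.h>	/* strcasecmp() */

static int raid10_format_to_md_layout(const char *format, unsigned copies)
{
	unsigned n = 1, f = 1;

	if (!strcasecmp("near", format))
		n = copies;
	else
		f = copies;

	if (!strcasecmp("offset", format))
		return 0x30000 | (f << 8) | n;

	if (!strcasecmp("far", format))
		return 0x20000 | (f << 8) | n;

	return (f << 8) | n;
}

int main(void)
{
	/* with 2 copies: near -> 0x102, far -> 0x20201, offset -> 0x30201 */
	printf("near:   0x%x\n", raid10_format_to_md_layout("NEAR", 2));
	printf("far:    0x%x\n", raid10_format_to_md_layout("far", 2));
	printf("offset: 0x%x\n", raid10_format_to_md_layout("offset", 2));
	return 0;
}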
Diffstat (limited to 'drivers/md/dm-raid.c')
-rw-r--r--  drivers/md/dm-raid.c  91
1 file changed, 46 insertions(+), 45 deletions(-)
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 423e42e9a1ad..af49ddebaa62 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -17,6 +17,7 @@
 #include <linux/device-mapper.h>
 
 #define DM_MSG_PREFIX "raid"
+#define MAX_RAID_DEVICES 253 /* raid4/5/6 limit */
 
 static bool devices_handle_discard_safely = false;
 
@@ -45,25 +46,25 @@ struct raid_dev {
 };
 
 /*
- * Flags for rs->print_flags field.
+ * Flags for rs->ctr_flags field.
  */
-#define DMPF_SYNC 0x1
-#define DMPF_NOSYNC 0x2
-#define DMPF_REBUILD 0x4
-#define DMPF_DAEMON_SLEEP 0x8
-#define DMPF_MIN_RECOVERY_RATE 0x10
-#define DMPF_MAX_RECOVERY_RATE 0x20
-#define DMPF_MAX_WRITE_BEHIND 0x40
-#define DMPF_STRIPE_CACHE 0x80
-#define DMPF_REGION_SIZE 0x100
-#define DMPF_RAID10_COPIES 0x200
-#define DMPF_RAID10_FORMAT 0x400
+#define CTR_FLAG_SYNC 0x1
+#define CTR_FLAG_NOSYNC 0x2
+#define CTR_FLAG_REBUILD 0x4
+#define CTR_FLAG_DAEMON_SLEEP 0x8
+#define CTR_FLAG_MIN_RECOVERY_RATE 0x10
+#define CTR_FLAG_MAX_RECOVERY_RATE 0x20
+#define CTR_FLAG_MAX_WRITE_BEHIND 0x40
+#define CTR_FLAG_STRIPE_CACHE 0x80
+#define CTR_FLAG_REGION_SIZE 0x100
+#define CTR_FLAG_RAID10_COPIES 0x200
+#define CTR_FLAG_RAID10_FORMAT 0x400
 
 struct raid_set {
 	struct dm_target *ti;
 
 	uint32_t bitmap_loaded;
-	uint32_t print_flags;
+	uint32_t ctr_flags;
 
 	struct mddev md;
 	struct raid_type *raid_type;
@@ -119,15 +120,15 @@ static int raid10_format_to_md_layout(char *format, unsigned copies)
 {
 	unsigned n = 1, f = 1;
 
-	if (!strcmp("near", format))
+	if (!strcasecmp("near", format))
 		n = copies;
 	else
 		f = copies;
 
-	if (!strcmp("offset", format))
+	if (!strcasecmp("offset", format))
 		return 0x30000 | (f << 8) | n;
 
-	if (!strcmp("far", format))
+	if (!strcasecmp("far", format))
 		return 0x20000 | (f << 8) | n;
 
 	return (f << 8) | n;
@@ -553,12 +554,12 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 	for (i = 0; i < num_raid_params; i++) {
 		if (!strcasecmp(argv[i], "nosync")) {
 			rs->md.recovery_cp = MaxSector;
-			rs->print_flags |= DMPF_NOSYNC;
+			rs->ctr_flags |= CTR_FLAG_NOSYNC;
 			continue;
 		}
 		if (!strcasecmp(argv[i], "sync")) {
 			rs->md.recovery_cp = 0;
-			rs->print_flags |= DMPF_SYNC;
+			rs->ctr_flags |= CTR_FLAG_SYNC;
 			continue;
 		}
 
@@ -583,7 +584,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 				return -EINVAL;
 			}
 			raid10_format = argv[i];
-			rs->print_flags |= DMPF_RAID10_FORMAT;
+			rs->ctr_flags |= CTR_FLAG_RAID10_FORMAT;
 			continue;
 		}
 
@@ -600,7 +601,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 			}
 			clear_bit(In_sync, &rs->dev[value].rdev.flags);
 			rs->dev[value].rdev.recovery_offset = 0;
-			rs->print_flags |= DMPF_REBUILD;
+			rs->ctr_flags |= CTR_FLAG_REBUILD;
 		} else if (!strcasecmp(key, "write_mostly")) {
 			if (rs->raid_type->level != 1) {
 				rs->ti->error = "write_mostly option is only valid for RAID1";
@@ -616,7 +617,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 				rs->ti->error = "max_write_behind option is only valid for RAID1";
 				return -EINVAL;
 			}
-			rs->print_flags |= DMPF_MAX_WRITE_BEHIND;
+			rs->ctr_flags |= CTR_FLAG_MAX_WRITE_BEHIND;
 
 			/*
 			 * In device-mapper, we specify things in sectors, but
@@ -629,14 +630,14 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 			}
 			rs->md.bitmap_info.max_write_behind = value;
 		} else if (!strcasecmp(key, "daemon_sleep")) {
-			rs->print_flags |= DMPF_DAEMON_SLEEP;
+			rs->ctr_flags |= CTR_FLAG_DAEMON_SLEEP;
 			if (!value || (value > MAX_SCHEDULE_TIMEOUT)) {
 				rs->ti->error = "daemon sleep period out of range";
 				return -EINVAL;
 			}
 			rs->md.bitmap_info.daemon_sleep = value;
 		} else if (!strcasecmp(key, "stripe_cache")) {
-			rs->print_flags |= DMPF_STRIPE_CACHE;
+			rs->ctr_flags |= CTR_FLAG_STRIPE_CACHE;
 
 			/*
 			 * In device-mapper, we specify things in sectors, but
@@ -654,21 +655,21 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 				return -EINVAL;
 			}
 		} else if (!strcasecmp(key, "min_recovery_rate")) {
-			rs->print_flags |= DMPF_MIN_RECOVERY_RATE;
+			rs->ctr_flags |= CTR_FLAG_MIN_RECOVERY_RATE;
 			if (value > INT_MAX) {
 				rs->ti->error = "min_recovery_rate out of range";
 				return -EINVAL;
 			}
 			rs->md.sync_speed_min = (int)value;
 		} else if (!strcasecmp(key, "max_recovery_rate")) {
-			rs->print_flags |= DMPF_MAX_RECOVERY_RATE;
+			rs->ctr_flags |= CTR_FLAG_MAX_RECOVERY_RATE;
 			if (value > INT_MAX) {
 				rs->ti->error = "max_recovery_rate out of range";
 				return -EINVAL;
 			}
 			rs->md.sync_speed_max = (int)value;
 		} else if (!strcasecmp(key, "region_size")) {
-			rs->print_flags |= DMPF_REGION_SIZE;
+			rs->ctr_flags |= CTR_FLAG_REGION_SIZE;
 			region_size = value;
 		} else if (!strcasecmp(key, "raid10_copies") &&
 			   (rs->raid_type->level == 10)) {
@@ -676,7 +677,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 				rs->ti->error = "Bad value for 'raid10_copies'";
 				return -EINVAL;
 			}
-			rs->print_flags |= DMPF_RAID10_COPIES;
+			rs->ctr_flags |= CTR_FLAG_RAID10_COPIES;
 			raid10_copies = value;
 		} else {
 			DMERR("Unable to parse RAID parameter: %s", key);
@@ -945,7 +946,7 @@ static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev)
 		return -EINVAL;
 	}
 
-	if (!(rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC)))
+	if (!(rs->ctr_flags & (CTR_FLAG_SYNC | CTR_FLAG_NOSYNC)))
 		mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);
 
 	/*
@@ -1071,7 +1072,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
 	freshest = NULL;
 	rdev_for_each_safe(rdev, tmp, mddev) {
 		/*
-		 * Skipping super_load due to DMPF_SYNC will cause
+		 * Skipping super_load due to CTR_FLAG_SYNC will cause
 		 * the array to undergo initialization again as
 		 * though it were new. This is the intended effect
 		 * of the "sync" directive.
@@ -1080,7 +1081,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
 		 * that the "sync" directive is disallowed during the
 		 * reshape.
 		 */
-		if (rs->print_flags & DMPF_SYNC)
+		if (rs->ctr_flags & CTR_FLAG_SYNC)
 			continue;
 
 		if (!rdev->meta_bdev)
@@ -1241,7 +1242,7 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	}
 
 	if ((kstrtoul(argv[num_raid_params], 10, &num_raid_devs) < 0) ||
-	    (num_raid_devs >= INT_MAX)) {
+	    (num_raid_devs > MAX_RAID_DEVICES)) {
 		ti->error = "Cannot understand number of raid devices";
 		return -EINVAL;
 	}
@@ -1444,7 +1445,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
 	case STATUSTYPE_TABLE:
 		/* The string you would use to construct this array */
 		for (i = 0; i < rs->md.raid_disks; i++) {
-			if ((rs->print_flags & DMPF_REBUILD) &&
+			if ((rs->ctr_flags & CTR_FLAG_REBUILD) &&
 			    rs->dev[i].data_dev &&
 			    !test_bit(In_sync, &rs->dev[i].rdev.flags))
 				raid_param_cnt += 2; /* for rebuilds */
@@ -1453,33 +1454,33 @@ static void raid_status(struct dm_target *ti, status_type_t type,
 				raid_param_cnt += 2;
 		}
 
-		raid_param_cnt += (hweight32(rs->print_flags & ~DMPF_REBUILD) * 2);
-		if (rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC))
+		raid_param_cnt += (hweight32(rs->ctr_flags & ~CTR_FLAG_REBUILD) * 2);
+		if (rs->ctr_flags & (CTR_FLAG_SYNC | CTR_FLAG_NOSYNC))
 			raid_param_cnt--;
 
 		DMEMIT("%s %u %u", rs->raid_type->name,
 		       raid_param_cnt, rs->md.chunk_sectors);
 
-		if ((rs->print_flags & DMPF_SYNC) &&
+		if ((rs->ctr_flags & CTR_FLAG_SYNC) &&
 		    (rs->md.recovery_cp == MaxSector))
 			DMEMIT(" sync");
-		if (rs->print_flags & DMPF_NOSYNC)
+		if (rs->ctr_flags & CTR_FLAG_NOSYNC)
 			DMEMIT(" nosync");
 
 		for (i = 0; i < rs->md.raid_disks; i++)
-			if ((rs->print_flags & DMPF_REBUILD) &&
+			if ((rs->ctr_flags & CTR_FLAG_REBUILD) &&
 			    rs->dev[i].data_dev &&
 			    !test_bit(In_sync, &rs->dev[i].rdev.flags))
 				DMEMIT(" rebuild %u", i);
 
-		if (rs->print_flags & DMPF_DAEMON_SLEEP)
+		if (rs->ctr_flags & CTR_FLAG_DAEMON_SLEEP)
 			DMEMIT(" daemon_sleep %lu",
 			       rs->md.bitmap_info.daemon_sleep);
 
-		if (rs->print_flags & DMPF_MIN_RECOVERY_RATE)
+		if (rs->ctr_flags & CTR_FLAG_MIN_RECOVERY_RATE)
 			DMEMIT(" min_recovery_rate %d", rs->md.sync_speed_min);
 
-		if (rs->print_flags & DMPF_MAX_RECOVERY_RATE)
+		if (rs->ctr_flags & CTR_FLAG_MAX_RECOVERY_RATE)
 			DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max);
 
 		for (i = 0; i < rs->md.raid_disks; i++)
@@ -1487,11 +1488,11 @@ static void raid_status(struct dm_target *ti, status_type_t type,
 			    test_bit(WriteMostly, &rs->dev[i].rdev.flags))
 				DMEMIT(" write_mostly %u", i);
 
-		if (rs->print_flags & DMPF_MAX_WRITE_BEHIND)
+		if (rs->ctr_flags & CTR_FLAG_MAX_WRITE_BEHIND)
 			DMEMIT(" max_write_behind %lu",
 			       rs->md.bitmap_info.max_write_behind);
 
-		if (rs->print_flags & DMPF_STRIPE_CACHE) {
+		if (rs->ctr_flags & CTR_FLAG_STRIPE_CACHE) {
 			struct r5conf *conf = rs->md.private;
 
 			/* convert from kiB to sectors */
@@ -1499,15 +1500,15 @@ static void raid_status(struct dm_target *ti, status_type_t type,
 			       conf ? conf->max_nr_stripes * 2 : 0);
 		}
 
-		if (rs->print_flags & DMPF_REGION_SIZE)
+		if (rs->ctr_flags & CTR_FLAG_REGION_SIZE)
 			DMEMIT(" region_size %lu",
 			       rs->md.bitmap_info.chunksize >> 9);
 
-		if (rs->print_flags & DMPF_RAID10_COPIES)
+		if (rs->ctr_flags & CTR_FLAG_RAID10_COPIES)
 			DMEMIT(" raid10_copies %u",
 			       raid10_md_layout_to_copies(rs->md.layout));
 
-		if (rs->print_flags & DMPF_RAID10_FORMAT)
+		if (rs->ctr_flags & CTR_FLAG_RAID10_FORMAT)
 			DMEMIT(" raid10_format %s",
 			       raid10_md_layout_to_format(rs->md.layout));
 
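A note on the raid_status() hunks above: each constructor flag normally corresponds to a "key value" pair in the emitted table line, which is why the parameter count adds hweight32(ctr_flags & ~CTR_FLAG_REBUILD) * 2 and then subtracts one word when "sync" or "nosync" is set (those are bare keywords with no value, and rebuild entries are counted per device instead). The sketch below is a hedged user-space illustration of that arithmetic, not part of the patch: __builtin_popcount (GCC/Clang) stands in for the kernel's hweight32(), and a starting count of zero is assumed since the initial value is not shown in these hunks.

#include <stdio.h>
#include <stdint.h>

/* flag values copied from the patch */
#define CTR_FLAG_SYNC		0x1
#define CTR_FLAG_NOSYNC		0x2
#define CTR_FLAG_REBUILD	0x4
#define CTR_FLAG_DAEMON_SLEEP	0x8
#define CTR_FLAG_REGION_SIZE	0x100

/* user-space stand-in for the kernel's hweight32() */
static unsigned hweight32(uint32_t w)
{
	return (unsigned)__builtin_popcount(w);
}

int main(void)
{
	uint32_t ctr_flags = CTR_FLAG_SYNC | CTR_FLAG_DAEMON_SLEEP |
			     CTR_FLAG_REGION_SIZE;
	unsigned raid_param_cnt = 0;	/* assumed start; not shown in the hunks */

	/* rebuild entries are counted per device, so mask that flag out */
	raid_param_cnt += hweight32(ctr_flags & ~CTR_FLAG_REBUILD) * 2;
	/* "sync"/"nosync" take no value, so drop the value word again */
	if (ctr_flags & (CTR_FLAG_SYNC | CTR_FLAG_NOSYNC))
		raid_param_cnt--;

	/* 3 flags * 2 - 1 = 5 words contributed to the table line */
	printf("raid_param_cnt contribution: %u\n", raid_param_cnt);
	return 0;
}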