author:    Heinz Mauelshagen <heinzm@redhat.com>    2016-06-23 18:21:09 -0400
committer: Mike Snitzer <snitzer@redhat.com>        2016-07-18 15:37:23 -0400
commit:    4dff2f1e26f2621dc5b02436cb889df15400036b (patch)
tree:      2590ed9dcfa71751694608c19599a87e6ef8e853 /drivers/md/dm-raid.c
parent:    0095dbc98bfdcd5a3b6cda6d2dde70ae5ffefec7 (diff)
dm raid: clarify and fix recovery
Add function rs_setup_recovery() to allow for a defined setup of RAID set
recovery in the constructor. It is called with dev_sectors={0, rdev->sectors,
MaxSector} to recover a new or enforced-sync RAID set, a grown RAID set, or a
RAID set that is not to be synchronized, respectively.

Prevents recovery on raid0, which doesn't support it.

Enforces recovery on raid6 to ensure the properly defined Syndromes mandatory
for that MD personality are being created.

Signed-off-by: Heinz Mauelshagen <heinzm@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
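For illustration only, the decision table that __rs_setup_recovery() introduces
can be condensed into a small standalone sketch. The struct and helper below
are simplified stand-ins invented for this example, not the kernel's
struct raid_set or the dm-raid API; only the MaxSector definition matches MD's:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t sector_t;
	#define MaxSector (~(sector_t)0)	/* MD's "no recovery needed" sentinel */

	/* Hypothetical stand-in for the relevant bits of struct raid_set */
	struct rs_stub {
		int level;	/* rs->md.level */
		bool nosync;	/* __CTR_FLAG_NOSYNC set in rs->ctr_flags */
	};

	/*
	 * Mirrors the patch's __rs_setup_recovery(): map the requested
	 * dev_sectors ({0, rdev->sectors, MaxSector}) to a recovery
	 * checkpoint.  recovery_cp == MaxSector means "skip recovery";
	 * any smaller value means "recover from that sector onwards".
	 */
	static sector_t recovery_cp(const struct rs_stub *rs, sector_t dev_sectors)
	{
		if (rs->level == 0)	/* raid0 does not recover */
			return MaxSector;
		if (rs->level == 6)	/* raid6 must create parity and Q-Syndrome */
			return dev_sectors;
		return rs->nosync ? MaxSector : dev_sectors;	/* honor 'nosync' */
	}

	int main(void)
	{
		const struct rs_stub raid5 = { .level = 5, .nosync = false };
		const struct rs_stub raid6 = { .level = 6, .nosync = true };

		/* New raid5 set: recovery_cp == 0, i.e. full resync */
		printf("raid5 new:    %llu\n", (unsigned long long)recovery_cp(&raid5, 0));
		/* raid6 with 'nosync': recovery is enforced anyway */
		printf("raid6 nosync: %llu\n", (unsigned long long)recovery_cp(&raid6, 0));
		return 0;
	}

Note that the raid6 branch is checked before the 'nosync' flag, which is
exactly how the patch enforces recovery on raid6 regardless of that flag.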
Diffstat (limited to 'drivers/md/dm-raid.c')
-rw-r--r--  drivers/md/dm-raid.c | 64 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---------
1 file changed, 55 insertions(+), 9 deletions(-)
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 63883f4c550d..7e334b65b1c3 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -349,6 +349,12 @@ static bool rs_is_raid10(struct raid_set *rs)
 	return rs->md.level == 10;
 }
 
+/* Return true, if raid set in @rs is level 6 */
+static bool rs_is_raid6(struct raid_set *rs)
+{
+	return rs->md.level == 6;
+}
+
 /* Return true, if raid set in @rs is level 4, 5 or 6 */
 static bool rs_is_raid456(struct raid_set *rs)
 {
@@ -681,7 +687,7 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
 	rs->md.layout = raid_type->algorithm;
 	rs->md.new_layout = rs->md.layout;
 	rs->md.delta_disks = 0;
-	rs->md.recovery_cp = rs_is_raid0(rs) ? MaxSector : 0;
+	rs->md.recovery_cp = MaxSector;
 
 	for (i = 0; i < raid_devs; i++)
 		md_rdev_init(&rs->dev[i].rdev);
@@ -1090,7 +1096,6 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
 				rs->ti->error = "Only one 'nosync' argument allowed";
 				return -EINVAL;
 			}
-			rs->md.recovery_cp = MaxSector;
 			continue;
 		}
 		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_SYNC))) {
@@ -1098,7 +1103,6 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
 				rs->ti->error = "Only one 'sync' argument allowed";
 				return -EINVAL;
 			}
-			rs->md.recovery_cp = 0;
 			continue;
 		}
 		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_USE_NEAR_SETS))) {
@@ -1412,7 +1416,6 @@ static int rs_set_dev_and_array_sectors(struct raid_set *rs, bool use_mddev)
 	struct mddev *mddev = &rs->md;
 	struct md_rdev *rdev;
 	sector_t array_sectors = rs->ti->len, dev_sectors = rs->ti->len;
-	sector_t cur_dev_sectors = rs->dev[0].rdev.sectors;
 
 	if (use_mddev) {
 		delta_disks = mddev->delta_disks;
@@ -1453,15 +1456,50 @@ static int rs_set_dev_and_array_sectors(struct raid_set *rs, bool use_mddev)
 	mddev->array_sectors = array_sectors;
 	mddev->dev_sectors = dev_sectors;
 
-	if (!rs_is_raid0(rs) && dev_sectors > cur_dev_sectors)
-		mddev->recovery_cp = dev_sectors;
-
 	return 0;
 bad:
 	rs->ti->error = "Target length not divisible by number of data devices";
 	return EINVAL;
 }
 
+/* Setup recovery on @rs */
+static void __rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
+{
+	/* raid0 does not recover */
+	if (rs_is_raid0(rs))
+		rs->md.recovery_cp = MaxSector;
+	/*
+	 * A raid6 set has to be recovered either
+	 * completely or for the grown part to
+	 * ensure proper parity and Q-Syndrome
+	 */
+	else if (rs_is_raid6(rs))
+		rs->md.recovery_cp = dev_sectors;
+	/*
+	 * Other raid set types may skip recovery
+	 * depending on the 'nosync' flag.
+	 */
+	else
+		rs->md.recovery_cp = test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)
+				     ? MaxSector : dev_sectors;
+}
+
+/* Setup recovery on @rs based on raid type, device size and 'nosync' flag */
+static void rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
+{
+	if (!dev_sectors)
+		/* New raid set or 'sync' flag provided */
+		__rs_setup_recovery(rs, 0);
+	else if (dev_sectors == MaxSector)
+		/* Prevent recovery */
+		__rs_setup_recovery(rs, MaxSector);
+	else if (rs->dev[0].rdev.sectors < dev_sectors)
+		/* Grown raid set */
+		__rs_setup_recovery(rs, rs->dev[0].rdev.sectors);
+	else
+		__rs_setup_recovery(rs, MaxSector);
+}
+
 static void do_table_event(struct work_struct *ws)
 {
 	struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);
@@ -2086,7 +2124,6 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
 	if (new_devs == rs->raid_disks) {
 		DMINFO("Superblocks created for new raid set");
 		set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
-		mddev->recovery_cp = 0;
 	} else if (new_devs != rebuilds &&
 		   new_devs != rs->delta_disks) {
 		DMERR("New device injected into existing raid set without "
@@ -2633,6 +2670,7 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	int r;
 	struct raid_type *rt;
 	unsigned num_raid_params, num_raid_devs;
+	sector_t calculated_dev_sectors;
 	struct raid_set *rs = NULL;
 	const char *arg;
 	struct rs_layout rs_layout;
@@ -2689,6 +2727,8 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	if (r)
 		return r;
 
+	calculated_dev_sectors = rs->dev[0].rdev.sectors;
+
 	/*
 	 * Backup any new raid set level, layout, ...
 	 * requested to be able to compare to superblock
@@ -2700,6 +2740,8 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	if (r)
 		goto bad;
 
+	rs_setup_recovery(rs, calculated_dev_sectors);
+
 	INIT_WORK(&rs->md.event_work, do_table_event);
 	ti->private = rs;
 	ti->num_flush_bios = 1;
@@ -2786,8 +2828,12 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
 		set_bit(MD_ARRAY_FIRST_USE, &rs->md.flags);
 
 		rs_set_cur(rs);
-	} else
+		rs_setup_recovery(rs, MaxSector);
+	} else {
 		rs_set_cur(rs);
+		rs_setup_recovery(rs, test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) ?
+				      0 : calculated_dev_sectors);
+	}
 
 	/* If constructor requested it, change data and new_data offsets */
 	r = rs_adjust_data_offsets(rs);
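
For completeness, the rs_setup_recovery() dispatch used at the constructor call
sites above can be reduced the same way. This reuses the rs_stub and
recovery_cp() helpers from the sketch after the commit message; old_sectors
stands in for rs->dev[0].rdev.sectors, and none of this is code from the patch
itself:

	/*
	 * Sketch of rs_setup_recovery(): translate the caller's dev_sectors
	 * hint ({0, current rdev size, MaxSector}) into the argument for the
	 * core decision helper recovery_cp() above.
	 */
	static sector_t pick_recovery_cp(const struct rs_stub *rs,
					 sector_t old_sectors, sector_t dev_sectors)
	{
		if (!dev_sectors)			/* new set or 'sync' flag given */
			return recovery_cp(rs, 0);
		if (dev_sectors == MaxSector)		/* fresh superblocks: prevent recovery */
			return recovery_cp(rs, MaxSector);
		if (old_sectors < dev_sectors)		/* grown set: recover the new tail */
			return recovery_cp(rs, old_sectors);
		return recovery_cp(rs, MaxSector);	/* unchanged size: nothing to do */
	}

Reloading an unchanged raid5 table without 'sync', for example, takes the last
branch both times rs_setup_recovery() is called in raid_ctr() (once with
calculated_dev_sectors, once in the else branch), leaving recovery_cp at
MaxSector so no spurious resync is started.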