aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/md/dm-raid.c
diff options
context:
space:
mode:
authorJonathan Brassow <jbrassow@redhat.com>2011-08-02 07:32:07 -0400
committerAlasdair G Kergon <agk@redhat.com>2011-08-02 07:32:07 -0400
commit327372797c88b24953f454cd51a3734c02697bdd (patch)
treee7f7e3dff1415817b31af76d746240d80772b448 /drivers/md/dm-raid.c
parentb12d437b73d32203a41fde0d407e91812c866844 (diff)
dm raid: add md raid1 support
Support the MD RAID1 personality through dm-raid.

Signed-off-by: Jonathan Brassow <jbrassow@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Diffstat (limited to 'drivers/md/dm-raid.c')
-rw-r--r--drivers/md/dm-raid.c49
1 file changed, 39 insertions, 10 deletions
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 69873806c50c..a002dd85db1e 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -8,6 +8,7 @@
8#include <linux/slab.h> 8#include <linux/slab.h>
9 9
10#include "md.h" 10#include "md.h"
11#include "raid1.h"
11#include "raid5.h" 12#include "raid5.h"
12#include "bitmap.h" 13#include "bitmap.h"
13 14
@@ -72,6 +73,7 @@ static struct raid_type {
72 const unsigned level; /* RAID level. */ 73 const unsigned level; /* RAID level. */
73 const unsigned algorithm; /* RAID algorithm. */ 74 const unsigned algorithm; /* RAID algorithm. */
74} raid_types[] = { 75} raid_types[] = {
76 {"raid1", "RAID1 (mirroring)", 0, 2, 1, 0 /* NONE */},
75 {"raid4", "RAID4 (dedicated parity disk)", 1, 2, 5, ALGORITHM_PARITY_0}, 77 {"raid4", "RAID4 (dedicated parity disk)", 1, 2, 5, ALGORITHM_PARITY_0},
76 {"raid5_la", "RAID5 (left asymmetric)", 1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC}, 78 {"raid5_la", "RAID5 (left asymmetric)", 1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC},
77 {"raid5_ra", "RAID5 (right asymmetric)", 1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC}, 79 {"raid5_ra", "RAID5 (right asymmetric)", 1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC},
@@ -105,7 +107,8 @@ static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *ra
105 } 107 }
106 108
107 sectors_per_dev = ti->len; 109 sectors_per_dev = ti->len;
108 if (sector_div(sectors_per_dev, (raid_devs - raid_type->parity_devs))) { 110 if ((raid_type->level > 1) &&
111 sector_div(sectors_per_dev, (raid_devs - raid_type->parity_devs))) {
109 ti->error = "Target length not divisible by number of data devices"; 112 ti->error = "Target length not divisible by number of data devices";
110 return ERR_PTR(-EINVAL); 113 return ERR_PTR(-EINVAL);
111 } 114 }
@@ -329,13 +332,16 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
329 332
330/* 333/*
331 * Possible arguments are... 334 * Possible arguments are...
332 * RAID456:
333 * <chunk_size> [optional_args] 335 * <chunk_size> [optional_args]
334 * 336 *
335 * Optional args: 337 * Argument definitions
336 * [[no]sync] Force or prevent recovery of the entire array 338 * <chunk_size> The number of sectors per disk that
339 * will form the "stripe"
340 * [[no]sync] Force or prevent recovery of the
341 * entire array
337 * [rebuild <idx>] Rebuild the drive indicated by the index 342 * [rebuild <idx>] Rebuild the drive indicated by the index
338 * [daemon_sleep <ms>] Time between bitmap daemon work to clear bits 343 * [daemon_sleep <ms>] Time between bitmap daemon work to
344 * clear bits
339 * [min_recovery_rate <kB/sec/disk>] Throttle RAID initialization 345 * [min_recovery_rate <kB/sec/disk>] Throttle RAID initialization
340 * [max_recovery_rate <kB/sec/disk>] Throttle RAID initialization 346 * [max_recovery_rate <kB/sec/disk>] Throttle RAID initialization
341 * [write_mostly <idx>] Indicate a write mostly drive via index 347 * [write_mostly <idx>] Indicate a write mostly drive via index
@@ -352,11 +358,21 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
352 358
353 /* 359 /*
354 * First, parse the in-order required arguments 360 * First, parse the in-order required arguments
361 * "chunk_size" is the only argument of this type.
355 */ 362 */
356 if ((strict_strtoul(argv[0], 10, &value) < 0) || 363 if ((strict_strtoul(argv[0], 10, &value) < 0)) {
357 !is_power_of_2(value) || (value < 8)) {
358 rs->ti->error = "Bad chunk size"; 364 rs->ti->error = "Bad chunk size";
359 return -EINVAL; 365 return -EINVAL;
366 } else if (rs->raid_type->level == 1) {
367 if (value)
368 DMERR("Ignoring chunk size parameter for RAID 1");
369 value = 0;
370 } else if (!is_power_of_2(value)) {
371 rs->ti->error = "Chunk size must be a power of 2";
372 return -EINVAL;
373 } else if (value < 8) {
374 rs->ti->error = "Chunk size value is too small";
375 return -EINVAL;
360 } 376 }
361 377
362 rs->md.new_chunk_sectors = rs->md.chunk_sectors = value; 378 rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;
@@ -413,8 +429,12 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
413 } 429 }
414 430
415 if (!strcasecmp(key, "rebuild")) { 431 if (!strcasecmp(key, "rebuild")) {
416 if (++rebuild_cnt > rs->raid_type->parity_devs) { 432 rebuild_cnt++;
417 rs->ti->error = "Too many rebuild drives given"; 433 if (((rs->raid_type->level != 1) &&
434 (rebuild_cnt > rs->raid_type->parity_devs)) ||
435 ((rs->raid_type->level == 1) &&
436 (rebuild_cnt > (rs->md.raid_disks - 1)))) {
437 rs->ti->error = "Too many rebuild devices specified for given RAID type";
418 return -EINVAL; 438 return -EINVAL;
419 } 439 }
420 if (value > rs->md.raid_disks) { 440 if (value > rs->md.raid_disks) {
@@ -507,6 +527,11 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
507 else 527 else
508 rs->ti->split_io = region_size; 528 rs->ti->split_io = region_size;
509 529
530 if (rs->md.chunk_sectors)
531 rs->ti->split_io = rs->md.chunk_sectors;
532 else
533 rs->ti->split_io = region_size;
534
510 /* Assume there are no metadata devices until the drives are parsed */ 535 /* Assume there are no metadata devices until the drives are parsed */
511 rs->md.persistent = 0; 536 rs->md.persistent = 0;
512 rs->md.external = 1; 537 rs->md.external = 1;
@@ -525,6 +550,9 @@ static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
525{ 550{
526 struct raid_set *rs = container_of(cb, struct raid_set, callbacks); 551 struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
527 552
553 if (rs->raid_type->level == 1)
554 return md_raid1_congested(&rs->md, bits);
555
528 return md_raid5_congested(&rs->md, bits); 556 return md_raid5_congested(&rs->md, bits);
529} 557}
530 558
@@ -955,6 +983,7 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
955 rs->callbacks.congested_fn = raid_is_congested; 983 rs->callbacks.congested_fn = raid_is_congested;
956 dm_table_add_target_callbacks(ti->table, &rs->callbacks); 984 dm_table_add_target_callbacks(ti->table, &rs->callbacks);
957 985
986 mddev_suspend(&rs->md);
958 return 0; 987 return 0;
959 988
960bad: 989bad:
@@ -1147,7 +1176,7 @@ static void raid_resume(struct dm_target *ti)
1147 1176
1148static struct target_type raid_target = { 1177static struct target_type raid_target = {
1149 .name = "raid", 1178 .name = "raid",
1150 .version = {1, 0, 0}, 1179 .version = {1, 1, 0},
1151 .module = THIS_MODULE, 1180 .module = THIS_MODULE,
1152 .ctr = raid_ctr, 1181 .ctr = raid_ctr,
1153 .dtr = raid_dtr, 1182 .dtr = raid_dtr,