diff options
Diffstat (limited to 'drivers/md/dm-raid.c')
-rw-r--r-- | drivers/md/dm-raid.c | 54 |
1 file changed, 34 insertions(+), 20 deletions(-)
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 017c34d78d61..f4275a8e860c 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -101,20 +101,12 @@ static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *ra | |||
101 | { | 101 | { |
102 | unsigned i; | 102 | unsigned i; |
103 | struct raid_set *rs; | 103 | struct raid_set *rs; |
104 | sector_t sectors_per_dev; | ||
105 | 104 | ||
106 | if (raid_devs <= raid_type->parity_devs) { | 105 | if (raid_devs <= raid_type->parity_devs) { |
107 | ti->error = "Insufficient number of devices"; | 106 | ti->error = "Insufficient number of devices"; |
108 | return ERR_PTR(-EINVAL); | 107 | return ERR_PTR(-EINVAL); |
109 | } | 108 | } |
110 | 109 | ||
111 | sectors_per_dev = ti->len; | ||
112 | if ((raid_type->level > 1) && | ||
113 | sector_div(sectors_per_dev, (raid_devs - raid_type->parity_devs))) { | ||
114 | ti->error = "Target length not divisible by number of data devices"; | ||
115 | return ERR_PTR(-EINVAL); | ||
116 | } | ||
117 | |||
118 | rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL); | 110 | rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL); |
119 | if (!rs) { | 111 | if (!rs) { |
120 | ti->error = "Cannot allocate raid context"; | 112 | ti->error = "Cannot allocate raid context"; |
@@ -128,7 +120,6 @@ static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *ra | |||
128 | rs->md.raid_disks = raid_devs; | 120 | rs->md.raid_disks = raid_devs; |
129 | rs->md.level = raid_type->level; | 121 | rs->md.level = raid_type->level; |
130 | rs->md.new_level = rs->md.level; | 122 | rs->md.new_level = rs->md.level; |
131 | rs->md.dev_sectors = sectors_per_dev; | ||
132 | rs->md.layout = raid_type->algorithm; | 123 | rs->md.layout = raid_type->algorithm; |
133 | rs->md.new_layout = rs->md.layout; | 124 | rs->md.new_layout = rs->md.layout; |
134 | rs->md.delta_disks = 0; | 125 | rs->md.delta_disks = 0; |
@@ -143,6 +134,7 @@ static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *ra | |||
143 | * rs->md.external | 134 | * rs->md.external |
144 | * rs->md.chunk_sectors | 135 | * rs->md.chunk_sectors |
145 | * rs->md.new_chunk_sectors | 136 | * rs->md.new_chunk_sectors |
137 | * rs->md.dev_sectors | ||
146 | */ | 138 | */ |
147 | 139 | ||
148 | return rs; | 140 | return rs; |
@@ -353,6 +345,8 @@ static int parse_raid_params(struct raid_set *rs, char **argv, | |||
353 | { | 345 | { |
354 | unsigned i, rebuild_cnt = 0; | 346 | unsigned i, rebuild_cnt = 0; |
355 | unsigned long value, region_size = 0; | 347 | unsigned long value, region_size = 0; |
348 | sector_t sectors_per_dev = rs->ti->len; | ||
349 | sector_t max_io_len; | ||
356 | char *key; | 350 | char *key; |
357 | 351 | ||
358 | /* | 352 | /* |
@@ -429,13 +423,28 @@ static int parse_raid_params(struct raid_set *rs, char **argv, | |||
429 | 423 | ||
430 | if (!strcasecmp(key, "rebuild")) { | 424 | if (!strcasecmp(key, "rebuild")) { |
431 | rebuild_cnt++; | 425 | rebuild_cnt++; |
432 | if (((rs->raid_type->level != 1) && | 426 | |
433 | (rebuild_cnt > rs->raid_type->parity_devs)) || | 427 | switch (rs->raid_type->level) { |
434 | ((rs->raid_type->level == 1) && | 428 | case 1: |
435 | (rebuild_cnt > (rs->md.raid_disks - 1)))) { | 429 | if (rebuild_cnt >= rs->md.raid_disks) { |
436 | rs->ti->error = "Too many rebuild devices specified for given RAID type"; | 430 | rs->ti->error = "Too many rebuild devices specified"; |
431 | return -EINVAL; | ||
432 | } | ||
433 | break; | ||
434 | case 4: | ||
435 | case 5: | ||
436 | case 6: | ||
437 | if (rebuild_cnt > rs->raid_type->parity_devs) { | ||
438 | rs->ti->error = "Too many rebuild devices specified for given RAID type"; | ||
439 | return -EINVAL; | ||
440 | } | ||
441 | break; | ||
442 | default: | ||
443 | DMERR("The rebuild parameter is not supported for %s", rs->raid_type->name); | ||
444 | rs->ti->error = "Rebuild not supported for this RAID type"; | ||
437 | return -EINVAL; | 445 | return -EINVAL; |
438 | } | 446 | } |
447 | |||
439 | if (value > rs->md.raid_disks) { | 448 | if (value > rs->md.raid_disks) { |
440 | rs->ti->error = "Invalid rebuild index given"; | 449 | rs->ti->error = "Invalid rebuild index given"; |
441 | return -EINVAL; | 450 | return -EINVAL; |
@@ -522,14 +531,19 @@ static int parse_raid_params(struct raid_set *rs, char **argv, | |||
522 | return -EINVAL; | 531 | return -EINVAL; |
523 | 532 | ||
524 | if (rs->md.chunk_sectors) | 533 | if (rs->md.chunk_sectors) |
525 | rs->ti->split_io = rs->md.chunk_sectors; | 534 | max_io_len = rs->md.chunk_sectors; |
526 | else | 535 | else |
527 | rs->ti->split_io = region_size; | 536 | max_io_len = region_size; |
528 | 537 | ||
529 | if (rs->md.chunk_sectors) | 538 | if (dm_set_target_max_io_len(rs->ti, max_io_len)) |
530 | rs->ti->split_io = rs->md.chunk_sectors; | 539 | return -EINVAL; |
531 | else | 540 | |
532 | rs->ti->split_io = region_size; | 541 | if ((rs->raid_type->level > 1) && |
542 | sector_div(sectors_per_dev, (rs->md.raid_disks - rs->raid_type->parity_devs))) { | ||
543 | rs->ti->error = "Target length not divisible by number of data devices"; | ||
544 | return -EINVAL; | ||
545 | } | ||
546 | rs->md.dev_sectors = sectors_per_dev; | ||
533 | 547 | ||
534 | /* Assume there are no metadata devices until the drives are parsed */ | 548 | /* Assume there are no metadata devices until the drives are parsed */ |
535 | rs->md.persistent = 0; | 549 | rs->md.persistent = 0; |