author	Jonathan E Brassow <jbrassow@redhat.com>	2012-07-27 10:08:04 -0400
committer	Alasdair G Kergon <agk@redhat.com>	2012-07-27 10:08:04 -0400
commit	c039c332f23e794deb6d6f37b9f07ff3b27fb2cf (patch)
tree	2b6bce014f9359e6152c957594aa86f9549f7dab
parent	f999e8fe70bd0b8faa27ccdac14b5942999c6e78 (diff)
dm raid: move sectors_per_dev calculation
In preparation for RAID10 inclusion in dm-raid, we move the sectors_per_dev calculation later in the device creation process. This is because we won't know up-front how many stripes vs. how many mirrors there are, which will change the calculation.

Signed-off-by: Jonathan Brassow <jbrassow@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
 drivers/md/dm-raid.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 1717ed33dd7f..f4275a8e860c 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -101,20 +101,12 @@ static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *ra
 {
 	unsigned i;
 	struct raid_set *rs;
-	sector_t sectors_per_dev;
 
 	if (raid_devs <= raid_type->parity_devs) {
 		ti->error = "Insufficient number of devices";
 		return ERR_PTR(-EINVAL);
 	}
 
-	sectors_per_dev = ti->len;
-	if ((raid_type->level > 1) &&
-	    sector_div(sectors_per_dev, (raid_devs - raid_type->parity_devs))) {
-		ti->error = "Target length not divisible by number of data devices";
-		return ERR_PTR(-EINVAL);
-	}
-
 	rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL);
 	if (!rs) {
 		ti->error = "Cannot allocate raid context";
@@ -128,7 +120,6 @@ static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *ra
 	rs->md.raid_disks = raid_devs;
 	rs->md.level = raid_type->level;
 	rs->md.new_level = rs->md.level;
-	rs->md.dev_sectors = sectors_per_dev;
 	rs->md.layout = raid_type->algorithm;
 	rs->md.new_layout = rs->md.layout;
 	rs->md.delta_disks = 0;
@@ -143,6 +134,7 @@ static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *ra
 	 *  rs->md.external
 	 *  rs->md.chunk_sectors
 	 *  rs->md.new_chunk_sectors
+	 *  rs->md.dev_sectors
 	 */
 
 	return rs;
@@ -353,6 +345,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 {
 	unsigned i, rebuild_cnt = 0;
 	unsigned long value, region_size = 0;
+	sector_t sectors_per_dev = rs->ti->len;
 	sector_t max_io_len;
 	char *key;
 
@@ -545,6 +538,13 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 	if (dm_set_target_max_io_len(rs->ti, max_io_len))
 		return -EINVAL;
 
+	if ((rs->raid_type->level > 1) &&
+	    sector_div(sectors_per_dev, (rs->md.raid_disks - rs->raid_type->parity_devs))) {
+		rs->ti->error = "Target length not divisible by number of data devices";
+		return -EINVAL;
+	}
+	rs->md.dev_sectors = sectors_per_dev;
+
 	/* Assume there are no metadata devices until the drives are parsed */
 	rs->md.persistent = 0;
 	rs->md.external = 1;
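
For context, the check this patch relocates simply verifies that the target length splits evenly across the data devices and records the quotient as the per-device size. Below is a minimal user-space sketch of that arithmetic; the helper name and the plain 64-bit division are illustrative only (the kernel code uses sector_div() on a sector_t inside parse_raid_params(), as shown in the last hunk above), but it shows the division whose divisor is not known until the parameters, and hence the stripe/mirror split, have been parsed.

/*
 * Illustrative sketch only; not part of the patch.  For a striped RAID
 * level, each data device holds target_len / (raid_devs - parity_devs)
 * sectors, and the division must be exact.
 */
#include <stdint.h>
#include <stdio.h>

static int compute_sectors_per_dev(uint64_t target_len, unsigned raid_devs,
				   unsigned parity_devs, uint64_t *result)
{
	unsigned data_devs = raid_devs - parity_devs;

	/* Mirror the dm-raid error when the length does not divide evenly. */
	if (target_len % data_devs) {
		fprintf(stderr,
			"Target length not divisible by number of data devices\n");
		return -1;
	}
	*result = target_len / data_devs;
	return 0;
}

int main(void)
{
	uint64_t sectors_per_dev;

	/* e.g. a 6-device RAID6 set (2 parity devices) over 8388608 sectors */
	if (!compute_sectors_per_dev(8388608, 6, 2, &sectors_per_dev))
		printf("sectors per device: %llu\n",
		       (unsigned long long)sectors_per_dev);
	return 0;
}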