Diffstat (limited to 'drivers/md/raid0.c')
-rw-r--r--  drivers/md/raid0.c  178
1 file changed, 88 insertions(+), 90 deletions(-)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 8ac6488ad0dc..c605ba805586 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -53,11 +53,10 @@ static int raid0_congested(void *data, int bits)
 static int create_strip_zones (mddev_t *mddev)
 {
 	int i, c, j;
-	sector_t current_offset, curr_zone_offset;
+	sector_t current_start, curr_zone_start;
 	sector_t min_spacing;
 	raid0_conf_t *conf = mddev_to_conf(mddev);
 	mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev;
-	struct list_head *tmp1, *tmp2;
 	struct strip_zone *zone;
 	int cnt;
 	char b[BDEVNAME_SIZE];
@@ -67,19 +66,19 @@ static int create_strip_zones (mddev_t *mddev)
 	 */
 	conf->nr_strip_zones = 0;
 
-	rdev_for_each(rdev1, tmp1, mddev) {
-		printk("raid0: looking at %s\n",
+	list_for_each_entry(rdev1, &mddev->disks, same_set) {
+		printk(KERN_INFO "raid0: looking at %s\n",
 			bdevname(rdev1->bdev,b));
 		c = 0;
-		rdev_for_each(rdev2, tmp2, mddev) {
-			printk("raid0: comparing %s(%llu)",
+		list_for_each_entry(rdev2, &mddev->disks, same_set) {
+			printk(KERN_INFO "raid0: comparing %s(%llu)",
 				bdevname(rdev1->bdev,b),
 				(unsigned long long)rdev1->size);
-			printk(" with %s(%llu)\n",
+			printk(KERN_INFO " with %s(%llu)\n",
 				bdevname(rdev2->bdev,b),
 				(unsigned long long)rdev2->size);
 			if (rdev2 == rdev1) {
-				printk("raid0: END\n");
+				printk(KERN_INFO "raid0: END\n");
 				break;
 			}
 			if (rdev2->size == rdev1->size)
@@ -88,19 +87,20 @@ static int create_strip_zones (mddev_t *mddev)
 				 * Not unique, don't count it as a new
 				 * group
 				 */
-				printk("raid0: EQUAL\n");
+				printk(KERN_INFO "raid0: EQUAL\n");
 				c = 1;
 				break;
 			}
-			printk("raid0: NOT EQUAL\n");
+			printk(KERN_INFO "raid0: NOT EQUAL\n");
 		}
 		if (!c) {
-			printk("raid0: ==> UNIQUE\n");
+			printk(KERN_INFO "raid0: ==> UNIQUE\n");
 			conf->nr_strip_zones++;
-			printk("raid0: %d zones\n", conf->nr_strip_zones);
+			printk(KERN_INFO "raid0: %d zones\n",
+				conf->nr_strip_zones);
 		}
 	}
-	printk("raid0: FINAL %d zones\n", conf->nr_strip_zones);
+	printk(KERN_INFO "raid0: FINAL %d zones\n", conf->nr_strip_zones);
 
 	conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
 				conf->nr_strip_zones, GFP_KERNEL);
@@ -119,16 +119,17 @@ static int create_strip_zones (mddev_t *mddev)
 	cnt = 0;
 	smallest = NULL;
 	zone->dev = conf->devlist;
-	rdev_for_each(rdev1, tmp1, mddev) {
+	list_for_each_entry(rdev1, &mddev->disks, same_set) {
 		int j = rdev1->raid_disk;
 
 		if (j < 0 || j >= mddev->raid_disks) {
-			printk("raid0: bad disk number %d - aborting!\n", j);
+			printk(KERN_ERR "raid0: bad disk number %d - "
+				"aborting!\n", j);
 			goto abort;
 		}
 		if (zone->dev[j]) {
-			printk("raid0: multiple devices for %d - aborting!\n",
-				j);
+			printk(KERN_ERR "raid0: multiple devices for %d - "
+				"aborting!\n", j);
 			goto abort;
 		}
 		zone->dev[j] = rdev1;
@@ -149,16 +150,16 @@ static int create_strip_zones (mddev_t *mddev)
 		cnt++;
 	}
 	if (cnt != mddev->raid_disks) {
-		printk("raid0: too few disks (%d of %d) - aborting!\n",
-			cnt, mddev->raid_disks);
+		printk(KERN_ERR "raid0: too few disks (%d of %d) - "
+			"aborting!\n", cnt, mddev->raid_disks);
 		goto abort;
 	}
 	zone->nb_dev = cnt;
-	zone->size = smallest->size * cnt;
-	zone->zone_offset = 0;
+	zone->sectors = smallest->size * cnt * 2;
+	zone->zone_start = 0;
 
-	current_offset = smallest->size;
-	curr_zone_offset = zone->size;
+	current_start = smallest->size * 2;
+	curr_zone_start = zone->sectors;
 
 	/* now do the other zones */
 	for (i = 1; i < conf->nr_strip_zones; i++)
@@ -166,40 +167,41 @@ static int create_strip_zones (mddev_t *mddev)
 		zone = conf->strip_zone + i;
 		zone->dev = conf->strip_zone[i-1].dev + mddev->raid_disks;
 
-		printk("raid0: zone %d\n", i);
-		zone->dev_offset = current_offset;
+		printk(KERN_INFO "raid0: zone %d\n", i);
+		zone->dev_start = current_start;
 		smallest = NULL;
 		c = 0;
 
 		for (j=0; j<cnt; j++) {
 			char b[BDEVNAME_SIZE];
 			rdev = conf->strip_zone[0].dev[j];
-			printk("raid0: checking %s ...", bdevname(rdev->bdev,b));
-			if (rdev->size > current_offset)
-			{
-				printk(" contained as device %d\n", c);
+			printk(KERN_INFO "raid0: checking %s ...",
+				bdevname(rdev->bdev, b));
+			if (rdev->size > current_start / 2) {
+				printk(KERN_INFO " contained as device %d\n",
+					c);
 				zone->dev[c] = rdev;
 				c++;
 				if (!smallest || (rdev->size <smallest->size)) {
 					smallest = rdev;
-					printk(" (%llu) is smallest!.\n",
+					printk(KERN_INFO " (%llu) is smallest!.\n",
 						(unsigned long long)rdev->size);
 				}
 			} else
-				printk(" nope.\n");
+				printk(KERN_INFO " nope.\n");
 		}
 
 		zone->nb_dev = c;
-		zone->size = (smallest->size - current_offset) * c;
-		printk("raid0: zone->nb_dev: %d, size: %llu\n",
-			zone->nb_dev, (unsigned long long)zone->size);
+		zone->sectors = (smallest->size * 2 - current_start) * c;
+		printk(KERN_INFO "raid0: zone->nb_dev: %d, sectors: %llu\n",
+			zone->nb_dev, (unsigned long long)zone->sectors);
 
-		zone->zone_offset = curr_zone_offset;
-		curr_zone_offset += zone->size;
+		zone->zone_start = curr_zone_start;
+		curr_zone_start += zone->sectors;
 
-		current_offset = smallest->size;
-		printk("raid0: current zone offset: %llu\n",
-			(unsigned long long)current_offset);
+		current_start = smallest->size * 2;
+		printk(KERN_INFO "raid0: current zone start: %llu\n",
+			(unsigned long long)current_start);
 	}
 
 	/* Now find appropriate hash spacing.
@@ -210,16 +212,16 @@ static int create_strip_zones (mddev_t *mddev)
 	 * strip though as it's size has no bearing on the efficacy of the hash
 	 * table.
 	 */
-	conf->hash_spacing = curr_zone_offset;
-	min_spacing = curr_zone_offset;
+	conf->spacing = curr_zone_start;
+	min_spacing = curr_zone_start;
 	sector_div(min_spacing, PAGE_SIZE/sizeof(struct strip_zone*));
 	for (i=0; i < conf->nr_strip_zones-1; i++) {
-		sector_t sz = 0;
-		for (j=i; j<conf->nr_strip_zones-1 &&
-			sz < min_spacing ; j++)
-			sz += conf->strip_zone[j].size;
-		if (sz >= min_spacing && sz < conf->hash_spacing)
-			conf->hash_spacing = sz;
+		sector_t s = 0;
+		for (j = i; j < conf->nr_strip_zones - 1 &&
+				s < min_spacing; j++)
+			s += conf->strip_zone[j].sectors;
+		if (s >= min_spacing && s < conf->spacing)
+			conf->spacing = s;
 	}
 
 	mddev->queue->unplug_fn = raid0_unplug;
@@ -227,7 +229,7 @@ static int create_strip_zones (mddev_t *mddev)
 	mddev->queue->backing_dev_info.congested_fn = raid0_congested;
 	mddev->queue->backing_dev_info.congested_data = mddev;
 
-	printk("raid0: done.\n");
+	printk(KERN_INFO "raid0: done.\n");
 	return 0;
 abort:
 	return 1;
@@ -262,10 +264,9 @@ static int raid0_mergeable_bvec(struct request_queue *q,
 static int raid0_run (mddev_t *mddev)
 {
 	unsigned cur=0, i=0, nb_zone;
-	s64 size;
+	s64 sectors;
 	raid0_conf_t *conf;
 	mdk_rdev_t *rdev;
-	struct list_head *tmp;
 
 	if (mddev->chunk_size == 0) {
 		printk(KERN_ERR "md/raid0: non-zero chunk size required.\n");
@@ -291,54 +292,54 @@ static int raid0_run (mddev_t *mddev)
 
 	/* calculate array device size */
 	mddev->array_sectors = 0;
-	rdev_for_each(rdev, tmp, mddev)
+	list_for_each_entry(rdev, &mddev->disks, same_set)
 		mddev->array_sectors += rdev->size * 2;
 
-	printk("raid0 : md_size is %llu blocks.\n",
-		(unsigned long long)mddev->array_sectors / 2);
-	printk("raid0 : conf->hash_spacing is %llu blocks.\n",
-		(unsigned long long)conf->hash_spacing);
+	printk(KERN_INFO "raid0 : md_size is %llu sectors.\n",
+		(unsigned long long)mddev->array_sectors);
+	printk(KERN_INFO "raid0 : conf->spacing is %llu sectors.\n",
+		(unsigned long long)conf->spacing);
 	{
-		sector_t s = mddev->array_sectors / 2;
-		sector_t space = conf->hash_spacing;
+		sector_t s = mddev->array_sectors;
+		sector_t space = conf->spacing;
 		int round;
-		conf->preshift = 0;
+		conf->sector_shift = 0;
 		if (sizeof(sector_t) > sizeof(u32)) {
 			/*shift down space and s so that sector_div will work */
 			while (space > (sector_t) (~(u32)0)) {
 				s >>= 1;
 				space >>= 1;
 				s += 1; /* force round-up */
-				conf->preshift++;
+				conf->sector_shift++;
 			}
 		}
 		round = sector_div(s, (u32)space) ? 1 : 0;
 		nb_zone = s + round;
 	}
-	printk("raid0 : nb_zone is %d.\n", nb_zone);
+	printk(KERN_INFO "raid0 : nb_zone is %d.\n", nb_zone);
 
-	printk("raid0 : Allocating %Zd bytes for hash.\n",
+	printk(KERN_INFO "raid0 : Allocating %zu bytes for hash.\n",
 		nb_zone*sizeof(struct strip_zone*));
 	conf->hash_table = kmalloc (sizeof (struct strip_zone *)*nb_zone, GFP_KERNEL);
 	if (!conf->hash_table)
 		goto out_free_conf;
-	size = conf->strip_zone[cur].size;
+	sectors = conf->strip_zone[cur].sectors;
 
 	conf->hash_table[0] = conf->strip_zone + cur;
 	for (i=1; i< nb_zone; i++) {
-		while (size <= conf->hash_spacing) {
+		while (sectors <= conf->spacing) {
 			cur++;
-			size += conf->strip_zone[cur].size;
+			sectors += conf->strip_zone[cur].sectors;
 		}
-		size -= conf->hash_spacing;
+		sectors -= conf->spacing;
 		conf->hash_table[i] = conf->strip_zone + cur;
 	}
-	if (conf->preshift) {
-		conf->hash_spacing >>= conf->preshift;
-		/* round hash_spacing up so when we divide by it, we
+	if (conf->sector_shift) {
+		conf->spacing >>= conf->sector_shift;
+		/* round spacing up so when we divide by it, we
 		 * err on the side of too-low, which is safest
 		 */
-		conf->hash_spacing++;
+		conf->spacing++;
 	}
 
 	/* calculate the max read-ahead size.
@@ -387,12 +388,12 @@ static int raid0_stop (mddev_t *mddev)
 static int raid0_make_request (struct request_queue *q, struct bio *bio)
 {
 	mddev_t *mddev = q->queuedata;
-	unsigned int sect_in_chunk, chunksize_bits, chunk_size, chunk_sects;
+	unsigned int sect_in_chunk, chunksect_bits, chunk_sects;
 	raid0_conf_t *conf = mddev_to_conf(mddev);
 	struct strip_zone *zone;
 	mdk_rdev_t *tmp_dev;
 	sector_t chunk;
-	sector_t block, rsect;
+	sector_t sector, rsect;
 	const int rw = bio_data_dir(bio);
 	int cpu;
 
@@ -407,11 +408,9 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
 		      bio_sectors(bio));
 	part_stat_unlock();
 
-	chunk_size = mddev->chunk_size >> 10;
 	chunk_sects = mddev->chunk_size >> 9;
-	chunksize_bits = ffz(~chunk_size);
-	block = bio->bi_sector >> 1;
-
+	chunksect_bits = ffz(~chunk_sects);
+	sector = bio->bi_sector;
 
 	if (unlikely(chunk_sects < (bio->bi_sector & (chunk_sects - 1)) + (bio->bi_size >> 9))) {
 		struct bio_pair *bp;
@@ -434,28 +433,27 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
 
 
 	{
-		sector_t x = block >> conf->preshift;
-		sector_div(x, (u32)conf->hash_spacing);
+		sector_t x = sector >> conf->sector_shift;
+		sector_div(x, (u32)conf->spacing);
 		zone = conf->hash_table[x];
 	}
 
-	while (block >= (zone->zone_offset + zone->size))
+	while (sector >= zone->zone_start + zone->sectors)
 		zone++;
 
-	sect_in_chunk = bio->bi_sector & ((chunk_size<<1) -1);
+	sect_in_chunk = bio->bi_sector & (chunk_sects - 1);
 
 
 	{
-		sector_t x = (block - zone->zone_offset) >> chunksize_bits;
+		sector_t x = (sector - zone->zone_start) >> chunksect_bits;
 
 		sector_div(x, zone->nb_dev);
 		chunk = x;
 
-		x = block >> chunksize_bits;
+		x = sector >> chunksect_bits;
 		tmp_dev = zone->dev[sector_div(x, zone->nb_dev)];
 	}
-	rsect = (((chunk << chunksize_bits) + zone->dev_offset)<<1)
-		+ sect_in_chunk;
+	rsect = (chunk << chunksect_bits) + zone->dev_start + sect_in_chunk;
 
 	bio->bi_bdev = tmp_dev->bdev;
 	bio->bi_sector = rsect + tmp_dev->data_offset;
@@ -467,7 +465,7 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
 
 bad_map:
 	printk("raid0_make_request bug: can't convert block across chunks"
-	       " or bigger than %dk %llu %d\n", chunk_size,
+	       " or bigger than %dk %llu %d\n", chunk_sects / 2,
 	       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
 
 	bio_io_error(bio);
@@ -492,10 +490,10 @@ static void raid0_status (struct seq_file *seq, mddev_t *mddev)
 			seq_printf(seq, "%s/", bdevname(
 				conf->strip_zone[j].dev[k]->bdev,b));
 
-		seq_printf(seq, "] zo=%d do=%d s=%d\n",
-			conf->strip_zone[j].zone_offset,
-			conf->strip_zone[j].dev_offset,
-			conf->strip_zone[j].size);
+		seq_printf(seq, "] zs=%d ds=%d s=%d\n",
+			conf->strip_zone[j].zone_start,
+			conf->strip_zone[j].dev_start,
+			conf->strip_zone[j].sectors);
 	}
 #endif
 	seq_printf(seq, " %dk chunks", mddev->chunk_size/1024);