Diffstat (limited to 'drivers/md/raid0.c')
-rw-r--r--   drivers/md/raid0.c | 191
1 file changed, 83 insertions(+), 108 deletions(-)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index e86bf3682e1e..0eb08a4df759 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -27,9 +27,9 @@
 
 static int raid0_congested(void *data, int bits)
 {
-	mddev_t *mddev = data;
-	raid0_conf_t *conf = mddev->private;
-	mdk_rdev_t **devlist = conf->devlist;
+	struct mddev *mddev = data;
+	struct r0conf *conf = mddev->private;
+	struct md_rdev **devlist = conf->devlist;
 	int raid_disks = conf->strip_zone[0].nb_dev;
 	int i, ret = 0;
 
@@ -47,52 +47,53 @@ static int raid0_congested(void *data, int bits)
 /*
  * inform the user of the raid configuration
  */
-static void dump_zones(mddev_t *mddev)
+static void dump_zones(struct mddev *mddev)
 {
-	int j, k, h;
+	int j, k;
 	sector_t zone_size = 0;
 	sector_t zone_start = 0;
 	char b[BDEVNAME_SIZE];
-	raid0_conf_t *conf = mddev->private;
+	struct r0conf *conf = mddev->private;
 	int raid_disks = conf->strip_zone[0].nb_dev;
-	printk(KERN_INFO "******* %s configuration *********\n",
-		mdname(mddev));
-	h = 0;
+	printk(KERN_INFO "md: RAID0 configuration for %s - %d zone%s\n",
+	       mdname(mddev),
+	       conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
 	for (j = 0; j < conf->nr_strip_zones; j++) {
-		printk(KERN_INFO "zone%d=[", j);
+		printk(KERN_INFO "md: zone%d=[", j);
 		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
-			printk(KERN_CONT "%s/",
+			printk(KERN_CONT "%s%s", k?"/":"",
 			       bdevname(conf->devlist[j*raid_disks
 						+ k]->bdev, b));
 		printk(KERN_CONT "]\n");
 
 		zone_size = conf->strip_zone[j].zone_end - zone_start;
-		printk(KERN_INFO " zone offset=%llukb "
-			"device offset=%llukb size=%llukb\n",
+		printk(KERN_INFO " zone-offset=%10lluKB, "
+				"device-offset=%10lluKB, size=%10lluKB\n",
 			(unsigned long long)zone_start>>1,
 			(unsigned long long)conf->strip_zone[j].dev_start>>1,
 			(unsigned long long)zone_size>>1);
 		zone_start = conf->strip_zone[j].zone_end;
 	}
-	printk(KERN_INFO "**********************************\n\n");
+	printk(KERN_INFO "\n");
 }
 
-static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf)
+static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
 {
 	int i, c, err;
 	sector_t curr_zone_end, sectors;
-	mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev, **dev;
+	struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
 	struct strip_zone *zone;
 	int cnt;
 	char b[BDEVNAME_SIZE];
-	raid0_conf_t *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
+	char b2[BDEVNAME_SIZE];
+	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
 
 	if (!conf)
 		return -ENOMEM;
 	list_for_each_entry(rdev1, &mddev->disks, same_set) {
-		printk(KERN_INFO "md/raid0:%s: looking at %s\n",
+		pr_debug("md/raid0:%s: looking at %s\n",
 			mdname(mddev),
 			bdevname(rdev1->bdev, b));
 		c = 0;
 
 		/* round size to chunk_size */
@@ -101,16 +102,16 @@ static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf)
 		rdev1->sectors = sectors * mddev->chunk_sectors;
 
 		list_for_each_entry(rdev2, &mddev->disks, same_set) {
-			printk(KERN_INFO "md/raid0:%s: comparing %s(%llu)",
-				mdname(mddev),
-				bdevname(rdev1->bdev,b),
-				(unsigned long long)rdev1->sectors);
-			printk(KERN_CONT " with %s(%llu)\n",
-				bdevname(rdev2->bdev,b),
+			pr_debug("md/raid0:%s: comparing %s(%llu)"
+				 " with %s(%llu)\n",
+				 mdname(mddev),
+				 bdevname(rdev1->bdev,b),
+				 (unsigned long long)rdev1->sectors,
+				 bdevname(rdev2->bdev,b2),
 				(unsigned long long)rdev2->sectors);
 			if (rdev2 == rdev1) {
-				printk(KERN_INFO "md/raid0:%s: END\n",
+				pr_debug("md/raid0:%s: END\n",
 					mdname(mddev));
 				break;
 			}
 			if (rdev2->sectors == rdev1->sectors) {
@@ -118,30 +119,30 @@ static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf)
 				 * Not unique, don't count it as a new
 				 * group
 				 */
-				printk(KERN_INFO "md/raid0:%s: EQUAL\n",
+				pr_debug("md/raid0:%s: EQUAL\n",
 					mdname(mddev));
 				c = 1;
 				break;
 			}
-			printk(KERN_INFO "md/raid0:%s: NOT EQUAL\n",
+			pr_debug("md/raid0:%s: NOT EQUAL\n",
 				mdname(mddev));
 		}
 		if (!c) {
-			printk(KERN_INFO "md/raid0:%s: ==> UNIQUE\n",
+			pr_debug("md/raid0:%s: ==> UNIQUE\n",
 				mdname(mddev));
 			conf->nr_strip_zones++;
-			printk(KERN_INFO "md/raid0:%s: %d zones\n",
+			pr_debug("md/raid0:%s: %d zones\n",
 				mdname(mddev), conf->nr_strip_zones);
 		}
 	}
-	printk(KERN_INFO "md/raid0:%s: FINAL %d zones\n",
+	pr_debug("md/raid0:%s: FINAL %d zones\n",
 		mdname(mddev), conf->nr_strip_zones);
 	err = -ENOMEM;
 	conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
 				conf->nr_strip_zones, GFP_KERNEL);
 	if (!conf->strip_zone)
 		goto abort;
-	conf->devlist = kzalloc(sizeof(mdk_rdev_t*)*
+	conf->devlist = kzalloc(sizeof(struct md_rdev*)*
 				conf->nr_strip_zones*mddev->raid_disks,
 				GFP_KERNEL);
 	if (!conf->devlist)
@@ -218,44 +219,45 @@ static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf)
 		zone = conf->strip_zone + i;
 		dev = conf->devlist + i * mddev->raid_disks;
 
-		printk(KERN_INFO "md/raid0:%s: zone %d\n",
-			mdname(mddev), i);
+		pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
 		zone->dev_start = smallest->sectors;
 		smallest = NULL;
 		c = 0;
 
 		for (j=0; j<cnt; j++) {
 			rdev = conf->devlist[j];
-			printk(KERN_INFO "md/raid0:%s: checking %s ...",
-				mdname(mddev),
-				bdevname(rdev->bdev, b));
 			if (rdev->sectors <= zone->dev_start) {
-				printk(KERN_CONT " nope.\n");
+				pr_debug("md/raid0:%s: checking %s ... nope\n",
+					 mdname(mddev),
+					 bdevname(rdev->bdev, b));
 				continue;
 			}
-			printk(KERN_CONT " contained as device %d\n", c);
+			pr_debug("md/raid0:%s: checking %s ..."
+				 " contained as device %d\n",
+				 mdname(mddev),
+				 bdevname(rdev->bdev, b), c);
 			dev[c] = rdev;
 			c++;
 			if (!smallest || rdev->sectors < smallest->sectors) {
 				smallest = rdev;
-				printk(KERN_INFO "md/raid0:%s: (%llu) is smallest!.\n",
+				pr_debug("md/raid0:%s: (%llu) is smallest!.\n",
 					mdname(mddev),
 					(unsigned long long)rdev->sectors);
 			}
 		}
 
 		zone->nb_dev = c;
 		sectors = (smallest->sectors - zone->dev_start) * c;
-		printk(KERN_INFO "md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
+		pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
 			mdname(mddev),
 			zone->nb_dev, (unsigned long long)sectors);
 
 		curr_zone_end += sectors;
 		zone->zone_end = curr_zone_end;
 
-		printk(KERN_INFO "md/raid0:%s: current zone start: %llu\n",
+		pr_debug("md/raid0:%s: current zone start: %llu\n",
 			mdname(mddev),
 			(unsigned long long)smallest->sectors);
 	}
 	mddev->queue->backing_dev_info.congested_fn = raid0_congested;
 	mddev->queue->backing_dev_info.congested_data = mddev;
@@ -275,7 +277,7 @@ static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf)
 	blk_queue_io_opt(mddev->queue,
 			 (mddev->chunk_sectors << 9) * mddev->raid_disks);
 
-	printk(KERN_INFO "md/raid0:%s: done.\n", mdname(mddev));
+	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
 	*private_conf = conf;
 
 	return 0;
@@ -299,7 +301,7 @@ static int raid0_mergeable_bvec(struct request_queue *q,
 				struct bvec_merge_data *bvm,
 				struct bio_vec *biovec)
 {
-	mddev_t *mddev = q->queuedata;
+	struct mddev *mddev = q->queuedata;
 	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
 	int max;
 	unsigned int chunk_sectors = mddev->chunk_sectors;
@@ -318,10 +320,10 @@ static int raid0_mergeable_bvec(struct request_queue *q,
 		return max;
 }
 
-static sector_t raid0_size(mddev_t *mddev, sector_t sectors, int raid_disks)
+static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
 {
 	sector_t array_sectors = 0;
-	mdk_rdev_t *rdev;
+	struct md_rdev *rdev;
 
 	WARN_ONCE(sectors || raid_disks,
 		  "%s does not support generic reshape\n", __func__);
@@ -332,9 +334,9 @@ static sector_t raid0_size(mddev_t *mddev, sector_t sectors, int raid_disks)
 	return array_sectors;
 }
 
-static int raid0_run(mddev_t *mddev)
+static int raid0_run(struct mddev *mddev)
 {
-	raid0_conf_t *conf;
+	struct r0conf *conf;
 	int ret;
 
 	if (mddev->chunk_sectors == 0) {
@@ -382,9 +384,9 @@ static int raid0_run(mddev_t *mddev)
 	return md_integrity_register(mddev);
 }
 
-static int raid0_stop(mddev_t *mddev)
+static int raid0_stop(struct mddev *mddev)
 {
-	raid0_conf_t *conf = mddev->private;
+	struct r0conf *conf = mddev->private;
 
 	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
 	kfree(conf->strip_zone);
@@ -397,7 +399,7 @@ static int raid0_stop(mddev_t *mddev)
 /* Find the zone which holds a particular offset
  * Update *sectorp to be an offset in that zone
  */
-static struct strip_zone *find_zone(struct raid0_private_data *conf,
+static struct strip_zone *find_zone(struct r0conf *conf,
 				    sector_t *sectorp)
 {
 	int i;
@@ -417,12 +419,12 @@ static struct strip_zone *find_zone(struct raid0_private_data *conf,
  * remaps the bio to the target device. we separate two flows.
  * power 2 flow and a general flow for the sake of perfromance
  */
-static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone,
+static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
 				sector_t sector, sector_t *sector_offset)
 {
 	unsigned int sect_in_chunk;
 	sector_t chunk;
-	raid0_conf_t *conf = mddev->private;
+	struct r0conf *conf = mddev->private;
 	int raid_disks = conf->strip_zone[0].nb_dev;
 	unsigned int chunk_sects = mddev->chunk_sectors;
 
@@ -453,7 +455,7 @@ static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone,
 /*
  * Is io distribute over 1 or more chunks ?
  */
-static inline int is_io_in_chunk_boundary(mddev_t *mddev,
+static inline int is_io_in_chunk_boundary(struct mddev *mddev,
 			unsigned int chunk_sects, struct bio *bio)
 {
 	if (likely(is_power_of_2(chunk_sects))) {
@@ -466,12 +468,12 @@ static inline int is_io_in_chunk_boundary(mddev_t *mddev,
 	}
 }
 
-static int raid0_make_request(mddev_t *mddev, struct bio *bio)
+static int raid0_make_request(struct mddev *mddev, struct bio *bio)
 {
 	unsigned int chunk_sects;
 	sector_t sector_offset;
 	struct strip_zone *zone;
-	mdk_rdev_t *tmp_dev;
+	struct md_rdev *tmp_dev;
 
 	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
 		md_flush_request(mddev, bio);
@@ -526,43 +528,16 @@ bad_map:
 	return 0;
 }
 
-static void raid0_status(struct seq_file *seq, mddev_t *mddev)
+static void raid0_status(struct seq_file *seq, struct mddev *mddev)
 {
-#undef MD_DEBUG
-#ifdef MD_DEBUG
-	int j, k, h;
-	char b[BDEVNAME_SIZE];
-	raid0_conf_t *conf = mddev->private;
-	int raid_disks = conf->strip_zone[0].nb_dev;
-
-	sector_t zone_size;
-	sector_t zone_start = 0;
-	h = 0;
-
-	for (j = 0; j < conf->nr_strip_zones; j++) {
-		seq_printf(seq, " z%d", j);
-		seq_printf(seq, "=[");
-		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
-			seq_printf(seq, "%s/", bdevname(
-				conf->devlist[j*raid_disks + k]
-						->bdev, b));
-
-		zone_size = conf->strip_zone[j].zone_end - zone_start;
-		seq_printf(seq, "] ze=%lld ds=%lld s=%lld\n",
-			(unsigned long long)zone_start>>1,
-			(unsigned long long)conf->strip_zone[j].dev_start>>1,
-			(unsigned long long)zone_size>>1);
-		zone_start = conf->strip_zone[j].zone_end;
-	}
-#endif
 	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
 	return;
 }
 
-static void *raid0_takeover_raid45(mddev_t *mddev)
+static void *raid0_takeover_raid45(struct mddev *mddev)
 {
-	mdk_rdev_t *rdev;
-	raid0_conf_t *priv_conf;
+	struct md_rdev *rdev;
+	struct r0conf *priv_conf;
 
 	if (mddev->degraded != 1) {
 		printk(KERN_ERR "md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
@@ -593,9 +568,9 @@ static void *raid0_takeover_raid45(mddev_t *mddev)
 	return priv_conf;
 }
 
-static void *raid0_takeover_raid10(mddev_t *mddev)
+static void *raid0_takeover_raid10(struct mddev *mddev)
 {
-	raid0_conf_t *priv_conf;
+	struct r0conf *priv_conf;
 
 	/* Check layout:
 	 * - far_copies must be 1
@@ -634,9 +609,9 @@ static void *raid0_takeover_raid10(mddev_t *mddev)
 	return priv_conf;
 }
 
-static void *raid0_takeover_raid1(mddev_t *mddev)
+static void *raid0_takeover_raid1(struct mddev *mddev)
 {
-	raid0_conf_t *priv_conf;
+	struct r0conf *priv_conf;
 
 	/* Check layout:
 	 * - (N - 1) mirror drives must be already faulty
@@ -660,7 +635,7 @@ static void *raid0_takeover_raid1(mddev_t *mddev)
 	return priv_conf;
 }
 
-static void *raid0_takeover(mddev_t *mddev)
+static void *raid0_takeover(struct mddev *mddev)
 {
 	/* raid0 can take over:
 	 * raid4 - if all data disks are active.
@@ -691,11 +666,11 @@ static void *raid0_takeover(mddev_t *mddev)
 	return ERR_PTR(-EINVAL);
 }
 
-static void raid0_quiesce(mddev_t *mddev, int state)
+static void raid0_quiesce(struct mddev *mddev, int state)
 {
 }
 
-static struct mdk_personality raid0_personality=
+static struct md_personality raid0_personality=
 {
 	.name		= "raid0",
 	.level		= 0,