author     NeilBrown <neilb@suse.de>  2011-10-11 01:47:53 -0400
committer  NeilBrown <neilb@suse.de>  2011-10-11 01:47:53 -0400
commit     fd01b88c75a718020ff77e7f560d33835e9b58de (patch)
tree       c455d5adefd58f3263dcf265bb8ba2024523b106  /drivers/md/raid0.c
parent     3cb03002000f133f9f97269edefd73611eafc873 (diff)
md: remove typedefs: mddev_t -> struct mddev
Having mddev_t and 'struct mddev_s' is ugly and not preferred

Signed-off-by: NeilBrown <neilb@suse.de>
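For readers unfamiliar with the pattern being removed, a minimal illustrative sketch follows. It is not the kernel's actual declaration (the real struct has many more fields; only members this diff touches, chunk_sectors and private, are shown, and the chunk_kb_* helper names are made up for illustration). It shows what dropping the typedef means for call sites such as raid0_status() in the diff below:

/* Old style: a typedef name hides the struct (illustrative only). */
struct mddev_s {
	unsigned int chunk_sectors;
	void *private;
};
typedef struct mddev_s mddev_t;

static unsigned int chunk_kb_old(mddev_t *mddev)
{
	return mddev->chunk_sectors / 2;	/* sectors -> KiB, cf. raid0_status() */
}

/* New style: the struct tag is used directly and the typedef goes away. */
struct mddev {
	unsigned int chunk_sectors;
	void *private;
};

static unsigned int chunk_kb_new(struct mddev *mddev)
{
	return mddev->chunk_sectors / 2;
}

The mechanical consequence, visible in every hunk of this patch, is that each 'mddev_t *' in a declaration or parameter list becomes 'struct mddev *'.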
Diffstat (limited to 'drivers/md/raid0.c')
-rw-r--r--  drivers/md/raid0.c  |  32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 70fc3d949795..db51e6f68191 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -27,7 +27,7 @@
 
 static int raid0_congested(void *data, int bits)
 {
-	mddev_t *mddev = data;
+	struct mddev *mddev = data;
 	raid0_conf_t *conf = mddev->private;
 	struct md_rdev **devlist = conf->devlist;
 	int raid_disks = conf->strip_zone[0].nb_dev;
@@ -47,7 +47,7 @@ static int raid0_congested(void *data, int bits)
 /*
  * inform the user of the raid configuration
 */
-static void dump_zones(mddev_t *mddev)
+static void dump_zones(struct mddev *mddev)
 {
 	int j, k;
 	sector_t zone_size = 0;
@@ -77,7 +77,7 @@ static void dump_zones(mddev_t *mddev)
 	printk(KERN_INFO "\n");
 }
 
-static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf)
+static int create_strip_zones(struct mddev *mddev, raid0_conf_t **private_conf)
 {
 	int i, c, err;
 	sector_t curr_zone_end, sectors;
@@ -301,7 +301,7 @@ static int raid0_mergeable_bvec(struct request_queue *q,
 				struct bvec_merge_data *bvm,
 				struct bio_vec *biovec)
 {
-	mddev_t *mddev = q->queuedata;
+	struct mddev *mddev = q->queuedata;
 	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
 	int max;
 	unsigned int chunk_sectors = mddev->chunk_sectors;
@@ -320,7 +320,7 @@ static int raid0_mergeable_bvec(struct request_queue *q,
 		return max;
 }
 
-static sector_t raid0_size(mddev_t *mddev, sector_t sectors, int raid_disks)
+static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
 {
 	sector_t array_sectors = 0;
 	struct md_rdev *rdev;
@@ -334,7 +334,7 @@ static sector_t raid0_size(mddev_t *mddev, sector_t sectors, int raid_disks)
 	return array_sectors;
 }
 
-static int raid0_run(mddev_t *mddev)
+static int raid0_run(struct mddev *mddev)
 {
 	raid0_conf_t *conf;
 	int ret;
@@ -384,7 +384,7 @@ static int raid0_run(mddev_t *mddev)
 	return md_integrity_register(mddev);
 }
 
-static int raid0_stop(mddev_t *mddev)
+static int raid0_stop(struct mddev *mddev)
 {
 	raid0_conf_t *conf = mddev->private;
 
@@ -419,7 +419,7 @@ static struct strip_zone *find_zone(struct raid0_private_data *conf,
  * remaps the bio to the target device. we separate two flows.
  * power 2 flow and a general flow for the sake of perfromance
 */
-static struct md_rdev *map_sector(mddev_t *mddev, struct strip_zone *zone,
+static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
 				sector_t sector, sector_t *sector_offset)
 {
 	unsigned int sect_in_chunk;
@@ -455,7 +455,7 @@ static struct md_rdev *map_sector(mddev_t *mddev, struct strip_zone *zone,
 /*
  * Is io distribute over 1 or more chunks ?
 */
-static inline int is_io_in_chunk_boundary(mddev_t *mddev,
+static inline int is_io_in_chunk_boundary(struct mddev *mddev,
 			unsigned int chunk_sects, struct bio *bio)
 {
 	if (likely(is_power_of_2(chunk_sects))) {
@@ -468,7 +468,7 @@ static inline int is_io_in_chunk_boundary(mddev_t *mddev,
 	}
 }
 
-static int raid0_make_request(mddev_t *mddev, struct bio *bio)
+static int raid0_make_request(struct mddev *mddev, struct bio *bio)
 {
 	unsigned int chunk_sects;
 	sector_t sector_offset;
@@ -528,13 +528,13 @@ bad_map:
 	return 0;
 }
 
-static void raid0_status(struct seq_file *seq, mddev_t *mddev)
+static void raid0_status(struct seq_file *seq, struct mddev *mddev)
 {
 	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
 	return;
 }
 
-static void *raid0_takeover_raid45(mddev_t *mddev)
+static void *raid0_takeover_raid45(struct mddev *mddev)
 {
 	struct md_rdev *rdev;
 	raid0_conf_t *priv_conf;
@@ -568,7 +568,7 @@ static void *raid0_takeover_raid45(mddev_t *mddev)
 	return priv_conf;
 }
 
-static void *raid0_takeover_raid10(mddev_t *mddev)
+static void *raid0_takeover_raid10(struct mddev *mddev)
 {
 	raid0_conf_t *priv_conf;
 
@@ -609,7 +609,7 @@ static void *raid0_takeover_raid10(mddev_t *mddev)
 	return priv_conf;
 }
 
-static void *raid0_takeover_raid1(mddev_t *mddev)
+static void *raid0_takeover_raid1(struct mddev *mddev)
 {
 	raid0_conf_t *priv_conf;
 
@@ -635,7 +635,7 @@ static void *raid0_takeover_raid1(mddev_t *mddev)
 	return priv_conf;
 }
 
-static void *raid0_takeover(mddev_t *mddev)
+static void *raid0_takeover(struct mddev *mddev)
 {
 	/* raid0 can take over:
 	 *  raid4 - if all data disks are active.
@@ -666,7 +666,7 @@ static void *raid0_takeover(mddev_t *mddev)
 	return ERR_PTR(-EINVAL);
 }
 
-static void raid0_quiesce(mddev_t *mddev, int state)
+static void raid0_quiesce(struct mddev *mddev, int state)
 {
 }
 