diff options
author | NeilBrown <neilb@suse.de> | 2011-10-11 01:45:26 -0400 |
---|---|---|
committer | NeilBrown <neilb@suse.de> | 2011-10-11 01:45:26 -0400 |
commit | 3cb03002000f133f9f97269edefd73611eafc873 (patch) | |
tree | 77fdb146666298b33aaeef0c05f082b6f82840fe /drivers | |
parent | 50de8df4abca1b27dbf7b2f81a56451bd8b5a7d8 (diff) |
md: removing typedefs: mdk_rdev_t -> struct md_rdev
The typedefs are just annoying. 'mdk' probably refers to 'md_k.h'
which used to be an include file that defined this thing.
Signed-off-by: NeilBrown <neilb@suse.de>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/md/bitmap.c | 8 | ||||
-rw-r--r-- | drivers/md/dm-raid.c | 18 | ||||
-rw-r--r-- | drivers/md/faulty.c | 4 | ||||
-rw-r--r-- | drivers/md/linear.c | 4 | ||||
-rw-r--r-- | drivers/md/linear.h | 2 | ||||
-rw-r--r-- | drivers/md/md.c | 212 | ||||
-rw-r--r-- | drivers/md/md.h | 34 | ||||
-rw-r--r-- | drivers/md/multipath.c | 14 | ||||
-rw-r--r-- | drivers/md/multipath.h | 2 | ||||
-rw-r--r-- | drivers/md/raid0.c | 14 | ||||
-rw-r--r-- | drivers/md/raid0.h | 2 | ||||
-rw-r--r-- | drivers/md/raid1.c | 46 | ||||
-rw-r--r-- | drivers/md/raid1.h | 2 | ||||
-rw-r--r-- | drivers/md/raid10.c | 36 | ||||
-rw-r--r-- | drivers/md/raid10.h | 2 | ||||
-rw-r--r-- | drivers/md/raid5.c | 34 | ||||
-rw-r--r-- | drivers/md/raid5.h | 4 |
17 files changed, 219 insertions(+), 219 deletions(-)
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index cd722b83a0c9..70e8736e3009 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c | |||
@@ -185,7 +185,7 @@ static struct page *read_sb_page(mddev_t *mddev, loff_t offset, | |||
185 | { | 185 | { |
186 | /* choose a good rdev and read the page from there */ | 186 | /* choose a good rdev and read the page from there */ |
187 | 187 | ||
188 | mdk_rdev_t *rdev; | 188 | struct md_rdev *rdev; |
189 | sector_t target; | 189 | sector_t target; |
190 | int did_alloc = 0; | 190 | int did_alloc = 0; |
191 | 191 | ||
@@ -218,7 +218,7 @@ static struct page *read_sb_page(mddev_t *mddev, loff_t offset, | |||
218 | 218 | ||
219 | } | 219 | } |
220 | 220 | ||
221 | static mdk_rdev_t *next_active_rdev(mdk_rdev_t *rdev, mddev_t *mddev) | 221 | static struct md_rdev *next_active_rdev(struct md_rdev *rdev, mddev_t *mddev) |
222 | { | 222 | { |
223 | /* Iterate the disks of an mddev, using rcu to protect access to the | 223 | /* Iterate the disks of an mddev, using rcu to protect access to the |
224 | * linked list, and raising the refcount of devices we return to ensure | 224 | * linked list, and raising the refcount of devices we return to ensure |
@@ -239,7 +239,7 @@ static mdk_rdev_t *next_active_rdev(mdk_rdev_t *rdev, mddev_t *mddev) | |||
239 | pos = &rdev->same_set; | 239 | pos = &rdev->same_set; |
240 | } | 240 | } |
241 | list_for_each_continue_rcu(pos, &mddev->disks) { | 241 | list_for_each_continue_rcu(pos, &mddev->disks) { |
242 | rdev = list_entry(pos, mdk_rdev_t, same_set); | 242 | rdev = list_entry(pos, struct md_rdev, same_set); |
243 | if (rdev->raid_disk >= 0 && | 243 | if (rdev->raid_disk >= 0 && |
244 | !test_bit(Faulty, &rdev->flags)) { | 244 | !test_bit(Faulty, &rdev->flags)) { |
245 | /* this is a usable devices */ | 245 | /* this is a usable devices */ |
@@ -254,7 +254,7 @@ static mdk_rdev_t *next_active_rdev(mdk_rdev_t *rdev, mddev_t *mddev) | |||
254 | 254 | ||
255 | static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait) | 255 | static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait) |
256 | { | 256 | { |
257 | mdk_rdev_t *rdev = NULL; | 257 | struct md_rdev *rdev = NULL; |
258 | struct block_device *bdev; | 258 | struct block_device *bdev; |
259 | mddev_t *mddev = bitmap->mddev; | 259 | mddev_t *mddev = bitmap->mddev; |
260 | 260 | ||
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index a002dd85db1e..a2213d5808f4 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c | |||
@@ -37,7 +37,7 @@ struct raid_dev { | |||
37 | */ | 37 | */ |
38 | struct dm_dev *meta_dev; | 38 | struct dm_dev *meta_dev; |
39 | struct dm_dev *data_dev; | 39 | struct dm_dev *data_dev; |
40 | struct mdk_rdev_s rdev; | 40 | struct md_rdev rdev; |
41 | }; | 41 | }; |
42 | 42 | ||
43 | /* | 43 | /* |
@@ -594,7 +594,7 @@ struct dm_raid_superblock { | |||
594 | /* Always set to 0 when writing. */ | 594 | /* Always set to 0 when writing. */ |
595 | } __packed; | 595 | } __packed; |
596 | 596 | ||
597 | static int read_disk_sb(mdk_rdev_t *rdev, int size) | 597 | static int read_disk_sb(struct md_rdev *rdev, int size) |
598 | { | 598 | { |
599 | BUG_ON(!rdev->sb_page); | 599 | BUG_ON(!rdev->sb_page); |
600 | 600 | ||
@@ -611,9 +611,9 @@ static int read_disk_sb(mdk_rdev_t *rdev, int size) | |||
611 | return 0; | 611 | return 0; |
612 | } | 612 | } |
613 | 613 | ||
614 | static void super_sync(mddev_t *mddev, mdk_rdev_t *rdev) | 614 | static void super_sync(mddev_t *mddev, struct md_rdev *rdev) |
615 | { | 615 | { |
616 | mdk_rdev_t *r, *t; | 616 | struct md_rdev *r, *t; |
617 | uint64_t failed_devices; | 617 | uint64_t failed_devices; |
618 | struct dm_raid_superblock *sb; | 618 | struct dm_raid_superblock *sb; |
619 | 619 | ||
@@ -651,7 +651,7 @@ static void super_sync(mddev_t *mddev, mdk_rdev_t *rdev) | |||
651 | * | 651 | * |
652 | * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise | 652 | * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise |
653 | */ | 653 | */ |
654 | static int super_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev) | 654 | static int super_load(struct md_rdev *rdev, struct md_rdev *refdev) |
655 | { | 655 | { |
656 | int ret; | 656 | int ret; |
657 | struct dm_raid_superblock *sb; | 657 | struct dm_raid_superblock *sb; |
@@ -689,7 +689,7 @@ static int super_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev) | |||
689 | return (events_sb > events_refsb) ? 1 : 0; | 689 | return (events_sb > events_refsb) ? 1 : 0; |
690 | } | 690 | } |
691 | 691 | ||
692 | static int super_init_validation(mddev_t *mddev, mdk_rdev_t *rdev) | 692 | static int super_init_validation(mddev_t *mddev, struct md_rdev *rdev) |
693 | { | 693 | { |
694 | int role; | 694 | int role; |
695 | struct raid_set *rs = container_of(mddev, struct raid_set, md); | 695 | struct raid_set *rs = container_of(mddev, struct raid_set, md); |
@@ -698,7 +698,7 @@ static int super_init_validation(mddev_t *mddev, mdk_rdev_t *rdev) | |||
698 | struct dm_raid_superblock *sb; | 698 | struct dm_raid_superblock *sb; |
699 | uint32_t new_devs = 0; | 699 | uint32_t new_devs = 0; |
700 | uint32_t rebuilds = 0; | 700 | uint32_t rebuilds = 0; |
701 | mdk_rdev_t *r, *t; | 701 | struct md_rdev *r, *t; |
702 | struct dm_raid_superblock *sb2; | 702 | struct dm_raid_superblock *sb2; |
703 | 703 | ||
704 | sb = page_address(rdev->sb_page); | 704 | sb = page_address(rdev->sb_page); |
@@ -809,7 +809,7 @@ static int super_init_validation(mddev_t *mddev, mdk_rdev_t *rdev) | |||
809 | return 0; | 809 | return 0; |
810 | } | 810 | } |
811 | 811 | ||
812 | static int super_validate(mddev_t *mddev, mdk_rdev_t *rdev) | 812 | static int super_validate(mddev_t *mddev, struct md_rdev *rdev) |
813 | { | 813 | { |
814 | struct dm_raid_superblock *sb = page_address(rdev->sb_page); | 814 | struct dm_raid_superblock *sb = page_address(rdev->sb_page); |
815 | 815 | ||
@@ -849,7 +849,7 @@ static int super_validate(mddev_t *mddev, mdk_rdev_t *rdev) | |||
849 | static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) | 849 | static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) |
850 | { | 850 | { |
851 | int ret; | 851 | int ret; |
852 | mdk_rdev_t *rdev, *freshest, *tmp; | 852 | struct md_rdev *rdev, *freshest, *tmp; |
853 | mddev_t *mddev = &rs->md; | 853 | mddev_t *mddev = &rs->md; |
854 | 854 | ||
855 | freshest = NULL; | 855 | freshest = NULL; |
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c index 23078dabb6df..a0fa5a01ee2f 100644 --- a/drivers/md/faulty.c +++ b/drivers/md/faulty.c | |||
@@ -87,7 +87,7 @@ typedef struct faulty_conf { | |||
87 | sector_t faults[MaxFault]; | 87 | sector_t faults[MaxFault]; |
88 | int modes[MaxFault]; | 88 | int modes[MaxFault]; |
89 | int nfaults; | 89 | int nfaults; |
90 | mdk_rdev_t *rdev; | 90 | struct md_rdev *rdev; |
91 | } conf_t; | 91 | } conf_t; |
92 | 92 | ||
93 | static int check_mode(conf_t *conf, int mode) | 93 | static int check_mode(conf_t *conf, int mode) |
@@ -297,7 +297,7 @@ static sector_t faulty_size(mddev_t *mddev, sector_t sectors, int raid_disks) | |||
297 | 297 | ||
298 | static int run(mddev_t *mddev) | 298 | static int run(mddev_t *mddev) |
299 | { | 299 | { |
300 | mdk_rdev_t *rdev; | 300 | struct md_rdev *rdev; |
301 | int i; | 301 | int i; |
302 | conf_t *conf; | 302 | conf_t *conf; |
303 | 303 | ||
diff --git a/drivers/md/linear.c b/drivers/md/linear.c index 6cd2c313e800..0b5cac0fda1a 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c | |||
@@ -126,7 +126,7 @@ static sector_t linear_size(mddev_t *mddev, sector_t sectors, int raid_disks) | |||
126 | static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks) | 126 | static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks) |
127 | { | 127 | { |
128 | linear_conf_t *conf; | 128 | linear_conf_t *conf; |
129 | mdk_rdev_t *rdev; | 129 | struct md_rdev *rdev; |
130 | int i, cnt; | 130 | int i, cnt; |
131 | 131 | ||
132 | conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(dev_info_t), | 132 | conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(dev_info_t), |
@@ -213,7 +213,7 @@ static int linear_run (mddev_t *mddev) | |||
213 | return md_integrity_register(mddev); | 213 | return md_integrity_register(mddev); |
214 | } | 214 | } |
215 | 215 | ||
216 | static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev) | 216 | static int linear_add(mddev_t *mddev, struct md_rdev *rdev) |
217 | { | 217 | { |
218 | /* Adding a drive to a linear array allows the array to grow. | 218 | /* Adding a drive to a linear array allows the array to grow. |
219 | * It is permitted if the new drive has a matching superblock | 219 | * It is permitted if the new drive has a matching superblock |
diff --git a/drivers/md/linear.h b/drivers/md/linear.h index 2f2da05b2ce9..367967a53009 100644 --- a/drivers/md/linear.h +++ b/drivers/md/linear.h | |||
@@ -2,7 +2,7 @@ | |||
2 | #define _LINEAR_H | 2 | #define _LINEAR_H |
3 | 3 | ||
4 | struct dev_info { | 4 | struct dev_info { |
5 | mdk_rdev_t *rdev; | 5 | struct md_rdev *rdev; |
6 | sector_t end_sector; | 6 | sector_t end_sector; |
7 | }; | 7 | }; |
8 | 8 | ||
diff --git a/drivers/md/md.c b/drivers/md/md.c index ca4c283cf462..e015f403d69e 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -420,7 +420,7 @@ EXPORT_SYMBOL(mddev_congested); | |||
420 | 420 | ||
421 | static void md_end_flush(struct bio *bio, int err) | 421 | static void md_end_flush(struct bio *bio, int err) |
422 | { | 422 | { |
423 | mdk_rdev_t *rdev = bio->bi_private; | 423 | struct md_rdev *rdev = bio->bi_private; |
424 | mddev_t *mddev = rdev->mddev; | 424 | mddev_t *mddev = rdev->mddev; |
425 | 425 | ||
426 | rdev_dec_pending(rdev, mddev); | 426 | rdev_dec_pending(rdev, mddev); |
@@ -437,7 +437,7 @@ static void md_submit_flush_data(struct work_struct *ws); | |||
437 | static void submit_flushes(struct work_struct *ws) | 437 | static void submit_flushes(struct work_struct *ws) |
438 | { | 438 | { |
439 | mddev_t *mddev = container_of(ws, mddev_t, flush_work); | 439 | mddev_t *mddev = container_of(ws, mddev_t, flush_work); |
440 | mdk_rdev_t *rdev; | 440 | struct md_rdev *rdev; |
441 | 441 | ||
442 | INIT_WORK(&mddev->flush_work, md_submit_flush_data); | 442 | INIT_WORK(&mddev->flush_work, md_submit_flush_data); |
443 | atomic_set(&mddev->flush_pending, 1); | 443 | atomic_set(&mddev->flush_pending, 1); |
@@ -749,9 +749,9 @@ static void mddev_unlock(mddev_t * mddev) | |||
749 | spin_unlock(&pers_lock); | 749 | spin_unlock(&pers_lock); |
750 | } | 750 | } |
751 | 751 | ||
752 | static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr) | 752 | static struct md_rdev * find_rdev_nr(mddev_t *mddev, int nr) |
753 | { | 753 | { |
754 | mdk_rdev_t *rdev; | 754 | struct md_rdev *rdev; |
755 | 755 | ||
756 | list_for_each_entry(rdev, &mddev->disks, same_set) | 756 | list_for_each_entry(rdev, &mddev->disks, same_set) |
757 | if (rdev->desc_nr == nr) | 757 | if (rdev->desc_nr == nr) |
@@ -760,9 +760,9 @@ static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr) | |||
760 | return NULL; | 760 | return NULL; |
761 | } | 761 | } |
762 | 762 | ||
763 | static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev) | 763 | static struct md_rdev * find_rdev(mddev_t * mddev, dev_t dev) |
764 | { | 764 | { |
765 | mdk_rdev_t *rdev; | 765 | struct md_rdev *rdev; |
766 | 766 | ||
767 | list_for_each_entry(rdev, &mddev->disks, same_set) | 767 | list_for_each_entry(rdev, &mddev->disks, same_set) |
768 | if (rdev->bdev->bd_dev == dev) | 768 | if (rdev->bdev->bd_dev == dev) |
@@ -784,13 +784,13 @@ static struct mdk_personality *find_pers(int level, char *clevel) | |||
784 | } | 784 | } |
785 | 785 | ||
786 | /* return the offset of the super block in 512byte sectors */ | 786 | /* return the offset of the super block in 512byte sectors */ |
787 | static inline sector_t calc_dev_sboffset(mdk_rdev_t *rdev) | 787 | static inline sector_t calc_dev_sboffset(struct md_rdev *rdev) |
788 | { | 788 | { |
789 | sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512; | 789 | sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512; |
790 | return MD_NEW_SIZE_SECTORS(num_sectors); | 790 | return MD_NEW_SIZE_SECTORS(num_sectors); |
791 | } | 791 | } |
792 | 792 | ||
793 | static int alloc_disk_sb(mdk_rdev_t * rdev) | 793 | static int alloc_disk_sb(struct md_rdev * rdev) |
794 | { | 794 | { |
795 | if (rdev->sb_page) | 795 | if (rdev->sb_page) |
796 | MD_BUG(); | 796 | MD_BUG(); |
@@ -804,7 +804,7 @@ static int alloc_disk_sb(mdk_rdev_t * rdev) | |||
804 | return 0; | 804 | return 0; |
805 | } | 805 | } |
806 | 806 | ||
807 | static void free_disk_sb(mdk_rdev_t * rdev) | 807 | static void free_disk_sb(struct md_rdev * rdev) |
808 | { | 808 | { |
809 | if (rdev->sb_page) { | 809 | if (rdev->sb_page) { |
810 | put_page(rdev->sb_page); | 810 | put_page(rdev->sb_page); |
@@ -822,7 +822,7 @@ static void free_disk_sb(mdk_rdev_t * rdev) | |||
822 | 822 | ||
823 | static void super_written(struct bio *bio, int error) | 823 | static void super_written(struct bio *bio, int error) |
824 | { | 824 | { |
825 | mdk_rdev_t *rdev = bio->bi_private; | 825 | struct md_rdev *rdev = bio->bi_private; |
826 | mddev_t *mddev = rdev->mddev; | 826 | mddev_t *mddev = rdev->mddev; |
827 | 827 | ||
828 | if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) { | 828 | if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) { |
@@ -837,7 +837,7 @@ static void super_written(struct bio *bio, int error) | |||
837 | bio_put(bio); | 837 | bio_put(bio); |
838 | } | 838 | } |
839 | 839 | ||
840 | void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, | 840 | void md_super_write(mddev_t *mddev, struct md_rdev *rdev, |
841 | sector_t sector, int size, struct page *page) | 841 | sector_t sector, int size, struct page *page) |
842 | { | 842 | { |
843 | /* write first size bytes of page to sector of rdev | 843 | /* write first size bytes of page to sector of rdev |
@@ -876,7 +876,7 @@ static void bi_complete(struct bio *bio, int error) | |||
876 | complete((struct completion*)bio->bi_private); | 876 | complete((struct completion*)bio->bi_private); |
877 | } | 877 | } |
878 | 878 | ||
879 | int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size, | 879 | int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, |
880 | struct page *page, int rw, bool metadata_op) | 880 | struct page *page, int rw, bool metadata_op) |
881 | { | 881 | { |
882 | struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev); | 882 | struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev); |
@@ -904,7 +904,7 @@ int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size, | |||
904 | } | 904 | } |
905 | EXPORT_SYMBOL_GPL(sync_page_io); | 905 | EXPORT_SYMBOL_GPL(sync_page_io); |
906 | 906 | ||
907 | static int read_disk_sb(mdk_rdev_t * rdev, int size) | 907 | static int read_disk_sb(struct md_rdev * rdev, int size) |
908 | { | 908 | { |
909 | char b[BDEVNAME_SIZE]; | 909 | char b[BDEVNAME_SIZE]; |
910 | if (!rdev->sb_page) { | 910 | if (!rdev->sb_page) { |
@@ -1011,7 +1011,7 @@ static unsigned int calc_sb_csum(mdp_super_t * sb) | |||
1011 | * We rely on user-space to write the initial superblock, and support | 1011 | * We rely on user-space to write the initial superblock, and support |
1012 | * reading and updating of superblocks. | 1012 | * reading and updating of superblocks. |
1013 | * Interface methods are: | 1013 | * Interface methods are: |
1014 | * int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version) | 1014 | * int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version) |
1015 | * loads and validates a superblock on dev. | 1015 | * loads and validates a superblock on dev. |
1016 | * if refdev != NULL, compare superblocks on both devices | 1016 | * if refdev != NULL, compare superblocks on both devices |
1017 | * Return: | 1017 | * Return: |
@@ -1021,13 +1021,13 @@ static unsigned int calc_sb_csum(mdp_super_t * sb) | |||
1021 | * -EINVAL superblock incompatible or invalid | 1021 | * -EINVAL superblock incompatible or invalid |
1022 | * -othererror e.g. -EIO | 1022 | * -othererror e.g. -EIO |
1023 | * | 1023 | * |
1024 | * int validate_super(mddev_t *mddev, mdk_rdev_t *dev) | 1024 | * int validate_super(mddev_t *mddev, struct md_rdev *dev) |
1025 | * Verify that dev is acceptable into mddev. | 1025 | * Verify that dev is acceptable into mddev. |
1026 | * The first time, mddev->raid_disks will be 0, and data from | 1026 | * The first time, mddev->raid_disks will be 0, and data from |
1027 | * dev should be merged in. Subsequent calls check that dev | 1027 | * dev should be merged in. Subsequent calls check that dev |
1028 | * is new enough. Return 0 or -EINVAL | 1028 | * is new enough. Return 0 or -EINVAL |
1029 | * | 1029 | * |
1030 | * void sync_super(mddev_t *mddev, mdk_rdev_t *dev) | 1030 | * void sync_super(mddev_t *mddev, struct md_rdev *dev) |
1031 | * Update the superblock for rdev with data in mddev | 1031 | * Update the superblock for rdev with data in mddev |
1032 | * This does not write to disc. | 1032 | * This does not write to disc. |
1033 | * | 1033 | * |
@@ -1036,11 +1036,11 @@ static unsigned int calc_sb_csum(mdp_super_t * sb) | |||
1036 | struct super_type { | 1036 | struct super_type { |
1037 | char *name; | 1037 | char *name; |
1038 | struct module *owner; | 1038 | struct module *owner; |
1039 | int (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev, | 1039 | int (*load_super)(struct md_rdev *rdev, struct md_rdev *refdev, |
1040 | int minor_version); | 1040 | int minor_version); |
1041 | int (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev); | 1041 | int (*validate_super)(mddev_t *mddev, struct md_rdev *rdev); |
1042 | void (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev); | 1042 | void (*sync_super)(mddev_t *mddev, struct md_rdev *rdev); |
1043 | unsigned long long (*rdev_size_change)(mdk_rdev_t *rdev, | 1043 | unsigned long long (*rdev_size_change)(struct md_rdev *rdev, |
1044 | sector_t num_sectors); | 1044 | sector_t num_sectors); |
1045 | }; | 1045 | }; |
1046 | 1046 | ||
@@ -1065,7 +1065,7 @@ EXPORT_SYMBOL(md_check_no_bitmap); | |||
1065 | /* | 1065 | /* |
1066 | * load_super for 0.90.0 | 1066 | * load_super for 0.90.0 |
1067 | */ | 1067 | */ |
1068 | static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) | 1068 | static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version) |
1069 | { | 1069 | { |
1070 | char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; | 1070 | char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; |
1071 | mdp_super_t *sb; | 1071 | mdp_super_t *sb; |
@@ -1160,7 +1160,7 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version | |||
1160 | /* | 1160 | /* |
1161 | * validate_super for 0.90.0 | 1161 | * validate_super for 0.90.0 |
1162 | */ | 1162 | */ |
1163 | static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev) | 1163 | static int super_90_validate(mddev_t *mddev, struct md_rdev *rdev) |
1164 | { | 1164 | { |
1165 | mdp_disk_t *desc; | 1165 | mdp_disk_t *desc; |
1166 | mdp_super_t *sb = page_address(rdev->sb_page); | 1166 | mdp_super_t *sb = page_address(rdev->sb_page); |
@@ -1272,10 +1272,10 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev) | |||
1272 | /* | 1272 | /* |
1273 | * sync_super for 0.90.0 | 1273 | * sync_super for 0.90.0 |
1274 | */ | 1274 | */ |
1275 | static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) | 1275 | static void super_90_sync(mddev_t *mddev, struct md_rdev *rdev) |
1276 | { | 1276 | { |
1277 | mdp_super_t *sb; | 1277 | mdp_super_t *sb; |
1278 | mdk_rdev_t *rdev2; | 1278 | struct md_rdev *rdev2; |
1279 | int next_spare = mddev->raid_disks; | 1279 | int next_spare = mddev->raid_disks; |
1280 | 1280 | ||
1281 | 1281 | ||
@@ -1416,7 +1416,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) | |||
1416 | * rdev_size_change for 0.90.0 | 1416 | * rdev_size_change for 0.90.0 |
1417 | */ | 1417 | */ |
1418 | static unsigned long long | 1418 | static unsigned long long |
1419 | super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors) | 1419 | super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) |
1420 | { | 1420 | { |
1421 | if (num_sectors && num_sectors < rdev->mddev->dev_sectors) | 1421 | if (num_sectors && num_sectors < rdev->mddev->dev_sectors) |
1422 | return 0; /* component must fit device */ | 1422 | return 0; /* component must fit device */ |
@@ -1466,7 +1466,7 @@ static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb) | |||
1466 | 1466 | ||
1467 | static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors, | 1467 | static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors, |
1468 | int acknowledged); | 1468 | int acknowledged); |
1469 | static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) | 1469 | static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version) |
1470 | { | 1470 | { |
1471 | struct mdp_superblock_1 *sb; | 1471 | struct mdp_superblock_1 *sb; |
1472 | int ret; | 1472 | int ret; |
@@ -1622,7 +1622,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) | |||
1622 | return ret; | 1622 | return ret; |
1623 | } | 1623 | } |
1624 | 1624 | ||
1625 | static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev) | 1625 | static int super_1_validate(mddev_t *mddev, struct md_rdev *rdev) |
1626 | { | 1626 | { |
1627 | struct mdp_superblock_1 *sb = page_address(rdev->sb_page); | 1627 | struct mdp_superblock_1 *sb = page_address(rdev->sb_page); |
1628 | __u64 ev1 = le64_to_cpu(sb->events); | 1628 | __u64 ev1 = le64_to_cpu(sb->events); |
@@ -1723,10 +1723,10 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev) | |||
1723 | return 0; | 1723 | return 0; |
1724 | } | 1724 | } |
1725 | 1725 | ||
1726 | static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) | 1726 | static void super_1_sync(mddev_t *mddev, struct md_rdev *rdev) |
1727 | { | 1727 | { |
1728 | struct mdp_superblock_1 *sb; | 1728 | struct mdp_superblock_1 *sb; |
1729 | mdk_rdev_t *rdev2; | 1729 | struct md_rdev *rdev2; |
1730 | int max_dev, i; | 1730 | int max_dev, i; |
1731 | /* make rdev->sb match mddev and rdev data. */ | 1731 | /* make rdev->sb match mddev and rdev data. */ |
1732 | 1732 | ||
@@ -1848,7 +1848,7 @@ retry: | |||
1848 | } | 1848 | } |
1849 | 1849 | ||
1850 | static unsigned long long | 1850 | static unsigned long long |
1851 | super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors) | 1851 | super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) |
1852 | { | 1852 | { |
1853 | struct mdp_superblock_1 *sb; | 1853 | struct mdp_superblock_1 *sb; |
1854 | sector_t max_sectors; | 1854 | sector_t max_sectors; |
@@ -1902,7 +1902,7 @@ static struct super_type super_types[] = { | |||
1902 | }, | 1902 | }, |
1903 | }; | 1903 | }; |
1904 | 1904 | ||
1905 | static void sync_super(mddev_t *mddev, mdk_rdev_t *rdev) | 1905 | static void sync_super(mddev_t *mddev, struct md_rdev *rdev) |
1906 | { | 1906 | { |
1907 | if (mddev->sync_super) { | 1907 | if (mddev->sync_super) { |
1908 | mddev->sync_super(mddev, rdev); | 1908 | mddev->sync_super(mddev, rdev); |
@@ -1916,7 +1916,7 @@ static void sync_super(mddev_t *mddev, mdk_rdev_t *rdev) | |||
1916 | 1916 | ||
1917 | static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2) | 1917 | static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2) |
1918 | { | 1918 | { |
1919 | mdk_rdev_t *rdev, *rdev2; | 1919 | struct md_rdev *rdev, *rdev2; |
1920 | 1920 | ||
1921 | rcu_read_lock(); | 1921 | rcu_read_lock(); |
1922 | rdev_for_each_rcu(rdev, mddev1) | 1922 | rdev_for_each_rcu(rdev, mddev1) |
@@ -1941,7 +1941,7 @@ static LIST_HEAD(pending_raid_disks); | |||
1941 | */ | 1941 | */ |
1942 | int md_integrity_register(mddev_t *mddev) | 1942 | int md_integrity_register(mddev_t *mddev) |
1943 | { | 1943 | { |
1944 | mdk_rdev_t *rdev, *reference = NULL; | 1944 | struct md_rdev *rdev, *reference = NULL; |
1945 | 1945 | ||
1946 | if (list_empty(&mddev->disks)) | 1946 | if (list_empty(&mddev->disks)) |
1947 | return 0; /* nothing to do */ | 1947 | return 0; /* nothing to do */ |
@@ -1986,7 +1986,7 @@ int md_integrity_register(mddev_t *mddev) | |||
1986 | EXPORT_SYMBOL(md_integrity_register); | 1986 | EXPORT_SYMBOL(md_integrity_register); |
1987 | 1987 | ||
1988 | /* Disable data integrity if non-capable/non-matching disk is being added */ | 1988 | /* Disable data integrity if non-capable/non-matching disk is being added */ |
1989 | void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev) | 1989 | void md_integrity_add_rdev(struct md_rdev *rdev, mddev_t *mddev) |
1990 | { | 1990 | { |
1991 | struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev); | 1991 | struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev); |
1992 | struct blk_integrity *bi_mddev = blk_get_integrity(mddev->gendisk); | 1992 | struct blk_integrity *bi_mddev = blk_get_integrity(mddev->gendisk); |
@@ -2003,7 +2003,7 @@ void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev) | |||
2003 | } | 2003 | } |
2004 | EXPORT_SYMBOL(md_integrity_add_rdev); | 2004 | EXPORT_SYMBOL(md_integrity_add_rdev); |
2005 | 2005 | ||
2006 | static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev) | 2006 | static int bind_rdev_to_array(struct md_rdev * rdev, mddev_t * mddev) |
2007 | { | 2007 | { |
2008 | char b[BDEVNAME_SIZE]; | 2008 | char b[BDEVNAME_SIZE]; |
2009 | struct kobject *ko; | 2009 | struct kobject *ko; |
@@ -2083,12 +2083,12 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev) | |||
2083 | 2083 | ||
2084 | static void md_delayed_delete(struct work_struct *ws) | 2084 | static void md_delayed_delete(struct work_struct *ws) |
2085 | { | 2085 | { |
2086 | mdk_rdev_t *rdev = container_of(ws, mdk_rdev_t, del_work); | 2086 | struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work); |
2087 | kobject_del(&rdev->kobj); | 2087 | kobject_del(&rdev->kobj); |
2088 | kobject_put(&rdev->kobj); | 2088 | kobject_put(&rdev->kobj); |
2089 | } | 2089 | } |
2090 | 2090 | ||
2091 | static void unbind_rdev_from_array(mdk_rdev_t * rdev) | 2091 | static void unbind_rdev_from_array(struct md_rdev * rdev) |
2092 | { | 2092 | { |
2093 | char b[BDEVNAME_SIZE]; | 2093 | char b[BDEVNAME_SIZE]; |
2094 | if (!rdev->mddev) { | 2094 | if (!rdev->mddev) { |
@@ -2120,14 +2120,14 @@ static void unbind_rdev_from_array(mdk_rdev_t * rdev) | |||
2120 | * otherwise reused by a RAID array (or any other kernel | 2120 | * otherwise reused by a RAID array (or any other kernel |
2121 | * subsystem), by bd_claiming the device. | 2121 | * subsystem), by bd_claiming the device. |
2122 | */ | 2122 | */ |
2123 | static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared) | 2123 | static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared) |
2124 | { | 2124 | { |
2125 | int err = 0; | 2125 | int err = 0; |
2126 | struct block_device *bdev; | 2126 | struct block_device *bdev; |
2127 | char b[BDEVNAME_SIZE]; | 2127 | char b[BDEVNAME_SIZE]; |
2128 | 2128 | ||
2129 | bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, | 2129 | bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, |
2130 | shared ? (mdk_rdev_t *)lock_rdev : rdev); | 2130 | shared ? (struct md_rdev *)lock_rdev : rdev); |
2131 | if (IS_ERR(bdev)) { | 2131 | if (IS_ERR(bdev)) { |
2132 | printk(KERN_ERR "md: could not open %s.\n", | 2132 | printk(KERN_ERR "md: could not open %s.\n", |
2133 | __bdevname(dev, b)); | 2133 | __bdevname(dev, b)); |
@@ -2137,7 +2137,7 @@ static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared) | |||
2137 | return err; | 2137 | return err; |
2138 | } | 2138 | } |
2139 | 2139 | ||
2140 | static void unlock_rdev(mdk_rdev_t *rdev) | 2140 | static void unlock_rdev(struct md_rdev *rdev) |
2141 | { | 2141 | { |
2142 | struct block_device *bdev = rdev->bdev; | 2142 | struct block_device *bdev = rdev->bdev; |
2143 | rdev->bdev = NULL; | 2143 | rdev->bdev = NULL; |
@@ -2148,7 +2148,7 @@ static void unlock_rdev(mdk_rdev_t *rdev) | |||
2148 | 2148 | ||
2149 | void md_autodetect_dev(dev_t dev); | 2149 | void md_autodetect_dev(dev_t dev); |
2150 | 2150 | ||
2151 | static void export_rdev(mdk_rdev_t * rdev) | 2151 | static void export_rdev(struct md_rdev * rdev) |
2152 | { | 2152 | { |
2153 | char b[BDEVNAME_SIZE]; | 2153 | char b[BDEVNAME_SIZE]; |
2154 | printk(KERN_INFO "md: export_rdev(%s)\n", | 2154 | printk(KERN_INFO "md: export_rdev(%s)\n", |
@@ -2164,7 +2164,7 @@ static void export_rdev(mdk_rdev_t * rdev) | |||
2164 | kobject_put(&rdev->kobj); | 2164 | kobject_put(&rdev->kobj); |
2165 | } | 2165 | } |
2166 | 2166 | ||
2167 | static void kick_rdev_from_array(mdk_rdev_t * rdev) | 2167 | static void kick_rdev_from_array(struct md_rdev * rdev) |
2168 | { | 2168 | { |
2169 | unbind_rdev_from_array(rdev); | 2169 | unbind_rdev_from_array(rdev); |
2170 | export_rdev(rdev); | 2170 | export_rdev(rdev); |
@@ -2172,7 +2172,7 @@ static void kick_rdev_from_array(mdk_rdev_t * rdev) | |||
2172 | 2172 | ||
2173 | static void export_array(mddev_t *mddev) | 2173 | static void export_array(mddev_t *mddev) |
2174 | { | 2174 | { |
2175 | mdk_rdev_t *rdev, *tmp; | 2175 | struct md_rdev *rdev, *tmp; |
2176 | 2176 | ||
2177 | rdev_for_each(rdev, tmp, mddev) { | 2177 | rdev_for_each(rdev, tmp, mddev) { |
2178 | if (!rdev->mddev) { | 2178 | if (!rdev->mddev) { |
@@ -2268,7 +2268,7 @@ static void print_sb_1(struct mdp_superblock_1 *sb) | |||
2268 | ); | 2268 | ); |
2269 | } | 2269 | } |
2270 | 2270 | ||
2271 | static void print_rdev(mdk_rdev_t *rdev, int major_version) | 2271 | static void print_rdev(struct md_rdev *rdev, int major_version) |
2272 | { | 2272 | { |
2273 | char b[BDEVNAME_SIZE]; | 2273 | char b[BDEVNAME_SIZE]; |
2274 | printk(KERN_INFO "md: rdev %s, Sect:%08llu F:%d S:%d DN:%u\n", | 2274 | printk(KERN_INFO "md: rdev %s, Sect:%08llu F:%d S:%d DN:%u\n", |
@@ -2292,7 +2292,7 @@ static void print_rdev(mdk_rdev_t *rdev, int major_version) | |||
2292 | static void md_print_devices(void) | 2292 | static void md_print_devices(void) |
2293 | { | 2293 | { |
2294 | struct list_head *tmp; | 2294 | struct list_head *tmp; |
2295 | mdk_rdev_t *rdev; | 2295 | struct md_rdev *rdev; |
2296 | mddev_t *mddev; | 2296 | mddev_t *mddev; |
2297 | char b[BDEVNAME_SIZE]; | 2297 | char b[BDEVNAME_SIZE]; |
2298 | 2298 | ||
@@ -2326,7 +2326,7 @@ static void sync_sbs(mddev_t * mddev, int nospares) | |||
2326 | * (which would mean they aren't being marked as dirty | 2326 | * (which would mean they aren't being marked as dirty |
2327 | * with the rest of the array) | 2327 | * with the rest of the array) |
2328 | */ | 2328 | */ |
2329 | mdk_rdev_t *rdev; | 2329 | struct md_rdev *rdev; |
2330 | list_for_each_entry(rdev, &mddev->disks, same_set) { | 2330 | list_for_each_entry(rdev, &mddev->disks, same_set) { |
2331 | if (rdev->sb_events == mddev->events || | 2331 | if (rdev->sb_events == mddev->events || |
2332 | (nospares && | 2332 | (nospares && |
@@ -2343,7 +2343,7 @@ static void sync_sbs(mddev_t * mddev, int nospares) | |||
2343 | 2343 | ||
2344 | static void md_update_sb(mddev_t * mddev, int force_change) | 2344 | static void md_update_sb(mddev_t * mddev, int force_change) |
2345 | { | 2345 | { |
2346 | mdk_rdev_t *rdev; | 2346 | struct md_rdev *rdev; |
2347 | int sync_req; | 2347 | int sync_req; |
2348 | int nospares = 0; | 2348 | int nospares = 0; |
2349 | int any_badblocks_changed = 0; | 2349 | int any_badblocks_changed = 0; |
@@ -2521,12 +2521,12 @@ static int cmd_match(const char *cmd, const char *str) | |||
2521 | 2521 | ||
2522 | struct rdev_sysfs_entry { | 2522 | struct rdev_sysfs_entry { |
2523 | struct attribute attr; | 2523 | struct attribute attr; |
2524 | ssize_t (*show)(mdk_rdev_t *, char *); | 2524 | ssize_t (*show)(struct md_rdev *, char *); |
2525 | ssize_t (*store)(mdk_rdev_t *, const char *, size_t); | 2525 | ssize_t (*store)(struct md_rdev *, const char *, size_t); |
2526 | }; | 2526 | }; |
2527 | 2527 | ||
2528 | static ssize_t | 2528 | static ssize_t |
2529 | state_show(mdk_rdev_t *rdev, char *page) | 2529 | state_show(struct md_rdev *rdev, char *page) |
2530 | { | 2530 | { |
2531 | char *sep = ""; | 2531 | char *sep = ""; |
2532 | size_t len = 0; | 2532 | size_t len = 0; |
@@ -2562,7 +2562,7 @@ state_show(mdk_rdev_t *rdev, char *page) | |||
2562 | } | 2562 | } |
2563 | 2563 | ||
2564 | static ssize_t | 2564 | static ssize_t |
2565 | state_store(mdk_rdev_t *rdev, const char *buf, size_t len) | 2565 | state_store(struct md_rdev *rdev, const char *buf, size_t len) |
2566 | { | 2566 | { |
2567 | /* can write | 2567 | /* can write |
2568 | * faulty - simulates an error | 2568 | * faulty - simulates an error |
@@ -2635,13 +2635,13 @@ static struct rdev_sysfs_entry rdev_state = | |||
2635 | __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store); | 2635 | __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store); |
2636 | 2636 | ||
2637 | static ssize_t | 2637 | static ssize_t |
2638 | errors_show(mdk_rdev_t *rdev, char *page) | 2638 | errors_show(struct md_rdev *rdev, char *page) |
2639 | { | 2639 | { |
2640 | return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); | 2640 | return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); |
2641 | } | 2641 | } |
2642 | 2642 | ||
2643 | static ssize_t | 2643 | static ssize_t |
2644 | errors_store(mdk_rdev_t *rdev, const char *buf, size_t len) | 2644 | errors_store(struct md_rdev *rdev, const char *buf, size_t len) |
2645 | { | 2645 | { |
2646 | char *e; | 2646 | char *e; |
2647 | unsigned long n = simple_strtoul(buf, &e, 10); | 2647 | unsigned long n = simple_strtoul(buf, &e, 10); |
@@ -2655,7 +2655,7 @@ static struct rdev_sysfs_entry rdev_errors = | |||
2655 | __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store); | 2655 | __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store); |
2656 | 2656 | ||
2657 | static ssize_t | 2657 | static ssize_t |
2658 | slot_show(mdk_rdev_t *rdev, char *page) | 2658 | slot_show(struct md_rdev *rdev, char *page) |
2659 | { | 2659 | { |
2660 | if (rdev->raid_disk < 0) | 2660 | if (rdev->raid_disk < 0) |
2661 | return sprintf(page, "none\n"); | 2661 | return sprintf(page, "none\n"); |
@@ -2664,7 +2664,7 @@ slot_show(mdk_rdev_t *rdev, char *page) | |||
2664 | } | 2664 | } |
2665 | 2665 | ||
2666 | static ssize_t | 2666 | static ssize_t |
2667 | slot_store(mdk_rdev_t *rdev, const char *buf, size_t len) | 2667 | slot_store(struct md_rdev *rdev, const char *buf, size_t len) |
2668 | { | 2668 | { |
2669 | char *e; | 2669 | char *e; |
2670 | int err; | 2670 | int err; |
@@ -2695,7 +2695,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len) | |||
2695 | set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); | 2695 | set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); |
2696 | md_wakeup_thread(rdev->mddev->thread); | 2696 | md_wakeup_thread(rdev->mddev->thread); |
2697 | } else if (rdev->mddev->pers) { | 2697 | } else if (rdev->mddev->pers) { |
2698 | mdk_rdev_t *rdev2; | 2698 | struct md_rdev *rdev2; |
2699 | /* Activating a spare .. or possibly reactivating | 2699 | /* Activating a spare .. or possibly reactivating |
2700 | * if we ever get bitmaps working here. | 2700 | * if we ever get bitmaps working here. |
2701 | */ | 2701 | */ |
@@ -2751,13 +2751,13 @@ static struct rdev_sysfs_entry rdev_slot = | |||
2751 | __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store); | 2751 | __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store); |
2752 | 2752 | ||
2753 | static ssize_t | 2753 | static ssize_t |
2754 | offset_show(mdk_rdev_t *rdev, char *page) | 2754 | offset_show(struct md_rdev *rdev, char *page) |
2755 | { | 2755 | { |
2756 | return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset); | 2756 | return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset); |
2757 | } | 2757 | } |
2758 | 2758 | ||
2759 | static ssize_t | 2759 | static ssize_t |
2760 | offset_store(mdk_rdev_t *rdev, const char *buf, size_t len) | 2760 | offset_store(struct md_rdev *rdev, const char *buf, size_t len) |
2761 | { | 2761 | { |
2762 | char *e; | 2762 | char *e; |
2763 | unsigned long long offset = simple_strtoull(buf, &e, 10); | 2763 | unsigned long long offset = simple_strtoull(buf, &e, 10); |
@@ -2777,7 +2777,7 @@ static struct rdev_sysfs_entry rdev_offset = | |||
2777 | __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store); | 2777 | __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store); |
2778 | 2778 | ||
2779 | static ssize_t | 2779 | static ssize_t |
2780 | rdev_size_show(mdk_rdev_t *rdev, char *page) | 2780 | rdev_size_show(struct md_rdev *rdev, char *page) |
2781 | { | 2781 | { |
2782 | return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2); | 2782 | return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2); |
2783 | } | 2783 | } |
@@ -2812,7 +2812,7 @@ static int strict_blocks_to_sectors(const char *buf, sector_t *sectors) | |||
2812 | } | 2812 | } |
2813 | 2813 | ||
2814 | static ssize_t | 2814 | static ssize_t |
2815 | rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) | 2815 | rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len) |
2816 | { | 2816 | { |
2817 | mddev_t *my_mddev = rdev->mddev; | 2817 | mddev_t *my_mddev = rdev->mddev; |
2818 | sector_t oldsectors = rdev->sectors; | 2818 | sector_t oldsectors = rdev->sectors; |
@@ -2846,7 +2846,7 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) | |||
2846 | 2846 | ||
2847 | mddev_unlock(my_mddev); | 2847 | mddev_unlock(my_mddev); |
2848 | for_each_mddev(mddev, tmp) { | 2848 | for_each_mddev(mddev, tmp) { |
2849 | mdk_rdev_t *rdev2; | 2849 | struct md_rdev *rdev2; |
2850 | 2850 | ||
2851 | mddev_lock(mddev); | 2851 | mddev_lock(mddev); |
2852 | list_for_each_entry(rdev2, &mddev->disks, same_set) | 2852 | list_for_each_entry(rdev2, &mddev->disks, same_set) |
@@ -2883,7 +2883,7 @@ static struct rdev_sysfs_entry rdev_size = | |||
2883 | __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store); | 2883 | __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store); |
2884 | 2884 | ||
2885 | 2885 | ||
2886 | static ssize_t recovery_start_show(mdk_rdev_t *rdev, char *page) | 2886 | static ssize_t recovery_start_show(struct md_rdev *rdev, char *page) |
2887 | { | 2887 | { |
2888 | unsigned long long recovery_start = rdev->recovery_offset; | 2888 | unsigned long long recovery_start = rdev->recovery_offset; |
2889 | 2889 | ||
@@ -2894,7 +2894,7 @@ static ssize_t recovery_start_show(mdk_rdev_t *rdev, char *page) | |||
2894 | return sprintf(page, "%llu\n", recovery_start); | 2894 | return sprintf(page, "%llu\n", recovery_start); |
2895 | } | 2895 | } |
2896 | 2896 | ||
2897 | static ssize_t recovery_start_store(mdk_rdev_t *rdev, const char *buf, size_t len) | 2897 | static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len) |
2898 | { | 2898 | { |
2899 | unsigned long long recovery_start; | 2899 | unsigned long long recovery_start; |
2900 | 2900 | ||
@@ -2924,11 +2924,11 @@ badblocks_show(struct badblocks *bb, char *page, int unack); | |||
2924 | static ssize_t | 2924 | static ssize_t |
2925 | badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack); | 2925 | badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack); |
2926 | 2926 | ||
2927 | static ssize_t bb_show(mdk_rdev_t *rdev, char *page) | 2927 | static ssize_t bb_show(struct md_rdev *rdev, char *page) |
2928 | { | 2928 | { |
2929 | return badblocks_show(&rdev->badblocks, page, 0); | 2929 | return badblocks_show(&rdev->badblocks, page, 0); |
2930 | } | 2930 | } |
2931 | static ssize_t bb_store(mdk_rdev_t *rdev, const char *page, size_t len) | 2931 | static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len) |
2932 | { | 2932 | { |
2933 | int rv = badblocks_store(&rdev->badblocks, page, len, 0); | 2933 | int rv = badblocks_store(&rdev->badblocks, page, len, 0); |
2934 | /* Maybe that ack was all we needed */ | 2934 | /* Maybe that ack was all we needed */ |
@@ -2940,11 +2940,11 @@ static struct rdev_sysfs_entry rdev_bad_blocks = | |||
2940 | __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store); | 2940 | __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store); |
2941 | 2941 | ||
2942 | 2942 | ||
2943 | static ssize_t ubb_show(mdk_rdev_t *rdev, char *page) | 2943 | static ssize_t ubb_show(struct md_rdev *rdev, char *page) |
2944 | { | 2944 | { |
2945 | return badblocks_show(&rdev->badblocks, page, 1); | 2945 | return badblocks_show(&rdev->badblocks, page, 1); |
2946 | } | 2946 | } |
2947 | static ssize_t ubb_store(mdk_rdev_t *rdev, const char *page, size_t len) | 2947 | static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len) |
2948 | { | 2948 | { |
2949 | return badblocks_store(&rdev->badblocks, page, len, 1); | 2949 | return badblocks_store(&rdev->badblocks, page, len, 1); |
2950 | } | 2950 | } |
@@ -2966,7 +2966,7 @@ static ssize_t | |||
2966 | rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page) | 2966 | rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page) |
2967 | { | 2967 | { |
2968 | struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); | 2968 | struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); |
2969 | mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); | 2969 | struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj); |
2970 | mddev_t *mddev = rdev->mddev; | 2970 | mddev_t *mddev = rdev->mddev; |
2971 | ssize_t rv; | 2971 | ssize_t rv; |
2972 | 2972 | ||
@@ -2989,7 +2989,7 @@ rdev_attr_store(struct kobject *kobj, struct attribute *attr, | |||
2989 | const char *page, size_t length) | 2989 | const char *page, size_t length) |
2990 | { | 2990 | { |
2991 | struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); | 2991 | struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); |
2992 | mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); | 2992 | struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj); |
2993 | ssize_t rv; | 2993 | ssize_t rv; |
2994 | mddev_t *mddev = rdev->mddev; | 2994 | mddev_t *mddev = rdev->mddev; |
2995 | 2995 | ||
@@ -3010,7 +3010,7 @@ rdev_attr_store(struct kobject *kobj, struct attribute *attr, | |||
3010 | 3010 | ||
3011 | static void rdev_free(struct kobject *ko) | 3011 | static void rdev_free(struct kobject *ko) |
3012 | { | 3012 | { |
3013 | mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj); | 3013 | struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj); |
3014 | kfree(rdev); | 3014 | kfree(rdev); |
3015 | } | 3015 | } |
3016 | static const struct sysfs_ops rdev_sysfs_ops = { | 3016 | static const struct sysfs_ops rdev_sysfs_ops = { |
@@ -3023,7 +3023,7 @@ static struct kobj_type rdev_ktype = { | |||
3023 | .default_attrs = rdev_default_attrs, | 3023 | .default_attrs = rdev_default_attrs, |
3024 | }; | 3024 | }; |
3025 | 3025 | ||
3026 | int md_rdev_init(mdk_rdev_t *rdev) | 3026 | int md_rdev_init(struct md_rdev *rdev) |
3027 | { | 3027 | { |
3028 | rdev->desc_nr = -1; | 3028 | rdev->desc_nr = -1; |
3029 | rdev->saved_raid_disk = -1; | 3029 | rdev->saved_raid_disk = -1; |
@@ -3066,11 +3066,11 @@ EXPORT_SYMBOL_GPL(md_rdev_init); | |||
3066 | * | 3066 | * |
3067 | * a faulty rdev _never_ has rdev->sb set. | 3067 | * a faulty rdev _never_ has rdev->sb set. |
3068 | */ | 3068 | */ |
3069 | static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor) | 3069 | static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor) |
3070 | { | 3070 | { |
3071 | char b[BDEVNAME_SIZE]; | 3071 | char b[BDEVNAME_SIZE]; |
3072 | int err; | 3072 | int err; |
3073 | mdk_rdev_t *rdev; | 3073 | struct md_rdev *rdev; |
3074 | sector_t size; | 3074 | sector_t size; |
3075 | 3075 | ||
3076 | rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); | 3076 | rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); |
@@ -3142,7 +3142,7 @@ abort_free: | |||
3142 | static void analyze_sbs(mddev_t * mddev) | 3142 | static void analyze_sbs(mddev_t * mddev) |
3143 | { | 3143 | { |
3144 | int i; | 3144 | int i; |
3145 | mdk_rdev_t *rdev, *freshest, *tmp; | 3145 | struct md_rdev *rdev, *freshest, *tmp; |
3146 | char b[BDEVNAME_SIZE]; | 3146 | char b[BDEVNAME_SIZE]; |
3147 | 3147 | ||
3148 | freshest = NULL; | 3148 | freshest = NULL; |
@@ -3291,7 +3291,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len) | |||
3291 | struct mdk_personality *pers; | 3291 | struct mdk_personality *pers; |
3292 | long level; | 3292 | long level; |
3293 | void *priv; | 3293 | void *priv; |
3294 | mdk_rdev_t *rdev; | 3294 | struct md_rdev *rdev; |
3295 | 3295 | ||
3296 | if (mddev->pers == NULL) { | 3296 | if (mddev->pers == NULL) { |
3297 | if (len == 0) | 3297 | if (len == 0) |
@@ -3836,7 +3836,7 @@ new_dev_store(mddev_t *mddev, const char *buf, size_t len) | |||
3836 | int major = simple_strtoul(buf, &e, 10); | 3836 | int major = simple_strtoul(buf, &e, 10); |
3837 | int minor; | 3837 | int minor; |
3838 | dev_t dev; | 3838 | dev_t dev; |
3839 | mdk_rdev_t *rdev; | 3839 | struct md_rdev *rdev; |
3840 | int err; | 3840 | int err; |
3841 | 3841 | ||
3842 | if (!*buf || *e != ':' || !e[1] || e[1] == '\n') | 3842 | if (!*buf || *e != ':' || !e[1] || e[1] == '\n') |
@@ -3854,8 +3854,9 @@ new_dev_store(mddev_t *mddev, const char *buf, size_t len) | |||
3854 | rdev = md_import_device(dev, mddev->major_version, | 3854 | rdev = md_import_device(dev, mddev->major_version, |
3855 | mddev->minor_version); | 3855 | mddev->minor_version); |
3856 | if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) { | 3856 | if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) { |
3857 | mdk_rdev_t *rdev0 = list_entry(mddev->disks.next, | 3857 | struct md_rdev *rdev0 |
3858 | mdk_rdev_t, same_set); | 3858 | = list_entry(mddev->disks.next, |
3859 | struct md_rdev, same_set); | ||
3859 | err = super_types[mddev->major_version] | 3860 | err = super_types[mddev->major_version] |
3860 | .load_super(rdev, rdev0, mddev->minor_version); | 3861 | .load_super(rdev, rdev0, mddev->minor_version); |
3861 | if (err < 0) | 3862 | if (err < 0) |
@@ -4698,7 +4699,7 @@ static int start_dirty_degraded; | |||
4698 | int md_run(mddev_t *mddev) | 4699 | int md_run(mddev_t *mddev) |
4699 | { | 4700 | { |
4700 | int err; | 4701 | int err; |
4701 | mdk_rdev_t *rdev; | 4702 | struct md_rdev *rdev; |
4702 | struct mdk_personality *pers; | 4703 | struct mdk_personality *pers; |
4703 | 4704 | ||
4704 | if (list_empty(&mddev->disks)) | 4705 | if (list_empty(&mddev->disks)) |
@@ -4798,7 +4799,7 @@ int md_run(mddev_t *mddev) | |||
4798 | * configuration. | 4799 | * configuration. |
4799 | */ | 4800 | */ |
4800 | char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; | 4801 | char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; |
4801 | mdk_rdev_t *rdev2; | 4802 | struct md_rdev *rdev2; |
4802 | int warned = 0; | 4803 | int warned = 0; |
4803 | 4804 | ||
4804 | list_for_each_entry(rdev, &mddev->disks, same_set) | 4805 | list_for_each_entry(rdev, &mddev->disks, same_set) |
@@ -5087,7 +5088,7 @@ out: | |||
5087 | static int do_md_stop(mddev_t * mddev, int mode, int is_open) | 5088 | static int do_md_stop(mddev_t * mddev, int mode, int is_open) |
5088 | { | 5089 | { |
5089 | struct gendisk *disk = mddev->gendisk; | 5090 | struct gendisk *disk = mddev->gendisk; |
5090 | mdk_rdev_t *rdev; | 5091 | struct md_rdev *rdev; |
5091 | 5092 | ||
5092 | mutex_lock(&mddev->open_mutex); | 5093 | mutex_lock(&mddev->open_mutex); |
5093 | if (atomic_read(&mddev->openers) > is_open || | 5094 | if (atomic_read(&mddev->openers) > is_open || |
@@ -5152,7 +5153,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) | |||
5152 | #ifndef MODULE | 5153 | #ifndef MODULE |
5153 | static void autorun_array(mddev_t *mddev) | 5154 | static void autorun_array(mddev_t *mddev) |
5154 | { | 5155 | { |
5155 | mdk_rdev_t *rdev; | 5156 | struct md_rdev *rdev; |
5156 | int err; | 5157 | int err; |
5157 | 5158 | ||
5158 | if (list_empty(&mddev->disks)) | 5159 | if (list_empty(&mddev->disks)) |
@@ -5187,7 +5188,7 @@ static void autorun_array(mddev_t *mddev) | |||
5187 | */ | 5188 | */ |
5188 | static void autorun_devices(int part) | 5189 | static void autorun_devices(int part) |
5189 | { | 5190 | { |
5190 | mdk_rdev_t *rdev0, *rdev, *tmp; | 5191 | struct md_rdev *rdev0, *rdev, *tmp; |
5191 | mddev_t *mddev; | 5192 | mddev_t *mddev; |
5192 | char b[BDEVNAME_SIZE]; | 5193 | char b[BDEVNAME_SIZE]; |
5193 | 5194 | ||
@@ -5197,7 +5198,7 @@ static void autorun_devices(int part) | |||
5197 | dev_t dev; | 5198 | dev_t dev; |
5198 | LIST_HEAD(candidates); | 5199 | LIST_HEAD(candidates); |
5199 | rdev0 = list_entry(pending_raid_disks.next, | 5200 | rdev0 = list_entry(pending_raid_disks.next, |
5200 | mdk_rdev_t, same_set); | 5201 | struct md_rdev, same_set); |
5201 | 5202 | ||
5202 | printk(KERN_INFO "md: considering %s ...\n", | 5203 | printk(KERN_INFO "md: considering %s ...\n", |
5203 | bdevname(rdev0->bdev,b)); | 5204 | bdevname(rdev0->bdev,b)); |
@@ -5287,7 +5288,7 @@ static int get_array_info(mddev_t * mddev, void __user * arg) | |||
5287 | { | 5288 | { |
5288 | mdu_array_info_t info; | 5289 | mdu_array_info_t info; |
5289 | int nr,working,insync,failed,spare; | 5290 | int nr,working,insync,failed,spare; |
5290 | mdk_rdev_t *rdev; | 5291 | struct md_rdev *rdev; |
5291 | 5292 | ||
5292 | nr=working=insync=failed=spare=0; | 5293 | nr=working=insync=failed=spare=0; |
5293 | list_for_each_entry(rdev, &mddev->disks, same_set) { | 5294 | list_for_each_entry(rdev, &mddev->disks, same_set) { |
@@ -5379,7 +5380,7 @@ out: | |||
5379 | static int get_disk_info(mddev_t * mddev, void __user * arg) | 5380 | static int get_disk_info(mddev_t * mddev, void __user * arg) |
5380 | { | 5381 | { |
5381 | mdu_disk_info_t info; | 5382 | mdu_disk_info_t info; |
5382 | mdk_rdev_t *rdev; | 5383 | struct md_rdev *rdev; |
5383 | 5384 | ||
5384 | if (copy_from_user(&info, arg, sizeof(info))) | 5385 | if (copy_from_user(&info, arg, sizeof(info))) |
5385 | return -EFAULT; | 5386 | return -EFAULT; |
@@ -5413,7 +5414,7 @@ static int get_disk_info(mddev_t * mddev, void __user * arg) | |||
5413 | static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) | 5414 | static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) |
5414 | { | 5415 | { |
5415 | char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; | 5416 | char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; |
5416 | mdk_rdev_t *rdev; | 5417 | struct md_rdev *rdev; |
5417 | dev_t dev = MKDEV(info->major,info->minor); | 5418 | dev_t dev = MKDEV(info->major,info->minor); |
5418 | 5419 | ||
5419 | if (info->major != MAJOR(dev) || info->minor != MINOR(dev)) | 5420 | if (info->major != MAJOR(dev) || info->minor != MINOR(dev)) |
@@ -5430,8 +5431,9 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) | |||
5430 | return PTR_ERR(rdev); | 5431 | return PTR_ERR(rdev); |
5431 | } | 5432 | } |
5432 | if (!list_empty(&mddev->disks)) { | 5433 | if (!list_empty(&mddev->disks)) { |
5433 | mdk_rdev_t *rdev0 = list_entry(mddev->disks.next, | 5434 | struct md_rdev *rdev0 |
5434 | mdk_rdev_t, same_set); | 5435 | = list_entry(mddev->disks.next, |
5436 | struct md_rdev, same_set); | ||
5435 | err = super_types[mddev->major_version] | 5437 | err = super_types[mddev->major_version] |
5436 | .load_super(rdev, rdev0, mddev->minor_version); | 5438 | .load_super(rdev, rdev0, mddev->minor_version); |
5437 | if (err < 0) { | 5439 | if (err < 0) { |
@@ -5584,7 +5586,7 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) | |||
5584 | static int hot_remove_disk(mddev_t * mddev, dev_t dev) | 5586 | static int hot_remove_disk(mddev_t * mddev, dev_t dev) |
5585 | { | 5587 | { |
5586 | char b[BDEVNAME_SIZE]; | 5588 | char b[BDEVNAME_SIZE]; |
5587 | mdk_rdev_t *rdev; | 5589 | struct md_rdev *rdev; |
5588 | 5590 | ||
5589 | rdev = find_rdev(mddev, dev); | 5591 | rdev = find_rdev(mddev, dev); |
5590 | if (!rdev) | 5592 | if (!rdev) |
@@ -5608,7 +5610,7 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev) | |||
5608 | { | 5610 | { |
5609 | char b[BDEVNAME_SIZE]; | 5611 | char b[BDEVNAME_SIZE]; |
5610 | int err; | 5612 | int err; |
5611 | mdk_rdev_t *rdev; | 5613 | struct md_rdev *rdev; |
5612 | 5614 | ||
5613 | if (!mddev->pers) | 5615 | if (!mddev->pers) |
5614 | return -ENODEV; | 5616 | return -ENODEV; |
@@ -5834,7 +5836,7 @@ EXPORT_SYMBOL(md_set_array_sectors); | |||
5834 | 5836 | ||
5835 | static int update_size(mddev_t *mddev, sector_t num_sectors) | 5837 | static int update_size(mddev_t *mddev, sector_t num_sectors) |
5836 | { | 5838 | { |
5837 | mdk_rdev_t *rdev; | 5839 | struct md_rdev *rdev; |
5838 | int rv; | 5840 | int rv; |
5839 | int fit = (num_sectors == 0); | 5841 | int fit = (num_sectors == 0); |
5840 | 5842 | ||
@@ -5993,7 +5995,7 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info) | |||
5993 | 5995 | ||
5994 | static int set_disk_faulty(mddev_t *mddev, dev_t dev) | 5996 | static int set_disk_faulty(mddev_t *mddev, dev_t dev) |
5995 | { | 5997 | { |
5996 | mdk_rdev_t *rdev; | 5998 | struct md_rdev *rdev; |
5997 | 5999 | ||
5998 | if (mddev->pers == NULL) | 6000 | if (mddev->pers == NULL) |
5999 | return -ENODEV; | 6001 | return -ENODEV; |
@@ -6450,7 +6452,7 @@ void md_unregister_thread(mdk_thread_t **threadp) | |||
6450 | kfree(thread); | 6452 | kfree(thread); |
6451 | } | 6453 | } |
6452 | 6454 | ||
6453 | void md_error(mddev_t *mddev, mdk_rdev_t *rdev) | 6455 | void md_error(mddev_t *mddev, struct md_rdev *rdev) |
6454 | { | 6456 | { |
6455 | if (!mddev) { | 6457 | if (!mddev) { |
6456 | MD_BUG(); | 6458 | MD_BUG(); |
@@ -6479,7 +6481,7 @@ void md_error(mddev_t *mddev, mdk_rdev_t *rdev) | |||
6479 | static void status_unused(struct seq_file *seq) | 6481 | static void status_unused(struct seq_file *seq) |
6480 | { | 6482 | { |
6481 | int i = 0; | 6483 | int i = 0; |
6482 | mdk_rdev_t *rdev; | 6484 | struct md_rdev *rdev; |
6483 | 6485 | ||
6484 | seq_printf(seq, "unused devices: "); | 6486 | seq_printf(seq, "unused devices: "); |
6485 | 6487 | ||
@@ -6649,7 +6651,7 @@ static int md_seq_show(struct seq_file *seq, void *v) | |||
6649 | { | 6651 | { |
6650 | mddev_t *mddev = v; | 6652 | mddev_t *mddev = v; |
6651 | sector_t sectors; | 6653 | sector_t sectors; |
6652 | mdk_rdev_t *rdev; | 6654 | struct md_rdev *rdev; |
6653 | struct bitmap *bitmap; | 6655 | struct bitmap *bitmap; |
6654 | 6656 | ||
6655 | if (v == (void*)1) { | 6657 | if (v == (void*)1) { |
@@ -6829,7 +6831,7 @@ int unregister_md_personality(struct mdk_personality *p) | |||
6829 | 6831 | ||
6830 | static int is_mddev_idle(mddev_t *mddev, int init) | 6832 | static int is_mddev_idle(mddev_t *mddev, int init) |
6831 | { | 6833 | { |
6832 | mdk_rdev_t * rdev; | 6834 | struct md_rdev * rdev; |
6833 | int idle; | 6835 | int idle; |
6834 | int curr_events; | 6836 | int curr_events; |
6835 | 6837 | ||
@@ -6987,7 +6989,7 @@ void md_do_sync(mddev_t *mddev) | |||
6987 | struct list_head *tmp; | 6989 | struct list_head *tmp; |
6988 | sector_t last_check; | 6990 | sector_t last_check; |
6989 | int skipped = 0; | 6991 | int skipped = 0; |
6990 | mdk_rdev_t *rdev; | 6992 | struct md_rdev *rdev; |
6991 | char *desc; | 6993 | char *desc; |
6992 | 6994 | ||
6993 | /* just incase thread restarts... */ | 6995 | /* just incase thread restarts... */ |
@@ -7304,7 +7306,7 @@ EXPORT_SYMBOL_GPL(md_do_sync); | |||
7304 | 7306 | ||
7305 | static int remove_and_add_spares(mddev_t *mddev) | 7307 | static int remove_and_add_spares(mddev_t *mddev) |
7306 | { | 7308 | { |
7307 | mdk_rdev_t *rdev; | 7309 | struct md_rdev *rdev; |
7308 | int spares = 0; | 7310 | int spares = 0; |
7309 | 7311 | ||
7310 | mddev->curr_resync_completed = 0; | 7312 | mddev->curr_resync_completed = 0; |
@@ -7348,7 +7350,7 @@ static int remove_and_add_spares(mddev_t *mddev) | |||
7348 | 7350 | ||
7349 | static void reap_sync_thread(mddev_t *mddev) | 7351 | static void reap_sync_thread(mddev_t *mddev) |
7350 | { | 7352 | { |
7351 | mdk_rdev_t *rdev; | 7353 | struct md_rdev *rdev; |
7352 | 7354 | ||
7353 | /* resync has finished, collect result */ | 7355 | /* resync has finished, collect result */ |
7354 | md_unregister_thread(&mddev->sync_thread); | 7356 | md_unregister_thread(&mddev->sync_thread); |
@@ -7443,7 +7445,7 @@ void md_check_recovery(mddev_t *mddev) | |||
7443 | /* Only thing we do on a ro array is remove | 7445 | /* Only thing we do on a ro array is remove |
7444 | * failed devices. | 7446 | * failed devices. |
7445 | */ | 7447 | */ |
7446 | mdk_rdev_t *rdev; | 7448 | struct md_rdev *rdev; |
7447 | list_for_each_entry(rdev, &mddev->disks, same_set) | 7449 | list_for_each_entry(rdev, &mddev->disks, same_set) |
7448 | if (rdev->raid_disk >= 0 && | 7450 | if (rdev->raid_disk >= 0 && |
7449 | !test_bit(Blocked, &rdev->flags) && | 7451 | !test_bit(Blocked, &rdev->flags) && |
@@ -7567,7 +7569,7 @@ void md_check_recovery(mddev_t *mddev) | |||
7567 | } | 7569 | } |
7568 | } | 7570 | } |
7569 | 7571 | ||
7570 | void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev) | 7572 | void md_wait_for_blocked_rdev(struct md_rdev *rdev, mddev_t *mddev) |
7571 | { | 7573 | { |
7572 | sysfs_notify_dirent_safe(rdev->sysfs_state); | 7574 | sysfs_notify_dirent_safe(rdev->sysfs_state); |
7573 | wait_event_timeout(rdev->blocked_wait, | 7575 | wait_event_timeout(rdev->blocked_wait, |
@@ -7825,7 +7827,7 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors, | |||
7825 | return rv; | 7827 | return rv; |
7826 | } | 7828 | } |
7827 | 7829 | ||
7828 | int rdev_set_badblocks(mdk_rdev_t *rdev, sector_t s, int sectors, | 7830 | int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors, |
7829 | int acknowledged) | 7831 | int acknowledged) |
7830 | { | 7832 | { |
7831 | int rv = md_set_badblocks(&rdev->badblocks, | 7833 | int rv = md_set_badblocks(&rdev->badblocks, |
@@ -7934,7 +7936,7 @@ out: | |||
7934 | return rv; | 7936 | return rv; |
7935 | } | 7937 | } |
7936 | 7938 | ||
7937 | int rdev_clear_badblocks(mdk_rdev_t *rdev, sector_t s, int sectors) | 7939 | int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors) |
7938 | { | 7940 | { |
7939 | return md_clear_badblocks(&rdev->badblocks, | 7941 | return md_clear_badblocks(&rdev->badblocks, |
7940 | s + rdev->data_offset, | 7942 | s + rdev->data_offset, |
@@ -8181,7 +8183,7 @@ void md_autodetect_dev(dev_t dev) | |||
8181 | 8183 | ||
8182 | static void autostart_arrays(int part) | 8184 | static void autostart_arrays(int part) |
8183 | { | 8185 | { |
8184 | mdk_rdev_t *rdev; | 8186 | struct md_rdev *rdev; |
8185 | struct detected_devices_node *node_detected_dev; | 8187 | struct detected_devices_node *node_detected_dev; |
8186 | dev_t dev; | 8188 | dev_t dev; |
8187 | int i_scanned, i_passed; | 8189 | int i_scanned, i_passed; |
diff --git a/drivers/md/md.h b/drivers/md/md.h index f412b6e3aa73..b0e98c868c14 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h | |||
@@ -27,7 +27,6 @@ | |||
27 | #define MaxSector (~(sector_t)0) | 27 | #define MaxSector (~(sector_t)0) |
28 | 28 | ||
29 | typedef struct mddev_s mddev_t; | 29 | typedef struct mddev_s mddev_t; |
30 | typedef struct mdk_rdev_s mdk_rdev_t; | ||
31 | 30 | ||
32 | /* Bad block numbers are stored sorted in a single page. | 31 | /* Bad block numbers are stored sorted in a single page. |
33 | * 64bits is used for each block or extent. | 32 | * 64bits is used for each block or extent. |
@@ -39,8 +38,7 @@ typedef struct mdk_rdev_s mdk_rdev_t; | |||
39 | /* | 38 | /* |
40 | * MD's 'extended' device | 39 | * MD's 'extended' device |
41 | */ | 40 | */ |
42 | struct mdk_rdev_s | 41 | struct md_rdev { |
43 | { | ||
44 | struct list_head same_set; /* RAID devices within the same set */ | 42 | struct list_head same_set; /* RAID devices within the same set */ |
45 | 43 | ||
46 | sector_t sectors; /* Device size (in 512bytes sectors) */ | 44 | sector_t sectors; /* Device size (in 512bytes sectors) */ |
@@ -168,7 +166,7 @@ struct mdk_rdev_s | |||
168 | 166 | ||
169 | extern int md_is_badblock(struct badblocks *bb, sector_t s, int sectors, | 167 | extern int md_is_badblock(struct badblocks *bb, sector_t s, int sectors, |
170 | sector_t *first_bad, int *bad_sectors); | 168 | sector_t *first_bad, int *bad_sectors); |
171 | static inline int is_badblock(mdk_rdev_t *rdev, sector_t s, int sectors, | 169 | static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors, |
172 | sector_t *first_bad, int *bad_sectors) | 170 | sector_t *first_bad, int *bad_sectors) |
173 | { | 171 | { |
174 | if (unlikely(rdev->badblocks.count)) { | 172 | if (unlikely(rdev->badblocks.count)) { |
@@ -181,9 +179,9 @@ static inline int is_badblock(mdk_rdev_t *rdev, sector_t s, int sectors, | |||
181 | } | 179 | } |
182 | return 0; | 180 | return 0; |
183 | } | 181 | } |
184 | extern int rdev_set_badblocks(mdk_rdev_t *rdev, sector_t s, int sectors, | 182 | extern int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors, |
185 | int acknowledged); | 183 | int acknowledged); |
186 | extern int rdev_clear_badblocks(mdk_rdev_t *rdev, sector_t s, int sectors); | 184 | extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors); |
187 | extern void md_ack_all_badblocks(struct badblocks *bb); | 185 | extern void md_ack_all_badblocks(struct badblocks *bb); |
188 | 186 | ||
189 | struct mddev_s | 187 | struct mddev_s |
@@ -402,11 +400,11 @@ struct mddev_s | |||
402 | atomic_t flush_pending; | 400 | atomic_t flush_pending; |
403 | struct work_struct flush_work; | 401 | struct work_struct flush_work; |
404 | struct work_struct event_work; /* used by dm to report failure event */ | 402 | struct work_struct event_work; /* used by dm to report failure event */ |
405 | void (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev); | 403 | void (*sync_super)(mddev_t *mddev, struct md_rdev *rdev); |
406 | }; | 404 | }; |
407 | 405 | ||
408 | 406 | ||
409 | static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev) | 407 | static inline void rdev_dec_pending(struct md_rdev *rdev, mddev_t *mddev) |
410 | { | 408 | { |
411 | int faulty = test_bit(Faulty, &rdev->flags); | 409 | int faulty = test_bit(Faulty, &rdev->flags); |
412 | if (atomic_dec_and_test(&rdev->nr_pending) && faulty) | 410 | if (atomic_dec_and_test(&rdev->nr_pending) && faulty) |
@@ -431,8 +429,8 @@ struct mdk_personality | |||
431 | /* error_handler must set ->faulty and clear ->in_sync | 429 | /* error_handler must set ->faulty and clear ->in_sync |
432 | * if appropriate, and should abort recovery if needed | 430 | * if appropriate, and should abort recovery if needed |
433 | */ | 431 | */ |
434 | void (*error_handler)(mddev_t *mddev, mdk_rdev_t *rdev); | 432 | void (*error_handler)(mddev_t *mddev, struct md_rdev *rdev); |
435 | int (*hot_add_disk) (mddev_t *mddev, mdk_rdev_t *rdev); | 433 | int (*hot_add_disk) (mddev_t *mddev, struct md_rdev *rdev); |
436 | int (*hot_remove_disk) (mddev_t *mddev, int number); | 434 | int (*hot_remove_disk) (mddev_t *mddev, int number); |
437 | int (*spare_active) (mddev_t *mddev); | 435 | int (*spare_active) (mddev_t *mddev); |
438 | sector_t (*sync_request)(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster); | 436 | sector_t (*sync_request)(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster); |
@@ -484,14 +482,14 @@ static inline char * mdname (mddev_t * mddev) | |||
484 | return mddev->gendisk ? mddev->gendisk->disk_name : "mdX"; | 482 | return mddev->gendisk ? mddev->gendisk->disk_name : "mdX"; |
485 | } | 483 | } |
486 | 484 | ||
487 | static inline int sysfs_link_rdev(mddev_t *mddev, mdk_rdev_t *rdev) | 485 | static inline int sysfs_link_rdev(mddev_t *mddev, struct md_rdev *rdev) |
488 | { | 486 | { |
489 | char nm[20]; | 487 | char nm[20]; |
490 | sprintf(nm, "rd%d", rdev->raid_disk); | 488 | sprintf(nm, "rd%d", rdev->raid_disk); |
491 | return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); | 489 | return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); |
492 | } | 490 | } |
493 | 491 | ||
494 | static inline void sysfs_unlink_rdev(mddev_t *mddev, mdk_rdev_t *rdev) | 492 | static inline void sysfs_unlink_rdev(mddev_t *mddev, struct md_rdev *rdev) |
495 | { | 493 | { |
496 | char nm[20]; | 494 | char nm[20]; |
497 | sprintf(nm, "rd%d", rdev->raid_disk); | 495 | sprintf(nm, "rd%d", rdev->raid_disk); |
@@ -566,23 +564,23 @@ extern void md_check_recovery(mddev_t *mddev); | |||
566 | extern void md_write_start(mddev_t *mddev, struct bio *bi); | 564 | extern void md_write_start(mddev_t *mddev, struct bio *bi); |
567 | extern void md_write_end(mddev_t *mddev); | 565 | extern void md_write_end(mddev_t *mddev); |
568 | extern void md_done_sync(mddev_t *mddev, int blocks, int ok); | 566 | extern void md_done_sync(mddev_t *mddev, int blocks, int ok); |
569 | extern void md_error(mddev_t *mddev, mdk_rdev_t *rdev); | 567 | extern void md_error(mddev_t *mddev, struct md_rdev *rdev); |
570 | 568 | ||
571 | extern int mddev_congested(mddev_t *mddev, int bits); | 569 | extern int mddev_congested(mddev_t *mddev, int bits); |
572 | extern void md_flush_request(mddev_t *mddev, struct bio *bio); | 570 | extern void md_flush_request(mddev_t *mddev, struct bio *bio); |
573 | extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, | 571 | extern void md_super_write(mddev_t *mddev, struct md_rdev *rdev, |
574 | sector_t sector, int size, struct page *page); | 572 | sector_t sector, int size, struct page *page); |
575 | extern void md_super_wait(mddev_t *mddev); | 573 | extern void md_super_wait(mddev_t *mddev); |
576 | extern int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size, | 574 | extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, |
577 | struct page *page, int rw, bool metadata_op); | 575 | struct page *page, int rw, bool metadata_op); |
578 | extern void md_do_sync(mddev_t *mddev); | 576 | extern void md_do_sync(mddev_t *mddev); |
579 | extern void md_new_event(mddev_t *mddev); | 577 | extern void md_new_event(mddev_t *mddev); |
580 | extern int md_allow_write(mddev_t *mddev); | 578 | extern int md_allow_write(mddev_t *mddev); |
581 | extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev); | 579 | extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, mddev_t *mddev); |
582 | extern void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors); | 580 | extern void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors); |
583 | extern int md_check_no_bitmap(mddev_t *mddev); | 581 | extern int md_check_no_bitmap(mddev_t *mddev); |
584 | extern int md_integrity_register(mddev_t *mddev); | 582 | extern int md_integrity_register(mddev_t *mddev); |
585 | extern void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev); | 583 | extern void md_integrity_add_rdev(struct md_rdev *rdev, mddev_t *mddev); |
586 | extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale); | 584 | extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale); |
587 | extern void restore_bitmap_write_access(struct file *file); | 585 | extern void restore_bitmap_write_access(struct file *file); |
588 | 586 | ||
@@ -590,7 +588,7 @@ extern void mddev_init(mddev_t *mddev); | |||
590 | extern int md_run(mddev_t *mddev); | 588 | extern int md_run(mddev_t *mddev); |
591 | extern void md_stop(mddev_t *mddev); | 589 | extern void md_stop(mddev_t *mddev); |
592 | extern void md_stop_writes(mddev_t *mddev); | 590 | extern void md_stop_writes(mddev_t *mddev); |
593 | extern int md_rdev_init(mdk_rdev_t *rdev); | 591 | extern int md_rdev_init(struct md_rdev *rdev); |
594 | 592 | ||
595 | extern void mddev_suspend(mddev_t *mddev); | 593 | extern void mddev_suspend(mddev_t *mddev); |
596 | extern void mddev_resume(mddev_t *mddev); | 594 | extern void mddev_resume(mddev_t *mddev); |
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index d5b5fb300171..09e8aa3d59f9 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c | |||
@@ -42,7 +42,7 @@ static int multipath_map (multipath_conf_t *conf) | |||
42 | 42 | ||
43 | rcu_read_lock(); | 43 | rcu_read_lock(); |
44 | for (i = 0; i < disks; i++) { | 44 | for (i = 0; i < disks; i++) { |
45 | mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev); | 45 | struct md_rdev *rdev = rcu_dereference(conf->multipaths[i].rdev); |
46 | if (rdev && test_bit(In_sync, &rdev->flags)) { | 46 | if (rdev && test_bit(In_sync, &rdev->flags)) { |
47 | atomic_inc(&rdev->nr_pending); | 47 | atomic_inc(&rdev->nr_pending); |
48 | rcu_read_unlock(); | 48 | rcu_read_unlock(); |
@@ -87,7 +87,7 @@ static void multipath_end_request(struct bio *bio, int error) | |||
87 | int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); | 87 | int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); |
88 | struct multipath_bh *mp_bh = bio->bi_private; | 88 | struct multipath_bh *mp_bh = bio->bi_private; |
89 | multipath_conf_t *conf = mp_bh->mddev->private; | 89 | multipath_conf_t *conf = mp_bh->mddev->private; |
90 | mdk_rdev_t *rdev = conf->multipaths[mp_bh->path].rdev; | 90 | struct md_rdev *rdev = conf->multipaths[mp_bh->path].rdev; |
91 | 91 | ||
92 | if (uptodate) | 92 | if (uptodate) |
93 | multipath_end_bh_io(mp_bh, 0); | 93 | multipath_end_bh_io(mp_bh, 0); |
@@ -165,7 +165,7 @@ static int multipath_congested(void *data, int bits) | |||
165 | 165 | ||
166 | rcu_read_lock(); | 166 | rcu_read_lock(); |
167 | for (i = 0; i < mddev->raid_disks ; i++) { | 167 | for (i = 0; i < mddev->raid_disks ; i++) { |
168 | mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev); | 168 | struct md_rdev *rdev = rcu_dereference(conf->multipaths[i].rdev); |
169 | if (rdev && !test_bit(Faulty, &rdev->flags)) { | 169 | if (rdev && !test_bit(Faulty, &rdev->flags)) { |
170 | struct request_queue *q = bdev_get_queue(rdev->bdev); | 170 | struct request_queue *q = bdev_get_queue(rdev->bdev); |
171 | 171 | ||
@@ -183,7 +183,7 @@ static int multipath_congested(void *data, int bits) | |||
183 | /* | 183 | /* |
184 | * Careful, this can execute in IRQ contexts as well! | 184 | * Careful, this can execute in IRQ contexts as well! |
185 | */ | 185 | */ |
186 | static void multipath_error (mddev_t *mddev, mdk_rdev_t *rdev) | 186 | static void multipath_error (mddev_t *mddev, struct md_rdev *rdev) |
187 | { | 187 | { |
188 | multipath_conf_t *conf = mddev->private; | 188 | multipath_conf_t *conf = mddev->private; |
189 | char b[BDEVNAME_SIZE]; | 189 | char b[BDEVNAME_SIZE]; |
@@ -242,7 +242,7 @@ static void print_multipath_conf (multipath_conf_t *conf) | |||
242 | } | 242 | } |
243 | 243 | ||
244 | 244 | ||
245 | static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) | 245 | static int multipath_add_disk(mddev_t *mddev, struct md_rdev *rdev) |
246 | { | 246 | { |
247 | multipath_conf_t *conf = mddev->private; | 247 | multipath_conf_t *conf = mddev->private; |
248 | struct request_queue *q; | 248 | struct request_queue *q; |
@@ -295,7 +295,7 @@ static int multipath_remove_disk(mddev_t *mddev, int number) | |||
295 | { | 295 | { |
296 | multipath_conf_t *conf = mddev->private; | 296 | multipath_conf_t *conf = mddev->private; |
297 | int err = 0; | 297 | int err = 0; |
298 | mdk_rdev_t *rdev; | 298 | struct md_rdev *rdev; |
299 | struct multipath_info *p = conf->multipaths + number; | 299 | struct multipath_info *p = conf->multipaths + number; |
300 | 300 | ||
301 | print_multipath_conf(conf); | 301 | print_multipath_conf(conf); |
@@ -392,7 +392,7 @@ static int multipath_run (mddev_t *mddev) | |||
392 | multipath_conf_t *conf; | 392 | multipath_conf_t *conf; |
393 | int disk_idx; | 393 | int disk_idx; |
394 | struct multipath_info *disk; | 394 | struct multipath_info *disk; |
395 | mdk_rdev_t *rdev; | 395 | struct md_rdev *rdev; |
396 | int working_disks; | 396 | int working_disks; |
397 | 397 | ||
398 | if (md_check_no_bitmap(mddev)) | 398 | if (md_check_no_bitmap(mddev)) |
diff --git a/drivers/md/multipath.h b/drivers/md/multipath.h index 3c5a45eb5f8a..034f8381e046 100644 --- a/drivers/md/multipath.h +++ b/drivers/md/multipath.h | |||
@@ -2,7 +2,7 @@ | |||
2 | #define _MULTIPATH_H | 2 | #define _MULTIPATH_H |
3 | 3 | ||
4 | struct multipath_info { | 4 | struct multipath_info { |
5 | mdk_rdev_t *rdev; | 5 | struct md_rdev *rdev; |
6 | }; | 6 | }; |
7 | 7 | ||
8 | struct multipath_private_data { | 8 | struct multipath_private_data { |
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 138ecd7eef51..70fc3d949795 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c | |||
@@ -29,7 +29,7 @@ static int raid0_congested(void *data, int bits) | |||
29 | { | 29 | { |
30 | mddev_t *mddev = data; | 30 | mddev_t *mddev = data; |
31 | raid0_conf_t *conf = mddev->private; | 31 | raid0_conf_t *conf = mddev->private; |
32 | mdk_rdev_t **devlist = conf->devlist; | 32 | struct md_rdev **devlist = conf->devlist; |
33 | int raid_disks = conf->strip_zone[0].nb_dev; | 33 | int raid_disks = conf->strip_zone[0].nb_dev; |
34 | int i, ret = 0; | 34 | int i, ret = 0; |
35 | 35 | ||
@@ -81,7 +81,7 @@ static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf) | |||
81 | { | 81 | { |
82 | int i, c, err; | 82 | int i, c, err; |
83 | sector_t curr_zone_end, sectors; | 83 | sector_t curr_zone_end, sectors; |
84 | mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev, **dev; | 84 | struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev; |
85 | struct strip_zone *zone; | 85 | struct strip_zone *zone; |
86 | int cnt; | 86 | int cnt; |
87 | char b[BDEVNAME_SIZE]; | 87 | char b[BDEVNAME_SIZE]; |
@@ -142,7 +142,7 @@ static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf) | |||
142 | conf->nr_strip_zones, GFP_KERNEL); | 142 | conf->nr_strip_zones, GFP_KERNEL); |
143 | if (!conf->strip_zone) | 143 | if (!conf->strip_zone) |
144 | goto abort; | 144 | goto abort; |
145 | conf->devlist = kzalloc(sizeof(mdk_rdev_t*)* | 145 | conf->devlist = kzalloc(sizeof(struct md_rdev*)* |
146 | conf->nr_strip_zones*mddev->raid_disks, | 146 | conf->nr_strip_zones*mddev->raid_disks, |
147 | GFP_KERNEL); | 147 | GFP_KERNEL); |
148 | if (!conf->devlist) | 148 | if (!conf->devlist) |
@@ -323,7 +323,7 @@ static int raid0_mergeable_bvec(struct request_queue *q, | |||
323 | static sector_t raid0_size(mddev_t *mddev, sector_t sectors, int raid_disks) | 323 | static sector_t raid0_size(mddev_t *mddev, sector_t sectors, int raid_disks) |
324 | { | 324 | { |
325 | sector_t array_sectors = 0; | 325 | sector_t array_sectors = 0; |
326 | mdk_rdev_t *rdev; | 326 | struct md_rdev *rdev; |
327 | 327 | ||
328 | WARN_ONCE(sectors || raid_disks, | 328 | WARN_ONCE(sectors || raid_disks, |
329 | "%s does not support generic reshape\n", __func__); | 329 | "%s does not support generic reshape\n", __func__); |
@@ -419,7 +419,7 @@ static struct strip_zone *find_zone(struct raid0_private_data *conf, | |||
419 | * remaps the bio to the target device. we separate two flows. | 419 | * remaps the bio to the target device. we separate two flows. |
420 | * power 2 flow and a general flow for the sake of perfromance | 420 | * power 2 flow and a general flow for the sake of perfromance |
421 | */ | 421 | */ |
422 | static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone, | 422 | static struct md_rdev *map_sector(mddev_t *mddev, struct strip_zone *zone, |
423 | sector_t sector, sector_t *sector_offset) | 423 | sector_t sector, sector_t *sector_offset) |
424 | { | 424 | { |
425 | unsigned int sect_in_chunk; | 425 | unsigned int sect_in_chunk; |
@@ -473,7 +473,7 @@ static int raid0_make_request(mddev_t *mddev, struct bio *bio) | |||
473 | unsigned int chunk_sects; | 473 | unsigned int chunk_sects; |
474 | sector_t sector_offset; | 474 | sector_t sector_offset; |
475 | struct strip_zone *zone; | 475 | struct strip_zone *zone; |
476 | mdk_rdev_t *tmp_dev; | 476 | struct md_rdev *tmp_dev; |
477 | 477 | ||
478 | if (unlikely(bio->bi_rw & REQ_FLUSH)) { | 478 | if (unlikely(bio->bi_rw & REQ_FLUSH)) { |
479 | md_flush_request(mddev, bio); | 479 | md_flush_request(mddev, bio); |
@@ -536,7 +536,7 @@ static void raid0_status(struct seq_file *seq, mddev_t *mddev) | |||
536 | 536 | ||
537 | static void *raid0_takeover_raid45(mddev_t *mddev) | 537 | static void *raid0_takeover_raid45(mddev_t *mddev) |
538 | { | 538 | { |
539 | mdk_rdev_t *rdev; | 539 | struct md_rdev *rdev; |
540 | raid0_conf_t *priv_conf; | 540 | raid0_conf_t *priv_conf; |
541 | 541 | ||
542 | if (mddev->degraded != 1) { | 542 | if (mddev->degraded != 1) { |
diff --git a/drivers/md/raid0.h b/drivers/md/raid0.h index 91f8e876ee64..9d877b8d382c 100644 --- a/drivers/md/raid0.h +++ b/drivers/md/raid0.h | |||
@@ -11,7 +11,7 @@ struct strip_zone | |||
11 | struct raid0_private_data | 11 | struct raid0_private_data |
12 | { | 12 | { |
13 | struct strip_zone *strip_zone; | 13 | struct strip_zone *strip_zone; |
14 | mdk_rdev_t **devlist; /* lists of rdevs, pointed to by strip_zone->dev */ | 14 | struct md_rdev **devlist; /* lists of rdevs, pointed to by strip_zone->dev */ |
15 | int nr_strip_zones; | 15 | int nr_strip_zones; |
16 | }; | 16 | }; |
17 | 17 | ||
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index c8993fb8286c..de7d2b677066 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -475,7 +475,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio, int *max_sectors) | |||
475 | int best_disk; | 475 | int best_disk; |
476 | int i; | 476 | int i; |
477 | sector_t best_dist; | 477 | sector_t best_dist; |
478 | mdk_rdev_t *rdev; | 478 | struct md_rdev *rdev; |
479 | int choose_first; | 479 | int choose_first; |
480 | 480 | ||
481 | rcu_read_lock(); | 481 | rcu_read_lock(); |
@@ -600,7 +600,7 @@ int md_raid1_congested(mddev_t *mddev, int bits) | |||
600 | 600 | ||
601 | rcu_read_lock(); | 601 | rcu_read_lock(); |
602 | for (i = 0; i < mddev->raid_disks; i++) { | 602 | for (i = 0; i < mddev->raid_disks; i++) { |
603 | mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); | 603 | struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); |
604 | if (rdev && !test_bit(Faulty, &rdev->flags)) { | 604 | if (rdev && !test_bit(Faulty, &rdev->flags)) { |
605 | struct request_queue *q = bdev_get_queue(rdev->bdev); | 605 | struct request_queue *q = bdev_get_queue(rdev->bdev); |
606 | 606 | ||
@@ -808,7 +808,7 @@ static int make_request(mddev_t *mddev, struct bio * bio) | |||
808 | const int rw = bio_data_dir(bio); | 808 | const int rw = bio_data_dir(bio); |
809 | const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); | 809 | const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); |
810 | const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA)); | 810 | const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA)); |
811 | mdk_rdev_t *blocked_rdev; | 811 | struct md_rdev *blocked_rdev; |
812 | int plugged; | 812 | int plugged; |
813 | int first_clone; | 813 | int first_clone; |
814 | int sectors_handled; | 814 | int sectors_handled; |
@@ -963,7 +963,7 @@ read_again: | |||
963 | rcu_read_lock(); | 963 | rcu_read_lock(); |
964 | max_sectors = r1_bio->sectors; | 964 | max_sectors = r1_bio->sectors; |
965 | for (i = 0; i < disks; i++) { | 965 | for (i = 0; i < disks; i++) { |
966 | mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); | 966 | struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); |
967 | if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { | 967 | if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { |
968 | atomic_inc(&rdev->nr_pending); | 968 | atomic_inc(&rdev->nr_pending); |
969 | blocked_rdev = rdev; | 969 | blocked_rdev = rdev; |
@@ -1147,7 +1147,7 @@ static void status(struct seq_file *seq, mddev_t *mddev) | |||
1147 | conf->raid_disks - mddev->degraded); | 1147 | conf->raid_disks - mddev->degraded); |
1148 | rcu_read_lock(); | 1148 | rcu_read_lock(); |
1149 | for (i = 0; i < conf->raid_disks; i++) { | 1149 | for (i = 0; i < conf->raid_disks; i++) { |
1150 | mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); | 1150 | struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); |
1151 | seq_printf(seq, "%s", | 1151 | seq_printf(seq, "%s", |
1152 | rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_"); | 1152 | rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_"); |
1153 | } | 1153 | } |
@@ -1156,7 +1156,7 @@ static void status(struct seq_file *seq, mddev_t *mddev) | |||
1156 | } | 1156 | } |
1157 | 1157 | ||
1158 | 1158 | ||
1159 | static void error(mddev_t *mddev, mdk_rdev_t *rdev) | 1159 | static void error(mddev_t *mddev, struct md_rdev *rdev) |
1160 | { | 1160 | { |
1161 | char b[BDEVNAME_SIZE]; | 1161 | char b[BDEVNAME_SIZE]; |
1162 | conf_t *conf = mddev->private; | 1162 | conf_t *conf = mddev->private; |
@@ -1214,7 +1214,7 @@ static void print_conf(conf_t *conf) | |||
1214 | rcu_read_lock(); | 1214 | rcu_read_lock(); |
1215 | for (i = 0; i < conf->raid_disks; i++) { | 1215 | for (i = 0; i < conf->raid_disks; i++) { |
1216 | char b[BDEVNAME_SIZE]; | 1216 | char b[BDEVNAME_SIZE]; |
1217 | mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); | 1217 | struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); |
1218 | if (rdev) | 1218 | if (rdev) |
1219 | printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n", | 1219 | printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n", |
1220 | i, !test_bit(In_sync, &rdev->flags), | 1220 | i, !test_bit(In_sync, &rdev->flags), |
@@ -1246,7 +1246,7 @@ static int raid1_spare_active(mddev_t *mddev) | |||
1246 | * Called under mddev lock, so rcu protection not needed. | 1246 | * Called under mddev lock, so rcu protection not needed. |
1247 | */ | 1247 | */ |
1248 | for (i = 0; i < conf->raid_disks; i++) { | 1248 | for (i = 0; i < conf->raid_disks; i++) { |
1249 | mdk_rdev_t *rdev = conf->mirrors[i].rdev; | 1249 | struct md_rdev *rdev = conf->mirrors[i].rdev; |
1250 | if (rdev | 1250 | if (rdev |
1251 | && !test_bit(Faulty, &rdev->flags) | 1251 | && !test_bit(Faulty, &rdev->flags) |
1252 | && !test_and_set_bit(In_sync, &rdev->flags)) { | 1252 | && !test_and_set_bit(In_sync, &rdev->flags)) { |
@@ -1263,7 +1263,7 @@ static int raid1_spare_active(mddev_t *mddev) | |||
1263 | } | 1263 | } |
1264 | 1264 | ||
1265 | 1265 | ||
1266 | static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) | 1266 | static int raid1_add_disk(mddev_t *mddev, struct md_rdev *rdev) |
1267 | { | 1267 | { |
1268 | conf_t *conf = mddev->private; | 1268 | conf_t *conf = mddev->private; |
1269 | int err = -EEXIST; | 1269 | int err = -EEXIST; |
@@ -1315,7 +1315,7 @@ static int raid1_remove_disk(mddev_t *mddev, int number) | |||
1315 | { | 1315 | { |
1316 | conf_t *conf = mddev->private; | 1316 | conf_t *conf = mddev->private; |
1317 | int err = 0; | 1317 | int err = 0; |
1318 | mdk_rdev_t *rdev; | 1318 | struct md_rdev *rdev; |
1319 | mirror_info_t *p = conf->mirrors+ number; | 1319 | mirror_info_t *p = conf->mirrors+ number; |
1320 | 1320 | ||
1321 | print_conf(conf); | 1321 | print_conf(conf); |
@@ -1419,7 +1419,7 @@ static void end_sync_write(struct bio *bio, int error) | |||
1419 | } | 1419 | } |
1420 | } | 1420 | } |
1421 | 1421 | ||
1422 | static int r1_sync_page_io(mdk_rdev_t *rdev, sector_t sector, | 1422 | static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector, |
1423 | int sectors, struct page *page, int rw) | 1423 | int sectors, struct page *page, int rw) |
1424 | { | 1424 | { |
1425 | if (sync_page_io(rdev, sector, sectors << 9, page, rw, false)) | 1425 | if (sync_page_io(rdev, sector, sectors << 9, page, rw, false)) |
@@ -1457,7 +1457,7 @@ static int fix_sync_read_error(r1bio_t *r1_bio) | |||
1457 | int s = sectors; | 1457 | int s = sectors; |
1458 | int d = r1_bio->read_disk; | 1458 | int d = r1_bio->read_disk; |
1459 | int success = 0; | 1459 | int success = 0; |
1460 | mdk_rdev_t *rdev; | 1460 | struct md_rdev *rdev; |
1461 | int start; | 1461 | int start; |
1462 | 1462 | ||
1463 | if (s > (PAGE_SIZE>>9)) | 1463 | if (s > (PAGE_SIZE>>9)) |
@@ -1696,7 +1696,7 @@ static void fix_read_error(conf_t *conf, int read_disk, | |||
1696 | int d = read_disk; | 1696 | int d = read_disk; |
1697 | int success = 0; | 1697 | int success = 0; |
1698 | int start; | 1698 | int start; |
1699 | mdk_rdev_t *rdev; | 1699 | struct md_rdev *rdev; |
1700 | 1700 | ||
1701 | if (s > (PAGE_SIZE>>9)) | 1701 | if (s > (PAGE_SIZE>>9)) |
1702 | s = PAGE_SIZE >> 9; | 1702 | s = PAGE_SIZE >> 9; |
@@ -1727,7 +1727,7 @@ static void fix_read_error(conf_t *conf, int read_disk, | |||
1727 | 1727 | ||
1728 | if (!success) { | 1728 | if (!success) { |
1729 | /* Cannot read from anywhere - mark it bad */ | 1729 | /* Cannot read from anywhere - mark it bad */ |
1730 | mdk_rdev_t *rdev = conf->mirrors[read_disk].rdev; | 1730 | struct md_rdev *rdev = conf->mirrors[read_disk].rdev; |
1731 | if (!rdev_set_badblocks(rdev, sect, s, 0)) | 1731 | if (!rdev_set_badblocks(rdev, sect, s, 0)) |
1732 | md_error(mddev, rdev); | 1732 | md_error(mddev, rdev); |
1733 | break; | 1733 | break; |
@@ -1794,7 +1794,7 @@ static int narrow_write_error(r1bio_t *r1_bio, int i) | |||
1794 | { | 1794 | { |
1795 | mddev_t *mddev = r1_bio->mddev; | 1795 | mddev_t *mddev = r1_bio->mddev; |
1796 | conf_t *conf = mddev->private; | 1796 | conf_t *conf = mddev->private; |
1797 | mdk_rdev_t *rdev = conf->mirrors[i].rdev; | 1797 | struct md_rdev *rdev = conf->mirrors[i].rdev; |
1798 | int vcnt, idx; | 1798 | int vcnt, idx; |
1799 | struct bio_vec *vec; | 1799 | struct bio_vec *vec; |
1800 | 1800 | ||
@@ -1871,7 +1871,7 @@ static void handle_sync_write_finished(conf_t *conf, r1bio_t *r1_bio) | |||
1871 | int m; | 1871 | int m; |
1872 | int s = r1_bio->sectors; | 1872 | int s = r1_bio->sectors; |
1873 | for (m = 0; m < conf->raid_disks ; m++) { | 1873 | for (m = 0; m < conf->raid_disks ; m++) { |
1874 | mdk_rdev_t *rdev = conf->mirrors[m].rdev; | 1874 | struct md_rdev *rdev = conf->mirrors[m].rdev; |
1875 | struct bio *bio = r1_bio->bios[m]; | 1875 | struct bio *bio = r1_bio->bios[m]; |
1876 | if (bio->bi_end_io == NULL) | 1876 | if (bio->bi_end_io == NULL) |
1877 | continue; | 1877 | continue; |
@@ -1894,7 +1894,7 @@ static void handle_write_finished(conf_t *conf, r1bio_t *r1_bio) | |||
1894 | int m; | 1894 | int m; |
1895 | for (m = 0; m < conf->raid_disks ; m++) | 1895 | for (m = 0; m < conf->raid_disks ; m++) |
1896 | if (r1_bio->bios[m] == IO_MADE_GOOD) { | 1896 | if (r1_bio->bios[m] == IO_MADE_GOOD) { |
1897 | mdk_rdev_t *rdev = conf->mirrors[m].rdev; | 1897 | struct md_rdev *rdev = conf->mirrors[m].rdev; |
1898 | rdev_clear_badblocks(rdev, | 1898 | rdev_clear_badblocks(rdev, |
1899 | r1_bio->sector, | 1899 | r1_bio->sector, |
1900 | r1_bio->sectors); | 1900 | r1_bio->sectors); |
@@ -1925,7 +1925,7 @@ static void handle_read_error(conf_t *conf, r1bio_t *r1_bio) | |||
1925 | mddev_t *mddev = conf->mddev; | 1925 | mddev_t *mddev = conf->mddev; |
1926 | struct bio *bio; | 1926 | struct bio *bio; |
1927 | char b[BDEVNAME_SIZE]; | 1927 | char b[BDEVNAME_SIZE]; |
1928 | mdk_rdev_t *rdev; | 1928 | struct md_rdev *rdev; |
1929 | 1929 | ||
1930 | clear_bit(R1BIO_ReadError, &r1_bio->state); | 1930 | clear_bit(R1BIO_ReadError, &r1_bio->state); |
1931 | /* we got a read error. Maybe the drive is bad. Maybe just | 1931 | /* we got a read error. Maybe the drive is bad. Maybe just |
@@ -2168,7 +2168,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i | |||
2168 | set_bit(R1BIO_IsSync, &r1_bio->state); | 2168 | set_bit(R1BIO_IsSync, &r1_bio->state); |
2169 | 2169 | ||
2170 | for (i=0; i < conf->raid_disks; i++) { | 2170 | for (i=0; i < conf->raid_disks; i++) { |
2171 | mdk_rdev_t *rdev; | 2171 | struct md_rdev *rdev; |
2172 | bio = r1_bio->bios[i]; | 2172 | bio = r1_bio->bios[i]; |
2173 | 2173 | ||
2174 | /* take from bio_init */ | 2174 | /* take from bio_init */ |
@@ -2240,7 +2240,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i | |||
2240 | int ok = 1; | 2240 | int ok = 1; |
2241 | for (i = 0 ; i < conf->raid_disks ; i++) | 2241 | for (i = 0 ; i < conf->raid_disks ; i++) |
2242 | if (r1_bio->bios[i]->bi_end_io == end_sync_write) { | 2242 | if (r1_bio->bios[i]->bi_end_io == end_sync_write) { |
2243 | mdk_rdev_t *rdev = | 2243 | struct md_rdev *rdev = |
2244 | rcu_dereference(conf->mirrors[i].rdev); | 2244 | rcu_dereference(conf->mirrors[i].rdev); |
2245 | ok = rdev_set_badblocks(rdev, sector_nr, | 2245 | ok = rdev_set_badblocks(rdev, sector_nr, |
2246 | min_bad, 0 | 2246 | min_bad, 0 |
@@ -2370,7 +2370,7 @@ static conf_t *setup_conf(mddev_t *mddev) | |||
2370 | conf_t *conf; | 2370 | conf_t *conf; |
2371 | int i; | 2371 | int i; |
2372 | mirror_info_t *disk; | 2372 | mirror_info_t *disk; |
2373 | mdk_rdev_t *rdev; | 2373 | struct md_rdev *rdev; |
2374 | int err = -ENOMEM; | 2374 | int err = -ENOMEM; |
2375 | 2375 | ||
2376 | conf = kzalloc(sizeof(conf_t), GFP_KERNEL); | 2376 | conf = kzalloc(sizeof(conf_t), GFP_KERNEL); |
@@ -2470,7 +2470,7 @@ static int run(mddev_t *mddev) | |||
2470 | { | 2470 | { |
2471 | conf_t *conf; | 2471 | conf_t *conf; |
2472 | int i; | 2472 | int i; |
2473 | mdk_rdev_t *rdev; | 2473 | struct md_rdev *rdev; |
2474 | 2474 | ||
2475 | if (mddev->level != 1) { | 2475 | if (mddev->level != 1) { |
2476 | printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n", | 2476 | printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n", |
@@ -2669,7 +2669,7 @@ static int raid1_reshape(mddev_t *mddev) | |||
2669 | conf->r1bio_pool = newpool; | 2669 | conf->r1bio_pool = newpool; |
2670 | 2670 | ||
2671 | for (d = d2 = 0; d < conf->raid_disks; d++) { | 2671 | for (d = d2 = 0; d < conf->raid_disks; d++) { |
2672 | mdk_rdev_t *rdev = conf->mirrors[d].rdev; | 2672 | struct md_rdev *rdev = conf->mirrors[d].rdev; |
2673 | if (rdev && rdev->raid_disk != d2) { | 2673 | if (rdev && rdev->raid_disk != d2) { |
2674 | sysfs_unlink_rdev(mddev, rdev); | 2674 | sysfs_unlink_rdev(mddev, rdev); |
2675 | rdev->raid_disk = d2; | 2675 | rdev->raid_disk = d2; |
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h index a3d5483873a2..2377a461f794 100644 --- a/drivers/md/raid1.h +++ b/drivers/md/raid1.h | |||
@@ -4,7 +4,7 @@ | |||
4 | typedef struct mirror_info mirror_info_t; | 4 | typedef struct mirror_info mirror_info_t; |
5 | 5 | ||
6 | struct mirror_info { | 6 | struct mirror_info { |
7 | mdk_rdev_t *rdev; | 7 | struct md_rdev *rdev; |
8 | sector_t head_position; | 8 | sector_t head_position; |
9 | }; | 9 | }; |
10 | 10 | ||
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 0cd9672cf9cb..b3b7238c5416 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -562,7 +562,7 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio, int *max_sectors) | |||
562 | int sectors = r10_bio->sectors; | 562 | int sectors = r10_bio->sectors; |
563 | int best_good_sectors; | 563 | int best_good_sectors; |
564 | sector_t new_distance, best_dist; | 564 | sector_t new_distance, best_dist; |
565 | mdk_rdev_t *rdev; | 565 | struct md_rdev *rdev; |
566 | int do_balance; | 566 | int do_balance; |
567 | int best_slot; | 567 | int best_slot; |
568 | 568 | ||
@@ -685,7 +685,7 @@ static int raid10_congested(void *data, int bits) | |||
685 | return 1; | 685 | return 1; |
686 | rcu_read_lock(); | 686 | rcu_read_lock(); |
687 | for (i = 0; i < conf->raid_disks && ret == 0; i++) { | 687 | for (i = 0; i < conf->raid_disks && ret == 0; i++) { |
688 | mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); | 688 | struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); |
689 | if (rdev && !test_bit(Faulty, &rdev->flags)) { | 689 | if (rdev && !test_bit(Faulty, &rdev->flags)) { |
690 | struct request_queue *q = bdev_get_queue(rdev->bdev); | 690 | struct request_queue *q = bdev_get_queue(rdev->bdev); |
691 | 691 | ||
@@ -842,7 +842,7 @@ static int make_request(mddev_t *mddev, struct bio * bio) | |||
842 | const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); | 842 | const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); |
843 | const unsigned long do_fua = (bio->bi_rw & REQ_FUA); | 843 | const unsigned long do_fua = (bio->bi_rw & REQ_FUA); |
844 | unsigned long flags; | 844 | unsigned long flags; |
845 | mdk_rdev_t *blocked_rdev; | 845 | struct md_rdev *blocked_rdev; |
846 | int plugged; | 846 | int plugged; |
847 | int sectors_handled; | 847 | int sectors_handled; |
848 | int max_sectors; | 848 | int max_sectors; |
@@ -1017,7 +1017,7 @@ retry_write: | |||
1017 | 1017 | ||
1018 | for (i = 0; i < conf->copies; i++) { | 1018 | for (i = 0; i < conf->copies; i++) { |
1019 | int d = r10_bio->devs[i].devnum; | 1019 | int d = r10_bio->devs[i].devnum; |
1020 | mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[d].rdev); | 1020 | struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev); |
1021 | if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { | 1021 | if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { |
1022 | atomic_inc(&rdev->nr_pending); | 1022 | atomic_inc(&rdev->nr_pending); |
1023 | blocked_rdev = rdev; | 1023 | blocked_rdev = rdev; |
@@ -1209,7 +1209,7 @@ static int enough(conf_t *conf, int ignore) | |||
1209 | return 1; | 1209 | return 1; |
1210 | } | 1210 | } |
1211 | 1211 | ||
1212 | static void error(mddev_t *mddev, mdk_rdev_t *rdev) | 1212 | static void error(mddev_t *mddev, struct md_rdev *rdev) |
1213 | { | 1213 | { |
1214 | char b[BDEVNAME_SIZE]; | 1214 | char b[BDEVNAME_SIZE]; |
1215 | conf_t *conf = mddev->private; | 1215 | conf_t *conf = mddev->private; |
@@ -1309,7 +1309,7 @@ static int raid10_spare_active(mddev_t *mddev) | |||
1309 | } | 1309 | } |
1310 | 1310 | ||
1311 | 1311 | ||
1312 | static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) | 1312 | static int raid10_add_disk(mddev_t *mddev, struct md_rdev *rdev) |
1313 | { | 1313 | { |
1314 | conf_t *conf = mddev->private; | 1314 | conf_t *conf = mddev->private; |
1315 | int err = -EEXIST; | 1315 | int err = -EEXIST; |
@@ -1372,7 +1372,7 @@ static int raid10_remove_disk(mddev_t *mddev, int number) | |||
1372 | { | 1372 | { |
1373 | conf_t *conf = mddev->private; | 1373 | conf_t *conf = mddev->private; |
1374 | int err = 0; | 1374 | int err = 0; |
1375 | mdk_rdev_t *rdev; | 1375 | struct md_rdev *rdev; |
1376 | mirror_info_t *p = conf->mirrors+ number; | 1376 | mirror_info_t *p = conf->mirrors+ number; |
1377 | 1377 | ||
1378 | print_conf(conf); | 1378 | print_conf(conf); |
@@ -1629,7 +1629,7 @@ static void fix_recovery_read_error(r10bio_t *r10_bio) | |||
1629 | 1629 | ||
1630 | while (sectors) { | 1630 | while (sectors) { |
1631 | int s = sectors; | 1631 | int s = sectors; |
1632 | mdk_rdev_t *rdev; | 1632 | struct md_rdev *rdev; |
1633 | sector_t addr; | 1633 | sector_t addr; |
1634 | int ok; | 1634 | int ok; |
1635 | 1635 | ||
@@ -1663,7 +1663,7 @@ static void fix_recovery_read_error(r10bio_t *r10_bio) | |||
1663 | 1663 | ||
1664 | if (rdev != conf->mirrors[dw].rdev) { | 1664 | if (rdev != conf->mirrors[dw].rdev) { |
1665 | /* need bad block on destination too */ | 1665 | /* need bad block on destination too */ |
1666 | mdk_rdev_t *rdev2 = conf->mirrors[dw].rdev; | 1666 | struct md_rdev *rdev2 = conf->mirrors[dw].rdev; |
1667 | addr = r10_bio->devs[1].addr + sect; | 1667 | addr = r10_bio->devs[1].addr + sect; |
1668 | ok = rdev_set_badblocks(rdev2, addr, s, 0); | 1668 | ok = rdev_set_badblocks(rdev2, addr, s, 0); |
1669 | if (!ok) { | 1669 | if (!ok) { |
@@ -1719,7 +1719,7 @@ static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio) | |||
1719 | * since the last recorded read error. | 1719 | * since the last recorded read error. |
1720 | * | 1720 | * |
1721 | */ | 1721 | */ |
1722 | static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev) | 1722 | static void check_decay_read_errors(mddev_t *mddev, struct md_rdev *rdev) |
1723 | { | 1723 | { |
1724 | struct timespec cur_time_mon; | 1724 | struct timespec cur_time_mon; |
1725 | unsigned long hours_since_last; | 1725 | unsigned long hours_since_last; |
@@ -1750,7 +1750,7 @@ static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev) | |||
1750 | atomic_set(&rdev->read_errors, read_errors >> hours_since_last); | 1750 | atomic_set(&rdev->read_errors, read_errors >> hours_since_last); |
1751 | } | 1751 | } |
1752 | 1752 | ||
1753 | static int r10_sync_page_io(mdk_rdev_t *rdev, sector_t sector, | 1753 | static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector, |
1754 | int sectors, struct page *page, int rw) | 1754 | int sectors, struct page *page, int rw) |
1755 | { | 1755 | { |
1756 | sector_t first_bad; | 1756 | sector_t first_bad; |
@@ -1782,7 +1782,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio) | |||
1782 | { | 1782 | { |
1783 | int sect = 0; /* Offset from r10_bio->sector */ | 1783 | int sect = 0; /* Offset from r10_bio->sector */ |
1784 | int sectors = r10_bio->sectors; | 1784 | int sectors = r10_bio->sectors; |
1785 | mdk_rdev_t*rdev; | 1785 | struct md_rdev*rdev; |
1786 | int max_read_errors = atomic_read(&mddev->max_corr_read_errors); | 1786 | int max_read_errors = atomic_read(&mddev->max_corr_read_errors); |
1787 | int d = r10_bio->devs[r10_bio->read_slot].devnum; | 1787 | int d = r10_bio->devs[r10_bio->read_slot].devnum; |
1788 | 1788 | ||
@@ -1988,7 +1988,7 @@ static int narrow_write_error(r10bio_t *r10_bio, int i) | |||
1988 | struct bio *bio = r10_bio->master_bio; | 1988 | struct bio *bio = r10_bio->master_bio; |
1989 | mddev_t *mddev = r10_bio->mddev; | 1989 | mddev_t *mddev = r10_bio->mddev; |
1990 | conf_t *conf = mddev->private; | 1990 | conf_t *conf = mddev->private; |
1991 | mdk_rdev_t *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev; | 1991 | struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev; |
1992 | /* bio has the data to be written to slot 'i' where | 1992 | /* bio has the data to be written to slot 'i' where |
1993 | * we just recently had a write error. | 1993 | * we just recently had a write error. |
1994 | * We repeatedly clone the bio and trim down to one block, | 1994 | * We repeatedly clone the bio and trim down to one block, |
@@ -2046,7 +2046,7 @@ static void handle_read_error(mddev_t *mddev, r10bio_t *r10_bio) | |||
2046 | int mirror = r10_bio->devs[slot].devnum; | 2046 | int mirror = r10_bio->devs[slot].devnum; |
2047 | struct bio *bio; | 2047 | struct bio *bio; |
2048 | conf_t *conf = mddev->private; | 2048 | conf_t *conf = mddev->private; |
2049 | mdk_rdev_t *rdev; | 2049 | struct md_rdev *rdev; |
2050 | char b[BDEVNAME_SIZE]; | 2050 | char b[BDEVNAME_SIZE]; |
2051 | unsigned long do_sync; | 2051 | unsigned long do_sync; |
2052 | int max_sectors; | 2052 | int max_sectors; |
@@ -2148,7 +2148,7 @@ static void handle_write_completed(conf_t *conf, r10bio_t *r10_bio) | |||
2148 | * a bad block. | 2148 | * a bad block. |
2149 | */ | 2149 | */ |
2150 | int m; | 2150 | int m; |
2151 | mdk_rdev_t *rdev; | 2151 | struct md_rdev *rdev; |
2152 | 2152 | ||
2153 | if (test_bit(R10BIO_IsSync, &r10_bio->state) || | 2153 | if (test_bit(R10BIO_IsSync, &r10_bio->state) || |
2154 | test_bit(R10BIO_IsRecover, &r10_bio->state)) { | 2154 | test_bit(R10BIO_IsRecover, &r10_bio->state)) { |
@@ -2453,7 +2453,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, | |||
2453 | int k; | 2453 | int k; |
2454 | int d = r10_bio->devs[j].devnum; | 2454 | int d = r10_bio->devs[j].devnum; |
2455 | sector_t from_addr, to_addr; | 2455 | sector_t from_addr, to_addr; |
2456 | mdk_rdev_t *rdev; | 2456 | struct md_rdev *rdev; |
2457 | sector_t sector, first_bad; | 2457 | sector_t sector, first_bad; |
2458 | int bad_sectors; | 2458 | int bad_sectors; |
2459 | if (!conf->mirrors[d].rdev || | 2459 | if (!conf->mirrors[d].rdev || |
@@ -2841,7 +2841,7 @@ static int run(mddev_t *mddev) | |||
2841 | conf_t *conf; | 2841 | conf_t *conf; |
2842 | int i, disk_idx, chunk_size; | 2842 | int i, disk_idx, chunk_size; |
2843 | mirror_info_t *disk; | 2843 | mirror_info_t *disk; |
2844 | mdk_rdev_t *rdev; | 2844 | struct md_rdev *rdev; |
2845 | sector_t size; | 2845 | sector_t size; |
2846 | 2846 | ||
2847 | /* | 2847 | /* |
@@ -2999,7 +2999,7 @@ static void raid10_quiesce(mddev_t *mddev, int state) | |||
2999 | 2999 | ||
3000 | static void *raid10_takeover_raid0(mddev_t *mddev) | 3000 | static void *raid10_takeover_raid0(mddev_t *mddev) |
3001 | { | 3001 | { |
3002 | mdk_rdev_t *rdev; | 3002 | struct md_rdev *rdev; |
3003 | conf_t *conf; | 3003 | conf_t *conf; |
3004 | 3004 | ||
3005 | if (mddev->degraded > 0) { | 3005 | if (mddev->degraded > 0) { |
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h index 79cb52a0d4a2..d37260fe9c3f 100644 --- a/drivers/md/raid10.h +++ b/drivers/md/raid10.h | |||
@@ -4,7 +4,7 @@ | |||
4 | typedef struct mirror_info mirror_info_t; | 4 | typedef struct mirror_info mirror_info_t; |
5 | 5 | ||
6 | struct mirror_info { | 6 | struct mirror_info { |
7 | mdk_rdev_t *rdev; | 7 | struct md_rdev *rdev; |
8 | sector_t head_position; | 8 | sector_t head_position; |
9 | int recovery_disabled; /* matches | 9 | int recovery_disabled; /* matches |
10 | * mddev->recovery_disabled | 10 | * mddev->recovery_disabled |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 245946e9e4c9..d0e2a345e4d7 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -379,7 +379,7 @@ static int has_failed(raid5_conf_t *conf) | |||
379 | rcu_read_lock(); | 379 | rcu_read_lock(); |
380 | degraded = 0; | 380 | degraded = 0; |
381 | for (i = 0; i < conf->previous_raid_disks; i++) { | 381 | for (i = 0; i < conf->previous_raid_disks; i++) { |
382 | mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); | 382 | struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); |
383 | if (!rdev || test_bit(Faulty, &rdev->flags)) | 383 | if (!rdev || test_bit(Faulty, &rdev->flags)) |
384 | degraded++; | 384 | degraded++; |
385 | else if (test_bit(In_sync, &rdev->flags)) | 385 | else if (test_bit(In_sync, &rdev->flags)) |
@@ -403,7 +403,7 @@ static int has_failed(raid5_conf_t *conf) | |||
403 | rcu_read_lock(); | 403 | rcu_read_lock(); |
404 | degraded = 0; | 404 | degraded = 0; |
405 | for (i = 0; i < conf->raid_disks; i++) { | 405 | for (i = 0; i < conf->raid_disks; i++) { |
406 | mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); | 406 | struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); |
407 | if (!rdev || test_bit(Faulty, &rdev->flags)) | 407 | if (!rdev || test_bit(Faulty, &rdev->flags)) |
408 | degraded++; | 408 | degraded++; |
409 | else if (test_bit(In_sync, &rdev->flags)) | 409 | else if (test_bit(In_sync, &rdev->flags)) |
@@ -492,7 +492,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) | |||
492 | for (i = disks; i--; ) { | 492 | for (i = disks; i--; ) { |
493 | int rw; | 493 | int rw; |
494 | struct bio *bi; | 494 | struct bio *bi; |
495 | mdk_rdev_t *rdev; | 495 | struct md_rdev *rdev; |
496 | if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) { | 496 | if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) { |
497 | if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags)) | 497 | if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags)) |
498 | rw = WRITE_FUA; | 498 | rw = WRITE_FUA; |
@@ -1582,7 +1582,7 @@ static void raid5_end_read_request(struct bio * bi, int error) | |||
1582 | int disks = sh->disks, i; | 1582 | int disks = sh->disks, i; |
1583 | int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); | 1583 | int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); |
1584 | char b[BDEVNAME_SIZE]; | 1584 | char b[BDEVNAME_SIZE]; |
1585 | mdk_rdev_t *rdev; | 1585 | struct md_rdev *rdev; |
1586 | 1586 | ||
1587 | 1587 | ||
1588 | for (i=0 ; i<disks; i++) | 1588 | for (i=0 ; i<disks; i++) |
@@ -1719,7 +1719,7 @@ static void raid5_build_block(struct stripe_head *sh, int i, int previous) | |||
1719 | dev->sector = compute_blocknr(sh, i, previous); | 1719 | dev->sector = compute_blocknr(sh, i, previous); |
1720 | } | 1720 | } |
1721 | 1721 | ||
1722 | static void error(mddev_t *mddev, mdk_rdev_t *rdev) | 1722 | static void error(mddev_t *mddev, struct md_rdev *rdev) |
1723 | { | 1723 | { |
1724 | char b[BDEVNAME_SIZE]; | 1724 | char b[BDEVNAME_SIZE]; |
1725 | raid5_conf_t *conf = mddev->private; | 1725 | raid5_conf_t *conf = mddev->private; |
@@ -2257,7 +2257,7 @@ handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh, | |||
2257 | int bitmap_end = 0; | 2257 | int bitmap_end = 0; |
2258 | 2258 | ||
2259 | if (test_bit(R5_ReadError, &sh->dev[i].flags)) { | 2259 | if (test_bit(R5_ReadError, &sh->dev[i].flags)) { |
2260 | mdk_rdev_t *rdev; | 2260 | struct md_rdev *rdev; |
2261 | rcu_read_lock(); | 2261 | rcu_read_lock(); |
2262 | rdev = rcu_dereference(conf->disks[i].rdev); | 2262 | rdev = rcu_dereference(conf->disks[i].rdev); |
2263 | if (rdev && test_bit(In_sync, &rdev->flags)) | 2263 | if (rdev && test_bit(In_sync, &rdev->flags)) |
@@ -2371,7 +2371,7 @@ handle_failed_sync(raid5_conf_t *conf, struct stripe_head *sh, | |||
2371 | * refcounting of rdevs is not needed | 2371 | * refcounting of rdevs is not needed |
2372 | */ | 2372 | */ |
2373 | for (i = 0; i < conf->raid_disks; i++) { | 2373 | for (i = 0; i < conf->raid_disks; i++) { |
2374 | mdk_rdev_t *rdev = conf->disks[i].rdev; | 2374 | struct md_rdev *rdev = conf->disks[i].rdev; |
2375 | if (!rdev | 2375 | if (!rdev |
2376 | || test_bit(Faulty, &rdev->flags) | 2376 | || test_bit(Faulty, &rdev->flags) |
2377 | || test_bit(In_sync, &rdev->flags)) | 2377 | || test_bit(In_sync, &rdev->flags)) |
@@ -2995,7 +2995,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) | |||
2995 | rcu_read_lock(); | 2995 | rcu_read_lock(); |
2996 | spin_lock_irq(&conf->device_lock); | 2996 | spin_lock_irq(&conf->device_lock); |
2997 | for (i=disks; i--; ) { | 2997 | for (i=disks; i--; ) { |
2998 | mdk_rdev_t *rdev; | 2998 | struct md_rdev *rdev; |
2999 | sector_t first_bad; | 2999 | sector_t first_bad; |
3000 | int bad_sectors; | 3000 | int bad_sectors; |
3001 | int is_bad = 0; | 3001 | int is_bad = 0; |
@@ -3334,7 +3334,7 @@ finish: | |||
3334 | 3334 | ||
3335 | if (s.handle_bad_blocks) | 3335 | if (s.handle_bad_blocks) |
3336 | for (i = disks; i--; ) { | 3336 | for (i = disks; i--; ) { |
3337 | mdk_rdev_t *rdev; | 3337 | struct md_rdev *rdev; |
3338 | struct r5dev *dev = &sh->dev[i]; | 3338 | struct r5dev *dev = &sh->dev[i]; |
3339 | if (test_and_clear_bit(R5_WriteError, &dev->flags)) { | 3339 | if (test_and_clear_bit(R5_WriteError, &dev->flags)) { |
3340 | /* We own a safe reference to the rdev */ | 3340 | /* We own a safe reference to the rdev */ |
@@ -3523,7 +3523,7 @@ static void raid5_align_endio(struct bio *bi, int error) | |||
3523 | mddev_t *mddev; | 3523 | mddev_t *mddev; |
3524 | raid5_conf_t *conf; | 3524 | raid5_conf_t *conf; |
3525 | int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); | 3525 | int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); |
3526 | mdk_rdev_t *rdev; | 3526 | struct md_rdev *rdev; |
3527 | 3527 | ||
3528 | bio_put(bi); | 3528 | bio_put(bi); |
3529 | 3529 | ||
@@ -3572,7 +3572,7 @@ static int chunk_aligned_read(mddev_t *mddev, struct bio * raid_bio) | |||
3572 | raid5_conf_t *conf = mddev->private; | 3572 | raid5_conf_t *conf = mddev->private; |
3573 | int dd_idx; | 3573 | int dd_idx; |
3574 | struct bio* align_bi; | 3574 | struct bio* align_bi; |
3575 | mdk_rdev_t *rdev; | 3575 | struct md_rdev *rdev; |
3576 | 3576 | ||
3577 | if (!in_chunk_boundary(mddev, raid_bio)) { | 3577 | if (!in_chunk_boundary(mddev, raid_bio)) { |
3578 | pr_debug("chunk_aligned_read : non aligned\n"); | 3578 | pr_debug("chunk_aligned_read : non aligned\n"); |
@@ -4544,7 +4544,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev) | |||
4544 | { | 4544 | { |
4545 | raid5_conf_t *conf; | 4545 | raid5_conf_t *conf; |
4546 | int raid_disk, memory, max_disks; | 4546 | int raid_disk, memory, max_disks; |
4547 | mdk_rdev_t *rdev; | 4547 | struct md_rdev *rdev; |
4548 | struct disk_info *disk; | 4548 | struct disk_info *disk; |
4549 | 4549 | ||
4550 | if (mddev->new_level != 5 | 4550 | if (mddev->new_level != 5 |
@@ -4710,7 +4710,7 @@ static int run(mddev_t *mddev) | |||
4710 | raid5_conf_t *conf; | 4710 | raid5_conf_t *conf; |
4711 | int working_disks = 0; | 4711 | int working_disks = 0; |
4712 | int dirty_parity_disks = 0; | 4712 | int dirty_parity_disks = 0; |
4713 | mdk_rdev_t *rdev; | 4713 | struct md_rdev *rdev; |
4714 | sector_t reshape_offset = 0; | 4714 | sector_t reshape_offset = 0; |
4715 | 4715 | ||
4716 | if (mddev->recovery_cp != MaxSector) | 4716 | if (mddev->recovery_cp != MaxSector) |
@@ -5023,7 +5023,7 @@ static int raid5_remove_disk(mddev_t *mddev, int number) | |||
5023 | { | 5023 | { |
5024 | raid5_conf_t *conf = mddev->private; | 5024 | raid5_conf_t *conf = mddev->private; |
5025 | int err = 0; | 5025 | int err = 0; |
5026 | mdk_rdev_t *rdev; | 5026 | struct md_rdev *rdev; |
5027 | struct disk_info *p = conf->disks + number; | 5027 | struct disk_info *p = conf->disks + number; |
5028 | 5028 | ||
5029 | print_raid5_conf(conf); | 5029 | print_raid5_conf(conf); |
@@ -5062,7 +5062,7 @@ abort: | |||
5062 | return err; | 5062 | return err; |
5063 | } | 5063 | } |
5064 | 5064 | ||
5065 | static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) | 5065 | static int raid5_add_disk(mddev_t *mddev, struct md_rdev *rdev) |
5066 | { | 5066 | { |
5067 | raid5_conf_t *conf = mddev->private; | 5067 | raid5_conf_t *conf = mddev->private; |
5068 | int err = -EEXIST; | 5068 | int err = -EEXIST; |
@@ -5191,7 +5191,7 @@ static int check_reshape(mddev_t *mddev) | |||
5191 | static int raid5_start_reshape(mddev_t *mddev) | 5191 | static int raid5_start_reshape(mddev_t *mddev) |
5192 | { | 5192 | { |
5193 | raid5_conf_t *conf = mddev->private; | 5193 | raid5_conf_t *conf = mddev->private; |
5194 | mdk_rdev_t *rdev; | 5194 | struct md_rdev *rdev; |
5195 | int spares = 0; | 5195 | int spares = 0; |
5196 | unsigned long flags; | 5196 | unsigned long flags; |
5197 | 5197 | ||
@@ -5353,7 +5353,7 @@ static void raid5_finish_reshape(mddev_t *mddev) | |||
5353 | for (d = conf->raid_disks ; | 5353 | for (d = conf->raid_disks ; |
5354 | d < conf->raid_disks - mddev->delta_disks; | 5354 | d < conf->raid_disks - mddev->delta_disks; |
5355 | d++) { | 5355 | d++) { |
5356 | mdk_rdev_t *rdev = conf->disks[d].rdev; | 5356 | struct md_rdev *rdev = conf->disks[d].rdev; |
5357 | if (rdev && raid5_remove_disk(mddev, d) == 0) { | 5357 | if (rdev && raid5_remove_disk(mddev, d) == 0) { |
5358 | sysfs_unlink_rdev(mddev, rdev); | 5358 | sysfs_unlink_rdev(mddev, rdev); |
5359 | rdev->raid_disk = -1; | 5359 | rdev->raid_disk = -1; |
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 11b9566184b2..6b234af7bf17 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h | |||
@@ -248,7 +248,7 @@ struct stripe_head_state { | |||
248 | unsigned long ops_request; | 248 | unsigned long ops_request; |
249 | 249 | ||
250 | struct bio *return_bi; | 250 | struct bio *return_bi; |
251 | mdk_rdev_t *blocked_rdev; | 251 | struct md_rdev *blocked_rdev; |
252 | int handle_bad_blocks; | 252 | int handle_bad_blocks; |
253 | }; | 253 | }; |
254 | 254 | ||
@@ -344,7 +344,7 @@ enum { | |||
344 | 344 | ||
345 | 345 | ||
346 | struct disk_info { | 346 | struct disk_info { |
347 | mdk_rdev_t *rdev; | 347 | struct md_rdev *rdev; |
348 | }; | 348 | }; |
349 | 349 | ||
350 | struct raid5_private_data { | 350 | struct raid5_private_data { |