| author | NeilBrown <neilb@suse.de> | 2014-09-30 00:23:59 -0400 |
|---|---|---|
| committer | NeilBrown <neilb@suse.de> | 2014-10-13 22:08:29 -0400 |
| commit | f72ffdd68616e3697bc782b21c82197aeb480fd5 (patch) | |
| tree | e688042b2faf1992e4f94411a4d4c4f2f5272fe4 | |
| parent | ac05f256691fe427a3e84c19261adb0b67dd73c0 (diff) | |
md: remove unwanted white space from md.c
My editor shows much of this is RED.
Signed-off-by: NeilBrown <neilb@suse.de>
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/md/linear.c | 8 |
| -rw-r--r-- | drivers/md/md.c | 175 |
| -rw-r--r-- | drivers/md/md.h | 34 |
| -rw-r--r-- | drivers/md/multipath.c | 28 |
| -rw-r--r-- | drivers/md/raid0.c | 9 |
| -rw-r--r-- | drivers/md/raid1.c | 14 |
| -rw-r--r-- | drivers/md/raid1.h | 2 |
| -rw-r--r-- | drivers/md/raid10.c | 8 |
| -rw-r--r-- | drivers/md/raid5.c | 23 |
| -rw-r--r-- | drivers/md/raid5.h | 4 |

10 files changed, 114 insertions, 191 deletions
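The change is purely mechanical, and the hunks below fall into three recurring patterns: trailing whitespace is stripped, stray extra blank lines between definitions are dropped, and pointer declarations are tightened so the `*` sits against the identifier instead of floating after the type name. A minimal sketch of that last pattern (hypothetical declarations for illustration only; the real instances, such as `mddev_find()`, appear in the hunks below):

```c
/* Illustration only -- hypothetical declarations, not lines from the patch. */
struct mddev;                               /* opaque forward declaration          */

static struct mddev * find_unit(int unit);  /* before: space between '*' and name  */
static struct mddev *find_unit(int unit);   /* after:  '*' binds to the identifier */
```

All three patterns are the sort of thing scripts/checkpatch.pl complains about, which is why the diffstat above is pure churn: 114 insertions against 191 deletions with no functional change.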
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 56f534b4a2d2..64713b77df1c 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -10,10 +10,10 @@
 it under the terms of the GNU General Public License as published by
 the Free Software Foundation; either version 2, or (at your option)
 any later version.

 You should have received a copy of the GNU General Public License
 (for example /usr/src/linux/COPYING); if not, write to the Free
 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

 #include <linux/blkdev.h>
@@ -25,7 +25,7 @@
 #include "linear.h"

 /*
 * find which device holds a particular offset
 */
 static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector)
 {
@@ -355,7 +355,6 @@ static void linear_status (struct seq_file *seq, struct mddev *mddev)
 seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);
 }

-
 static struct md_personality linear_personality =
 {
 .name = "linear",
@@ -379,7 +378,6 @@ static void linear_exit (void)
 unregister_md_personality (&linear_personality);
 }

-
 module_init(linear_init);
 module_exit(linear_exit);
 MODULE_LICENSE("GPL");
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 19171c58d790..3ca611fabfaf 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1,6 +1,6 @@
 /*
 md.c : Multiple Devices driver for Linux
 Copyright (C) 1998, 1999, 2000 Ingo Molnar

 completely rewritten, based on the MD driver code from Marc Zyngier

@@ -218,7 +218,6 @@ static void md_new_event_inintr(struct mddev *mddev)
 static LIST_HEAD(all_mddevs);
 static DEFINE_SPINLOCK(all_mddevs_lock);

-
 /*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
@@ -228,7 +227,7 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
 */
 #define for_each_mddev(_mddev,_tmp) \
 \
 for (({ spin_lock(&all_mddevs_lock); \
 _tmp = all_mddevs.next; \
 _mddev = NULL;}); \
 ({ if (_tmp != &all_mddevs) \
@@ -241,7 +240,6 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
 _tmp = _tmp->next;}) \
 )

-
 /* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device is
 * being suspended pending a reconfiguration.
@@ -488,7 +486,7 @@ void mddev_init(struct mddev *mddev)
 }
 EXPORT_SYMBOL_GPL(mddev_init);

-static struct mddev * mddev_find(dev_t unit)
+static struct mddev *mddev_find(dev_t unit)
 {
 struct mddev *mddev, *new = NULL;

@@ -530,7 +528,7 @@ static struct mddev * mddev_find(dev_t unit)
 kfree(new);
 return NULL;
 }

 is_free = 1;
 list_for_each_entry(mddev, &all_mddevs, all_mddevs)
 if (mddev->unit == dev) {
@@ -562,7 +560,7 @@ static struct mddev * mddev_find(dev_t unit)
 goto retry;
 }

-static inline int __must_check mddev_lock(struct mddev * mddev)
+static inline int __must_check mddev_lock(struct mddev *mddev)
 {
 return mutex_lock_interruptible(&mddev->reconfig_mutex);
 }
@@ -570,7 +568,7 @@ static inline int __must_check mddev_lock(struct mddev * mddev)
 /* Sometimes we need to take the lock in a situation where
 * failure due to interrupts is not acceptable.
 */
-static inline void mddev_lock_nointr(struct mddev * mddev)
+static inline void mddev_lock_nointr(struct mddev *mddev)
 {
 mutex_lock(&mddev->reconfig_mutex);
 }
@@ -580,14 +578,14 @@ static inline int mddev_is_locked(struct mddev *mddev)
 return mutex_is_locked(&mddev->reconfig_mutex);
 }

-static inline int mddev_trylock(struct mddev * mddev)
+static inline int mddev_trylock(struct mddev *mddev)
 {
 return mutex_trylock(&mddev->reconfig_mutex);
 }

 static struct attribute_group md_redundancy_group;

-static void mddev_unlock(struct mddev * mddev)
+static void mddev_unlock(struct mddev *mddev)
 {
 if (mddev->to_remove) {
 /* These cannot be removed under reconfig_mutex as
@@ -682,7 +680,7 @@ static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
 return MD_NEW_SIZE_SECTORS(num_sectors);
 }

-static int alloc_disk_sb(struct md_rdev * rdev)
+static int alloc_disk_sb(struct md_rdev *rdev)
 {
 if (rdev->sb_page)
 MD_BUG();
@@ -783,7 +781,7 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
 }
 EXPORT_SYMBOL_GPL(sync_page_io);

-static int read_disk_sb(struct md_rdev * rdev, int size)
+static int read_disk_sb(struct md_rdev *rdev, int size)
 {
 char b[BDEVNAME_SIZE];
 if (!rdev->sb_page) {
@@ -793,7 +791,6 @@ static int read_disk_sb(struct md_rdev * rdev, int size)
 if (rdev->sb_loaded)
 return 0;

-
 if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true))
 goto fail;
 rdev->sb_loaded = 1;
@@ -807,7 +804,7 @@ fail:

 static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
 {
 return sb1->set_uuid0 == sb2->set_uuid0 &&
 sb1->set_uuid1 == sb2->set_uuid1 &&
 sb1->set_uuid2 == sb2->set_uuid2 &&
 sb1->set_uuid3 == sb2->set_uuid3;
@@ -843,14 +840,13 @@ abort:
 return ret;
 }

-
 static u32 md_csum_fold(u32 csum)
 {
 csum = (csum & 0xffff) + (csum >> 16);
 return (csum & 0xffff) + (csum >> 16);
 }

-static unsigned int calc_sb_csum(mdp_super_t * sb)
+static unsigned int calc_sb_csum(mdp_super_t *sb)
 {
 u64 newcsum = 0;
 u32 *sb32 = (u32*)sb;
@@ -864,7 +860,6 @@ static unsigned int calc_sb_csum(mdp_super_t * sb)
 newcsum += sb32[i];
 csum = (newcsum & 0xffffffff) + (newcsum>>32);

-
 #ifdef CONFIG_ALPHA
 /* This used to use csum_partial, which was wrong for several
 * reasons including that different results are returned on
@@ -881,7 +876,6 @@ static unsigned int calc_sb_csum(mdp_super_t * sb)
 return csum;
 }

-
 /*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
@@ -947,7 +941,7 @@ int md_check_no_bitmap(struct mddev *mddev)
 EXPORT_SYMBOL(md_check_no_bitmap);

 /*
 * load_super for 0.90.0
 */
 static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
 {
@@ -1026,7 +1020,7 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
 ev2 = md_event(refsb);
 if (ev1 > ev2)
 ret = 1;
 else
 ret = 0;
 }
 rdev->sectors = rdev->sb_start;
@@ -1100,7 +1094,7 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
 if (sb->state & (1<<MD_SB_CLEAN))
 mddev->recovery_cp = MaxSector;
 else {
 if (sb->events_hi == sb->cp_events_hi &&
 sb->events_lo == sb->cp_events_lo) {
 mddev->recovery_cp = sb->recovery_cp;
 } else
@@ -1128,7 +1122,7 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
 ++ev1;
 if (sb->disks[rdev->desc_nr].state & (
 (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
 if (ev1 < mddev->events)
 return -EINVAL;
 } else if (mddev->bitmap) {
 /* if adding to array with a bitmap, then we can accept an
@@ -1179,7 +1173,6 @@ static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
 struct md_rdev *rdev2;
 int next_spare = mddev->raid_disks;

-
 /* make rdev->sb match mddev data..
 *
 * 1/ zero out disks
@@ -1348,7 +1341,7 @@ super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
 * version 1 superblock
 */

-static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
+static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
 {
 __le32 disk_csum;
 u32 csum;
@@ -1412,7 +1405,6 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
 ret = read_disk_sb(rdev, 4096);
 if (ret) return ret;

-
 sb = page_address(rdev->sb_page);

 if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
@@ -1799,7 +1791,7 @@ retry:

 for (i=0; i<max_dev;i++)
 sb->dev_roles[i] = cpu_to_le16(0xfffe);

 rdev_for_each(rdev2, mddev) {
 i = rdev2->desc_nr;
 if (test_bit(Faulty, &rdev2->flags))
@@ -2015,7 +2007,7 @@ void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
 }
 EXPORT_SYMBOL(md_integrity_add_rdev);

-static int bind_rdev_to_array(struct md_rdev * rdev, struct mddev * mddev)
+static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
 {
 char b[BDEVNAME_SIZE];
 struct kobject *ko;
@@ -2105,7 +2097,7 @@ static void md_delayed_delete(struct work_struct *ws)
 kobject_put(&rdev->kobj);
 }

-static void unbind_rdev_from_array(struct md_rdev * rdev)
+static void unbind_rdev_from_array(struct md_rdev *rdev)
 {
 char b[BDEVNAME_SIZE];
 if (!rdev->mddev) {
@@ -2163,7 +2155,7 @@ static void unlock_rdev(struct md_rdev *rdev)

 void md_autodetect_dev(dev_t dev);

-static void export_rdev(struct md_rdev * rdev)
+static void export_rdev(struct md_rdev *rdev)
 {
 char b[BDEVNAME_SIZE];
 printk(KERN_INFO "md: export_rdev(%s)\n",
@@ -2179,7 +2171,7 @@ static void export_rdev(struct md_rdev * rdev)
 kobject_put(&rdev->kobj);
 }

-static void kick_rdev_from_array(struct md_rdev * rdev)
+static void kick_rdev_from_array(struct md_rdev *rdev)
 {
 unbind_rdev_from_array(rdev);
 export_rdev(rdev);
@@ -2208,7 +2200,7 @@ static void print_sb_90(mdp_super_t *sb)
 {
 int i;

 printk(KERN_INFO
 "md: SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
 sb->major_version, sb->minor_version, sb->patch_version,
 sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
@@ -2283,9 +2275,9 @@ static void print_rdev(struct md_rdev *rdev, int major_version)
 {
 char b[BDEVNAME_SIZE];
 printk(KERN_INFO "md: rdev %s, Sect:%08llu F:%d S:%d DN:%u\n",
 bdevname(rdev->bdev, b), (unsigned long long)rdev->sectors,
 test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
 rdev->desc_nr);
 if (rdev->sb_loaded) {
 printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version);
 switch (major_version) {
@@ -2328,8 +2320,7 @@ static void md_print_devices(void)
 printk("\n");
 }

-
-static void sync_sbs(struct mddev * mddev, int nospares)
+static void sync_sbs(struct mddev *mddev, int nospares)
 {
 /* Update each superblock (in-memory image), but
 * if we are allowed to, skip spares which already
@@ -2352,7 +2343,7 @@ static void sync_sbs(struct mddev * mddev, int nospares)
 }
 }

-static void md_update_sb(struct mddev * mddev, int force_change)
+static void md_update_sb(struct mddev *mddev, int force_change)
 {
 struct md_rdev *rdev;
 int sync_req;
@@ -2373,7 +2364,7 @@ repeat:
 mddev->curr_resync_completed > rdev->recovery_offset)
 rdev->recovery_offset = mddev->curr_resync_completed;

 }
 if (!mddev->persistent) {
 clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
 clear_bit(MD_CHANGE_DEVS, &mddev->flags);
@@ -2812,7 +2803,6 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
 return len;
 }

-
 static struct rdev_sysfs_entry rdev_slot =
 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);

@@ -3009,7 +2999,6 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
 static struct rdev_sysfs_entry rdev_size =
 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);

-
 static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
 {
 unsigned long long recovery_start = rdev->recovery_offset;
@@ -3045,7 +3034,6 @@ static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_
 static struct rdev_sysfs_entry rdev_recovery_start =
 __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);

-
 static ssize_t
 badblocks_show(struct badblocks *bb, char *page, int unack);
 static ssize_t
@@ -3066,7 +3054,6 @@ static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
 static struct rdev_sysfs_entry rdev_bad_blocks =
 __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);

-
 static ssize_t ubb_show(struct md_rdev *rdev, char *page)
 {
 return badblocks_show(&rdev->badblocks, page, 1);
@@ -3223,7 +3210,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe

 size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
 if (!size) {
 printk(KERN_WARNING
 "md: %s has zero or unknown size, marking faulty!\n",
 bdevname(rdev->bdev,b));
 err = -EINVAL;
@@ -3242,7 +3229,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
 goto abort_free;
 }
 if (err < 0) {
 printk(KERN_WARNING
 "md: could not read %s's sb, not importing!\n",
 bdevname(rdev->bdev,b));
 goto abort_free;
@@ -3263,8 +3250,7 @@ abort_free:
 * Check a full RAID array for plausibility
 */

-
-static void analyze_sbs(struct mddev * mddev)
+static void analyze_sbs(struct mddev *mddev)
 {
 int i;
 struct md_rdev *rdev, *freshest, *tmp;
@@ -3282,12 +3268,11 @@ static void analyze_sbs(struct mddev * mddev)
 default:
 printk( KERN_ERR \
 "md: fatal superblock inconsistency in %s"
 " -- removing from array\n",
 bdevname(rdev->bdev,b));
 kick_rdev_from_array(rdev);
 }

-
 super_types[mddev->major_version].
 validate_super(mddev, freshest);

@@ -3326,7 +3311,7 @@ static void analyze_sbs(struct mddev * mddev)
 /* Read a fixed-point number.
 * Numbers in sysfs attributes should be in "standard" units where
 * possible, so time should be in seconds.
 * However we internally use a a much smaller unit such as
 * milliseconds or jiffies.
 * This function takes a decimal number with a possible fractional
 * component, and produces an integer which is the result of
@@ -3363,7 +3348,6 @@ int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
 return 0;
 }

-
 static void md_safemode_timeout(unsigned long data);

 static ssize_t
@@ -3506,7 +3490,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
 /* Looks like we have a winner */
 mddev_suspend(mddev);
 mddev->pers->stop(mddev);

 if (mddev->pers->sync_request == NULL &&
 pers->sync_request != NULL) {
 /* need to add the md_redundancy_group */
@@ -3515,7 +3499,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
 "md: cannot register extra attributes for %s\n",
 mdname(mddev));
 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
 }
 if (mddev->pers->sync_request != NULL &&
 pers->sync_request == NULL) {
 /* need to remove the md_redundancy_group */
@@ -3593,7 +3577,6 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
 static struct md_sysfs_entry md_level =
 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);

-
 static ssize_t
 layout_show(struct mddev *mddev, char *page)
 {
@@ -3636,7 +3619,6 @@ layout_store(struct mddev *mddev, const char *buf, size_t len)
 static struct md_sysfs_entry md_layout =
 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);

-
 static ssize_t
 raid_disks_show(struct mddev *mddev, char *page)
 {
@@ -3841,9 +3823,9 @@ array_state_show(struct mddev *mddev, char *page)
 return sprintf(page, "%s\n", array_states[st]);
 }

-static int do_md_stop(struct mddev * mddev, int ro, struct block_device *bdev);
-static int md_set_readonly(struct mddev * mddev, struct block_device *bdev);
-static int do_md_run(struct mddev * mddev);
+static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
+static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
+static int do_md_run(struct mddev *mddev);
 static int restart_array(struct mddev *mddev);

 static ssize_t
@@ -3994,7 +3976,6 @@ new_dev_store(struct mddev *mddev, const char *buf, size_t len)
 minor != MINOR(dev))
 return -EOVERFLOW;

-
 if (mddev->persistent) {
 rdev = md_import_device(dev, mddev->major_version,
 mddev->minor_version);
@@ -4090,7 +4071,6 @@ size_store(struct mddev *mddev, const char *buf, size_t len)
 static struct md_sysfs_entry md_size =
 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);

-
 /* Metadata version.
 * This is one of
 * 'none' for arrays with no metadata (good luck...)
@@ -4472,7 +4452,7 @@ suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
 unsigned long long new = simple_strtoull(buf, &e, 10);
 unsigned long long old = mddev->suspend_lo;

 if (mddev->pers == NULL ||
 mddev->pers->quiesce == NULL)
 return -EINVAL;
 if (buf == e || (*e && *e != '\n'))
@@ -4492,7 +4472,6 @@ suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
 static struct md_sysfs_entry md_suspend_lo =
 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);

-
 static ssize_t
 suspend_hi_show(struct mddev *mddev, char *page)
 {
@@ -4680,7 +4659,6 @@ static struct attribute_group md_redundancy_group = {
 .attrs = md_redundancy_attrs,
 };

-
 static ssize_t
 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 {
@@ -5093,7 +5071,7 @@ int md_run(struct mddev *mddev)
 } else if (mddev->ro == 2) /* auto-readonly not meaningful */
 mddev->ro = 0;

 atomic_set(&mddev->writes_pending,0);
 atomic_set(&mddev->max_corr_read_errors,
 MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
 mddev->safemode = 0;
@@ -5107,9 +5085,9 @@ int md_run(struct mddev *mddev)
 if (rdev->raid_disk >= 0)
 if (sysfs_link_rdev(mddev, rdev))
 /* failure here is OK */;

 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);

 if (mddev->flags & MD_UPDATE_SB_FLAGS)
 md_update_sb(mddev, 0);

@@ -5321,7 +5299,7 @@ out:
 * 0 - completely stop and dis-assemble array
 * 2 - stop but do not disassemble array
 */
-static int do_md_stop(struct mddev * mddev, int mode,
+static int do_md_stop(struct mddev *mddev, int mode,
 struct block_device *bdev)
 {
 struct gendisk *disk = mddev->gendisk;
@@ -5494,12 +5472,12 @@ static void autorun_devices(int part)
 "md: cannot allocate memory for md drive.\n");
 break;
 }
 if (mddev_lock(mddev))
 printk(KERN_WARNING "md: %s locked, cannot run\n",
 mdname(mddev));
 else if (mddev->raid_disks || mddev->major_version
 || !list_empty(&mddev->disks)) {
 printk(KERN_WARNING
 "md: %s already running, cannot run %s\n",
 mdname(mddev), bdevname(rdev0->bdev,b));
 mddev_unlock(mddev);
@@ -5527,7 +5505,7 @@ static void autorun_devices(int part)
 }
 #endif /* !MODULE */

-static int get_version(void __user * arg)
+static int get_version(void __user *arg)
 {
 mdu_version_t ver;

@@ -5541,7 +5519,7 @@ static int get_version(void __user * arg)
 return 0;
 }

-static int get_array_info(struct mddev * mddev, void __user * arg)
+static int get_array_info(struct mddev *mddev, void __user *arg)
 {
 mdu_array_info_t info;
 int nr,working,insync,failed,spare;
@@ -5556,7 +5534,7 @@ static int get_array_info(struct mddev * mddev, void __user * arg)
 else {
 working++;
 if (test_bit(In_sync, &rdev->flags))
 insync++;
 else
 spare++;
 }
@@ -5596,7 +5574,7 @@ static int get_array_info(struct mddev * mddev, void __user * arg)
 return 0;
 }

-static int get_bitmap_file(struct mddev * mddev, void __user * arg)
+static int get_bitmap_file(struct mddev *mddev, void __user * arg)
 {
 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
 char *ptr, *buf = NULL;
@@ -5634,7 +5612,7 @@ out:
 return err;
 }

-static int get_disk_info(struct mddev * mddev, void __user * arg)
+static int get_disk_info(struct mddev *mddev, void __user * arg)
 {
 mdu_disk_info_t info;
 struct md_rdev *rdev;
@@ -5670,7 +5648,7 @@ static int get_disk_info(struct mddev * mddev, void __user * arg)
 return 0;
 }

-static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
+static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
 {
 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
 struct md_rdev *rdev;
@@ -5684,7 +5662,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
 /* expecting a device which has a superblock */
 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
 if (IS_ERR(rdev)) {
 printk(KERN_WARNING
 "md: md_import_device returned %ld\n",
 PTR_ERR(rdev));
 return PTR_ERR(rdev);
@@ -5696,9 +5674,9 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
 err = super_types[mddev->major_version]
 .load_super(rdev, rdev0, mddev->minor_version);
 if (err < 0) {
 printk(KERN_WARNING
 "md: %s has different UUID to %s\n",
 bdevname(rdev->bdev,b),
 bdevname(rdev0->bdev,b2));
 export_rdev(rdev);
 return -EINVAL;
@@ -5718,7 +5696,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
 if (mddev->pers) {
 int err;
 if (!mddev->pers->hot_add_disk) {
 printk(KERN_WARNING
 "%s: personality does not support diskops!\n",
 mdname(mddev));
 return -EINVAL;
@@ -5729,7 +5707,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
 else
 rdev = md_import_device(dev, -1, -1);
 if (IS_ERR(rdev)) {
 printk(KERN_WARNING
 "md: md_import_device returned %ld\n",
 PTR_ERR(rdev));
 return PTR_ERR(rdev);
@@ -5803,7 +5781,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
 int err;
 rdev = md_import_device(dev, -1, 0);
 if (IS_ERR(rdev)) {
 printk(KERN_WARNING
 "md: error, md_import_device() returned %ld\n",
 PTR_ERR(rdev));
 return PTR_ERR(rdev);
@@ -5838,7 +5816,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
 return 0;
 }

-static int hot_remove_disk(struct mddev * mddev, dev_t dev)
+static int hot_remove_disk(struct mddev *mddev, dev_t dev)
 {
 char b[BDEVNAME_SIZE];
 struct md_rdev *rdev;
@@ -5864,7 +5842,7 @@ busy:
 return -EBUSY;
 }

-static int hot_add_disk(struct mddev * mddev, dev_t dev)
+static int hot_add_disk(struct mddev *mddev, dev_t dev)
 {
 char b[BDEVNAME_SIZE];
 int err;
@@ -5880,7 +5858,7 @@ static int hot_add_disk(struct mddev * mddev, dev_t dev)
 return -EINVAL;
 }
 if (!mddev->pers->hot_add_disk) {
 printk(KERN_WARNING
 "%s: personality does not support diskops!\n",
 mdname(mddev));
 return -EINVAL;
@@ -5888,7 +5866,7 @@ static int hot_add_disk(struct mddev * mddev, dev_t dev)

 rdev = md_import_device(dev, -1, 0);
 if (IS_ERR(rdev)) {
 printk(KERN_WARNING
 "md: error, md_import_device() returned %ld\n",
 PTR_ERR(rdev));
 return -EINVAL;
@@ -5902,7 +5880,7 @@ static int hot_add_disk(struct mddev * mddev, dev_t dev)
 rdev->sectors = rdev->sb_start;

 if (test_bit(Faulty, &rdev->flags)) {
 printk(KERN_WARNING
 "md: can not hot-add faulty %s disk to %s!\n",
 bdevname(rdev->bdev,b), mdname(mddev));
 err = -EINVAL;
@@ -5950,7 +5928,6 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
 /* we should be able to change the bitmap.. */
 }

-
 if (fd >= 0) {
 struct inode *inode;
 if (mddev->bitmap)
@@ -6021,7 +5998,7 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
 * The minor and patch _version numbers are also kept incase the
 * super_block handler wishes to interpret them.
 */
-static int set_array_info(struct mddev * mddev, mdu_array_info_t *info)
+static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
 {

 if (info->raid_disks == 0) {
@@ -6030,7 +6007,7 @@ static int set_array_info(struct mddev * mddev, mdu_array_info_t *info)
 info->major_version >= ARRAY_SIZE(super_types) ||
 super_types[info->major_version].name == NULL) {
 /* maybe try to auto-load a module? */
 printk(KERN_INFO
 "md: superblock version %d not known\n",
 info->major_version);
 return -EINVAL;
@@ -6178,7 +6155,6 @@ static int update_raid_disks(struct mddev *mddev, int raid_disks)
 return rv;
 }

-
 /*
 * update_array_info is used to change the configuration of an
 * on-line array.
@@ -6447,7 +6423,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
 }
 err = mddev_lock(mddev);
 if (err) {
 printk(KERN_INFO
 "md: ioctl lock interrupted, reason %d, cmd %d\n",
 err, cmd);
 goto abort;
@@ -6708,7 +6684,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)

 static void md_release(struct gendisk *disk, fmode_t mode)
 {
 struct mddev *mddev = disk->private_data;

 BUG_ON(!mddev);
 atomic_dec(&mddev->openers);
@@ -6743,7 +6719,7 @@ static const struct block_device_operations md_fops =
 .revalidate_disk= md_revalidate,
 };

-static int md_thread(void * arg)
+static int md_thread(void *arg)
 {
 struct md_thread *thread = arg;

@@ -6880,8 +6856,7 @@ static void status_unused(struct seq_file *seq)
 seq_printf(seq, "\n");
 }

-
-static void status_resync(struct seq_file *seq, struct mddev * mddev)
+static void status_resync(struct seq_file *seq, struct mddev *mddev)
 {
 sector_t max_sectors, resync, res;
 unsigned long dt, db;
@@ -7003,7 +6978,7 @@ static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
 struct list_head *tmp;
 struct mddev *next_mddev, *mddev = v;

 ++*pos;
 if (v == (void*)2)
 return NULL;
@@ -7018,7 +6993,7 @@ static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 else {
 next_mddev = (void*)2;
 *pos = 0x10000;
 }
 spin_unlock(&all_mddevs_lock);

 if (v != (void*)1)
@@ -7114,7 +7089,7 @@ static int md_seq_show(struct seq_file *seq, void *v)

 if (mddev->pers) {
 mddev->pers->status(seq, mddev);
 seq_printf(seq, "\n ");
 if (mddev->pers->sync_request) {
 if (mddev->curr_resync > 2) {
 status_resync(seq, mddev);
@@ -7132,7 +7107,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
 seq_printf(seq, "\n");
 }
 mddev_unlock(mddev);

 return 0;
 }

@@ -7205,7 +7180,7 @@ int unregister_md_personality(struct md_personality *p)

 static int is_mddev_idle(struct mddev *mddev, int init)
 {
-struct md_rdev * rdev;
+struct md_rdev *rdev;
 int idle;
 int curr_events;

@@ -7260,7 +7235,6 @@ void md_done_sync(struct mddev *mddev, int blocks, int ok)
 }
 }

-
 /* md_write_start(mddev, bi)
 * If we need to update some array metadata (e.g. 'active' flag
 * in superblock) before writing, schedule a superblock update
@@ -8637,7 +8611,6 @@ void md_autodetect_dev(dev_t dev)
 }
 }

-
 static void autostart_arrays(int part)
8642 | { | 8615 | { |
8643 | struct md_rdev *rdev; | 8616 | struct md_rdev *rdev; |
diff --git a/drivers/md/md.h b/drivers/md/md.h
index a49d991f3fe1..03cec5bdcaae 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -1,15 +1,15 @@
 /*
 md.h : kernel internal structure of the Linux MD driver
 Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman

 This program is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
 the Free Software Foundation; either version 2, or (at your option)
 any later version.

 You should have received a copy of the GNU General Public License
 (for example /usr/src/linux/COPYING); if not, write to the Free
 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

 #ifndef _MD_MD_H
@@ -56,7 +56,7 @@ struct md_rdev {
 __u64 sb_events;
 sector_t data_offset; /* start of data in array */
 sector_t new_data_offset;/* only relevant while reshaping */
 sector_t sb_start; /* offset of the super block (in 512byte sectors) */
 int sb_size; /* bytes in the superblock */
 int preferred_minor; /* autorun support */

@@ -239,7 +239,7 @@ struct mddev {
 minor_version,
 patch_version;
 int persistent;
 int external; /* metadata is
 * managed externally */
 char metadata_type[17]; /* externally set*/
 int chunk_sectors;
@@ -248,7 +248,7 @@ struct mddev {
 char clevel[16];
 int raid_disks;
 int max_disks;
 sector_t dev_sectors; /* used size of
 * component devices */
 sector_t array_sectors; /* exported array size */
 int external_size; /* size managed
@@ -312,7 +312,7 @@ struct mddev {
 int parallel_resync;

 int ok_start_degraded;
 /* recovery/resync flags
 * NEEDED: we might need to start a resync/recover
 * RUNNING: a thread is running, or about to be started
 * SYNC: actually doing a resync, not a recovery
@@ -392,20 +392,20 @@ struct mddev {

 unsigned int safemode; /* if set, update "clean" superblock
 * when no writes pending.
 */
 unsigned int safemode_delay;
 struct timer_list safemode_timer;
 atomic_t writes_pending;
 struct request_queue *queue; /* for plugging ... */

 struct bitmap *bitmap; /* the bitmap for the device */
 struct {
 struct file *file; /* the bitmap file */
 loff_t offset; /* offset from superblock of
 * start of bitmap. May be
 * negative, but not '0'
 * For external metadata, offset
 * from start of device.
 */
 unsigned long space; /* space available at this offset */
 loff_t default_offset; /* this is the offset to use when
@@ -421,7 +421,7 @@ struct mddev {
 int external;
 } bitmap_info;

 atomic_t max_corr_read_errors; /* max read retries */
 struct list_head all_mddevs;

 struct attribute_group *to_remove;
@@ -439,7 +439,6 @@ struct mddev {
 void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
 };

-
 static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
 {
 int faulty = test_bit(Faulty, &rdev->flags);
@@ -449,7 +448,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)

 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
 {
 atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
 }

 struct md_personality
@@ -463,7 +462,7 @@ struct md_personality
 int (*stop)(struct mddev *mddev);
 void (*status)(struct seq_file *seq, struct mddev *mddev);
 /* error_handler must set ->faulty and clear ->in_sync
 * if appropriate, and should abort recovery if needed
 */
 void (*error_handler)(struct mddev *mddev, struct md_rdev *rdev);
 int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev);
@@ -493,7 +492,6 @@ struct md_personality
 void *(*takeover) (struct mddev *mddev);
 };

-
 struct md_sysfs_entry {
 struct attribute attr;
 ssize_t (*show)(struct mddev *, char *);
@@ -560,7 +558,7 @@ struct md_thread {
 void (*run) (struct md_thread *thread);
 struct mddev *mddev;
 wait_queue_head_t wqueue;
 unsigned long flags;
 struct task_struct *tsk;
 unsigned long timeout;
 void *private;
@@ -594,7 +592,7 @@ extern void md_flush_request(struct mddev *mddev, struct bio *bio);
 extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
 sector_t sector, int size, struct page *page);
 extern void md_super_wait(struct mddev *mddev);
 extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
598 | struct page *page, int rw, bool metadata_op); | 596 | struct page *page, int rw, bool metadata_op); |
599 | extern void md_do_sync(struct md_thread *thread); | 597 | extern void md_do_sync(struct md_thread *thread); |
600 | extern void md_new_event(struct mddev *mddev); | 598 | extern void md_new_event(struct mddev *mddev); |
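
(Aside.) The md.h hunks above describe the recovery/resync state as a set of independent flag bits (NEEDED, RUNNING, SYNC, ...). A minimal userspace sketch of that bit-flag pattern follows; the enum names and helper functions are illustrative stand-ins, not the kernel's MD_RECOVERY_* definitions or its test_bit()/set_bit() API.

        #include <stdio.h>

        /* Illustrative stand-ins for recovery flag bit numbers. */
        enum { RECOVERY_NEEDED = 0, RECOVERY_RUNNING = 1, RECOVERY_SYNC = 2 };

        static int  test_flag(unsigned long flags, int bit)   { return (flags >> bit) & 1UL; }
        static void set_flag(unsigned long *flags, int bit)   { *flags |= 1UL << bit; }
        static void clear_flag(unsigned long *flags, int bit) { *flags &= ~(1UL << bit); }

        int main(void)
        {
                unsigned long recovery = 0;

                set_flag(&recovery, RECOVERY_NEEDED);           /* a resync/recover may be required */
                if (test_flag(recovery, RECOVERY_NEEDED) &&
                    !test_flag(recovery, RECOVERY_RUNNING)) {
                        set_flag(&recovery, RECOVERY_RUNNING);  /* a thread is running, or about to be */
                        set_flag(&recovery, RECOVERY_SYNC);     /* this pass is a resync, not a recovery */
                }
                clear_flag(&recovery, RECOVERY_NEEDED);
                printf("recovery flags: %#lx\n", recovery);
                return 0;
        }
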
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 849ad39f547b..399272f9c042 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c | |||
@@ -31,13 +31,12 @@ | |||
31 | 31 | ||
32 | #define NR_RESERVED_BUFS 32 | 32 | #define NR_RESERVED_BUFS 32 |
33 | 33 | ||
34 | |||
35 | static int multipath_map (struct mpconf *conf) | 34 | static int multipath_map (struct mpconf *conf) |
36 | { | 35 | { |
37 | int i, disks = conf->raid_disks; | 36 | int i, disks = conf->raid_disks; |
38 | 37 | ||
39 | /* | 38 | /* |
40 | * Later we do read balancing on the read side | 39 | * Later we do read balancing on the read side |
41 | * now we use the first available disk. | 40 | * now we use the first available disk. |
42 | */ | 41 | */ |
43 | 42 | ||
@@ -68,7 +67,6 @@ static void multipath_reschedule_retry (struct multipath_bh *mp_bh) | |||
68 | md_wakeup_thread(mddev->thread); | 67 | md_wakeup_thread(mddev->thread); |
69 | } | 68 | } |
70 | 69 | ||
71 | |||
72 | /* | 70 | /* |
73 | * multipath_end_bh_io() is called when we have finished servicing a multipathed | 71 | * multipath_end_bh_io() is called when we have finished servicing a multipathed |
74 | * operation and are ready to return a success/failure code to the buffer | 72 | * operation and are ready to return a success/failure code to the buffer |
@@ -98,8 +96,8 @@ static void multipath_end_request(struct bio *bio, int error) | |||
98 | */ | 96 | */ |
99 | char b[BDEVNAME_SIZE]; | 97 | char b[BDEVNAME_SIZE]; |
100 | md_error (mp_bh->mddev, rdev); | 98 | md_error (mp_bh->mddev, rdev); |
101 | printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n", | 99 | printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n", |
102 | bdevname(rdev->bdev,b), | 100 | bdevname(rdev->bdev,b), |
103 | (unsigned long long)bio->bi_iter.bi_sector); | 101 | (unsigned long long)bio->bi_iter.bi_sector); |
104 | multipath_reschedule_retry(mp_bh); | 102 | multipath_reschedule_retry(mp_bh); |
105 | } else | 103 | } else |
@@ -145,12 +143,12 @@ static void multipath_status (struct seq_file *seq, struct mddev *mddev) | |||
145 | { | 143 | { |
146 | struct mpconf *conf = mddev->private; | 144 | struct mpconf *conf = mddev->private; |
147 | int i; | 145 | int i; |
148 | 146 | ||
149 | seq_printf (seq, " [%d/%d] [", conf->raid_disks, | 147 | seq_printf (seq, " [%d/%d] [", conf->raid_disks, |
150 | conf->raid_disks - mddev->degraded); | 148 | conf->raid_disks - mddev->degraded); |
151 | for (i = 0; i < conf->raid_disks; i++) | 149 | for (i = 0; i < conf->raid_disks; i++) |
152 | seq_printf (seq, "%s", | 150 | seq_printf (seq, "%s", |
153 | conf->multipaths[i].rdev && | 151 | conf->multipaths[i].rdev && |
154 | test_bit(In_sync, &conf->multipaths[i].rdev->flags) ? "U" : "_"); | 152 | test_bit(In_sync, &conf->multipaths[i].rdev->flags) ? "U" : "_"); |
155 | seq_printf (seq, "]"); | 153 | seq_printf (seq, "]"); |
156 | } | 154 | } |
@@ -195,7 +193,7 @@ static void multipath_error (struct mddev *mddev, struct md_rdev *rdev) | |||
195 | * first check if this is a queued request for a device | 193 | * first check if this is a queued request for a device |
196 | * which has just failed. | 194 | * which has just failed. |
197 | */ | 195 | */ |
198 | printk(KERN_ALERT | 196 | printk(KERN_ALERT |
199 | "multipath: only one IO path left and IO error.\n"); | 197 | "multipath: only one IO path left and IO error.\n"); |
200 | /* leave it active... it's all we have */ | 198 | /* leave it active... it's all we have */ |
201 | return; | 199 | return; |
@@ -242,7 +240,6 @@ static void print_multipath_conf (struct mpconf *conf) | |||
242 | } | 240 | } |
243 | } | 241 | } |
244 | 242 | ||
245 | |||
246 | static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev) | 243 | static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev) |
247 | { | 244 | { |
248 | struct mpconf *conf = mddev->private; | 245 | struct mpconf *conf = mddev->private; |
@@ -325,8 +322,6 @@ abort: | |||
325 | return err; | 322 | return err; |
326 | } | 323 | } |
327 | 324 | ||
328 | |||
329 | |||
330 | /* | 325 | /* |
331 | * This is a kernel thread which: | 326 | * This is a kernel thread which: |
332 | * | 327 | * |
@@ -356,7 +351,7 @@ static void multipathd(struct md_thread *thread) | |||
356 | 351 | ||
357 | bio = &mp_bh->bio; | 352 | bio = &mp_bh->bio; |
358 | bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector; | 353 | bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector; |
359 | 354 | ||
360 | if ((mp_bh->path = multipath_map (conf))<0) { | 355 | if ((mp_bh->path = multipath_map (conf))<0) { |
361 | printk(KERN_ALERT "multipath: %s: unrecoverable IO read" | 356 | printk(KERN_ALERT "multipath: %s: unrecoverable IO read" |
362 | " error for block %llu\n", | 357 | " error for block %llu\n", |
@@ -414,7 +409,7 @@ static int multipath_run (struct mddev *mddev) | |||
414 | conf = kzalloc(sizeof(struct mpconf), GFP_KERNEL); | 409 | conf = kzalloc(sizeof(struct mpconf), GFP_KERNEL); |
415 | mddev->private = conf; | 410 | mddev->private = conf; |
416 | if (!conf) { | 411 | if (!conf) { |
417 | printk(KERN_ERR | 412 | printk(KERN_ERR |
418 | "multipath: couldn't allocate memory for %s\n", | 413 | "multipath: couldn't allocate memory for %s\n", |
419 | mdname(mddev)); | 414 | mdname(mddev)); |
420 | goto out; | 415 | goto out; |
@@ -423,7 +418,7 @@ static int multipath_run (struct mddev *mddev) | |||
423 | conf->multipaths = kzalloc(sizeof(struct multipath_info)*mddev->raid_disks, | 418 | conf->multipaths = kzalloc(sizeof(struct multipath_info)*mddev->raid_disks, |
424 | GFP_KERNEL); | 419 | GFP_KERNEL); |
425 | if (!conf->multipaths) { | 420 | if (!conf->multipaths) { |
426 | printk(KERN_ERR | 421 | printk(KERN_ERR |
427 | "multipath: couldn't allocate memory for %s\n", | 422 | "multipath: couldn't allocate memory for %s\n", |
428 | mdname(mddev)); | 423 | mdname(mddev)); |
429 | goto out_free_conf; | 424 | goto out_free_conf; |
@@ -469,7 +464,7 @@ static int multipath_run (struct mddev *mddev) | |||
469 | conf->pool = mempool_create_kmalloc_pool(NR_RESERVED_BUFS, | 464 | conf->pool = mempool_create_kmalloc_pool(NR_RESERVED_BUFS, |
470 | sizeof(struct multipath_bh)); | 465 | sizeof(struct multipath_bh)); |
471 | if (conf->pool == NULL) { | 466 | if (conf->pool == NULL) { |
472 | printk(KERN_ERR | 467 | printk(KERN_ERR |
473 | "multipath: couldn't allocate memory for %s\n", | 468 | "multipath: couldn't allocate memory for %s\n", |
474 | mdname(mddev)); | 469 | mdname(mddev)); |
475 | goto out_free_conf; | 470 | goto out_free_conf; |
@@ -485,7 +480,7 @@ static int multipath_run (struct mddev *mddev) | |||
485 | } | 480 | } |
486 | } | 481 | } |
487 | 482 | ||
488 | printk(KERN_INFO | 483 | printk(KERN_INFO |
489 | "multipath: array %s active with %d out of %d IO paths\n", | 484 | "multipath: array %s active with %d out of %d IO paths\n", |
490 | mdname(mddev), conf->raid_disks - mddev->degraded, | 485 | mdname(mddev), conf->raid_disks - mddev->degraded, |
491 | mddev->raid_disks); | 486 | mddev->raid_disks); |
@@ -512,7 +507,6 @@ out: | |||
512 | return -EIO; | 507 | return -EIO; |
513 | } | 508 | } |
514 | 509 | ||
515 | |||
516 | static int multipath_stop (struct mddev *mddev) | 510 | static int multipath_stop (struct mddev *mddev) |
517 | { | 511 | { |
518 | struct mpconf *conf = mddev->private; | 512 | struct mpconf *conf = mddev->private; |
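
(Aside.) The multipath.c comment above notes that there is no read balancing yet: multipath_map() just takes the first available disk, and multipath_status() prints a [U_U]-style map of working paths. A small userspace sketch of that selection and status formatting, with hypothetical names (pick_path, path_ok) and a fixed path count:

        #include <stdio.h>

        #define NPATHS 3

        /* 1 = path operational, 0 = failed; values are illustrative only. */
        static int path_ok[NPATHS] = { 0, 1, 1 };

        /* No read balancing: return the first operational path, or -1 if none is left. */
        static int pick_path(void)
        {
                for (int i = 0; i < NPATHS; i++)
                        if (path_ok[i])
                                return i;
                return -1;
        }

        int main(void)
        {
                int working = 0;

                printf("path chosen: %d\n", pick_path());
                for (int i = 0; i < NPATHS; i++)
                        working += path_ok[i];
                /* Status line in the same spirit as multipath_status(). */
                printf(" [%d/%d] [", NPATHS, working);
                for (int i = 0; i < NPATHS; i++)
                        printf("%s", path_ok[i] ? "U" : "_");
                printf("]\n");
                return 0;
        }
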
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index cf91f5910c7c..ba6b85de96d2 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c | |||
@@ -1,10 +1,9 @@ | |||
1 | /* | 1 | /* |
2 | raid0.c : Multiple Devices driver for Linux | 2 | raid0.c : Multiple Devices driver for Linux |
3 | Copyright (C) 1994-96 Marc ZYNGIER | 3 | Copyright (C) 1994-96 Marc ZYNGIER |
4 | <zyngier@ufr-info-p7.ibp.fr> or | 4 | <zyngier@ufr-info-p7.ibp.fr> or |
5 | <maz@gloups.fdn.fr> | 5 | <maz@gloups.fdn.fr> |
6 | Copyright (C) 1999, 2000 Ingo Molnar, Red Hat | 6 | Copyright (C) 1999, 2000 Ingo Molnar, Red Hat |
7 | |||
8 | 7 | ||
9 | RAID-0 management functions. | 8 | RAID-0 management functions. |
10 | 9 | ||
@@ -12,10 +11,10 @@ | |||
12 | it under the terms of the GNU General Public License as published by | 11 | it under the terms of the GNU General Public License as published by |
13 | the Free Software Foundation; either version 2, or (at your option) | 12 | the Free Software Foundation; either version 2, or (at your option) |
14 | any later version. | 13 | any later version. |
15 | 14 | ||
16 | You should have received a copy of the GNU General Public License | 15 | You should have received a copy of the GNU General Public License |
17 | (for example /usr/src/linux/COPYING); if not, write to the Free | 16 | (for example /usr/src/linux/COPYING); if not, write to the Free |
18 | Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 17 | Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
19 | */ | 18 | */ |
20 | 19 | ||
21 | #include <linux/blkdev.h> | 20 | #include <linux/blkdev.h> |
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 7c333b5a50fc..40b35be34f8d 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -494,7 +494,6 @@ static void raid1_end_write_request(struct bio *bio, int error) | |||
494 | bio_put(to_put); | 494 | bio_put(to_put); |
495 | } | 495 | } |
496 | 496 | ||
497 | |||
498 | /* | 497 | /* |
499 | * This routine returns the disk from which the requested read should | 498 | * This routine returns the disk from which the requested read should |
500 | * be done. There is a per-array 'next expected sequential IO' sector | 499 | * be done. There is a per-array 'next expected sequential IO' sector |
@@ -1001,8 +1000,7 @@ static void unfreeze_array(struct r1conf *conf) | |||
1001 | spin_unlock_irq(&conf->resync_lock); | 1000 | spin_unlock_irq(&conf->resync_lock); |
1002 | } | 1001 | } |
1003 | 1002 | ||
1004 | 1003 | /* duplicate the data pages for behind I/O | |
1005 | /* duplicate the data pages for behind I/O | ||
1006 | */ | 1004 | */ |
1007 | static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio) | 1005 | static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio) |
1008 | { | 1006 | { |
@@ -1471,7 +1469,6 @@ static void status(struct seq_file *seq, struct mddev *mddev) | |||
1471 | seq_printf(seq, "]"); | 1469 | seq_printf(seq, "]"); |
1472 | } | 1470 | } |
1473 | 1471 | ||
1474 | |||
1475 | static void error(struct mddev *mddev, struct md_rdev *rdev) | 1472 | static void error(struct mddev *mddev, struct md_rdev *rdev) |
1476 | { | 1473 | { |
1477 | char b[BDEVNAME_SIZE]; | 1474 | char b[BDEVNAME_SIZE]; |
@@ -1565,7 +1562,7 @@ static int raid1_spare_active(struct mddev *mddev) | |||
1565 | unsigned long flags; | 1562 | unsigned long flags; |
1566 | 1563 | ||
1567 | /* | 1564 | /* |
1568 | * Find all failed disks within the RAID1 configuration | 1565 | * Find all failed disks within the RAID1 configuration |
1569 | * and mark them readable. | 1566 | * and mark them readable. |
1570 | * Called under mddev lock, so rcu protection not needed. | 1567 | * Called under mddev lock, so rcu protection not needed. |
1571 | */ | 1568 | */ |
@@ -1606,7 +1603,6 @@ static int raid1_spare_active(struct mddev *mddev) | |||
1606 | return count; | 1603 | return count; |
1607 | } | 1604 | } |
1608 | 1605 | ||
1609 | |||
1610 | static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev) | 1606 | static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev) |
1611 | { | 1607 | { |
1612 | struct r1conf *conf = mddev->private; | 1608 | struct r1conf *conf = mddev->private; |
@@ -1735,7 +1731,6 @@ abort: | |||
1735 | return err; | 1731 | return err; |
1736 | } | 1732 | } |
1737 | 1733 | ||
1738 | |||
1739 | static void end_sync_read(struct bio *bio, int error) | 1734 | static void end_sync_read(struct bio *bio, int error) |
1740 | { | 1735 | { |
1741 | struct r1bio *r1_bio = bio->bi_private; | 1736 | struct r1bio *r1_bio = bio->bi_private; |
@@ -2457,7 +2452,6 @@ static void raid1d(struct md_thread *thread) | |||
2457 | blk_finish_plug(&plug); | 2452 | blk_finish_plug(&plug); |
2458 | } | 2453 | } |
2459 | 2454 | ||
2460 | |||
2461 | static int init_resync(struct r1conf *conf) | 2455 | static int init_resync(struct r1conf *conf) |
2462 | { | 2456 | { |
2463 | int buffs; | 2457 | int buffs; |
@@ -2946,9 +2940,9 @@ static int run(struct mddev *mddev) | |||
2946 | printk(KERN_NOTICE "md/raid1:%s: not clean" | 2940 | printk(KERN_NOTICE "md/raid1:%s: not clean" |
2947 | " -- starting background reconstruction\n", | 2941 | " -- starting background reconstruction\n", |
2948 | mdname(mddev)); | 2942 | mdname(mddev)); |
2949 | printk(KERN_INFO | 2943 | printk(KERN_INFO |
2950 | "md/raid1:%s: active with %d out of %d mirrors\n", | 2944 | "md/raid1:%s: active with %d out of %d mirrors\n", |
2951 | mdname(mddev), mddev->raid_disks - mddev->degraded, | 2945 | mdname(mddev), mddev->raid_disks - mddev->degraded, |
2952 | mddev->raid_disks); | 2946 | mddev->raid_disks); |
2953 | 2947 | ||
2954 | /* | 2948 | /* |
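
(Aside.) The raid1.c hunk above carries the comment "duplicate the data pages for behind I/O": for write-behind, the data is copied into private pages so the original request can complete while slower mirrors are still writing. A hedged, library-style userspace sketch of that copy step, without a main(); dup_pages and this PAGE_SIZE are illustrative, not the kernel's alloc_behind_pages():

        #include <stdlib.h>
        #include <string.h>

        #define PAGE_SIZE 4096

        /* Copy each data page into freshly allocated memory so the caller's
         * buffers can be released while the copies are still being written.
         * Returns an array of npages copies, or NULL on allocation failure. */
        void **dup_pages(void *const *pages, int npages)
        {
                void **copy = calloc(npages, sizeof(*copy));

                if (!copy)
                        return NULL;
                for (int i = 0; i < npages; i++) {
                        copy[i] = malloc(PAGE_SIZE);
                        if (!copy[i]) {
                                while (i--)
                                        free(copy[i]);
                                free(copy);
                                return NULL;
                        }
                        memcpy(copy[i], pages[i], PAGE_SIZE);
                }
                return copy;
        }
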
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h index 9bebca7bff2f..33bda55ef9f7 100644 --- a/drivers/md/raid1.h +++ b/drivers/md/raid1.h | |||
@@ -90,7 +90,6 @@ struct r1conf { | |||
90 | */ | 90 | */ |
91 | int recovery_disabled; | 91 | int recovery_disabled; |
92 | 92 | ||
93 | |||
94 | /* poolinfo contains information about the content of the | 93 | /* poolinfo contains information about the content of the |
95 | * mempools - it changes when the array grows or shrinks | 94 | * mempools - it changes when the array grows or shrinks |
96 | */ | 95 | */ |
@@ -103,7 +102,6 @@ struct r1conf { | |||
103 | */ | 102 | */ |
104 | struct page *tmppage; | 103 | struct page *tmppage; |
105 | 104 | ||
106 | |||
107 | /* When taking over an array from a different personality, we store | 105 | /* When taking over an array from a different personality, we store |
108 | * the new thread here until we fully activate the array. | 106 | * the new thread here until we fully activate the array. |
109 | */ | 107 | */ |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 8fa37eceaef3..32e282f4c83c 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -366,7 +366,6 @@ static void raid10_end_read_request(struct bio *bio, int error) | |||
366 | struct md_rdev *rdev; | 366 | struct md_rdev *rdev; |
367 | struct r10conf *conf = r10_bio->mddev->private; | 367 | struct r10conf *conf = r10_bio->mddev->private; |
368 | 368 | ||
369 | |||
370 | slot = r10_bio->read_slot; | 369 | slot = r10_bio->read_slot; |
371 | dev = r10_bio->devs[slot].devnum; | 370 | dev = r10_bio->devs[slot].devnum; |
372 | rdev = r10_bio->devs[slot].rdev; | 371 | rdev = r10_bio->devs[slot].rdev; |
@@ -1559,7 +1558,6 @@ static void make_request(struct mddev *mddev, struct bio *bio) | |||
1559 | 1558 | ||
1560 | md_write_start(mddev, bio); | 1559 | md_write_start(mddev, bio); |
1561 | 1560 | ||
1562 | |||
1563 | do { | 1561 | do { |
1564 | 1562 | ||
1565 | /* | 1563 | /* |
@@ -1782,7 +1780,6 @@ static int raid10_spare_active(struct mddev *mddev) | |||
1782 | return count; | 1780 | return count; |
1783 | } | 1781 | } |
1784 | 1782 | ||
1785 | |||
1786 | static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) | 1783 | static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) |
1787 | { | 1784 | { |
1788 | struct r10conf *conf = mddev->private; | 1785 | struct r10conf *conf = mddev->private; |
@@ -1929,7 +1926,6 @@ abort: | |||
1929 | return err; | 1926 | return err; |
1930 | } | 1927 | } |
1931 | 1928 | ||
1932 | |||
1933 | static void end_sync_read(struct bio *bio, int error) | 1929 | static void end_sync_read(struct bio *bio, int error) |
1934 | { | 1930 | { |
1935 | struct r10bio *r10_bio = bio->bi_private; | 1931 | struct r10bio *r10_bio = bio->bi_private; |
@@ -2295,7 +2291,6 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio) | |||
2295 | } | 2291 | } |
2296 | } | 2292 | } |
2297 | 2293 | ||
2298 | |||
2299 | /* | 2294 | /* |
2300 | * Used by fix_read_error() to decay the per rdev read_errors. | 2295 | * Used by fix_read_error() to decay the per rdev read_errors. |
2301 | * We halve the read error count for every hour that has elapsed | 2296 | * We halve the read error count for every hour that has elapsed |
@@ -2852,7 +2847,6 @@ static void raid10d(struct md_thread *thread) | |||
2852 | blk_finish_plug(&plug); | 2847 | blk_finish_plug(&plug); |
2853 | } | 2848 | } |
2854 | 2849 | ||
2855 | |||
2856 | static int init_resync(struct r10conf *conf) | 2850 | static int init_resync(struct r10conf *conf) |
2857 | { | 2851 | { |
2858 | int buffs; | 2852 | int buffs; |
@@ -3776,7 +3770,6 @@ static int run(struct mddev *mddev) | |||
3776 | blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec); | 3770 | blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec); |
3777 | } | 3771 | } |
3778 | 3772 | ||
3779 | |||
3780 | if (md_integrity_register(mddev)) | 3773 | if (md_integrity_register(mddev)) |
3781 | goto out_free_conf; | 3774 | goto out_free_conf; |
3782 | 3775 | ||
@@ -4577,7 +4570,6 @@ static void end_reshape(struct r10conf *conf) | |||
4577 | conf->fullsync = 0; | 4570 | conf->fullsync = 0; |
4578 | } | 4571 | } |
4579 | 4572 | ||
4580 | |||
4581 | static int handle_reshape_read_error(struct mddev *mddev, | 4573 | static int handle_reshape_read_error(struct mddev *mddev, |
4582 | struct r10bio *r10_bio) | 4574 | struct r10bio *r10_bio) |
4583 | { | 4575 | { |
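
(Aside.) The raid10.c comment above explains that fix_read_error() decays the per-rdev read_errors count, halving it for every hour that has elapsed. The arithmetic is just a right shift per hour; a minimal sketch (decay_read_errors is a hypothetical name, and the saturation threshold is an assumption to avoid shifting by the type's width, which is undefined in C):

        #include <stdio.h>

        /* Halve the read error count once per full elapsed hour; treat very
         * long gaps as fully decayed rather than over-shifting. */
        unsigned int decay_read_errors(unsigned int read_errors, unsigned int hours_elapsed)
        {
                if (hours_elapsed >= 32)
                        return 0;
                return read_errors >> hours_elapsed;
        }

        int main(void)
        {
                printf("%u\n", decay_read_errors(40, 3));   /* 40 -> 5 after three hours */
                return 0;
        }
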
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 741134d429a4..9c66e5997fc8 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -463,7 +463,6 @@ static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh) | |||
463 | hlist_add_head(&sh->hash, hp); | 463 | hlist_add_head(&sh->hash, hp); |
464 | } | 464 | } |
465 | 465 | ||
466 | |||
467 | /* find an idle stripe, make sure it is unhashed, and return it. */ | 466 | /* find an idle stripe, make sure it is unhashed, and return it. */ |
468 | static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash) | 467 | static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash) |
469 | { | 468 | { |
@@ -540,7 +539,6 @@ retry: | |||
540 | stripe_set_idx(sector, conf, previous, sh); | 539 | stripe_set_idx(sector, conf, previous, sh); |
541 | sh->state = 0; | 540 | sh->state = 0; |
542 | 541 | ||
543 | |||
544 | for (i = sh->disks; i--; ) { | 542 | for (i = sh->disks; i--; ) { |
545 | struct r5dev *dev = &sh->dev[i]; | 543 | struct r5dev *dev = &sh->dev[i]; |
546 | 544 | ||
@@ -1348,7 +1346,6 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu) | |||
1348 | } | 1346 | } |
1349 | } | 1347 | } |
1350 | 1348 | ||
1351 | |||
1352 | static void ops_complete_prexor(void *stripe_head_ref) | 1349 | static void ops_complete_prexor(void *stripe_head_ref) |
1353 | { | 1350 | { |
1354 | struct stripe_head *sh = stripe_head_ref; | 1351 | struct stripe_head *sh = stripe_head_ref; |
@@ -2417,7 +2414,6 @@ static sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector, | |||
2417 | return new_sector; | 2414 | return new_sector; |
2418 | } | 2415 | } |
2419 | 2416 | ||
2420 | |||
2421 | static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) | 2417 | static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) |
2422 | { | 2418 | { |
2423 | struct r5conf *conf = sh->raid_conf; | 2419 | struct r5conf *conf = sh->raid_conf; |
@@ -2435,7 +2431,6 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) | |||
2435 | sector_t r_sector; | 2431 | sector_t r_sector; |
2436 | struct stripe_head sh2; | 2432 | struct stripe_head sh2; |
2437 | 2433 | ||
2438 | |||
2439 | chunk_offset = sector_div(new_sector, sectors_per_chunk); | 2434 | chunk_offset = sector_div(new_sector, sectors_per_chunk); |
2440 | stripe = new_sector; | 2435 | stripe = new_sector; |
2441 | 2436 | ||
@@ -2539,7 +2534,6 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) | |||
2539 | return r_sector; | 2534 | return r_sector; |
2540 | } | 2535 | } |
2541 | 2536 | ||
2542 | |||
2543 | static void | 2537 | static void |
2544 | schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, | 2538 | schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, |
2545 | int rcw, int expand) | 2539 | int rcw, int expand) |
@@ -3011,7 +3005,6 @@ static void handle_stripe_fill(struct stripe_head *sh, | |||
3011 | set_bit(STRIPE_HANDLE, &sh->state); | 3005 | set_bit(STRIPE_HANDLE, &sh->state); |
3012 | } | 3006 | } |
3013 | 3007 | ||
3014 | |||
3015 | /* handle_stripe_clean_event | 3008 | /* handle_stripe_clean_event |
3016 | * any written block on an uptodate or failed drive can be returned. | 3009 | * any written block on an uptodate or failed drive can be returned. |
3017 | * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but | 3010 | * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but |
@@ -3302,7 +3295,6 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, | |||
3302 | } | 3295 | } |
3303 | } | 3296 | } |
3304 | 3297 | ||
3305 | |||
3306 | static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, | 3298 | static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, |
3307 | struct stripe_head_state *s, | 3299 | struct stripe_head_state *s, |
3308 | int disks) | 3300 | int disks) |
@@ -3937,7 +3929,6 @@ static void handle_stripe(struct stripe_head *sh) | |||
3937 | } | 3929 | } |
3938 | } | 3930 | } |
3939 | 3931 | ||
3940 | |||
3941 | /* Finish reconstruct operations initiated by the expansion process */ | 3932 | /* Finish reconstruct operations initiated by the expansion process */ |
3942 | if (sh->reconstruct_state == reconstruct_state_result) { | 3933 | if (sh->reconstruct_state == reconstruct_state_result) { |
3943 | struct stripe_head *sh_src | 3934 | struct stripe_head *sh_src |
@@ -4135,7 +4126,6 @@ static int raid5_mergeable_bvec(struct request_queue *q, | |||
4135 | return max; | 4126 | return max; |
4136 | } | 4127 | } |
4137 | 4128 | ||
4138 | |||
4139 | static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) | 4129 | static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) |
4140 | { | 4130 | { |
4141 | sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev); | 4131 | sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev); |
@@ -4165,7 +4155,6 @@ static void add_bio_to_retry(struct bio *bi,struct r5conf *conf) | |||
4165 | md_wakeup_thread(conf->mddev->thread); | 4155 | md_wakeup_thread(conf->mddev->thread); |
4166 | } | 4156 | } |
4167 | 4157 | ||
4168 | |||
4169 | static struct bio *remove_bio_from_retry(struct r5conf *conf) | 4158 | static struct bio *remove_bio_from_retry(struct r5conf *conf) |
4170 | { | 4159 | { |
4171 | struct bio *bi; | 4160 | struct bio *bi; |
@@ -4189,7 +4178,6 @@ static struct bio *remove_bio_from_retry(struct r5conf *conf) | |||
4189 | return bi; | 4178 | return bi; |
4190 | } | 4179 | } |
4191 | 4180 | ||
4192 | |||
4193 | /* | 4181 | /* |
4194 | * The "raid5_align_endio" should check if the read succeeded and if it | 4182 | * The "raid5_align_endio" should check if the read succeeded and if it |
4195 | * did, call bio_endio on the original bio (having bio_put the new bio | 4183 | * did, call bio_endio on the original bio (having bio_put the new bio |
@@ -4222,7 +4210,6 @@ static void raid5_align_endio(struct bio *bi, int error) | |||
4222 | return; | 4210 | return; |
4223 | } | 4211 | } |
4224 | 4212 | ||
4225 | |||
4226 | pr_debug("raid5_align_endio : io error...handing IO for a retry\n"); | 4213 | pr_debug("raid5_align_endio : io error...handing IO for a retry\n"); |
4227 | 4214 | ||
4228 | add_bio_to_retry(raid_bi, conf); | 4215 | add_bio_to_retry(raid_bi, conf); |
@@ -4247,7 +4234,6 @@ static int bio_fits_rdev(struct bio *bi) | |||
4247 | return 1; | 4234 | return 1; |
4248 | } | 4235 | } |
4249 | 4236 | ||
4250 | |||
4251 | static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio) | 4237 | static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio) |
4252 | { | 4238 | { |
4253 | struct r5conf *conf = mddev->private; | 4239 | struct r5conf *conf = mddev->private; |
@@ -5444,7 +5430,6 @@ raid5_skip_copy = __ATTR(skip_copy, S_IRUGO | S_IWUSR, | |||
5444 | raid5_show_skip_copy, | 5430 | raid5_show_skip_copy, |
5445 | raid5_store_skip_copy); | 5431 | raid5_store_skip_copy); |
5446 | 5432 | ||
5447 | |||
5448 | static ssize_t | 5433 | static ssize_t |
5449 | stripe_cache_active_show(struct mddev *mddev, char *page) | 5434 | stripe_cache_active_show(struct mddev *mddev, char *page) |
5450 | { | 5435 | { |
@@ -5896,7 +5881,6 @@ static struct r5conf *setup_conf(struct mddev *mddev) | |||
5896 | return ERR_PTR(-ENOMEM); | 5881 | return ERR_PTR(-ENOMEM); |
5897 | } | 5882 | } |
5898 | 5883 | ||
5899 | |||
5900 | static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded) | 5884 | static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded) |
5901 | { | 5885 | { |
5902 | switch (algo) { | 5886 | switch (algo) { |
@@ -5909,7 +5893,7 @@ static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded | |||
5909 | return 1; | 5893 | return 1; |
5910 | break; | 5894 | break; |
5911 | case ALGORITHM_PARITY_0_6: | 5895 | case ALGORITHM_PARITY_0_6: |
5912 | if (raid_disk == 0 || | 5896 | if (raid_disk == 0 || |
5913 | raid_disk == raid_disks - 1) | 5897 | raid_disk == raid_disks - 1) |
5914 | return 1; | 5898 | return 1; |
5915 | break; | 5899 | break; |
@@ -6163,7 +6147,6 @@ static int run(struct mddev *mddev) | |||
6163 | "reshape"); | 6147 | "reshape"); |
6164 | } | 6148 | } |
6165 | 6149 | ||
6166 | |||
6167 | /* Ok, everything is just fine now */ | 6150 | /* Ok, everything is just fine now */ |
6168 | if (mddev->to_remove == &raid5_attrs_group) | 6151 | if (mddev->to_remove == &raid5_attrs_group) |
6169 | mddev->to_remove = NULL; | 6152 | mddev->to_remove = NULL; |
@@ -6812,7 +6795,6 @@ static void raid5_quiesce(struct mddev *mddev, int state) | |||
6812 | } | 6795 | } |
6813 | } | 6796 | } |
6814 | 6797 | ||
6815 | |||
6816 | static void *raid45_takeover_raid0(struct mddev *mddev, int level) | 6798 | static void *raid45_takeover_raid0(struct mddev *mddev, int level) |
6817 | { | 6799 | { |
6818 | struct r0conf *raid0_conf = mddev->private; | 6800 | struct r0conf *raid0_conf = mddev->private; |
@@ -6839,7 +6821,6 @@ static void *raid45_takeover_raid0(struct mddev *mddev, int level) | |||
6839 | return setup_conf(mddev); | 6821 | return setup_conf(mddev); |
6840 | } | 6822 | } |
6841 | 6823 | ||
6842 | |||
6843 | static void *raid5_takeover_raid1(struct mddev *mddev) | 6824 | static void *raid5_takeover_raid1(struct mddev *mddev) |
6844 | { | 6825 | { |
6845 | int chunksect; | 6826 | int chunksect; |
@@ -6900,7 +6881,6 @@ static void *raid5_takeover_raid6(struct mddev *mddev) | |||
6900 | return setup_conf(mddev); | 6881 | return setup_conf(mddev); |
6901 | } | 6882 | } |
6902 | 6883 | ||
6903 | |||
6904 | static int raid5_check_reshape(struct mddev *mddev) | 6884 | static int raid5_check_reshape(struct mddev *mddev) |
6905 | { | 6885 | { |
6906 | /* For a 2-drive array, the layout and chunk size can be changed | 6886 | /* For a 2-drive array, the layout and chunk size can be changed |
@@ -7049,7 +7029,6 @@ static void *raid6_takeover(struct mddev *mddev) | |||
7049 | return setup_conf(mddev); | 7029 | return setup_conf(mddev); |
7050 | } | 7030 | } |
7051 | 7031 | ||
7052 | |||
7053 | static struct md_personality raid6_personality = | 7032 | static struct md_personality raid6_personality = |
7054 | { | 7033 | { |
7055 | .name = "raid6", | 7034 | .name = "raid6", |
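
(Aside.) The only_parity() hunk above shows that for ALGORITHM_PARITY_0_6 a slot holds only parity when it is disk 0 or the last disk, i.e. P on the first device and Q on the last. A tiny sketch of that predicate for this one layout only (parity_only_disk is an illustrative name):

        #include <stdio.h>

        /* PARITY_0_6-style layout: the first and last devices never hold data. */
        int parity_only_disk(int raid_disk, int raid_disks)
        {
                return raid_disk == 0 || raid_disk == raid_disks - 1;
        }

        int main(void)
        {
                for (int d = 0; d < 5; d++)
                        printf("disk %d: %s\n", d, parity_only_disk(d, 5) ? "parity" : "data");
                return 0;
        }
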
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index bc72cd4be5f8..d59f5ca743cd 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h | |||
@@ -155,7 +155,7 @@ | |||
155 | */ | 155 | */ |
156 | 156 | ||
157 | /* | 157 | /* |
158 | * Operations state - intermediate states that are visible outside of | 158 | * Operations state - intermediate states that are visible outside of |
159 | * STRIPE_ACTIVE. | 159 | * STRIPE_ACTIVE. |
160 | * In general _idle indicates nothing is running, _run indicates a data | 160 | * In general _idle indicates nothing is running, _run indicates a data |
161 | * processing operation is active, and _result means the data processing result | 161 | * processing operation is active, and _result means the data processing result |
@@ -364,7 +364,6 @@ enum { | |||
364 | * HANDLE gets cleared if stripe_handle leaves nothing locked. | 364 | * HANDLE gets cleared if stripe_handle leaves nothing locked. |
365 | */ | 365 | */ |
366 | 366 | ||
367 | |||
368 | struct disk_info { | 367 | struct disk_info { |
369 | struct md_rdev *rdev, *replacement; | 368 | struct md_rdev *rdev, *replacement; |
370 | }; | 369 | }; |
@@ -528,7 +527,6 @@ struct r5conf { | |||
528 | #define ALGORITHM_ROTATING_N_RESTART 9 /* DDF PRL=6 RLQ=2 */ | 527 | #define ALGORITHM_ROTATING_N_RESTART 9 /* DDF PRL=6 RLQ=2 */ |
529 | #define ALGORITHM_ROTATING_N_CONTINUE 10 /*DDF PRL=6 RLQ=3 */ | 528 | #define ALGORITHM_ROTATING_N_CONTINUE 10 /*DDF PRL=6 RLQ=3 */ |
530 | 529 | ||
531 | |||
532 | /* For every RAID5 algorithm we define a RAID6 algorithm | 530 | /* For every RAID5 algorithm we define a RAID6 algorithm |
533 | * with exactly the same layout for data and parity, and | 531 | * with exactly the same layout for data and parity, and |
534 | * with the Q block always on the last device (N-1). | 532 | * with the Q block always on the last device (N-1). |
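
(Aside.) The raid5.h comment above states that each RAID6 "_6" layout keeps the corresponding RAID5 data/parity layout and pins the Q block to the last device (N-1). The sketch below illustrates that idea for a hypothetical left-asymmetric-style rotation over the first N-1 devices; pq_slots and the exact P formula are assumptions, not the kernel's raid5_compute_sector() logic.

        #include <stdio.h>

        /* P rotates over the first n-1 devices the way a RAID5 rotation would;
         * Q is pinned to the last device, as the comment above describes. */
        static void pq_slots(unsigned long stripe, int raid_disks, int *p_idx, int *q_idx)
        {
                int n5 = raid_disks - 1;                /* devices taking part in the RAID5-style rotation */

                *p_idx = n5 - 1 - (int)(stripe % n5);   /* assumed left-asymmetric-style P placement */
                *q_idx = raid_disks - 1;                /* Q always on the last device */
        }

        int main(void)
        {
                for (unsigned long s = 0; s < 4; s++) {
                        int p, q;

                        pq_slots(s, 6, &p, &q);
                        printf("stripe %lu: P on disk %d, Q on disk %d\n", s, p, q);
                }
                return 0;
        }
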