aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/md/md.c
diff options
context:
space:
mode:
authorNeilBrown <neilb@suse.de>2012-03-18 21:46:39 -0400
committerNeilBrown <neilb@suse.de>2012-03-18 21:46:39 -0400
commitdafb20fa34320a472deb7442f25a0c086e0feb33 (patch)
tree2ff501805f8a6d08119f1f1a7248f579d52e491b /drivers/md/md.c
parentd6b42dcb995e6acd7cc276774e751ffc9f0ef4bf (diff)
md: tidy up rdev_for_each usage.
md.h has an 'rdev_for_each()' macro for iterating the rdevs in an mddev. However it uses the 'safe' version of list_for_each_entry, and so requires the extra variable, but doesn't include 'safe' in the name, which is useful documentation. Consequently some places use this safe version without needing it, and many use an explicit list_for_each_entry. So: - rename rdev_for_each to rdev_for_each_safe - create a new rdev_for_each which uses the plain list_for_each_entry, - use the 'safe' version only where needed, and convert all other list_for_each_entry calls to use rdev_for_each. Signed-off-by: NeilBrown <neilb@suse.de>
Diffstat (limited to 'drivers/md/md.c')
-rw-r--r--drivers/md/md.c74
1 file changed, 37 insertions, 37 deletions
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 115a6dd85837..119de175bf12 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -439,7 +439,7 @@ static void submit_flushes(struct work_struct *ws)
439 INIT_WORK(&mddev->flush_work, md_submit_flush_data); 439 INIT_WORK(&mddev->flush_work, md_submit_flush_data);
440 atomic_set(&mddev->flush_pending, 1); 440 atomic_set(&mddev->flush_pending, 1);
441 rcu_read_lock(); 441 rcu_read_lock();
442 list_for_each_entry_rcu(rdev, &mddev->disks, same_set) 442 rdev_for_each_rcu(rdev, mddev)
443 if (rdev->raid_disk >= 0 && 443 if (rdev->raid_disk >= 0 &&
444 !test_bit(Faulty, &rdev->flags)) { 444 !test_bit(Faulty, &rdev->flags)) {
445 /* Take two references, one is dropped 445 /* Take two references, one is dropped
@@ -749,7 +749,7 @@ static struct md_rdev * find_rdev_nr(struct mddev *mddev, int nr)
749{ 749{
750 struct md_rdev *rdev; 750 struct md_rdev *rdev;
751 751
752 list_for_each_entry(rdev, &mddev->disks, same_set) 752 rdev_for_each(rdev, mddev)
753 if (rdev->desc_nr == nr) 753 if (rdev->desc_nr == nr)
754 return rdev; 754 return rdev;
755 755
@@ -760,7 +760,7 @@ static struct md_rdev * find_rdev(struct mddev * mddev, dev_t dev)
760{ 760{
761 struct md_rdev *rdev; 761 struct md_rdev *rdev;
762 762
763 list_for_each_entry(rdev, &mddev->disks, same_set) 763 rdev_for_each(rdev, mddev)
764 if (rdev->bdev->bd_dev == dev) 764 if (rdev->bdev->bd_dev == dev)
765 return rdev; 765 return rdev;
766 766
@@ -1342,7 +1342,7 @@ static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
1342 sb->state |= (1<<MD_SB_BITMAP_PRESENT); 1342 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
1343 1343
1344 sb->disks[0].state = (1<<MD_DISK_REMOVED); 1344 sb->disks[0].state = (1<<MD_DISK_REMOVED);
1345 list_for_each_entry(rdev2, &mddev->disks, same_set) { 1345 rdev_for_each(rdev2, mddev) {
1346 mdp_disk_t *d; 1346 mdp_disk_t *d;
1347 int desc_nr; 1347 int desc_nr;
1348 int is_active = test_bit(In_sync, &rdev2->flags); 1348 int is_active = test_bit(In_sync, &rdev2->flags);
@@ -1816,7 +1816,7 @@ retry:
1816 } 1816 }
1817 1817
1818 max_dev = 0; 1818 max_dev = 0;
1819 list_for_each_entry(rdev2, &mddev->disks, same_set) 1819 rdev_for_each(rdev2, mddev)
1820 if (rdev2->desc_nr+1 > max_dev) 1820 if (rdev2->desc_nr+1 > max_dev)
1821 max_dev = rdev2->desc_nr+1; 1821 max_dev = rdev2->desc_nr+1;
1822 1822
@@ -1833,7 +1833,7 @@ retry:
1833 for (i=0; i<max_dev;i++) 1833 for (i=0; i<max_dev;i++)
1834 sb->dev_roles[i] = cpu_to_le16(0xfffe); 1834 sb->dev_roles[i] = cpu_to_le16(0xfffe);
1835 1835
1836 list_for_each_entry(rdev2, &mddev->disks, same_set) { 1836 rdev_for_each(rdev2, mddev) {
1837 i = rdev2->desc_nr; 1837 i = rdev2->desc_nr;
1838 if (test_bit(Faulty, &rdev2->flags)) 1838 if (test_bit(Faulty, &rdev2->flags))
1839 sb->dev_roles[i] = cpu_to_le16(0xfffe); 1839 sb->dev_roles[i] = cpu_to_le16(0xfffe);
@@ -1948,7 +1948,7 @@ int md_integrity_register(struct mddev *mddev)
1948 return 0; /* nothing to do */ 1948 return 0; /* nothing to do */
1949 if (!mddev->gendisk || blk_get_integrity(mddev->gendisk)) 1949 if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
1950 return 0; /* shouldn't register, or already is */ 1950 return 0; /* shouldn't register, or already is */
1951 list_for_each_entry(rdev, &mddev->disks, same_set) { 1951 rdev_for_each(rdev, mddev) {
1952 /* skip spares and non-functional disks */ 1952 /* skip spares and non-functional disks */
1953 if (test_bit(Faulty, &rdev->flags)) 1953 if (test_bit(Faulty, &rdev->flags))
1954 continue; 1954 continue;
@@ -2175,7 +2175,7 @@ static void export_array(struct mddev *mddev)
2175{ 2175{
2176 struct md_rdev *rdev, *tmp; 2176 struct md_rdev *rdev, *tmp;
2177 2177
2178 rdev_for_each(rdev, tmp, mddev) { 2178 rdev_for_each_safe(rdev, tmp, mddev) {
2179 if (!rdev->mddev) { 2179 if (!rdev->mddev) {
2180 MD_BUG(); 2180 MD_BUG();
2181 continue; 2181 continue;
@@ -2307,11 +2307,11 @@ static void md_print_devices(void)
2307 bitmap_print_sb(mddev->bitmap); 2307 bitmap_print_sb(mddev->bitmap);
2308 else 2308 else
2309 printk("%s: ", mdname(mddev)); 2309 printk("%s: ", mdname(mddev));
2310 list_for_each_entry(rdev, &mddev->disks, same_set) 2310 rdev_for_each(rdev, mddev)
2311 printk("<%s>", bdevname(rdev->bdev,b)); 2311 printk("<%s>", bdevname(rdev->bdev,b));
2312 printk("\n"); 2312 printk("\n");
2313 2313
2314 list_for_each_entry(rdev, &mddev->disks, same_set) 2314 rdev_for_each(rdev, mddev)
2315 print_rdev(rdev, mddev->major_version); 2315 print_rdev(rdev, mddev->major_version);
2316 } 2316 }
2317 printk("md: **********************************\n"); 2317 printk("md: **********************************\n");
@@ -2328,7 +2328,7 @@ static void sync_sbs(struct mddev * mddev, int nospares)
2328 * with the rest of the array) 2328 * with the rest of the array)
2329 */ 2329 */
2330 struct md_rdev *rdev; 2330 struct md_rdev *rdev;
2331 list_for_each_entry(rdev, &mddev->disks, same_set) { 2331 rdev_for_each(rdev, mddev) {
2332 if (rdev->sb_events == mddev->events || 2332 if (rdev->sb_events == mddev->events ||
2333 (nospares && 2333 (nospares &&
2334 rdev->raid_disk < 0 && 2334 rdev->raid_disk < 0 &&
@@ -2351,7 +2351,7 @@ static void md_update_sb(struct mddev * mddev, int force_change)
2351 2351
2352repeat: 2352repeat:
2353 /* First make sure individual recovery_offsets are correct */ 2353 /* First make sure individual recovery_offsets are correct */
2354 list_for_each_entry(rdev, &mddev->disks, same_set) { 2354 rdev_for_each(rdev, mddev) {
2355 if (rdev->raid_disk >= 0 && 2355 if (rdev->raid_disk >= 0 &&
2356 mddev->delta_disks >= 0 && 2356 mddev->delta_disks >= 0 &&
2357 !test_bit(In_sync, &rdev->flags) && 2357 !test_bit(In_sync, &rdev->flags) &&
@@ -2364,7 +2364,7 @@ repeat:
2364 clear_bit(MD_CHANGE_DEVS, &mddev->flags); 2364 clear_bit(MD_CHANGE_DEVS, &mddev->flags);
2365 if (!mddev->external) { 2365 if (!mddev->external) {
2366 clear_bit(MD_CHANGE_PENDING, &mddev->flags); 2366 clear_bit(MD_CHANGE_PENDING, &mddev->flags);
2367 list_for_each_entry(rdev, &mddev->disks, same_set) { 2367 rdev_for_each(rdev, mddev) {
2368 if (rdev->badblocks.changed) { 2368 if (rdev->badblocks.changed) {
2369 md_ack_all_badblocks(&rdev->badblocks); 2369 md_ack_all_badblocks(&rdev->badblocks);
2370 md_error(mddev, rdev); 2370 md_error(mddev, rdev);
@@ -2430,7 +2430,7 @@ repeat:
2430 mddev->events --; 2430 mddev->events --;
2431 } 2431 }
2432 2432
2433 list_for_each_entry(rdev, &mddev->disks, same_set) { 2433 rdev_for_each(rdev, mddev) {
2434 if (rdev->badblocks.changed) 2434 if (rdev->badblocks.changed)
2435 any_badblocks_changed++; 2435 any_badblocks_changed++;
2436 if (test_bit(Faulty, &rdev->flags)) 2436 if (test_bit(Faulty, &rdev->flags))
@@ -2444,7 +2444,7 @@ repeat:
2444 mdname(mddev), mddev->in_sync); 2444 mdname(mddev), mddev->in_sync);
2445 2445
2446 bitmap_update_sb(mddev->bitmap); 2446 bitmap_update_sb(mddev->bitmap);
2447 list_for_each_entry(rdev, &mddev->disks, same_set) { 2447 rdev_for_each(rdev, mddev) {
2448 char b[BDEVNAME_SIZE]; 2448 char b[BDEVNAME_SIZE];
2449 2449
2450 if (rdev->sb_loaded != 1) 2450 if (rdev->sb_loaded != 1)
@@ -2493,7 +2493,7 @@ repeat:
2493 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 2493 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
2494 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 2494 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
2495 2495
2496 list_for_each_entry(rdev, &mddev->disks, same_set) { 2496 rdev_for_each(rdev, mddev) {
2497 if (test_and_clear_bit(FaultRecorded, &rdev->flags)) 2497 if (test_and_clear_bit(FaultRecorded, &rdev->flags))
2498 clear_bit(Blocked, &rdev->flags); 2498 clear_bit(Blocked, &rdev->flags);
2499 2499
@@ -2896,7 +2896,7 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
2896 struct md_rdev *rdev2; 2896 struct md_rdev *rdev2;
2897 2897
2898 mddev_lock(mddev); 2898 mddev_lock(mddev);
2899 list_for_each_entry(rdev2, &mddev->disks, same_set) 2899 rdev_for_each(rdev2, mddev)
2900 if (rdev->bdev == rdev2->bdev && 2900 if (rdev->bdev == rdev2->bdev &&
2901 rdev != rdev2 && 2901 rdev != rdev2 &&
2902 overlaps(rdev->data_offset, rdev->sectors, 2902 overlaps(rdev->data_offset, rdev->sectors,
@@ -3193,7 +3193,7 @@ static void analyze_sbs(struct mddev * mddev)
3193 char b[BDEVNAME_SIZE]; 3193 char b[BDEVNAME_SIZE];
3194 3194
3195 freshest = NULL; 3195 freshest = NULL;
3196 rdev_for_each(rdev, tmp, mddev) 3196 rdev_for_each_safe(rdev, tmp, mddev)
3197 switch (super_types[mddev->major_version]. 3197 switch (super_types[mddev->major_version].
3198 load_super(rdev, freshest, mddev->minor_version)) { 3198 load_super(rdev, freshest, mddev->minor_version)) {
3199 case 1: 3199 case 1:
@@ -3214,7 +3214,7 @@ static void analyze_sbs(struct mddev * mddev)
3214 validate_super(mddev, freshest); 3214 validate_super(mddev, freshest);
3215 3215
3216 i = 0; 3216 i = 0;
3217 rdev_for_each(rdev, tmp, mddev) { 3217 rdev_for_each_safe(rdev, tmp, mddev) {
3218 if (mddev->max_disks && 3218 if (mddev->max_disks &&
3219 (rdev->desc_nr >= mddev->max_disks || 3219 (rdev->desc_nr >= mddev->max_disks ||
3220 i > mddev->max_disks)) { 3220 i > mddev->max_disks)) {
@@ -3403,7 +3403,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
3403 return -EINVAL; 3403 return -EINVAL;
3404 } 3404 }
3405 3405
3406 list_for_each_entry(rdev, &mddev->disks, same_set) 3406 rdev_for_each(rdev, mddev)
3407 rdev->new_raid_disk = rdev->raid_disk; 3407 rdev->new_raid_disk = rdev->raid_disk;
3408 3408
3409 /* ->takeover must set new_* and/or delta_disks 3409 /* ->takeover must set new_* and/or delta_disks
@@ -3456,7 +3456,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
3456 mddev->safemode = 0; 3456 mddev->safemode = 0;
3457 } 3457 }
3458 3458
3459 list_for_each_entry(rdev, &mddev->disks, same_set) { 3459 rdev_for_each(rdev, mddev) {
3460 if (rdev->raid_disk < 0) 3460 if (rdev->raid_disk < 0)
3461 continue; 3461 continue;
3462 if (rdev->new_raid_disk >= mddev->raid_disks) 3462 if (rdev->new_raid_disk >= mddev->raid_disks)
@@ -3465,7 +3465,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
3465 continue; 3465 continue;
3466 sysfs_unlink_rdev(mddev, rdev); 3466 sysfs_unlink_rdev(mddev, rdev);
3467 } 3467 }
3468 list_for_each_entry(rdev, &mddev->disks, same_set) { 3468 rdev_for_each(rdev, mddev) {
3469 if (rdev->raid_disk < 0) 3469 if (rdev->raid_disk < 0)
3470 continue; 3470 continue;
3471 if (rdev->new_raid_disk == rdev->raid_disk) 3471 if (rdev->new_raid_disk == rdev->raid_disk)
@@ -4796,7 +4796,7 @@ int md_run(struct mddev *mddev)
4796 * the only valid external interface is through the md 4796 * the only valid external interface is through the md
4797 * device. 4797 * device.
4798 */ 4798 */
4799 list_for_each_entry(rdev, &mddev->disks, same_set) { 4799 rdev_for_each(rdev, mddev) {
4800 if (test_bit(Faulty, &rdev->flags)) 4800 if (test_bit(Faulty, &rdev->flags))
4801 continue; 4801 continue;
4802 sync_blockdev(rdev->bdev); 4802 sync_blockdev(rdev->bdev);
@@ -4867,8 +4867,8 @@ int md_run(struct mddev *mddev)
4867 struct md_rdev *rdev2; 4867 struct md_rdev *rdev2;
4868 int warned = 0; 4868 int warned = 0;
4869 4869
4870 list_for_each_entry(rdev, &mddev->disks, same_set) 4870 rdev_for_each(rdev, mddev)
4871 list_for_each_entry(rdev2, &mddev->disks, same_set) { 4871 rdev_for_each(rdev2, mddev) {
4872 if (rdev < rdev2 && 4872 if (rdev < rdev2 &&
4873 rdev->bdev->bd_contains == 4873 rdev->bdev->bd_contains ==
4874 rdev2->bdev->bd_contains) { 4874 rdev2->bdev->bd_contains) {
@@ -4945,7 +4945,7 @@ int md_run(struct mddev *mddev)
4945 mddev->in_sync = 1; 4945 mddev->in_sync = 1;
4946 smp_wmb(); 4946 smp_wmb();
4947 mddev->ready = 1; 4947 mddev->ready = 1;
4948 list_for_each_entry(rdev, &mddev->disks, same_set) 4948 rdev_for_each(rdev, mddev)
4949 if (rdev->raid_disk >= 0) 4949 if (rdev->raid_disk >= 0)
4950 if (sysfs_link_rdev(mddev, rdev)) 4950 if (sysfs_link_rdev(mddev, rdev))
4951 /* failure here is OK */; 4951 /* failure here is OK */;
@@ -5175,7 +5175,7 @@ static int do_md_stop(struct mddev * mddev, int mode, int is_open)
5175 /* tell userspace to handle 'inactive' */ 5175 /* tell userspace to handle 'inactive' */
5176 sysfs_notify_dirent_safe(mddev->sysfs_state); 5176 sysfs_notify_dirent_safe(mddev->sysfs_state);
5177 5177
5178 list_for_each_entry(rdev, &mddev->disks, same_set) 5178 rdev_for_each(rdev, mddev)
5179 if (rdev->raid_disk >= 0) 5179 if (rdev->raid_disk >= 0)
5180 sysfs_unlink_rdev(mddev, rdev); 5180 sysfs_unlink_rdev(mddev, rdev);
5181 5181
@@ -5226,7 +5226,7 @@ static void autorun_array(struct mddev *mddev)
5226 5226
5227 printk(KERN_INFO "md: running: "); 5227 printk(KERN_INFO "md: running: ");
5228 5228
5229 list_for_each_entry(rdev, &mddev->disks, same_set) { 5229 rdev_for_each(rdev, mddev) {
5230 char b[BDEVNAME_SIZE]; 5230 char b[BDEVNAME_SIZE];
5231 printk("<%s>", bdevname(rdev->bdev,b)); 5231 printk("<%s>", bdevname(rdev->bdev,b));
5232 } 5232 }
@@ -5356,7 +5356,7 @@ static int get_array_info(struct mddev * mddev, void __user * arg)
5356 struct md_rdev *rdev; 5356 struct md_rdev *rdev;
5357 5357
5358 nr=working=insync=failed=spare=0; 5358 nr=working=insync=failed=spare=0;
5359 list_for_each_entry(rdev, &mddev->disks, same_set) { 5359 rdev_for_each(rdev, mddev) {
5360 nr++; 5360 nr++;
5361 if (test_bit(Faulty, &rdev->flags)) 5361 if (test_bit(Faulty, &rdev->flags))
5362 failed++; 5362 failed++;
@@ -5923,7 +5923,7 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
5923 * grow, and re-add. 5923 * grow, and re-add.
5924 */ 5924 */
5925 return -EBUSY; 5925 return -EBUSY;
5926 list_for_each_entry(rdev, &mddev->disks, same_set) { 5926 rdev_for_each(rdev, mddev) {
5927 sector_t avail = rdev->sectors; 5927 sector_t avail = rdev->sectors;
5928 5928
5929 if (fit && (num_sectors == 0 || num_sectors > avail)) 5929 if (fit && (num_sectors == 0 || num_sectors > avail))
@@ -6758,7 +6758,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
6758 } 6758 }
6759 6759
6760 sectors = 0; 6760 sectors = 0;
6761 list_for_each_entry(rdev, &mddev->disks, same_set) { 6761 rdev_for_each(rdev, mddev) {
6762 char b[BDEVNAME_SIZE]; 6762 char b[BDEVNAME_SIZE];
6763 seq_printf(seq, " %s[%d]", 6763 seq_printf(seq, " %s[%d]",
6764 bdevname(rdev->bdev,b), rdev->desc_nr); 6764 bdevname(rdev->bdev,b), rdev->desc_nr);
@@ -7170,7 +7170,7 @@ void md_do_sync(struct mddev *mddev)
7170 max_sectors = mddev->dev_sectors; 7170 max_sectors = mddev->dev_sectors;
7171 j = MaxSector; 7171 j = MaxSector;
7172 rcu_read_lock(); 7172 rcu_read_lock();
7173 list_for_each_entry_rcu(rdev, &mddev->disks, same_set) 7173 rdev_for_each_rcu(rdev, mddev)
7174 if (rdev->raid_disk >= 0 && 7174 if (rdev->raid_disk >= 0 &&
7175 !test_bit(Faulty, &rdev->flags) && 7175 !test_bit(Faulty, &rdev->flags) &&
7176 !test_bit(In_sync, &rdev->flags) && 7176 !test_bit(In_sync, &rdev->flags) &&
@@ -7342,7 +7342,7 @@ void md_do_sync(struct mddev *mddev)
7342 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 7342 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
7343 mddev->curr_resync = MaxSector; 7343 mddev->curr_resync = MaxSector;
7344 rcu_read_lock(); 7344 rcu_read_lock();
7345 list_for_each_entry_rcu(rdev, &mddev->disks, same_set) 7345 rdev_for_each_rcu(rdev, mddev)
7346 if (rdev->raid_disk >= 0 && 7346 if (rdev->raid_disk >= 0 &&
7347 mddev->delta_disks >= 0 && 7347 mddev->delta_disks >= 0 &&
7348 !test_bit(Faulty, &rdev->flags) && 7348 !test_bit(Faulty, &rdev->flags) &&
@@ -7388,7 +7388,7 @@ static int remove_and_add_spares(struct mddev *mddev)
7388 7388
7389 mddev->curr_resync_completed = 0; 7389 mddev->curr_resync_completed = 0;
7390 7390
7391 list_for_each_entry(rdev, &mddev->disks, same_set) 7391 rdev_for_each(rdev, mddev)
7392 if (rdev->raid_disk >= 0 && 7392 if (rdev->raid_disk >= 0 &&
7393 !test_bit(Blocked, &rdev->flags) && 7393 !test_bit(Blocked, &rdev->flags) &&
7394 (test_bit(Faulty, &rdev->flags) || 7394 (test_bit(Faulty, &rdev->flags) ||
@@ -7406,7 +7406,7 @@ static int remove_and_add_spares(struct mddev *mddev)
7406 "degraded"); 7406 "degraded");
7407 7407
7408 7408
7409 list_for_each_entry(rdev, &mddev->disks, same_set) { 7409 rdev_for_each(rdev, mddev) {
7410 if (rdev->raid_disk >= 0 && 7410 if (rdev->raid_disk >= 0 &&
7411 !test_bit(In_sync, &rdev->flags) && 7411 !test_bit(In_sync, &rdev->flags) &&
7412 !test_bit(Faulty, &rdev->flags)) 7412 !test_bit(Faulty, &rdev->flags))
@@ -7451,7 +7451,7 @@ static void reap_sync_thread(struct mddev *mddev)
7451 * do the superblock for an incrementally recovered device 7451 * do the superblock for an incrementally recovered device
7452 * written out. 7452 * written out.
7453 */ 7453 */
7454 list_for_each_entry(rdev, &mddev->disks, same_set) 7454 rdev_for_each(rdev, mddev)
7455 if (!mddev->degraded || 7455 if (!mddev->degraded ||
7456 test_bit(In_sync, &rdev->flags)) 7456 test_bit(In_sync, &rdev->flags))
7457 rdev->saved_raid_disk = -1; 7457 rdev->saved_raid_disk = -1;
@@ -7529,7 +7529,7 @@ void md_check_recovery(struct mddev *mddev)
7529 * failed devices. 7529 * failed devices.
7530 */ 7530 */
7531 struct md_rdev *rdev; 7531 struct md_rdev *rdev;
7532 list_for_each_entry(rdev, &mddev->disks, same_set) 7532 rdev_for_each(rdev, mddev)
7533 if (rdev->raid_disk >= 0 && 7533 if (rdev->raid_disk >= 0 &&
7534 !test_bit(Blocked, &rdev->flags) && 7534 !test_bit(Blocked, &rdev->flags) &&
7535 test_bit(Faulty, &rdev->flags) && 7535 test_bit(Faulty, &rdev->flags) &&