Diffstat (limited to 'drivers/md/md.c')
-rw-r--r--  drivers/md/md.c | 140
1 file changed, 57 insertions(+), 83 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index ce88755baf4a..b572e1e386ce 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -439,7 +439,7 @@ static void submit_flushes(struct work_struct *ws)
         INIT_WORK(&mddev->flush_work, md_submit_flush_data);
         atomic_set(&mddev->flush_pending, 1);
         rcu_read_lock();
-        list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
+        rdev_for_each_rcu(rdev, mddev)
                 if (rdev->raid_disk >= 0 &&
                     !test_bit(Faulty, &rdev->flags)) {
                         /* Take two references, one is dropped
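
Most of this patch is the mechanical conversion visible in this first hunk: open-coded walks of mddev->disks via list_for_each_entry*() become the rdev_for_each*() helpers, and the old three-argument rdev_for_each(rdev, tmp, mddev) is renamed rdev_for_each_safe(). The md.h side of the rename is outside this file's diff; a minimal sketch, assuming the helpers simply wrap the generic list iterators:

        /* sketch of the md.h counterpart, not shown in this diff */
        #define rdev_for_each(rdev, mddev)                              \
                list_for_each_entry(rdev, &((mddev)->disks), same_set)

        #define rdev_for_each_safe(rdev, tmp, mddev)                    \
                list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set)

        #define rdev_for_each_rcu(rdev, mddev)                          \
                list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)

Hiding the list head and the same_set member behind one name keeps call sites independent of how struct md_rdev is linked into the array, and makes the deletion-safe variant explicit at the sites that remove devices while iterating.
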
@@ -749,7 +749,7 @@ static struct md_rdev * find_rdev_nr(struct mddev *mddev, int nr)
 {
         struct md_rdev *rdev;
 
-        list_for_each_entry(rdev, &mddev->disks, same_set)
+        rdev_for_each(rdev, mddev)
                 if (rdev->desc_nr == nr)
                         return rdev;
 
@@ -760,7 +760,7 @@ static struct md_rdev * find_rdev(struct mddev * mddev, dev_t dev)
 {
         struct md_rdev *rdev;
 
-        list_for_each_entry(rdev, &mddev->disks, same_set)
+        rdev_for_each(rdev, mddev)
                 if (rdev->bdev->bd_dev == dev)
                         return rdev;
 
@@ -1342,7 +1342,7 @@ static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
                 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
 
         sb->disks[0].state = (1<<MD_DISK_REMOVED);
-        list_for_each_entry(rdev2, &mddev->disks, same_set) {
+        rdev_for_each(rdev2, mddev) {
                 mdp_disk_t *d;
                 int desc_nr;
                 int is_active = test_bit(In_sync, &rdev2->flags);
@@ -1805,18 +1805,18 @@ retry:
                                                 | BB_LEN(internal_bb));
                         *bbp++ = cpu_to_le64(store_bb);
                 }
+                bb->changed = 0;
                 if (read_seqretry(&bb->lock, seq))
                         goto retry;
 
                 bb->sector = (rdev->sb_start +
                               (int)le32_to_cpu(sb->bblog_offset));
                 bb->size = le16_to_cpu(sb->bblog_size);
-                bb->changed = 0;
                 }
         }
 
         max_dev = 0;
-        list_for_each_entry(rdev2, &mddev->disks, same_set)
+        rdev_for_each(rdev2, mddev)
                 if (rdev2->desc_nr+1 > max_dev)
                         max_dev = rdev2->desc_nr+1;
 
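The hunk above also reorders the bad-block serialization: bb->changed is now cleared before the read_seqretry() check instead of after it, i.e. inside the seqlock retry window. A minimal sketch of the resulting reader pattern (superblock details elided):

        unsigned int seq;
retry:
        seq = read_seqbegin(&bb->lock);
        /* copy the in-memory bad-block table into the superblock page */
        bb->changed = 0;
        if (read_seqretry(&bb->lock, seq))
                goto retry;

If a writer records a new bad block (setting bb->changed) while the copy is in progress, read_seqretry() fails and the whole copy, including the clear, is redone. Clearing the flag only after a successful check would leave a window in which a concurrent update could be flagged and then silently unflagged without ever being written out.
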
@@ -1833,7 +1833,7 @@ retry:
         for (i=0; i<max_dev;i++)
                 sb->dev_roles[i] = cpu_to_le16(0xfffe);
 
-        list_for_each_entry(rdev2, &mddev->disks, same_set) {
+        rdev_for_each(rdev2, mddev) {
                 i = rdev2->desc_nr;
                 if (test_bit(Faulty, &rdev2->flags))
                         sb->dev_roles[i] = cpu_to_le16(0xfffe);
@@ -1948,7 +1948,7 @@ int md_integrity_register(struct mddev *mddev)
                 return 0; /* nothing to do */
         if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
                 return 0; /* shouldn't register, or already is */
-        list_for_each_entry(rdev, &mddev->disks, same_set) {
+        rdev_for_each(rdev, mddev) {
                 /* skip spares and non-functional disks */
                 if (test_bit(Faulty, &rdev->flags))
                         continue;
@@ -2175,7 +2175,7 @@ static void export_array(struct mddev *mddev)
 {
         struct md_rdev *rdev, *tmp;
 
-        rdev_for_each(rdev, tmp, mddev) {
+        rdev_for_each_safe(rdev, tmp, mddev) {
                 if (!rdev->mddev) {
                         MD_BUG();
                         continue;
@@ -2307,11 +2307,11 @@ static void md_print_devices(void)
                         bitmap_print_sb(mddev->bitmap);
                 else
                         printk("%s: ", mdname(mddev));
-                list_for_each_entry(rdev, &mddev->disks, same_set)
+                rdev_for_each(rdev, mddev)
                         printk("<%s>", bdevname(rdev->bdev,b));
                 printk("\n");
 
-                list_for_each_entry(rdev, &mddev->disks, same_set)
+                rdev_for_each(rdev, mddev)
                         print_rdev(rdev, mddev->major_version);
         }
         printk("md: **********************************\n");
@@ -2328,7 +2328,7 @@ static void sync_sbs(struct mddev * mddev, int nospares)
          * with the rest of the array)
          */
         struct md_rdev *rdev;
-        list_for_each_entry(rdev, &mddev->disks, same_set) {
+        rdev_for_each(rdev, mddev) {
                 if (rdev->sb_events == mddev->events ||
                     (nospares &&
                      rdev->raid_disk < 0 &&
@@ -2351,7 +2351,7 @@ static void md_update_sb(struct mddev * mddev, int force_change)
 
 repeat:
         /* First make sure individual recovery_offsets are correct */
-        list_for_each_entry(rdev, &mddev->disks, same_set) {
+        rdev_for_each(rdev, mddev) {
                 if (rdev->raid_disk >= 0 &&
                     mddev->delta_disks >= 0 &&
                     !test_bit(In_sync, &rdev->flags) &&
@@ -2364,8 +2364,9 @@ repeat:
                 clear_bit(MD_CHANGE_DEVS, &mddev->flags);
                 if (!mddev->external) {
                         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
-                        list_for_each_entry(rdev, &mddev->disks, same_set) {
+                        rdev_for_each(rdev, mddev) {
                                 if (rdev->badblocks.changed) {
+                                        rdev->badblocks.changed = 0;
                                         md_ack_all_badblocks(&rdev->badblocks);
                                         md_error(mddev, rdev);
                                 }
@@ -2430,7 +2431,7 @@ repeat:
                 mddev->events --;
         }
 
-        list_for_each_entry(rdev, &mddev->disks, same_set) {
+        rdev_for_each(rdev, mddev) {
                 if (rdev->badblocks.changed)
                         any_badblocks_changed++;
                 if (test_bit(Faulty, &rdev->flags))
@@ -2444,7 +2445,7 @@ repeat:
                  mdname(mddev), mddev->in_sync);
 
         bitmap_update_sb(mddev->bitmap);
-        list_for_each_entry(rdev, &mddev->disks, same_set) {
+        rdev_for_each(rdev, mddev) {
                 char b[BDEVNAME_SIZE];
 
                 if (rdev->sb_loaded != 1)
@@ -2493,7 +2494,7 @@ repeat:
         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
                 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
 
-        list_for_each_entry(rdev, &mddev->disks, same_set) {
+        rdev_for_each(rdev, mddev) {
                 if (test_and_clear_bit(FaultRecorded, &rdev->flags))
                         clear_bit(Blocked, &rdev->flags);
 
@@ -2896,7 +2897,7 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
                 struct md_rdev *rdev2;
 
                 mddev_lock(mddev);
-                list_for_each_entry(rdev2, &mddev->disks, same_set)
+                rdev_for_each(rdev2, mddev)
                         if (rdev->bdev == rdev2->bdev &&
                             rdev != rdev2 &&
                             overlaps(rdev->data_offset, rdev->sectors,
@@ -3193,7 +3194,7 @@ static void analyze_sbs(struct mddev * mddev)
         char b[BDEVNAME_SIZE];
 
         freshest = NULL;
-        rdev_for_each(rdev, tmp, mddev)
+        rdev_for_each_safe(rdev, tmp, mddev)
                 switch (super_types[mddev->major_version].
                         load_super(rdev, freshest, mddev->minor_version)) {
                 case 1:
@@ -3214,7 +3215,7 @@ static void analyze_sbs(struct mddev * mddev)
                 validate_super(mddev, freshest);
 
         i = 0;
-        rdev_for_each(rdev, tmp, mddev) {
+        rdev_for_each_safe(rdev, tmp, mddev) {
                 if (mddev->max_disks &&
                     (rdev->desc_nr >= mddev->max_disks ||
                      i > mddev->max_disks)) {
@@ -3403,7 +3404,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
                 return -EINVAL;
         }
 
-        list_for_each_entry(rdev, &mddev->disks, same_set)
+        rdev_for_each(rdev, mddev)
                 rdev->new_raid_disk = rdev->raid_disk;
 
         /* ->takeover must set new_* and/or delta_disks
@@ -3456,7 +3457,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
                 mddev->safemode = 0;
         }
 
-        list_for_each_entry(rdev, &mddev->disks, same_set) {
+        rdev_for_each(rdev, mddev) {
                 if (rdev->raid_disk < 0)
                         continue;
                 if (rdev->new_raid_disk >= mddev->raid_disks)
@@ -3465,7 +3466,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
                         continue;
                 sysfs_unlink_rdev(mddev, rdev);
         }
-        list_for_each_entry(rdev, &mddev->disks, same_set) {
+        rdev_for_each(rdev, mddev) {
                 if (rdev->raid_disk < 0)
                         continue;
                 if (rdev->new_raid_disk == rdev->raid_disk)
@@ -4796,7 +4797,7 @@ int md_run(struct mddev *mddev)
          * the only valid external interface is through the md
          * device.
          */
-        list_for_each_entry(rdev, &mddev->disks, same_set) {
+        rdev_for_each(rdev, mddev) {
                 if (test_bit(Faulty, &rdev->flags))
                         continue;
                 sync_blockdev(rdev->bdev);
@@ -4867,8 +4868,8 @@ int md_run(struct mddev *mddev)
                 struct md_rdev *rdev2;
                 int warned = 0;
 
-                list_for_each_entry(rdev, &mddev->disks, same_set)
-                        list_for_each_entry(rdev2, &mddev->disks, same_set) {
+                rdev_for_each(rdev, mddev)
+                        rdev_for_each(rdev2, mddev) {
                                 if (rdev < rdev2 &&
                                     rdev->bdev->bd_contains ==
                                     rdev2->bdev->bd_contains) {
@@ -4945,7 +4946,7 @@ int md_run(struct mddev *mddev)
         mddev->in_sync = 1;
         smp_wmb();
         mddev->ready = 1;
-        list_for_each_entry(rdev, &mddev->disks, same_set)
+        rdev_for_each(rdev, mddev)
                 if (rdev->raid_disk >= 0)
                         if (sysfs_link_rdev(mddev, rdev))
                                 /* failure here is OK */;
@@ -5073,6 +5074,7 @@ static void md_clean(struct mddev *mddev)
         mddev->changed = 0;
         mddev->degraded = 0;
         mddev->safemode = 0;
+        mddev->merge_check_needed = 0;
         mddev->bitmap_info.offset = 0;
         mddev->bitmap_info.default_offset = 0;
         mddev->bitmap_info.chunksize = 0;
@@ -5175,7 +5177,7 @@ static int do_md_stop(struct mddev * mddev, int mode, int is_open)
         /* tell userspace to handle 'inactive' */
         sysfs_notify_dirent_safe(mddev->sysfs_state);
 
-        list_for_each_entry(rdev, &mddev->disks, same_set)
+        rdev_for_each(rdev, mddev)
                 if (rdev->raid_disk >= 0)
                         sysfs_unlink_rdev(mddev, rdev);
 
@@ -5226,7 +5228,7 @@ static void autorun_array(struct mddev *mddev)
 
         printk(KERN_INFO "md: running: ");
 
-        list_for_each_entry(rdev, &mddev->disks, same_set) {
+        rdev_for_each(rdev, mddev) {
                 char b[BDEVNAME_SIZE];
                 printk("<%s>", bdevname(rdev->bdev,b));
         }
@@ -5356,7 +5358,7 @@ static int get_array_info(struct mddev * mddev, void __user * arg)
         struct md_rdev *rdev;
 
         nr=working=insync=failed=spare=0;
-        list_for_each_entry(rdev, &mddev->disks, same_set) {
+        rdev_for_each(rdev, mddev) {
                 nr++;
                 if (test_bit(Faulty, &rdev->flags))
                         failed++;
@@ -5923,7 +5925,7 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
                  * grow, and re-add.
                  */
                 return -EBUSY;
-        list_for_each_entry(rdev, &mddev->disks, same_set) {
+        rdev_for_each(rdev, mddev) {
                 sector_t avail = rdev->sectors;
 
                 if (fit && (num_sectors == 0 || num_sectors > avail))
@@ -6724,7 +6726,6 @@ static int md_seq_show(struct seq_file *seq, void *v)
         struct mddev *mddev = v;
         sector_t sectors;
         struct md_rdev *rdev;
-        struct bitmap *bitmap;
 
         if (v == (void*)1) {
                 struct md_personality *pers;
@@ -6758,7 +6759,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
         }
 
         sectors = 0;
-        list_for_each_entry(rdev, &mddev->disks, same_set) {
+        rdev_for_each(rdev, mddev) {
                 char b[BDEVNAME_SIZE];
                 seq_printf(seq, " %s[%d]",
                            bdevname(rdev->bdev,b), rdev->desc_nr);
@@ -6812,27 +6813,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
         } else
                 seq_printf(seq, "\n ");
 
-        if ((bitmap = mddev->bitmap)) {
-                unsigned long chunk_kb;
-                unsigned long flags;
-                spin_lock_irqsave(&bitmap->lock, flags);
-                chunk_kb = mddev->bitmap_info.chunksize >> 10;
-                seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
-                           "%lu%s chunk",
-                           bitmap->pages - bitmap->missing_pages,
-                           bitmap->pages,
-                           (bitmap->pages - bitmap->missing_pages)
-                           << (PAGE_SHIFT - 10),
-                           chunk_kb ? chunk_kb : mddev->bitmap_info.chunksize,
-                           chunk_kb ? "KB" : "B");
-                if (bitmap->file) {
-                        seq_printf(seq, ", file: ");
-                        seq_path(seq, &bitmap->file->f_path, " \t\n");
-                }
-
-                seq_printf(seq, "\n");
-                spin_unlock_irqrestore(&bitmap->lock, flags);
-        }
+        bitmap_status(seq, mddev->bitmap);
 
         seq_printf(seq, "\n");
 }
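
The bitmap block removed above is folded into a bitmap_status() helper, presumably added to drivers/md/bitmap.c elsewhere in this series. A sketch of that helper, assuming it carries the removed logic over verbatim (with the NULL check moving inside so the caller no longer needs its local bitmap variable):

        void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
        {
                unsigned long chunk_kb;
                unsigned long flags;

                if (!bitmap)
                        return;

                spin_lock_irqsave(&bitmap->lock, flags);
                chunk_kb = bitmap->mddev->bitmap_info.chunksize >> 10;
                seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
                           "%lu%s chunk",
                           bitmap->pages - bitmap->missing_pages,
                           bitmap->pages,
                           (bitmap->pages - bitmap->missing_pages)
                           << (PAGE_SHIFT - 10),
                           chunk_kb ? chunk_kb : bitmap->mddev->bitmap_info.chunksize,
                           chunk_kb ? "KB" : "B");
                if (bitmap->file) {
                        seq_printf(seq, ", file: ");
                        seq_path(seq, &bitmap->file->f_path, " \t\n");
                }
                seq_printf(seq, "\n");
                spin_unlock_irqrestore(&bitmap->lock, flags);
        }

Note the back-pointer: once the code lives in bitmap.c it reaches bitmap_info through bitmap->mddev rather than the local mddev.
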
@@ -7170,7 +7151,7 @@ void md_do_sync(struct mddev *mddev)
                 max_sectors = mddev->dev_sectors;
                 j = MaxSector;
                 rcu_read_lock();
-                list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
+                rdev_for_each_rcu(rdev, mddev)
                         if (rdev->raid_disk >= 0 &&
                             !test_bit(Faulty, &rdev->flags) &&
                             !test_bit(In_sync, &rdev->flags) &&
@@ -7342,7 +7323,7 @@ void md_do_sync(struct mddev *mddev)
                 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
                         mddev->curr_resync = MaxSector;
                 rcu_read_lock();
-                list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
+                rdev_for_each_rcu(rdev, mddev)
                         if (rdev->raid_disk >= 0 &&
                             mddev->delta_disks >= 0 &&
                             !test_bit(Faulty, &rdev->flags) &&
@@ -7388,7 +7369,7 @@ static int remove_and_add_spares(struct mddev *mddev)
 
         mddev->curr_resync_completed = 0;
 
-        list_for_each_entry(rdev, &mddev->disks, same_set)
+        rdev_for_each(rdev, mddev)
                 if (rdev->raid_disk >= 0 &&
                     !test_bit(Blocked, &rdev->flags) &&
                     (test_bit(Faulty, &rdev->flags) ||
@@ -7406,7 +7387,7 @@ static int remove_and_add_spares(struct mddev *mddev)
7406 "degraded"); 7387 "degraded");
7407 7388
7408 7389
7409 list_for_each_entry(rdev, &mddev->disks, same_set) { 7390 rdev_for_each(rdev, mddev) {
7410 if (rdev->raid_disk >= 0 && 7391 if (rdev->raid_disk >= 0 &&
7411 !test_bit(In_sync, &rdev->flags) && 7392 !test_bit(In_sync, &rdev->flags) &&
7412 !test_bit(Faulty, &rdev->flags)) 7393 !test_bit(Faulty, &rdev->flags))
@@ -7451,7 +7432,7 @@ static void reap_sync_thread(struct mddev *mddev)
          * do the superblock for an incrementally recovered device
          * written out.
          */
-        list_for_each_entry(rdev, &mddev->disks, same_set)
+        rdev_for_each(rdev, mddev)
                 if (!mddev->degraded ||
                     test_bit(In_sync, &rdev->flags))
                         rdev->saved_raid_disk = -1;
@@ -7529,7 +7510,7 @@ void md_check_recovery(struct mddev *mddev)
                  * failed devices.
                  */
                 struct md_rdev *rdev;
-                list_for_each_entry(rdev, &mddev->disks, same_set)
+                rdev_for_each(rdev, mddev)
                         if (rdev->raid_disk >= 0 &&
                             !test_bit(Blocked, &rdev->flags) &&
                             test_bit(Faulty, &rdev->flags) &&
@@ -8040,7 +8021,7 @@ void md_ack_all_badblocks(struct badblocks *bb)
                 return;
         write_seqlock_irq(&bb->lock);
 
-        if (bb->changed == 0) {
+        if (bb->changed == 0 && bb->unacked_exist) {
                 u64 *p = bb->page;
                 int i;
                 for (i = 0; i < bb->count ; i++) {
@@ -8157,30 +8138,23 @@ static int md_notify_reboot(struct notifier_block *this,
         struct mddev *mddev;
         int need_delay = 0;
 
-        if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {
-
-                printk(KERN_INFO "md: stopping all md devices.\n");
-
-                for_each_mddev(mddev, tmp) {
-                        if (mddev_trylock(mddev)) {
-                                /* Force a switch to readonly even array
-                                 * appears to still be in use. Hence
-                                 * the '100'.
-                                 */
-                                md_set_readonly(mddev, 100);
-                                mddev_unlock(mddev);
-                        }
-                        need_delay = 1;
-                }
-                /*
-                 * certain more exotic SCSI devices are known to be
-                 * volatile wrt too early system reboots. While the
-                 * right place to handle this issue is the given
-                 * driver, we do want to have a safe RAID driver ...
-                 */
-                if (need_delay)
-                        mdelay(1000*1);
-        }
+        for_each_mddev(mddev, tmp) {
+                if (mddev_trylock(mddev)) {
+                        __md_stop_writes(mddev);
+                        mddev->safemode = 2;
+                        mddev_unlock(mddev);
+                }
+                need_delay = 1;
+        }
+        /*
+         * certain more exotic SCSI devices are known to be
+         * volatile wrt too early system reboots. While the
+         * right place to handle this issue is the given
+         * driver, we do want to have a safe RAID driver ...
+         */
+        if (need_delay)
+                mdelay(1000*1);
+
         return NOTIFY_DONE;
 }
 