author	Shaohua Li <shli@fb.com>	2016-12-08 18:48:19 -0500
committer	Shaohua Li <shli@fb.com>	2016-12-09 01:01:47 -0500
commit	2953079c692da067aeb6345659875b97378f9b0a (patch)
tree	06bec98f71114629966da122360fd38a6f8ec422
parent	82a301cb0ea2df8a5c88213094a01660067c7fb4 (diff)
md: separate flags for superblock changes
The mddev->flags are used for different purposes. There are a lot of
places we check/change the flags without masking unrelated flags, so we
could end up checking or changing unrelated flags. Most of these usages
are for superblock writes, so separate out the superblock-related flags.
This should make the code clearer and also fixes real bugs.

Reviewed-by: NeilBrown <neilb@suse.com>
Signed-off-by: Shaohua Li <shli@fb.com>
-rw-r--r--	drivers/md/bitmap.c	4
-rw-r--r--	drivers/md/dm-raid.c	4
-rw-r--r--	drivers/md/md.c	115
-rw-r--r--	drivers/md/md.h	16
-rw-r--r--	drivers/md/multipath.c	2
-rw-r--r--	drivers/md/raid1.c	12
-rw-r--r--	drivers/md/raid10.c	22
-rw-r--r--	drivers/md/raid5-cache.c	6
-rw-r--r--	drivers/md/raid5.c	26
9 files changed, 106 insertions, 101 deletions
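
Before the per-file hunks, a minimal user-space sketch of the pattern this
patch introduces: superblock-change bits move out of the shared mddev->flags
word into a dedicated sb_flags word, so unmasked whole-word tests such as
"if (mddev->sb_flags)" can no longer trip over unrelated state bits. The
helpers and main() below are illustrative stand-ins only (the kernel's
set_bit()/test_bit() are atomic); this is not code from the patch itself.

#include <stdio.h>

/* Illustrative sketch (not from the patch): superblock-change bits get
 * their own word, sb_flags, so an unmasked test of the whole word only
 * ever sees superblock state.  set_bit_ul() is a non-atomic stand-in
 * for the kernel's set_bit().
 */

enum mddev_sb_flags {
	MD_SB_CHANGE_DEVS,	/* some device status has changed */
	MD_SB_CHANGE_CLEAN,	/* transition to or from 'clean' */
	MD_SB_CHANGE_PENDING,	/* 'clean' -> 'active' in progress */
	MD_SB_NEED_REWRITE,	/* metadata write needs to be repeated */
};

struct mddev {
	unsigned long flags;	/* unrelated array state stays here */
	unsigned long sb_flags;	/* superblock-change state only */
};

static void set_bit_ul(int nr, unsigned long *addr) { *addr |= 1UL << nr; }

int main(void)
{
	struct mddev mddev = { 0, 0 };

	set_bit_ul(MD_SB_CHANGE_DEVS, &mddev.sb_flags);

	/* The whole-word test md_run()/md_check_recovery() switch to: */
	if (mddev.sb_flags)
		printf("superblock update needed\n");
	return 0;
}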
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index c4621571b718..9fb2ccac958a 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1623,7 +1623,7 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
 			   atomic_read(&bitmap->mddev->recovery_active) == 0);
 
 	bitmap->mddev->curr_resync_completed = sector;
-	set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
+	set_bit(MD_SB_CHANGE_CLEAN, &bitmap->mddev->sb_flags);
 	sector &= ~((1ULL << bitmap->counts.chunkshift) - 1);
 	s = 0;
 	while (s < sector && s < bitmap->mddev->resync_max_sectors) {
@@ -2296,7 +2296,7 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
 			/* Ensure new bitmap info is stored in
 			 * metadata promptly.
 			 */
-			set_bit(MD_CHANGE_DEVS, &mddev->flags);
+			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 			md_wakeup_thread(mddev->thread);
 		}
 		rv = 0;
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 6d53810963f7..953159d9a825 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -2011,7 +2011,7 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
 		sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190);
 
 		/* Force writing of superblocks to disk */
-		set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags);
+		set_bit(MD_SB_CHANGE_DEVS, &rdev->mddev->sb_flags);
 
 		/* Any superblock is better than none, choose that if given */
 		return refdev ? 0 : 1;
@@ -3497,7 +3497,7 @@ static void rs_update_sbs(struct raid_set *rs)
 	struct mddev *mddev = &rs->md;
 	int ro = mddev->ro;
 
-	set_bit(MD_CHANGE_DEVS, &mddev->flags);
+	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 	mddev->ro = 0;
 	md_update_sb(mddev, 1);
 	mddev->ro = ro;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 5e666482db3c..c15e2344e7c8 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -729,7 +729,7 @@ static void super_written(struct bio *bio)
 		md_error(mddev, rdev);
 		if (!test_bit(Faulty, &rdev->flags)
 		    && (bio->bi_opf & MD_FAILFAST)) {
-			set_bit(MD_NEED_REWRITE, &mddev->flags);
+			set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags);
 			set_bit(LastDev, &rdev->flags);
 		}
 	} else
@@ -780,7 +780,7 @@ int md_super_wait(struct mddev *mddev)
 {
 	/* wait for all superblock writes that were scheduled to complete */
 	wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
-	if (test_and_clear_bit(MD_NEED_REWRITE, &mddev->flags))
+	if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags))
 		return -EAGAIN;
 	return 0;
 }
@@ -2322,24 +2322,24 @@ void md_update_sb(struct mddev *mddev, int force_change)
 
 	if (mddev->ro) {
 		if (force_change)
-			set_bit(MD_CHANGE_DEVS, &mddev->flags);
+			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 		return;
 	}
 
 repeat:
 	if (mddev_is_clustered(mddev)) {
-		if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
+		if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
 			force_change = 1;
-		if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
+		if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
 			nospares = 1;
 		ret = md_cluster_ops->metadata_update_start(mddev);
 		/* Has someone else has updated the sb */
 		if (!does_sb_need_changing(mddev)) {
 			if (ret == 0)
 				md_cluster_ops->metadata_update_cancel(mddev);
-			bit_clear_unless(&mddev->flags, BIT(MD_CHANGE_PENDING),
-							 BIT(MD_CHANGE_DEVS) |
-							 BIT(MD_CHANGE_CLEAN));
+			bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
+							 BIT(MD_SB_CHANGE_DEVS) |
+							 BIT(MD_SB_CHANGE_CLEAN));
 			return;
 		}
 	}
@@ -2355,10 +2355,10 @@ repeat:
 
 	}
 	if (!mddev->persistent) {
-		clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
-		clear_bit(MD_CHANGE_DEVS, &mddev->flags);
+		clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
+		clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 		if (!mddev->external) {
-			clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
 			rdev_for_each(rdev, mddev) {
 				if (rdev->badblocks.changed) {
 					rdev->badblocks.changed = 0;
@@ -2378,9 +2378,9 @@ repeat:
 
 	mddev->utime = ktime_get_real_seconds();
 
-	if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
+	if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
 		force_change = 1;
-	if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
+	if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
 		/* just a clean<-> dirty transition, possibly leave spares alone,
 		 * though if events isn't the right even/odd, we will have to do
 		 * spares after all
@@ -2472,14 +2472,14 @@ rewrite:
 	}
 	if (md_super_wait(mddev) < 0)
 		goto rewrite;
-	/* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
+	/* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */
 
 	if (mddev_is_clustered(mddev) && ret == 0)
 		md_cluster_ops->metadata_update_finish(mddev);
 
 	if (mddev->in_sync != sync_req ||
-	    !bit_clear_unless(&mddev->flags, BIT(MD_CHANGE_PENDING),
-			       BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_CLEAN)))
+	    !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
+			       BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN)))
 		/* have to write it out again */
 		goto repeat;
 	wake_up(&mddev->sb_wait);
@@ -2523,7 +2523,7 @@ static int add_bound_rdev(struct md_rdev *rdev)
 	}
 	sysfs_notify_dirent_safe(rdev->sysfs_state);
 
-	set_bit(MD_CHANGE_DEVS, &mddev->flags);
+	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 	if (mddev->degraded)
 		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -2640,7 +2640,7 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
 		if (err == 0) {
 			md_kick_rdev_from_array(rdev);
 			if (mddev->pers) {
-				set_bit(MD_CHANGE_DEVS, &mddev->flags);
+				set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 				md_wakeup_thread(mddev->thread);
 			}
 			md_new_event(mddev);
@@ -3651,7 +3651,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
 	}
 	blk_set_stacking_limits(&mddev->queue->limits);
 	pers->run(mddev);
-	set_bit(MD_CHANGE_DEVS, &mddev->flags);
+	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 	mddev_resume(mddev);
 	if (!mddev->thread)
 		md_update_sb(mddev, 1);
@@ -3846,7 +3846,7 @@ resync_start_store(struct mddev *mddev, const char *buf, size_t len)
 	if (!err) {
 		mddev->recovery_cp = n;
 		if (mddev->pers)
-			set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+			set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
 	}
 	mddev_unlock(mddev);
 	return err ?: len;
@@ -3920,7 +3920,7 @@ array_state_show(struct mddev *mddev, char *page)
 			st = read_auto;
 		break;
 	case 0:
-		if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
+		if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
 			st = write_pending;
 		else if (mddev->in_sync)
 			st = clean;
@@ -3958,7 +3958,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
 		spin_lock(&mddev->lock);
 		if (st == active) {
 			restart_array(mddev);
-			clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
 			md_wakeup_thread(mddev->thread);
 			wake_up(&mddev->sb_wait);
 			err = 0;
@@ -3969,7 +3969,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
 				mddev->in_sync = 1;
 				if (mddev->safemode == 1)
 					mddev->safemode = 0;
-				set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+				set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
 			}
 			err = 0;
 		} else
@@ -4035,7 +4035,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
 				mddev->in_sync = 1;
 				if (mddev->safemode == 1)
 					mddev->safemode = 0;
-				set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+				set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
 			}
 			err = 0;
 		} else
@@ -4049,7 +4049,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
 			err = restart_array(mddev);
 			if (err)
 				break;
-			clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
 			wake_up(&mddev->sb_wait);
 			err = 0;
 		} else {
@@ -5378,7 +5378,7 @@ int md_run(struct mddev *mddev)
 	set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 
-	if (mddev->flags & MD_UPDATE_SB_FLAGS)
+	if (mddev->sb_flags)
 		md_update_sb(mddev, 0);
 
 	md_new_event(mddev);
@@ -5473,6 +5473,7 @@ static void md_clean(struct mddev *mddev)
 	mddev->level = LEVEL_NONE;
 	mddev->clevel[0] = 0;
 	mddev->flags = 0;
+	mddev->sb_flags = 0;
 	mddev->ro = 0;
 	mddev->metadata_type[0] = 0;
 	mddev->chunk_sectors = 0;
@@ -5525,7 +5526,7 @@ static void __md_stop_writes(struct mddev *mddev)
 
 	if (mddev->ro == 0 &&
 	    ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
-	     (mddev->flags & MD_UPDATE_SB_FLAGS))) {
+	     mddev->sb_flags)) {
 		/* mark array as shutdown cleanly */
 		if (!mddev_is_clustered(mddev))
 			mddev->in_sync = 1;
@@ -5608,13 +5609,13 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
 		 * which will now never happen */
 		wake_up_process(mddev->sync_thread->tsk);
 
-	if (mddev->external && test_bit(MD_CHANGE_PENDING, &mddev->flags))
+	if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
 		return -EBUSY;
 	mddev_unlock(mddev);
 	wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
 					  &mddev->recovery));
 	wait_event(mddev->sb_wait,
-		   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
+		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
 	mddev_lock_nointr(mddev);
 
 	mutex_lock(&mddev->open_mutex);
@@ -6234,7 +6235,7 @@ kick_rdev:
 		md_cluster_ops->remove_disk(mddev, rdev);
 
 	md_kick_rdev_from_array(rdev);
-	set_bit(MD_CHANGE_DEVS, &mddev->flags);
+	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 	if (mddev->thread)
 		md_wakeup_thread(mddev->thread);
 	else
@@ -6303,7 +6304,7 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
 
 	rdev->raid_disk = -1;
 
-	set_bit(MD_CHANGE_DEVS, &mddev->flags);
+	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 	if (!mddev->thread)
 		md_update_sb(mddev, 1);
 	/*
@@ -6460,9 +6461,11 @@ static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
 
 	mddev->max_disks = MD_SB_DISKS;
 
-	if (mddev->persistent)
+	if (mddev->persistent) {
 		mddev->flags = 0;
-	set_bit(MD_CHANGE_DEVS, &mddev->flags);
+		mddev->sb_flags = 0;
+	}
+	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 
 	mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
 	mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
@@ -7007,11 +7010,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
 			/* If a device failed while we were read-only, we
 			 * need to make sure the metadata is updated now.
 			 */
-			if (test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
+			if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) {
 				mddev_unlock(mddev);
 				wait_event(mddev->sb_wait,
-					   !test_bit(MD_CHANGE_DEVS, &mddev->flags) &&
-					   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
+					   !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) &&
+					   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
 				mddev_lock_nointr(mddev);
 			}
 		} else {
@@ -7766,8 +7769,8 @@ void md_write_start(struct mddev *mddev, struct bio *bi)
 		spin_lock(&mddev->lock);
 		if (mddev->in_sync) {
 			mddev->in_sync = 0;
-			set_bit(MD_CHANGE_CLEAN, &mddev->flags);
-			set_bit(MD_CHANGE_PENDING, &mddev->flags);
+			set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
+			set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
 			md_wakeup_thread(mddev->thread);
 			did_change = 1;
 		}
@@ -7776,7 +7779,7 @@ void md_write_start(struct mddev *mddev, struct bio *bi)
 	if (did_change)
 		sysfs_notify_dirent_safe(mddev->sysfs_state);
 	wait_event(mddev->sb_wait,
-		   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
+		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
 }
 EXPORT_SYMBOL(md_write_start);
 
@@ -7797,7 +7800,7 @@ EXPORT_SYMBOL(md_write_end);
  * attempting a GFP_KERNEL allocation while holding the mddev lock.
  * Must be called with mddev_lock held.
  *
- * In the ->external case MD_CHANGE_PENDING can not be cleared until mddev->lock
+ * In the ->external case MD_SB_CHANGE_PENDING can not be cleared until mddev->lock
  * is dropped, so return -EAGAIN after notifying userspace.
  */
 int md_allow_write(struct mddev *mddev)
@@ -7812,8 +7815,8 @@ int md_allow_write(struct mddev *mddev)
 	spin_lock(&mddev->lock);
 	if (mddev->in_sync) {
 		mddev->in_sync = 0;
-		set_bit(MD_CHANGE_CLEAN, &mddev->flags);
-		set_bit(MD_CHANGE_PENDING, &mddev->flags);
+		set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
+		set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
 		if (mddev->safemode_delay &&
 		    mddev->safemode == 0)
 			mddev->safemode = 1;
@@ -7823,7 +7826,7 @@ int md_allow_write(struct mddev *mddev)
 	} else
 		spin_unlock(&mddev->lock);
 
-	if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
+	if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
 		return -EAGAIN;
 	else
 		return 0;
@@ -8058,7 +8061,7 @@ void md_do_sync(struct md_thread *thread)
 			    j > mddev->recovery_cp)
 				mddev->recovery_cp = j;
 			update_time = jiffies;
-			set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+			set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
 			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
 		}
 
@@ -8206,8 +8209,8 @@ void md_do_sync(struct md_thread *thread)
 		/* set CHANGE_PENDING here since maybe another update is needed,
 		 * so other nodes are informed. It should be harmless for normal
 		 * raid */
-		set_mask_bits(&mddev->flags, 0,
-			      BIT(MD_CHANGE_PENDING) | BIT(MD_CHANGE_DEVS));
+		set_mask_bits(&mddev->sb_flags, 0,
+			      BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS));
 
 	spin_lock(&mddev->lock);
 	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
@@ -8307,12 +8310,12 @@ static int remove_and_add_spares(struct mddev *mddev,
 			if (!test_bit(Journal, &rdev->flags))
 				spares++;
 			md_new_event(mddev);
-			set_bit(MD_CHANGE_DEVS, &mddev->flags);
+			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 		}
 	}
 no_add:
 	if (removed)
-		set_bit(MD_CHANGE_DEVS, &mddev->flags);
+		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 	return spares;
 }
 
@@ -8385,7 +8388,7 @@ void md_check_recovery(struct mddev *mddev)
 	if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
 		return;
 	if ( ! (
-		(mddev->flags & MD_UPDATE_SB_FLAGS & ~ (1<<MD_CHANGE_PENDING)) ||
+		(mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
 		test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
 		test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
 		test_bit(MD_RELOAD_SB, &mddev->flags) ||
@@ -8423,7 +8426,7 @@ void md_check_recovery(struct mddev *mddev)
 			md_reap_sync_thread(mddev);
 			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
 			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
-			clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
 			goto unlock;
 		}
 
@@ -8451,7 +8454,7 @@ void md_check_recovery(struct mddev *mddev)
 			    mddev->recovery_cp == MaxSector) {
 				mddev->in_sync = 1;
 				did_change = 1;
-				set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+				set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
 			}
 			if (mddev->safemode == 1)
 				mddev->safemode = 0;
@@ -8460,7 +8463,7 @@ void md_check_recovery(struct mddev *mddev)
 				sysfs_notify_dirent_safe(mddev->sysfs_state);
 		}
 
-		if (mddev->flags & MD_UPDATE_SB_FLAGS)
+		if (mddev->sb_flags)
 			md_update_sb(mddev, 0);
 
 		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
@@ -8556,7 +8559,7 @@ void md_reap_sync_thread(struct mddev *mddev)
 		if (mddev->pers->spare_active(mddev)) {
 			sysfs_notify(&mddev->kobj, NULL,
 				     "degraded");
-			set_bit(MD_CHANGE_DEVS, &mddev->flags);
+			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 		}
 	}
 	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
@@ -8571,7 +8574,7 @@ void md_reap_sync_thread(struct mddev *mddev)
 			rdev->saved_raid_disk = -1;
 
 	md_update_sb(mddev, 1);
-	/* MD_CHANGE_PENDING should be cleared by md_update_sb, so we can
+	/* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can
 	 * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
 	 * clustered raid */
 	if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
@@ -8637,8 +8640,8 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
 		sysfs_notify(&rdev->kobj, NULL,
 			     "unacknowledged_bad_blocks");
 		sysfs_notify_dirent_safe(rdev->sysfs_state);
-		set_mask_bits(&mddev->flags, 0,
-			      BIT(MD_CHANGE_CLEAN) | BIT(MD_CHANGE_PENDING));
+		set_mask_bits(&mddev->sb_flags, 0,
+			      BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING));
 		md_wakeup_thread(rdev->mddev->thread);
 		return 1;
 	} else
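
The md_update_sb() hunks above also lean on bit_clear_unless(): atomically
clear the given bits unless any of the guard bits are set, reporting whether
the clear happened. A rough, non-atomic model of that semantic, with
illustrative bit values (the in-kernel helper in include/linux/bitops.h uses
a cmpxchg loop):

#include <stdio.h>

/* Non-atomic model of bit_clear_unless(ptr, clear, test): clear the
 * 'clear' bits only when no 'test' bit is set, and return nonzero when
 * the clear actually happened.  md_update_sb() uses this to drop
 * MD_SB_CHANGE_PENDING unless DEVS/CLEAN were set again mid-update,
 * in which case it loops back to its repeat: label.
 */
static int bit_clear_unless_model(unsigned long *ptr,
				  unsigned long clear, unsigned long test)
{
	if (*ptr & test)
		return 0;	/* guard bits set: leave 'clear' bits alone */
	*ptr &= ~clear;
	return 1;		/* cleared */
}

int main(void)
{
	/* Illustrative bit positions, mirroring enum mddev_sb_flags. */
	const unsigned long DEVS = 1UL << 0, CLEAN = 1UL << 1,
			    PENDING = 1UL << 2;
	unsigned long sb_flags = PENDING | DEVS;	/* change raced in */

	if (!bit_clear_unless_model(&sb_flags, PENDING, DEVS | CLEAN))
		printf("changes arrived meanwhile, write superblock again\n");
	return 0;
}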
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 5c08f84101fa..e38936d05df1 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -213,9 +213,6 @@ extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
 struct md_cluster_info;
 
 enum mddev_flags {
-	MD_CHANGE_DEVS,		/* Some device status has changed */
-	MD_CHANGE_CLEAN,	/* transition to or from 'clean' */
-	MD_CHANGE_PENDING,	/* switch from 'clean' to 'active' in progress */
 	MD_ARRAY_FIRST_USE,	/* First use of array, needs initialization */
 	MD_CLOSING,		/* If set, we are closing the array, do not open
 				 * it then */
@@ -231,11 +228,15 @@ enum mddev_flags {
 					 * supported as calls to md_error() will
 					 * never cause the array to become failed.
 					 */
-	MD_NEED_REWRITE,	/* metadata write needs to be repeated */
 };
-#define MD_UPDATE_SB_FLAGS	(BIT(MD_CHANGE_DEVS) | \
-				 BIT(MD_CHANGE_CLEAN) | \
-				 BIT(MD_CHANGE_PENDING))	/* If these are set, md_update_sb needed */
+
+enum mddev_sb_flags {
+	MD_SB_CHANGE_DEVS,	/* Some device status has changed */
+	MD_SB_CHANGE_CLEAN,	/* transition to or from 'clean' */
+	MD_SB_CHANGE_PENDING,	/* switch from 'clean' to 'active' in progress */
+	MD_SB_NEED_REWRITE,	/* metadata write needs to be repeated */
+};
+
 struct mddev {
 	void *private;
 	struct md_personality *pers;
@@ -243,6 +244,7 @@ struct mddev {
 	int md_minor;
 	struct list_head disks;
 	unsigned long flags;
+	unsigned long sb_flags;
 
 	int suspended;
 	atomic_t active_io;
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 589b80775d3f..9fa2d6c5d996 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -208,7 +208,7 @@ static void multipath_error (struct mddev *mddev, struct md_rdev *rdev)
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 	}
 	set_bit(Faulty, &rdev->flags);
-	set_bit(MD_CHANGE_DEVS, &mddev->flags);
+	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 	pr_err("multipath: IO failure on %s, disabling IO path.\n"
 	       "multipath: Operation continuing on %d IO paths.\n",
 	       bdevname(rdev->bdev, b),
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index efc2e744cfd3..a1f3fbed9100 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1517,8 +1517,8 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
 	 * if recovery is running, make sure it aborts.
 	 */
 	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-	set_mask_bits(&mddev->flags, 0,
-		      BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
+	set_mask_bits(&mddev->sb_flags, 0,
+		      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
 	pr_crit("md/raid1:%s: Disk failure on %s, disabling device.\n"
 		"md/raid1:%s: Operation continuing on %d devices.\n",
 		mdname(mddev), bdevname(rdev->bdev, b),
@@ -2464,10 +2464,10 @@ static void raid1d(struct md_thread *thread)
 	md_check_recovery(mddev);
 
 	if (!list_empty_careful(&conf->bio_end_io_list) &&
-	    !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+	    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
 		LIST_HEAD(tmp);
 		spin_lock_irqsave(&conf->device_lock, flags);
-		if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+		if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
 			while (!list_empty(&conf->bio_end_io_list)) {
 				list_move(conf->bio_end_io_list.prev, &tmp);
 				conf->nr_queued--;
@@ -2521,7 +2521,7 @@ static void raid1d(struct md_thread *thread)
 			generic_make_request(r1_bio->bios[r1_bio->read_disk]);
 
 		cond_resched();
-		if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
+		if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
 			md_check_recovery(mddev);
 	}
 	blk_finish_plug(&plug);
@@ -2724,7 +2724,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 						min_bad, 0
 					) && ok;
 		}
-		set_bit(MD_CHANGE_DEVS, &mddev->flags);
+		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 		*skipped = 1;
 		put_buf(r1_bio);
 
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 525ca9923707..ab5e86209322 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1138,12 +1138,12 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
 		     bio->bi_iter.bi_sector < conf->reshape_progress))) {
 		/* Need to update reshape_position in metadata */
 		mddev->reshape_position = conf->reshape_progress;
-		set_mask_bits(&mddev->flags, 0,
-			      BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
+		set_mask_bits(&mddev->sb_flags, 0,
+			      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
 		md_wakeup_thread(mddev->thread);
 		raid10_log(conf->mddev, "wait reshape metadata");
 		wait_event(mddev->sb_wait,
-			   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
+			   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
 
 		conf->reshape_safe = mddev->reshape_position;
 	}
@@ -1652,8 +1652,8 @@ static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
 	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
 	set_bit(Blocked, &rdev->flags);
 	set_bit(Faulty, &rdev->flags);
-	set_mask_bits(&mddev->flags, 0,
-		      BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
+	set_mask_bits(&mddev->sb_flags, 0,
+		      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
 	spin_unlock_irqrestore(&conf->device_lock, flags);
 	pr_crit("md/raid10:%s: Disk failure on %s, disabling device.\n"
 		"md/raid10:%s: Operation continuing on %d devices.\n",
@@ -2761,10 +2761,10 @@ static void raid10d(struct md_thread *thread)
 	md_check_recovery(mddev);
 
 	if (!list_empty_careful(&conf->bio_end_io_list) &&
-	    !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+	    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
 		LIST_HEAD(tmp);
 		spin_lock_irqsave(&conf->device_lock, flags);
-		if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+		if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
 			while (!list_empty(&conf->bio_end_io_list)) {
 				list_move(conf->bio_end_io_list.prev, &tmp);
 				conf->nr_queued--;
@@ -2822,7 +2822,7 @@ static void raid10d(struct md_thread *thread)
 		}
 
 		cond_resched();
-		if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
+		if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
 			md_check_recovery(mddev);
 	}
 	blk_finish_plug(&plug);
@@ -4209,7 +4209,7 @@ static int raid10_start_reshape(struct mddev *mddev)
 	spin_unlock_irq(&conf->device_lock);
 	mddev->raid_disks = conf->geo.raid_disks;
 	mddev->reshape_position = conf->reshape_progress;
-	set_bit(MD_CHANGE_DEVS, &mddev->flags);
+	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 
 	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
 	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
@@ -4404,9 +4404,9 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
 		else
 			mddev->curr_resync_completed = conf->reshape_progress;
 		conf->reshape_checkpoint = jiffies;
-		set_bit(MD_CHANGE_DEVS, &mddev->flags);
+		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 		md_wakeup_thread(mddev->thread);
-		wait_event(mddev->sb_wait, mddev->flags == 0 ||
+		wait_event(mddev->sb_wait, mddev->sb_flags == 0 ||
 			   test_bit(MD_RECOVERY_INTR, &mddev->recovery));
 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
 			allow_barrier(conf);
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index de8a4ede0bc9..6d1a150eacd6 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1206,8 +1206,8 @@ static void r5l_write_super_and_discard_space(struct r5l_log *log,
 	 * there is a deadlock. We workaround this issue with a trylock.
 	 * FIXME: we could miss discard if we can't take reconfig mutex
 	 */
-	set_mask_bits(&mddev->flags, 0,
-		      BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
+	set_mask_bits(&mddev->sb_flags, 0,
+		      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
 	if (!mddev_trylock(mddev))
 		return;
 	md_update_sb(mddev, 1);
@@ -2197,7 +2197,7 @@ static void r5l_write_super(struct r5l_log *log, sector_t cp)
 	struct mddev *mddev = log->rdev->mddev;
 
 	log->rdev->journal_tail = cp;
-	set_bit(MD_CHANGE_DEVS, &mddev->flags);
+	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 }
 
 static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 3e6a2a0d61e9..d40e94d56410 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -961,7 +961,7 @@ again:
 			if (bad < 0) {
 				set_bit(BlockedBadBlocks, &rdev->flags);
 				if (!conf->mddev->external &&
-				    conf->mddev->flags) {
+				    conf->mddev->sb_flags) {
 					/* It is very unlikely, but we might
 					 * still need to write out the
 					 * bad block log - better give it
@@ -2547,8 +2547,8 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
 
 	set_bit(Blocked, &rdev->flags);
 	set_bit(Faulty, &rdev->flags);
-	set_mask_bits(&mddev->flags, 0,
-		      BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
+	set_mask_bits(&mddev->sb_flags, 0,
+		      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
 	pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n"
 		"md/raid:%s: Operation continuing on %d devices.\n",
 		mdname(mddev),
@@ -4761,7 +4761,7 @@ finish:
 	}
 
 	if (!bio_list_empty(&s.return_bi)) {
-		if (test_bit(MD_CHANGE_PENDING, &conf->mddev->flags)) {
+		if (test_bit(MD_SB_CHANGE_PENDING, &conf->mddev->sb_flags)) {
 			spin_lock_irq(&conf->device_lock);
 			bio_list_merge(&conf->return_bi, &s.return_bi);
 			spin_unlock_irq(&conf->device_lock);
@@ -5617,9 +5617,9 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
 		mddev->reshape_position = conf->reshape_progress;
 		mddev->curr_resync_completed = sector_nr;
 		conf->reshape_checkpoint = jiffies;
-		set_bit(MD_CHANGE_DEVS, &mddev->flags);
+		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 		md_wakeup_thread(mddev->thread);
-		wait_event(mddev->sb_wait, mddev->flags == 0 ||
+		wait_event(mddev->sb_wait, mddev->sb_flags == 0 ||
 			   test_bit(MD_RECOVERY_INTR, &mddev->recovery));
 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
 			return 0;
@@ -5715,10 +5715,10 @@ finish:
 		mddev->reshape_position = conf->reshape_progress;
 		mddev->curr_resync_completed = sector_nr;
 		conf->reshape_checkpoint = jiffies;
-		set_bit(MD_CHANGE_DEVS, &mddev->flags);
+		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 		md_wakeup_thread(mddev->thread);
 		wait_event(mddev->sb_wait,
-			   !test_bit(MD_CHANGE_DEVS, &mddev->flags)
+			   !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)
 			   || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
 			goto ret;
@@ -5993,10 +5993,10 @@ static void raid5d(struct md_thread *thread)
 	md_check_recovery(mddev);
 
 	if (!bio_list_empty(&conf->return_bi) &&
-	    !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+	    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
 		struct bio_list tmp = BIO_EMPTY_LIST;
 		spin_lock_irq(&conf->device_lock);
-		if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+		if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
 			bio_list_merge(&tmp, &conf->return_bi);
 			bio_list_init(&conf->return_bi);
 		}
@@ -6043,7 +6043,7 @@ static void raid5d(struct md_thread *thread)
 			break;
 		handled += batch_size;
 
-		if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) {
+		if (mddev->sb_flags & ~(1 << MD_SB_CHANGE_PENDING)) {
 			spin_unlock_irq(&conf->device_lock);
 			md_check_recovery(mddev);
 			spin_lock_irq(&conf->device_lock);
@@ -7640,7 +7640,7 @@ static int raid5_start_reshape(struct mddev *mddev)
 	}
 	mddev->raid_disks = conf->raid_disks;
 	mddev->reshape_position = conf->reshape_progress;
-	set_bit(MD_CHANGE_DEVS, &mddev->flags);
+	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 
 	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
 	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
@@ -7906,7 +7906,7 @@ static int raid5_check_reshape(struct mddev *mddev)
 			conf->chunk_sectors = new_chunk ;
 			mddev->chunk_sectors = new_chunk;
 		}
-		set_bit(MD_CHANGE_DEVS, &mddev->flags);
+		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 		md_wakeup_thread(mddev->thread);
 	}
 	return check_reshape(mddev);