about summary refs log tree commit diff stats
path: root/drivers/md
diff options
context:
space:
mode:
author: Linus Torvalds <torvalds@linux-foundation.org> 2014-12-14 15:13:05 -0500
committer: Linus Torvalds <torvalds@linux-foundation.org> 2014-12-14 15:13:05 -0500
commit: 8fd9589ced9a4ab1cf23296fa1c17d07e883f734 (patch)
tree: 48f284a1ded8e4a04719c838092dd00a7ad2eaf8 /drivers/md
parent: 536e89ee53e9cbdec00e49ae1888bffa262043d8 (diff)
parent: f851b60db0fd83a10034c5cc9d9e58c758457b1c (diff)
Merge tag 'md/3.19' of git://neil.brown.name/md
Pull md updates from Neil Brown: "Three fixes for md. I did have a largish set of locking changes queued, but late testing showed they weren't quite as stable as I thought and while I fixed what I found, I decided it safer to delay them a release ... particularly as I'll be AFK for a few weeks. So expect a larger batch next time :-)" * tag 'md/3.19' of git://neil.brown.name/md: md: Check MD_RECOVERY_RUNNING as well as ->sync_thread. md: fix semicolon.cocci warnings md/raid5: fetch_block must fetch all the blocks handle_stripe_dirtying wants.
Diffstat (limited to 'drivers/md')
-rw-r--r-- drivers/md/md.c    | 38
-rw-r--r-- drivers/md/raid5.c |  7
2 files changed, 32 insertions(+), 13 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 056ccd28c037..709755fb6d7b 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2691,7 +2691,8 @@ static ssize_t new_offset_store(struct md_rdev *rdev,
2691 if (kstrtoull(buf, 10, &new_offset) < 0) 2691 if (kstrtoull(buf, 10, &new_offset) < 0)
2692 return -EINVAL; 2692 return -EINVAL;
2693 2693
2694 if (mddev->sync_thread) 2694 if (mddev->sync_thread ||
2695 test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
2695 return -EBUSY; 2696 return -EBUSY;
2696 if (new_offset == rdev->data_offset) 2697 if (new_offset == rdev->data_offset)
2697 /* reset is always permitted */ 2698 /* reset is always permitted */
@@ -3268,6 +3269,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
3268 */ 3269 */
3269 3270
3270 if (mddev->sync_thread || 3271 if (mddev->sync_thread ||
3272 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3271 mddev->reshape_position != MaxSector || 3273 mddev->reshape_position != MaxSector ||
3272 mddev->sysfs_active) 3274 mddev->sysfs_active)
3273 return -EBUSY; 3275 return -EBUSY;
@@ -4022,6 +4024,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
4022 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4024 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4023 4025
4024 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) { 4026 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
4027 flush_workqueue(md_misc_wq);
4025 if (mddev->sync_thread) { 4028 if (mddev->sync_thread) {
4026 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4029 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4027 md_reap_sync_thread(mddev); 4030 md_reap_sync_thread(mddev);
@@ -5040,6 +5043,7 @@ static void md_clean(struct mddev *mddev)
5040static void __md_stop_writes(struct mddev *mddev) 5043static void __md_stop_writes(struct mddev *mddev)
5041{ 5044{
5042 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5045 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5046 flush_workqueue(md_misc_wq);
5043 if (mddev->sync_thread) { 5047 if (mddev->sync_thread) {
5044 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 5048 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5045 md_reap_sync_thread(mddev); 5049 md_reap_sync_thread(mddev);
@@ -5100,19 +5104,22 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
5100 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5104 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5101 md_wakeup_thread(mddev->thread); 5105 md_wakeup_thread(mddev->thread);
5102 } 5106 }
5103 if (mddev->sync_thread) { 5107 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5104 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 5108 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5109 if (mddev->sync_thread)
5105 /* Thread might be blocked waiting for metadata update 5110 /* Thread might be blocked waiting for metadata update
5106 * which will now never happen */ 5111 * which will now never happen */
5107 wake_up_process(mddev->sync_thread->tsk); 5112 wake_up_process(mddev->sync_thread->tsk);
5108 } 5113
5109 mddev_unlock(mddev); 5114 mddev_unlock(mddev);
5110 wait_event(resync_wait, mddev->sync_thread == NULL); 5115 wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
5116 &mddev->recovery));
5111 mddev_lock_nointr(mddev); 5117 mddev_lock_nointr(mddev);
5112 5118
5113 mutex_lock(&mddev->open_mutex); 5119 mutex_lock(&mddev->open_mutex);
5114 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || 5120 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
5115 mddev->sync_thread || 5121 mddev->sync_thread ||
5122 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
5116 (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) { 5123 (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
5117 printk("md: %s still in use.\n",mdname(mddev)); 5124 printk("md: %s still in use.\n",mdname(mddev));
5118 if (did_freeze) { 5125 if (did_freeze) {
@@ -5158,20 +5165,24 @@ static int do_md_stop(struct mddev *mddev, int mode,
5158 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5165 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5159 md_wakeup_thread(mddev->thread); 5166 md_wakeup_thread(mddev->thread);
5160 } 5167 }
5161 if (mddev->sync_thread) { 5168 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5162 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 5169 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5170 if (mddev->sync_thread)
5163 /* Thread might be blocked waiting for metadata update 5171 /* Thread might be blocked waiting for metadata update
5164 * which will now never happen */ 5172 * which will now never happen */
5165 wake_up_process(mddev->sync_thread->tsk); 5173 wake_up_process(mddev->sync_thread->tsk);
5166 } 5174
5167 mddev_unlock(mddev); 5175 mddev_unlock(mddev);
5168 wait_event(resync_wait, mddev->sync_thread == NULL); 5176 wait_event(resync_wait, (mddev->sync_thread == NULL &&
5177 !test_bit(MD_RECOVERY_RUNNING,
5178 &mddev->recovery)));
5169 mddev_lock_nointr(mddev); 5179 mddev_lock_nointr(mddev);
5170 5180
5171 mutex_lock(&mddev->open_mutex); 5181 mutex_lock(&mddev->open_mutex);
5172 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || 5182 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
5173 mddev->sysfs_active || 5183 mddev->sysfs_active ||
5174 mddev->sync_thread || 5184 mddev->sync_thread ||
5185 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
5175 (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) { 5186 (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
5176 printk("md: %s still in use.\n",mdname(mddev)); 5187 printk("md: %s still in use.\n",mdname(mddev));
5177 mutex_unlock(&mddev->open_mutex); 5188 mutex_unlock(&mddev->open_mutex);
@@ -5946,7 +5957,8 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
5946 * of each device. If num_sectors is zero, we find the largest size 5957 * of each device. If num_sectors is zero, we find the largest size
5947 * that fits. 5958 * that fits.
5948 */ 5959 */
5949 if (mddev->sync_thread) 5960 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
5961 mddev->sync_thread)
5950 return -EBUSY; 5962 return -EBUSY;
5951 if (mddev->ro) 5963 if (mddev->ro)
5952 return -EROFS; 5964 return -EROFS;
@@ -5977,7 +5989,9 @@ static int update_raid_disks(struct mddev *mddev, int raid_disks)
5977 if (raid_disks <= 0 || 5989 if (raid_disks <= 0 ||
5978 (mddev->max_disks && raid_disks >= mddev->max_disks)) 5990 (mddev->max_disks && raid_disks >= mddev->max_disks))
5979 return -EINVAL; 5991 return -EINVAL;
5980 if (mddev->sync_thread || mddev->reshape_position != MaxSector) 5992 if (mddev->sync_thread ||
5993 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
5994 mddev->reshape_position != MaxSector)
5981 return -EBUSY; 5995 return -EBUSY;
5982 5996
5983 rdev_for_each(rdev, mddev) { 5997 rdev_for_each(rdev, mddev) {
@@ -6965,7 +6979,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
6965 int mask; 6979 int mask;
6966 6980
6967 if (md_unloading) 6981 if (md_unloading)
6968 return POLLIN|POLLRDNORM|POLLERR|POLLPRI;; 6982 return POLLIN|POLLRDNORM|POLLERR|POLLPRI;
6969 poll_wait(filp, &md_event_waiters, wait); 6983 poll_wait(filp, &md_event_waiters, wait);
6970 6984
6971 /* always allow read */ 6985 /* always allow read */
@@ -7589,6 +7603,7 @@ static void md_start_sync(struct work_struct *ws)
7589 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 7603 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
7590 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 7604 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
7591 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 7605 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
7606 wake_up(&resync_wait);
7592 if (test_and_clear_bit(MD_RECOVERY_RECOVER, 7607 if (test_and_clear_bit(MD_RECOVERY_RECOVER,
7593 &mddev->recovery)) 7608 &mddev->recovery))
7594 if (mddev->sysfs_action) 7609 if (mddev->sysfs_action)
@@ -7757,6 +7772,7 @@ void md_check_recovery(struct mddev *mddev)
7757 not_running: 7772 not_running:
7758 if (!mddev->sync_thread) { 7773 if (!mddev->sync_thread) {
7759 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 7774 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
7775 wake_up(&resync_wait);
7760 if (test_and_clear_bit(MD_RECOVERY_RECOVER, 7776 if (test_and_clear_bit(MD_RECOVERY_RECOVER,
7761 &mddev->recovery)) 7777 &mddev->recovery))
7762 if (mddev->sysfs_action) 7778 if (mddev->sysfs_action)
@@ -7775,7 +7791,6 @@ void md_reap_sync_thread(struct mddev *mddev)
7775 7791
7776 /* resync has finished, collect result */ 7792 /* resync has finished, collect result */
7777 md_unregister_thread(&mddev->sync_thread); 7793 md_unregister_thread(&mddev->sync_thread);
7778 wake_up(&resync_wait);
7779 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 7794 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
7780 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 7795 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
7781 /* success...*/ 7796 /* success...*/
@@ -7803,6 +7818,7 @@ void md_reap_sync_thread(struct mddev *mddev)
7803 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 7818 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
7804 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 7819 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
7805 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 7820 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
7821 wake_up(&resync_wait);
7806 /* flag recovery needed just to double check */ 7822 /* flag recovery needed just to double check */
7807 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7823 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7808 sysfs_notify_dirent_safe(mddev->sysfs_action); 7824 sysfs_notify_dirent_safe(mddev->sysfs_action);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 9c66e5997fc8..c1b0d52bfcb0 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2917,8 +2917,11 @@ static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
2917 (sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite && 2917 (sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite &&
2918 (!test_bit(R5_Insync, &dev->flags) || test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) && 2918 (!test_bit(R5_Insync, &dev->flags) || test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) &&
2919 !test_bit(R5_OVERWRITE, &fdev[0]->flags)) || 2919 !test_bit(R5_OVERWRITE, &fdev[0]->flags)) ||
2920 (sh->raid_conf->level == 6 && s->failed && s->to_write && 2920 ((sh->raid_conf->level == 6 ||
2921 s->to_write - s->non_overwrite < sh->raid_conf->raid_disks - 2 && 2921 sh->sector >= sh->raid_conf->mddev->recovery_cp)
2922 && s->failed && s->to_write &&
2923 (s->to_write - s->non_overwrite <
2924 sh->raid_conf->raid_disks - sh->raid_conf->max_degraded) &&
2922 (!test_bit(R5_Insync, &dev->flags) || test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))))) { 2925 (!test_bit(R5_Insync, &dev->flags) || test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))))) {
2923 /* we would like to get this block, possibly by computing it, 2926 /* we would like to get this block, possibly by computing it,
2924 * otherwise read it if the backing disk is insync 2927 * otherwise read it if the backing disk is insync