about | summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
authorNeilBrown <neilb@suse.de>2014-12-14 20:56:58 -0500
committerNeilBrown <neilb@suse.de>2015-02-03 16:35:53 -0500
commit36d091f4759d194c99f0705d412afe208622b45a (patch)
treef76abdd702316a37df60f6522adb3f6a3b230741
parentdb721d32b74b51a5ac9ec9fab1d85cba90dbdbd3 (diff)
md: protect ->pers changes with mddev->lock
->pers is already protected by ->reconfig_mutex, and cannot possibly change when there are threads running or outstanding IO.

However there are some places where we access ->pers not in a thread or IO context, and where ->reconfig_mutex is unnecessarily heavy-weight: level_show and md_seq_show().

So protect all changes, and those accesses, with ->lock. This is a step toward taking those accesses out from under reconfig_mutex.

[Fixed missing "mddev->pers" -> "pers" conversion, thanks to Dan Carpenter <dan.carpenter@oracle.com>]

Signed-off-by: NeilBrown <neilb@suse.de>
-rw-r--r--drivers/md/md.c55
-rw-r--r--drivers/md/md.h1
2 files changed, 35 insertions, 21 deletions
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 58f531f8dcc2..4db4e4146a35 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3264,15 +3264,20 @@ __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
3264static ssize_t 3264static ssize_t
3265level_show(struct mddev *mddev, char *page) 3265level_show(struct mddev *mddev, char *page)
3266{ 3266{
3267 struct md_personality *p = mddev->pers; 3267 struct md_personality *p;
3268 int ret;
3269 spin_lock(&mddev->lock);
3270 p = mddev->pers;
3268 if (p) 3271 if (p)
3269 return sprintf(page, "%s\n", p->name); 3272 ret = sprintf(page, "%s\n", p->name);
3270 else if (mddev->clevel[0]) 3273 else if (mddev->clevel[0])
3271 return sprintf(page, "%s\n", mddev->clevel); 3274 ret = sprintf(page, "%s\n", mddev->clevel);
3272 else if (mddev->level != LEVEL_NONE) 3275 else if (mddev->level != LEVEL_NONE)
3273 return sprintf(page, "%d\n", mddev->level); 3276 ret = sprintf(page, "%d\n", mddev->level);
3274 else 3277 else
3275 return 0; 3278 ret = 0;
3279 spin_unlock(&mddev->lock);
3280 return ret;
3276} 3281}
3277 3282
3278static ssize_t 3283static ssize_t
@@ -3374,6 +3379,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
3374 /* Looks like we have a winner */ 3379 /* Looks like we have a winner */
3375 mddev_suspend(mddev); 3380 mddev_suspend(mddev);
3376 mddev_detach(mddev); 3381 mddev_detach(mddev);
3382
3383 spin_lock(&mddev->lock);
3377 oldpers = mddev->pers; 3384 oldpers = mddev->pers;
3378 oldpriv = mddev->private; 3385 oldpriv = mddev->private;
3379 mddev->pers = pers; 3386 mddev->pers = pers;
@@ -3385,6 +3392,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
3385 mddev->delta_disks = 0; 3392 mddev->delta_disks = 0;
3386 mddev->reshape_backwards = 0; 3393 mddev->reshape_backwards = 0;
3387 mddev->degraded = 0; 3394 mddev->degraded = 0;
3395 spin_unlock(&mddev->lock);
3388 3396
3389 if (oldpers->sync_request == NULL && 3397 if (oldpers->sync_request == NULL &&
3390 mddev->external) { 3398 mddev->external) {
@@ -4866,7 +4874,6 @@ int md_run(struct mddev *mddev)
4866 mddev->clevel); 4874 mddev->clevel);
4867 return -EINVAL; 4875 return -EINVAL;
4868 } 4876 }
4869 mddev->pers = pers;
4870 spin_unlock(&pers_lock); 4877 spin_unlock(&pers_lock);
4871 if (mddev->level != pers->level) { 4878 if (mddev->level != pers->level) {
4872 mddev->level = pers->level; 4879 mddev->level = pers->level;
@@ -4877,7 +4884,6 @@ int md_run(struct mddev *mddev)
4877 if (mddev->reshape_position != MaxSector && 4884 if (mddev->reshape_position != MaxSector &&
4878 pers->start_reshape == NULL) { 4885 pers->start_reshape == NULL) {
4879 /* This personality cannot handle reshaping... */ 4886 /* This personality cannot handle reshaping... */
4880 mddev->pers = NULL;
4881 module_put(pers->owner); 4887 module_put(pers->owner);
4882 return -EINVAL; 4888 return -EINVAL;
4883 } 4889 }
@@ -4921,19 +4927,19 @@ int md_run(struct mddev *mddev)
4921 if (start_readonly && mddev->ro == 0) 4927 if (start_readonly && mddev->ro == 0)
4922 mddev->ro = 2; /* read-only, but switch on first write */ 4928 mddev->ro = 2; /* read-only, but switch on first write */
4923 4929
4924 err = mddev->pers->run(mddev); 4930 err = pers->run(mddev);
4925 if (err) 4931 if (err)
4926 printk(KERN_ERR "md: pers->run() failed ...\n"); 4932 printk(KERN_ERR "md: pers->run() failed ...\n");
4927 else if (mddev->pers->size(mddev, 0, 0) < mddev->array_sectors) { 4933 else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
4928 WARN_ONCE(!mddev->external_size, "%s: default size too small," 4934 WARN_ONCE(!mddev->external_size, "%s: default size too small,"
4929 " but 'external_size' not in effect?\n", __func__); 4935 " but 'external_size' not in effect?\n", __func__);
4930 printk(KERN_ERR 4936 printk(KERN_ERR
4931 "md: invalid array_size %llu > default size %llu\n", 4937 "md: invalid array_size %llu > default size %llu\n",
4932 (unsigned long long)mddev->array_sectors / 2, 4938 (unsigned long long)mddev->array_sectors / 2,
4933 (unsigned long long)mddev->pers->size(mddev, 0, 0) / 2); 4939 (unsigned long long)pers->size(mddev, 0, 0) / 2);
4934 err = -EINVAL; 4940 err = -EINVAL;
4935 } 4941 }
4936 if (err == 0 && mddev->pers->sync_request && 4942 if (err == 0 && pers->sync_request &&
4937 (mddev->bitmap_info.file || mddev->bitmap_info.offset)) { 4943 (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
4938 err = bitmap_create(mddev); 4944 err = bitmap_create(mddev);
4939 if (err) 4945 if (err)
@@ -4942,9 +4948,8 @@ int md_run(struct mddev *mddev)
4942 } 4948 }
4943 if (err) { 4949 if (err) {
4944 mddev_detach(mddev); 4950 mddev_detach(mddev);
4945 mddev->pers->free(mddev, mddev->private); 4951 pers->free(mddev, mddev->private);
4946 module_put(mddev->pers->owner); 4952 module_put(pers->owner);
4947 mddev->pers = NULL;
4948 bitmap_destroy(mddev); 4953 bitmap_destroy(mddev);
4949 return err; 4954 return err;
4950 } 4955 }
@@ -4953,7 +4958,7 @@ int md_run(struct mddev *mddev)
4953 mddev->queue->backing_dev_info.congested_fn = md_congested; 4958 mddev->queue->backing_dev_info.congested_fn = md_congested;
4954 blk_queue_merge_bvec(mddev->queue, md_mergeable_bvec); 4959 blk_queue_merge_bvec(mddev->queue, md_mergeable_bvec);
4955 } 4960 }
4956 if (mddev->pers->sync_request) { 4961 if (pers->sync_request) {
4957 if (mddev->kobj.sd && 4962 if (mddev->kobj.sd &&
4958 sysfs_create_group(&mddev->kobj, &md_redundancy_group)) 4963 sysfs_create_group(&mddev->kobj, &md_redundancy_group))
4959 printk(KERN_WARNING 4964 printk(KERN_WARNING
@@ -4972,7 +4977,10 @@ int md_run(struct mddev *mddev)
4972 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */ 4977 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
4973 mddev->in_sync = 1; 4978 mddev->in_sync = 1;
4974 smp_wmb(); 4979 smp_wmb();
4980 spin_lock(&mddev->lock);
4981 mddev->pers = pers;
4975 mddev->ready = 1; 4982 mddev->ready = 1;
4983 spin_unlock(&mddev->lock);
4976 rdev_for_each(rdev, mddev) 4984 rdev_for_each(rdev, mddev)
4977 if (rdev->raid_disk >= 0) 4985 if (rdev->raid_disk >= 0)
4978 if (sysfs_link_rdev(mddev, rdev)) 4986 if (sysfs_link_rdev(mddev, rdev))
@@ -5126,7 +5134,7 @@ static void mddev_detach(struct mddev *mddev)
5126 wait_event(bitmap->behind_wait, 5134 wait_event(bitmap->behind_wait,
5127 atomic_read(&bitmap->behind_writes) == 0); 5135 atomic_read(&bitmap->behind_writes) == 0);
5128 } 5136 }
5129 if (mddev->pers->quiesce) { 5137 if (mddev->pers && mddev->pers->quiesce) {
5130 mddev->pers->quiesce(mddev, 1); 5138 mddev->pers->quiesce(mddev, 1);
5131 mddev->pers->quiesce(mddev, 0); 5139 mddev->pers->quiesce(mddev, 0);
5132 } 5140 }
@@ -5137,13 +5145,16 @@ static void mddev_detach(struct mddev *mddev)
5137 5145
5138static void __md_stop(struct mddev *mddev) 5146static void __md_stop(struct mddev *mddev)
5139{ 5147{
5140 mddev->ready = 0; 5148 struct md_personality *pers = mddev->pers;
5141 mddev_detach(mddev); 5149 mddev_detach(mddev);
5142 mddev->pers->free(mddev, mddev->private); 5150 spin_lock(&mddev->lock);
5143 if (mddev->pers->sync_request && mddev->to_remove == NULL) 5151 mddev->ready = 0;
5144 mddev->to_remove = &md_redundancy_group;
5145 module_put(mddev->pers->owner);
5146 mddev->pers = NULL; 5152 mddev->pers = NULL;
5153 spin_unlock(&mddev->lock);
5154 pers->free(mddev, mddev->private);
5155 if (pers->sync_request && mddev->to_remove == NULL)
5156 mddev->to_remove = &md_redundancy_group;
5157 module_put(pers->owner);
5147 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5158 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5148} 5159}
5149 5160
@@ -6942,6 +6953,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
6942 if (mddev_lock(mddev) < 0) 6953 if (mddev_lock(mddev) < 0)
6943 return -EINTR; 6954 return -EINTR;
6944 6955
6956 spin_lock(&mddev->lock);
6945 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) { 6957 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
6946 seq_printf(seq, "%s : %sactive", mdname(mddev), 6958 seq_printf(seq, "%s : %sactive", mdname(mddev),
6947 mddev->pers ? "" : "in"); 6959 mddev->pers ? "" : "in");
@@ -7012,6 +7024,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
7012 7024
7013 seq_printf(seq, "\n"); 7025 seq_printf(seq, "\n");
7014 } 7026 }
7027 spin_unlock(&mddev->lock);
7015 mddev_unlock(mddev); 7028 mddev_unlock(mddev);
7016 7029
7017 return 0; 7030 return 0;
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 37e7c17e56a6..e41559dccdc9 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -391,6 +391,7 @@ struct mddev {
391 * rdev superblocks, events 391 * rdev superblocks, events
392 * clearing MD_CHANGE_* 392 * clearing MD_CHANGE_*
393 * in_sync - and related safemode and MD_CHANGE changes 393 * in_sync - and related safemode and MD_CHANGE changes
394 * pers (also protected by reconfig_mutex and pending IO).
394 */ 395 */
395 spinlock_t lock; 396 spinlock_t lock;
396 wait_queue_head_t sb_wait; /* for waiting on superblock updates */ 397 wait_queue_head_t sb_wait; /* for waiting on superblock updates */