diff options
author | NeilBrown <neilb@suse.de> | 2014-12-14 20:56:59 -0500 |
---|---|---|
committer | NeilBrown <neilb@suse.de> | 2015-02-05 17:32:55 -0500 |
commit | 7b1485bab9c49b0d3811d72beb0de60c7b8b337d (patch) | |
tree | 7cb275a901ca30331d6c53be02be58bd578cb2b8 /drivers/md/raid5.c | |
parent | f97fcad38f2ecf2e34b6f0ab93f74f2978dbe008 (diff) |
md/raid5: use ->lock to protect accessing raid5 sysfs attributes.
It is important that mddev->private isn't freed while
a sysfs attribute function is accessing it.
So use mddev->lock to protect the setting of ->private to NULL, and
take that lock when checking ->private for NULL and de-referencing it
in the sysfs access functions.
This only applies to the read ('show') side of access. Write
access will be handled separately.
Signed-off-by: NeilBrown <neilb@suse.de>
Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r-- | drivers/md/raid5.c | 44 |
1 file changed, 28 insertions, 16 deletions
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index dab908b2aa9a..d5b80174b3b3 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -5354,11 +5354,14 @@ static void raid5d(struct md_thread *thread) | |||
5354 | static ssize_t | 5354 | static ssize_t |
5355 | raid5_show_stripe_cache_size(struct mddev *mddev, char *page) | 5355 | raid5_show_stripe_cache_size(struct mddev *mddev, char *page) |
5356 | { | 5356 | { |
5357 | struct r5conf *conf = mddev->private; | 5357 | struct r5conf *conf; |
5358 | int ret = 0; | ||
5359 | spin_lock(&mddev->lock); | ||
5360 | conf = mddev->private; | ||
5358 | if (conf) | 5361 | if (conf) |
5359 | return sprintf(page, "%d\n", conf->max_nr_stripes); | 5362 | ret = sprintf(page, "%d\n", conf->max_nr_stripes); |
5360 | else | 5363 | spin_unlock(&mddev->lock); |
5361 | return 0; | 5364 | return ret; |
5362 | } | 5365 | } |
5363 | 5366 | ||
5364 | int | 5367 | int |
@@ -5422,11 +5425,14 @@ raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR, | |||
5422 | static ssize_t | 5425 | static ssize_t |
5423 | raid5_show_preread_threshold(struct mddev *mddev, char *page) | 5426 | raid5_show_preread_threshold(struct mddev *mddev, char *page) |
5424 | { | 5427 | { |
5425 | struct r5conf *conf = mddev->private; | 5428 | struct r5conf *conf; |
5429 | int ret = 0; | ||
5430 | spin_lock(&mddev->lock); | ||
5431 | conf = mddev->private; | ||
5426 | if (conf) | 5432 | if (conf) |
5427 | return sprintf(page, "%d\n", conf->bypass_threshold); | 5433 | ret = sprintf(page, "%d\n", conf->bypass_threshold); |
5428 | else | 5434 | spin_unlock(&mddev->lock); |
5429 | return 0; | 5435 | return ret; |
5430 | } | 5436 | } |
5431 | 5437 | ||
5432 | static ssize_t | 5438 | static ssize_t |
@@ -5456,11 +5462,14 @@ raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold, | |||
5456 | static ssize_t | 5462 | static ssize_t |
5457 | raid5_show_skip_copy(struct mddev *mddev, char *page) | 5463 | raid5_show_skip_copy(struct mddev *mddev, char *page) |
5458 | { | 5464 | { |
5459 | struct r5conf *conf = mddev->private; | 5465 | struct r5conf *conf; |
5466 | int ret = 0; | ||
5467 | spin_lock(&mddev->lock); | ||
5468 | conf = mddev->private; | ||
5460 | if (conf) | 5469 | if (conf) |
5461 | return sprintf(page, "%d\n", conf->skip_copy); | 5470 | ret = sprintf(page, "%d\n", conf->skip_copy); |
5462 | else | 5471 | spin_unlock(&mddev->lock); |
5463 | return 0; | 5472 | return ret; |
5464 | } | 5473 | } |
5465 | 5474 | ||
5466 | static ssize_t | 5475 | static ssize_t |
@@ -5512,11 +5521,14 @@ raid5_stripecache_active = __ATTR_RO(stripe_cache_active); | |||
5512 | static ssize_t | 5521 | static ssize_t |
5513 | raid5_show_group_thread_cnt(struct mddev *mddev, char *page) | 5522 | raid5_show_group_thread_cnt(struct mddev *mddev, char *page) |
5514 | { | 5523 | { |
5515 | struct r5conf *conf = mddev->private; | 5524 | struct r5conf *conf; |
5525 | int ret = 0; | ||
5526 | spin_lock(&mddev->lock); | ||
5527 | conf = mddev->private; | ||
5516 | if (conf) | 5528 | if (conf) |
5517 | return sprintf(page, "%d\n", conf->worker_cnt_per_group); | 5529 | ret = sprintf(page, "%d\n", conf->worker_cnt_per_group); |
5518 | else | 5530 | spin_unlock(&mddev->lock); |
5519 | return 0; | 5531 | return ret; |
5520 | } | 5532 | } |
5521 | 5533 | ||
5522 | static int alloc_thread_groups(struct r5conf *conf, int cnt, | 5534 | static int alloc_thread_groups(struct r5conf *conf, int cnt, |