author    NeilBrown <neilb@suse.de>  2014-12-14 20:57:01 -0500
committer NeilBrown <neilb@suse.de>  2015-02-05 17:32:56 -0500
commit    6791875e2e5393845b9c781d2998481089735134 (patch)
tree      a89ef0e3fbd3de6ff0bfe6bf3496f281853ab08c /drivers/md/raid5.c
parent    5c47daf6e76f657d961a96d89f6419fde8eda557 (diff)
md: make reconfig_mutex optional for writes to md sysfs files.
Rather than using mddev_lock() to take the reconfig_mutex when writing
to any md sysfs file, we only take mddev_lock() in the particular
_store() functions that require it.  Admittedly this is most, but it
isn't all.

This also allows us to remove special-case handling for new_dev_store
(in md_attr_store).

Signed-off-by: NeilBrown <neilb@suse.de>
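The shape every converted _store() handler takes is sketched below. This is a minimal sketch only: the example attribute and the conf->example field are illustrative names, not part of md; the real conversions follow in the diff.

static ssize_t
example_store(struct mddev *mddev, const char *page, size_t len)
{
        struct r5conf *conf;
        unsigned long new;
        int err;

        if (kstrtoul(page, 10, &new))   /* parse input before taking any lock */
                return -EINVAL;

        err = mddev_lock(mddev);        /* takes reconfig_mutex; can fail (e.g. if interrupted) */
        if (err)
                return err;
        conf = mddev->private;          /* must be re-read under the lock: the array */
        if (!conf)                      /* may have been stopped in the meantime */
                err = -ENODEV;
        else
                conf->example = new;    /* hypothetical attribute-specific update */
        mddev_unlock(mddev);

        return err ?: len;              /* GNU ?: yields len when err is 0 */
}

Parsing before locking keeps the error paths lock-free, and re-checking mddev->private under the lock replaces the old unlocked !conf test.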
Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--  drivers/md/raid5.c  134
1 file changed, 76 insertions(+), 58 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index d5b80174b3b3..aa76865b804b 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5400,21 +5400,25 @@ EXPORT_SYMBOL(raid5_set_cache_size);
 static ssize_t
 raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len)
 {
-        struct r5conf *conf = mddev->private;
+        struct r5conf *conf;
         unsigned long new;
         int err;
 
         if (len >= PAGE_SIZE)
                 return -EINVAL;
-        if (!conf)
-                return -ENODEV;
-
         if (kstrtoul(page, 10, &new))
                 return -EINVAL;
-        err = raid5_set_cache_size(mddev, new);
+        err = mddev_lock(mddev);
         if (err)
                 return err;
-        return len;
+        conf = mddev->private;
+        if (!conf)
+                err = -ENODEV;
+        else
+                err = raid5_set_cache_size(mddev, new);
+        mddev_unlock(mddev);
+
+        return err ?: len;
 }
 
 static struct md_sysfs_entry
@@ -5438,19 +5442,27 @@ raid5_show_preread_threshold(struct mddev *mddev, char *page)
 static ssize_t
 raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len)
 {
-        struct r5conf *conf = mddev->private;
+        struct r5conf *conf;
         unsigned long new;
+        int err;
+
         if (len >= PAGE_SIZE)
                 return -EINVAL;
-        if (!conf)
-                return -ENODEV;
-
         if (kstrtoul(page, 10, &new))
                 return -EINVAL;
-        if (new > conf->max_nr_stripes)
-                return -EINVAL;
-        conf->bypass_threshold = new;
-        return len;
+
+        err = mddev_lock(mddev);
+        if (err)
+                return err;
+        conf = mddev->private;
+        if (!conf)
+                err = -ENODEV;
+        else if (new > conf->max_nr_stripes)
+                err = -EINVAL;
+        else
+                conf->bypass_threshold = new;
+        mddev_unlock(mddev);
+        return err ?: len;
 }
 
 static struct md_sysfs_entry
@@ -5475,29 +5487,35 @@ raid5_show_skip_copy(struct mddev *mddev, char *page)
 static ssize_t
 raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
 {
-        struct r5conf *conf = mddev->private;
+        struct r5conf *conf;
         unsigned long new;
+        int err;
+
         if (len >= PAGE_SIZE)
                 return -EINVAL;
-        if (!conf)
-                return -ENODEV;
-
         if (kstrtoul(page, 10, &new))
                 return -EINVAL;
         new = !!new;
-        if (new == conf->skip_copy)
-                return len;
 
-        mddev_suspend(mddev);
-        conf->skip_copy = new;
-        if (new)
-                mddev->queue->backing_dev_info.capabilities |=
-                                                BDI_CAP_STABLE_WRITES;
-        else
-                mddev->queue->backing_dev_info.capabilities &=
-                                                ~BDI_CAP_STABLE_WRITES;
-        mddev_resume(mddev);
-        return len;
+        err = mddev_lock(mddev);
+        if (err)
+                return err;
+        conf = mddev->private;
+        if (!conf)
+                err = -ENODEV;
+        else if (new != conf->skip_copy) {
+                mddev_suspend(mddev);
+                conf->skip_copy = new;
+                if (new)
+                        mddev->queue->backing_dev_info.capabilities |=
+                                                BDI_CAP_STABLE_WRITES;
+                else
+                        mddev->queue->backing_dev_info.capabilities &=
+                                                ~BDI_CAP_STABLE_WRITES;
+                mddev_resume(mddev);
+        }
+        mddev_unlock(mddev);
+        return err ?: len;
 }
 
 static struct md_sysfs_entry
@@ -5538,7 +5556,7 @@ static int alloc_thread_groups(struct r5conf *conf, int cnt,
 static ssize_t
 raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
 {
-        struct r5conf *conf = mddev->private;
+        struct r5conf *conf;
         unsigned long new;
         int err;
         struct r5worker_group *new_groups, *old_groups;
@@ -5546,41 +5564,41 @@ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
 
         if (len >= PAGE_SIZE)
                 return -EINVAL;
-        if (!conf)
-                return -ENODEV;
-
         if (kstrtoul(page, 10, &new))
                 return -EINVAL;
 
-        if (new == conf->worker_cnt_per_group)
-                return len;
-
-        mddev_suspend(mddev);
+        err = mddev_lock(mddev);
+        if (err)
+                return err;
+        conf = mddev->private;
+        if (!conf)
+                err = -ENODEV;
+        else if (new != conf->worker_cnt_per_group) {
+                mddev_suspend(mddev);
 
                 old_groups = conf->worker_groups;
                 if (old_groups)
                         flush_workqueue(raid5_wq);
 
                 err = alloc_thread_groups(conf, new,
                                           &group_cnt, &worker_cnt_per_group,
                                           &new_groups);
                 if (!err) {
                         spin_lock_irq(&conf->device_lock);
                         conf->group_cnt = group_cnt;
                         conf->worker_cnt_per_group = worker_cnt_per_group;
                         conf->worker_groups = new_groups;
                         spin_unlock_irq(&conf->device_lock);
 
                         if (old_groups)
                                 kfree(old_groups[0].workers);
                         kfree(old_groups);
+                }
+                mddev_resume(mddev);
         }
+        mddev_unlock(mddev);
 
-        mddev_resume(mddev);
-
-        if (err)
-                return err;
-        return len;
+        return err ?: len;
 }
 
 static struct md_sysfs_entry