author    Kent Overstreet <kent.overstreet@gmail.com>  2018-06-07 20:52:54 -0400
committer Jens Axboe <axboe@kernel.dk>  2018-06-08 10:41:17 -0400
commit    28dec870aaf704af1421ac014f7f8abf4cac7c69
tree      8eeb2a26a9b8276cbe966ff77aae106f48c21221
parent    2a2a4c510b761e800098992cac61281c86527e5d
md: Unify mddev destruction paths
Previously, mddev_put() had a couple of different paths for freeing an
mddev, because the kobject wasn't initialized when the mddev was first
allocated. If we move the kobject_init() to allocation time and just use
kobject_add() later, we can clean all of this up.
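
The split lifecycle this relies on is the standard two-step kobject
pattern: initialize the refcount at allocation time, publish to sysfs
once a parent exists, and let the ktype's release callback be the single
free path. A minimal sketch of that pattern, outside of md (all names
here, my_obj, my_ktype, my_obj_publish, are hypothetical, not the md
code):

#include <linux/kobject.h>
#include <linux/slab.h>

struct my_obj {
        struct kobject kobj;
        /* ... driver state ... */
};

static void my_obj_release(struct kobject *kobj)
{
        struct my_obj *obj = container_of(kobj, struct my_obj, kobj);

        kfree(obj);             /* the one and only free path */
}

static struct kobj_type my_ktype = {
        .release        = my_obj_release,
};

static struct my_obj *my_obj_alloc(void)
{
        struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (!obj)
                return NULL;
        /* Refcounting is live from birth, before any sysfs exposure */
        kobject_init(&obj->kobj, &my_ktype);
        return obj;
}

/* Later, once the parent kobject exists, make the object visible */
static int my_obj_publish(struct my_obj *obj, struct kobject *parent)
{
        return kobject_add(&obj->kobj, parent, "%s", "my_obj");
}

Whether or not kobject_add() has run, kobject_put() drops the reference
and funnels every teardown through the release callback; that single
path is what lets mddev_put() below shed its gendisk/no-gendisk special
cases.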
This also removes a hack in mddev_put() that avoided freeing biosets
under a spinlock, which involved copying the biosets onto the stack
after the recent bioset_init() changes.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
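
The hack existed because bioset_exit() cannot safely be called while
holding all_mddevs_lock, which previously forced mddev_put() to copy the
embedded biosets to the stack and free them after dropping the lock.
With the kobject initialized from birth, the commit instead always
defers the final teardown to a workqueue, so the blocking cleanup runs
in process context. A hypothetical sketch of that deferral pattern,
reusing my_obj from the sketch above with assumed active, node and
del_work fields (obj_lock and my_wq are likewise illustrative):

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

static DEFINE_SPINLOCK(obj_lock);
static struct workqueue_struct *my_wq;  /* created during module init */

static void my_obj_delayed_delete(struct work_struct *ws)
{
        struct my_obj *obj = container_of(ws, struct my_obj, del_work);

        /* Process context: the release path may now block, e.g. in
         * bioset_exit(), with no spinlock held. */
        kobject_put(&obj->kobj);
}

static void my_obj_put(struct my_obj *obj)
{
        /* Take obj_lock only when this put drops the last reference */
        if (!atomic_dec_and_lock(&obj->active, &obj_lock))
                return;

        list_del_init(&obj->node);
        /*
         * Queue inside the spinlock so that a flush_workqueue() issued
         * after a later lookup is guaranteed to wait for this work,
         * mirroring the comment kept in mddev_put().
         */
        INIT_WORK(&obj->del_work, my_obj_delayed_delete);
        queue_work(my_wq, &obj->del_work);
        spin_unlock(&obj_lock);
}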
-rw-r--r--   drivers/md/md.c   53
1 file changed, 18 insertions(+), 35 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index fc692b7128bb..22203eba1e6e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -84,6 +84,8 @@ static void autostart_arrays(int part);
 static LIST_HEAD(pers_list);
 static DEFINE_SPINLOCK(pers_lock);
 
+static struct kobj_type md_ktype;
+
 struct md_cluster_operations *md_cluster_ops;
 EXPORT_SYMBOL(md_cluster_ops);
 struct module *md_cluster_mod;
@@ -510,11 +512,6 @@ static void mddev_delayed_delete(struct work_struct *ws);
 
 static void mddev_put(struct mddev *mddev)
 {
-        struct bio_set bs, sync_bs;
-
-        memset(&bs, 0, sizeof(bs));
-        memset(&sync_bs, 0, sizeof(sync_bs));
-
         if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
                 return;
         if (!mddev->raid_disks && list_empty(&mddev->disks) &&
@@ -522,30 +519,23 @@ static void mddev_put(struct mddev *mddev)
                 /* Array is not configured at all, and not held active,
                  * so destroy it */
                 list_del_init(&mddev->all_mddevs);
-                bs = mddev->bio_set;
-                sync_bs = mddev->sync_set;
-                memset(&mddev->bio_set, 0, sizeof(mddev->bio_set));
-                memset(&mddev->sync_set, 0, sizeof(mddev->sync_set));
-                if (mddev->gendisk) {
-                        /* We did a probe so need to clean up. Call
-                         * queue_work inside the spinlock so that
-                         * flush_workqueue() after mddev_find will
-                         * succeed in waiting for the work to be done.
-                         */
-                        INIT_WORK(&mddev->del_work, mddev_delayed_delete);
-                        queue_work(md_misc_wq, &mddev->del_work);
-                } else
-                        kfree(mddev);
+
+                /*
+                 * Call queue_work inside the spinlock so that
+                 * flush_workqueue() after mddev_find will succeed in waiting
+                 * for the work to be done.
+                 */
+                INIT_WORK(&mddev->del_work, mddev_delayed_delete);
+                queue_work(md_misc_wq, &mddev->del_work);
         }
         spin_unlock(&all_mddevs_lock);
-        bioset_exit(&bs);
-        bioset_exit(&sync_bs);
 }
 
 static void md_safemode_timeout(struct timer_list *t);
 
 void mddev_init(struct mddev *mddev)
 {
+        kobject_init(&mddev->kobj, &md_ktype);
         mutex_init(&mddev->open_mutex);
         mutex_init(&mddev->reconfig_mutex);
         mutex_init(&mddev->bitmap_info.mutex);
@@ -5215,6 +5205,8 @@ static void md_free(struct kobject *ko)
                 put_disk(mddev->gendisk);
         percpu_ref_exit(&mddev->writes_pending);
 
+        bioset_exit(&mddev->bio_set);
+        bioset_exit(&mddev->sync_set);
         kfree(mddev);
 }
 
@@ -5348,8 +5340,7 @@ static int md_alloc(dev_t dev, char *name)
         mutex_lock(&mddev->open_mutex);
         add_disk(disk);
 
-        error = kobject_init_and_add(&mddev->kobj, &md_ktype,
-                                     &disk_to_dev(disk)->kobj, "%s", "md");
+        error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
         if (error) {
                 /* This isn't possible, but as kobject_init_and_add is marked
                  * __must_check, we must do something with the result
@@ -5506,7 +5497,7 @@ int md_run(struct mddev *mddev)
         if (!bioset_initialized(&mddev->sync_set)) {
                 err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
                 if (err)
-                        goto abort;
+                        return err;
         }
 
         spin_lock(&pers_lock);
@@ -5519,8 +5510,7 @@ int md_run(struct mddev *mddev)
                 else
                         pr_warn("md: personality for level %s is not loaded!\n",
                                 mddev->clevel);
-                err = -EINVAL;
-                goto abort;
+                return -EINVAL;
         }
         spin_unlock(&pers_lock);
         if (mddev->level != pers->level) {
@@ -5533,8 +5523,7 @@ int md_run(struct mddev *mddev)
             pers->start_reshape == NULL) {
                 /* This personality cannot handle reshaping... */
                 module_put(pers->owner);
-                err = -EINVAL;
-                goto abort;
+                return -EINVAL;
         }
 
         if (pers->sync_request) {
@@ -5603,7 +5592,7 @@ int md_run(struct mddev *mddev)
                 mddev->private = NULL;
                 module_put(pers->owner);
                 bitmap_destroy(mddev);
-                goto abort;
+                return err;
         }
         if (mddev->queue) {
                 bool nonrot = true;
@@ -5665,12 +5654,6 @@ int md_run(struct mddev *mddev)
         sysfs_notify_dirent_safe(mddev->sysfs_action);
         sysfs_notify(&mddev->kobj, NULL, "degraded");
         return 0;
-
-abort:
-        bioset_exit(&mddev->bio_set);
-        bioset_exit(&mddev->sync_set);
-
-        return err;
 }
 EXPORT_SYMBOL_GPL(md_run);
 