author     Dan McLeran <daniel.mcleran@intel.com>        2014-04-07 19:10:11 -0400
committer  Matthew Wilcox <matthew.r.wilcox@intel.com>   2014-04-10 17:04:46 -0400
commit     b9afca3efb18a9b8392cb544a3e29e8b1168400c
tree       44bb0145248950a7beedad5e592dfc9ff7ecfa4d /drivers/block
parent     b355084a891985d4cd0ca23b1a83366af2c4232d
NVMe: Start-stop nvme_thread during device add-remove.
Done to ensure nvme_thread is not running when there
are no devices to poll.
Signed-off-by: Dan McLeran <daniel.mcleran@intel.com>
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
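
The pattern being introduced is a start-on-first/stop-on-last lifecycle for the shared polling kthread: nvme_dev_start() creates nvme_thread when it adds the first device to an empty dev_list (and wakes nvme_kthread_wait so concurrently probing devices can proceed), while nvme_dev_list_remove() stops the thread when the last device leaves the list. As a rough illustration only, the userspace C sketch below models that lifecycle with pthreads, using a mutex and condition variable in place of dev_list_lock, nvme_kthread_wait, kthread_run() and kthread_stop(); every identifier in it (device_add, device_remove, poller_fn, ...) is hypothetical and not part of the driver.

/*
 * Illustrative userspace analogue of the pattern in this patch, not part
 * of the kernel code: a shared poller thread is created when the first
 * device is added and stopped when the last one is removed.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;   /* ~ dev_list_lock */
static pthread_cond_t  thread_ready = PTHREAD_COND_INITIALIZER; /* ~ nvme_kthread_wait */
static pthread_t poller;
static int ndevices;            /* stand-in for the length of dev_list */
static bool running, stopping;

static void *poller_fn(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&dev_lock);
        while (!stopping) {
                pthread_mutex_unlock(&dev_lock);
                usleep(100 * 1000);     /* "poll" the registered devices */
                pthread_mutex_lock(&dev_lock);
        }
        pthread_mutex_unlock(&dev_lock);
        return NULL;
}

static int device_add(void)             /* ~ nvme_dev_start() */
{
        pthread_mutex_lock(&dev_lock);
        bool first = (ndevices++ == 0);
        if (first) {                    /* first device: start the poller */
                stopping = false;
                if (pthread_create(&poller, NULL, poller_fn, NULL)) {
                        ndevices--;
                        pthread_mutex_unlock(&dev_lock);
                        return -1;
                }
                running = true;
                pthread_cond_broadcast(&thread_ready);  /* ~ wake_up() */
        } else {
                /* ~ wait_event_killable(): wait until the poller exists */
                while (!running)
                        pthread_cond_wait(&thread_ready, &dev_lock);
        }
        pthread_mutex_unlock(&dev_lock);
        return 0;
}

static void device_remove(void)         /* ~ nvme_dev_shutdown() */
{
        pthread_mutex_lock(&dev_lock);
        bool last = (--ndevices == 0) && running;
        if (last) {                     /* last device: stop the poller */
                stopping = true;
                running = false;
        }
        pthread_mutex_unlock(&dev_lock);
        if (last)
                pthread_join(poller, NULL);     /* ~ kthread_stop() */
}

int main(void)
{
        device_add();
        device_add();
        device_remove();
        device_remove();
        printf("poller started once, stopped once\n");
        return 0;
}

In the patch itself the same handshake is what lets a second nvme_dev_start() caller sleep in wait_event_killable() until the first caller has published a valid nvme_thread pointer before both proceed to set up their I/O queues.
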
Diffstat (limited to 'drivers/block')
 -rw-r--r--  drivers/block/nvme-core.c | 56
 1 file changed, 42 insertions(+), 14 deletions(-)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 7c57b1d955a1..2d69bfec95a4 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -64,6 +64,7 @@ static DEFINE_SPINLOCK(dev_list_lock);
 static LIST_HEAD(dev_list);
 static struct task_struct *nvme_thread;
 static struct workqueue_struct *nvme_workq;
+static wait_queue_head_t nvme_kthread_wait;
 
 static void nvme_reset_failed_dev(struct work_struct *ws);
 
@@ -2374,6 +2375,26 @@ static void nvme_disable_io_queues(struct nvme_dev *dev)
 	kthread_stop(kworker_task);
 }
 
+/*
+ * Remove the node from the device list and check
+ * for whether or not we need to stop the nvme_thread.
+ */
+static void nvme_dev_list_remove(struct nvme_dev *dev)
+{
+	struct task_struct *tmp = NULL;
+
+	spin_lock(&dev_list_lock);
+	list_del_init(&dev->node);
+	if (list_empty(&dev_list) && !IS_ERR_OR_NULL(nvme_thread)) {
+		tmp = nvme_thread;
+		nvme_thread = NULL;
+	}
+	spin_unlock(&dev_list_lock);
+
+	if (tmp)
+		kthread_stop(tmp);
+}
+
 static void nvme_dev_shutdown(struct nvme_dev *dev)
 {
 	int i;
@@ -2381,9 +2402,7 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
 	dev->initialized = 0;
 	unregister_hotcpu_notifier(&dev->nb);
 
-	spin_lock(&dev_list_lock);
-	list_del_init(&dev->node);
-	spin_unlock(&dev_list_lock);
+	nvme_dev_list_remove(dev);
 
 	if (!dev->bar || (dev->bar && readl(&dev->bar->csts) == -1)) {
 		for (i = dev->queue_count - 1; i >= 0; i--) {
@@ -2524,6 +2543,7 @@ static const struct file_operations nvme_dev_fops = {
 static int nvme_dev_start(struct nvme_dev *dev)
 {
 	int result;
+	bool start_thread = false;
 
 	result = nvme_dev_map(dev);
 	if (result)
@@ -2534,9 +2554,24 @@ static int nvme_dev_start(struct nvme_dev *dev)
 		goto unmap;
 
 	spin_lock(&dev_list_lock);
+	if (list_empty(&dev_list) && IS_ERR_OR_NULL(nvme_thread)) {
+		start_thread = true;
+		nvme_thread = NULL;
+	}
 	list_add(&dev->node, &dev_list);
 	spin_unlock(&dev_list_lock);
 
+	if (start_thread) {
+		nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
+		wake_up(&nvme_kthread_wait);
+	} else
+		wait_event_killable(nvme_kthread_wait, nvme_thread);
+
+	if (IS_ERR_OR_NULL(nvme_thread)) {
+		result = nvme_thread ? PTR_ERR(nvme_thread) : -EINTR;
+		goto disable;
+	}
+
 	result = nvme_setup_io_queues(dev);
 	if (result && result != -EBUSY)
 		goto disable;
@@ -2545,9 +2580,7 @@ static int nvme_dev_start(struct nvme_dev *dev)
 
  disable:
 	nvme_disable_queue(dev, 0);
-	spin_lock(&dev_list_lock);
-	list_del_init(&dev->node);
-	spin_unlock(&dev_list_lock);
+	nvme_dev_list_remove(dev);
  unmap:
 	nvme_dev_unmap(dev);
 	return result;
@@ -2776,14 +2809,11 @@ static int __init nvme_init(void)
 {
 	int result;
 
-	nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
-	if (IS_ERR(nvme_thread))
-		return PTR_ERR(nvme_thread);
+	init_waitqueue_head(&nvme_kthread_wait);
 
-	result = -ENOMEM;
 	nvme_workq = create_singlethread_workqueue("nvme");
 	if (!nvme_workq)
-		goto kill_kthread;
+		return -ENOMEM;
 
 	result = register_blkdev(nvme_major, "nvme");
 	if (result < 0)
@@ -2800,8 +2830,6 @@ static int __init nvme_init(void)
 	unregister_blkdev(nvme_major, "nvme");
  kill_workq:
 	destroy_workqueue(nvme_workq);
- kill_kthread:
-	kthread_stop(nvme_thread);
 	return result;
 }
 
@@ -2810,7 +2838,7 @@ static void __exit nvme_exit(void)
 	pci_unregister_driver(&nvme_driver);
 	unregister_blkdev(nvme_major, "nvme");
 	destroy_workqueue(nvme_workq);
-	kthread_stop(nvme_thread);
+	BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
 }
 
 MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");