diff options
author | Christoph Hellwig <hch@lst.de> | 2015-10-22 08:03:33 -0400 |
---|---|---|
committer | Jens Axboe <axboe@fb.com> | 2015-12-22 11:38:23 -0500 |
commit | 7385014c073263b077442439299fad013edd4409 (patch) | |
tree | 862c56f67438962389649e8b706f4026f0c7eb0e | |
parent | 749941f2365db8198b5d75c83a575ee6e55bf03b (diff) |
nvme: only add a controller to dev_list after it's been fully initialized
Without this we can easily get bad dereferences on nvmeq->d_db when the nvme
kthread tries to poll the CQs for controllers that are in half initialized
state.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
-rw-r--r-- | drivers/nvme/host/pci.c | 51 |
1 file changed, 30 insertions, 21 deletions
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 1f92b328522a..d82f08d671e6 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
@@ -1994,6 +1994,30 @@ static void nvme_disable_io_queues(struct nvme_dev *dev) | |||
1994 | kthread_stop(kworker_task); | 1994 | kthread_stop(kworker_task); |
1995 | } | 1995 | } |
1996 | 1996 | ||
1997 | static int nvme_dev_list_add(struct nvme_dev *dev) | ||
1998 | { | ||
1999 | bool start_thread = false; | ||
2000 | |||
2001 | spin_lock(&dev_list_lock); | ||
2002 | if (list_empty(&dev_list) && IS_ERR_OR_NULL(nvme_thread)) { | ||
2003 | start_thread = true; | ||
2004 | nvme_thread = NULL; | ||
2005 | } | ||
2006 | list_add(&dev->node, &dev_list); | ||
2007 | spin_unlock(&dev_list_lock); | ||
2008 | |||
2009 | if (start_thread) { | ||
2010 | nvme_thread = kthread_run(nvme_kthread, NULL, "nvme"); | ||
2011 | wake_up_all(&nvme_kthread_wait); | ||
2012 | } else | ||
2013 | wait_event_killable(nvme_kthread_wait, nvme_thread); | ||
2014 | |||
2015 | if (IS_ERR_OR_NULL(nvme_thread)) | ||
2016 | return nvme_thread ? PTR_ERR(nvme_thread) : -EINTR; | ||
2017 | |||
2018 | return 0; | ||
2019 | } | ||
2020 | |||
1997 | /* | 2021 | /* |
1998 | * Remove the node from the device list and check | 2022 | * Remove the node from the device list and check |
1999 | * for whether or not we need to stop the nvme_thread. | 2023 | * for whether or not we need to stop the nvme_thread. |
@@ -2109,7 +2133,6 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl) | |||
2109 | static void nvme_probe_work(struct work_struct *work) | 2133 | static void nvme_probe_work(struct work_struct *work) |
2110 | { | 2134 | { |
2111 | struct nvme_dev *dev = container_of(work, struct nvme_dev, probe_work); | 2135 | struct nvme_dev *dev = container_of(work, struct nvme_dev, probe_work); |
2112 | bool start_thread = false; | ||
2113 | int result; | 2136 | int result; |
2114 | 2137 | ||
2115 | result = nvme_dev_map(dev); | 2138 | result = nvme_dev_map(dev); |
@@ -2120,25 +2143,6 @@ static void nvme_probe_work(struct work_struct *work) | |||
2120 | if (result) | 2143 | if (result) |
2121 | goto unmap; | 2144 | goto unmap; |
2122 | 2145 | ||
2123 | spin_lock(&dev_list_lock); | ||
2124 | if (list_empty(&dev_list) && IS_ERR_OR_NULL(nvme_thread)) { | ||
2125 | start_thread = true; | ||
2126 | nvme_thread = NULL; | ||
2127 | } | ||
2128 | list_add(&dev->node, &dev_list); | ||
2129 | spin_unlock(&dev_list_lock); | ||
2130 | |||
2131 | if (start_thread) { | ||
2132 | nvme_thread = kthread_run(nvme_kthread, NULL, "nvme"); | ||
2133 | wake_up_all(&nvme_kthread_wait); | ||
2134 | } else | ||
2135 | wait_event_killable(nvme_kthread_wait, nvme_thread); | ||
2136 | |||
2137 | if (IS_ERR_OR_NULL(nvme_thread)) { | ||
2138 | result = nvme_thread ? PTR_ERR(nvme_thread) : -EINTR; | ||
2139 | goto disable; | ||
2140 | } | ||
2141 | |||
2142 | nvme_init_queue(dev->queues[0], 0); | 2146 | nvme_init_queue(dev->queues[0], 0); |
2143 | result = nvme_alloc_admin_tags(dev); | 2147 | result = nvme_alloc_admin_tags(dev); |
2144 | if (result) | 2148 | if (result) |
@@ -2154,6 +2158,10 @@ static void nvme_probe_work(struct work_struct *work) | |||
2154 | 2158 | ||
2155 | dev->ctrl.event_limit = 1; | 2159 | dev->ctrl.event_limit = 1; |
2156 | 2160 | ||
2161 | result = nvme_dev_list_add(dev); | ||
2162 | if (result) | ||
2163 | goto remove; | ||
2164 | |||
2157 | /* | 2165 | /* |
2158 | * Keep the controller around but remove all namespaces if we don't have | 2166 | * Keep the controller around but remove all namespaces if we don't have |
2159 | * any working I/O queue. | 2167 | * any working I/O queue. |
@@ -2168,6 +2176,8 @@ static void nvme_probe_work(struct work_struct *work) | |||
2168 | 2176 | ||
2169 | return; | 2177 | return; |
2170 | 2178 | ||
2179 | remove: | ||
2180 | nvme_dev_list_remove(dev); | ||
2171 | free_tags: | 2181 | free_tags: |
2172 | nvme_dev_remove_admin(dev); | 2182 | nvme_dev_remove_admin(dev); |
2173 | blk_put_queue(dev->ctrl.admin_q); | 2183 | blk_put_queue(dev->ctrl.admin_q); |
@@ -2175,7 +2185,6 @@ static void nvme_probe_work(struct work_struct *work) | |||
2175 | dev->queues[0]->tags = NULL; | 2185 | dev->queues[0]->tags = NULL; |
2176 | disable: | 2186 | disable: |
2177 | nvme_disable_queue(dev, 0); | 2187 | nvme_disable_queue(dev, 0); |
2178 | nvme_dev_list_remove(dev); | ||
2179 | unmap: | 2188 | unmap: |
2180 | nvme_dev_unmap(dev); | 2189 | nvme_dev_unmap(dev); |
2181 | out: | 2190 | out: |