about summary refs log tree commit diff stats
path: root/drivers/nvme/host/pci.c
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2016-04-26 07:51:59 -0400
committerJens Axboe <axboe@fb.com>2016-05-02 11:09:24 -0400
commit5955be2144b3b56182e2175e7e3d2ddf27fb485d (patch)
tree32a52527211b8d0c2353f12bc53be31c8a76ed03 /drivers/nvme/host/pci.c
parent92911a55d42084cd285250c275d9f238783638c2 (diff)
nvme: move namespace scanning to core
Move the scan work item and surrounding code to the common code. For now we need a new finish_scan method to allow the PCI driver to set the irq affinity hints, but I have plans in the works to obsolete this as well. Note that this moves the namespace scanning from nvme_wq to the system workqueue, but as we don't rely on namespace scanning to finish from reset or I/O this should be fine. Signed-off-by: Christoph Hellwig <hch@lst.de> Acked-by Jon Derrick: <jonathan.derrick@intel.com> Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'drivers/nvme/host/pci.c')
-rw-r--r--drivers/nvme/host/pci.c32
1 file changed, 5 insertions, 27 deletions
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 9b2deba0bc91..15bc33755324 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -92,7 +92,6 @@ struct nvme_dev {
92 struct msix_entry *entry; 92 struct msix_entry *entry;
93 void __iomem *bar; 93 void __iomem *bar;
94 struct work_struct reset_work; 94 struct work_struct reset_work;
95 struct work_struct scan_work;
96 struct work_struct remove_work; 95 struct work_struct remove_work;
97 struct work_struct async_work; 96 struct work_struct async_work;
98 struct timer_list watchdog_timer; 97 struct timer_list watchdog_timer;
@@ -266,16 +265,6 @@ static int nvme_init_request(void *data, struct request *req,
266 return 0; 265 return 0;
267} 266}
268 267
269static void nvme_queue_scan(struct nvme_dev *dev)
270{
271 /*
272 * Do not queue new scan work when a controller is reset during
273 * removal.
274 */
275 if (dev->ctrl.state == NVME_CTRL_LIVE)
276 queue_work(nvme_workq, &dev->scan_work);
277}
278
279static void nvme_complete_async_event(struct nvme_dev *dev, 268static void nvme_complete_async_event(struct nvme_dev *dev,
280 struct nvme_completion *cqe) 269 struct nvme_completion *cqe)
281{ 270{
@@ -293,7 +282,7 @@ static void nvme_complete_async_event(struct nvme_dev *dev,
293 switch (result & 0xff07) { 282 switch (result & 0xff07) {
294 case NVME_AER_NOTICE_NS_CHANGED: 283 case NVME_AER_NOTICE_NS_CHANGED:
295 dev_info(dev->ctrl.device, "rescanning\n"); 284 dev_info(dev->ctrl.device, "rescanning\n");
296 nvme_queue_scan(dev); 285 nvme_queue_scan(&dev->ctrl);
297 default: 286 default:
298 dev_warn(dev->ctrl.device, "async event result %08x\n", result); 287 dev_warn(dev->ctrl.device, "async event result %08x\n", result);
299 } 288 }
@@ -1520,8 +1509,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
1520 return result; 1509 return result;
1521} 1510}
1522 1511
1523static void nvme_set_irq_hints(struct nvme_dev *dev) 1512static void nvme_pci_post_scan(struct nvme_ctrl *ctrl)
1524{ 1513{
1514 struct nvme_dev *dev = to_nvme_dev(ctrl);
1525 struct nvme_queue *nvmeq; 1515 struct nvme_queue *nvmeq;
1526 int i; 1516 int i;
1527 1517
@@ -1536,16 +1526,6 @@ static void nvme_set_irq_hints(struct nvme_dev *dev)
1536 } 1526 }
1537} 1527}
1538 1528
1539static void nvme_dev_scan(struct work_struct *work)
1540{
1541 struct nvme_dev *dev = container_of(work, struct nvme_dev, scan_work);
1542
1543 if (!dev->tagset.tags)
1544 return;
1545 nvme_scan_namespaces(&dev->ctrl);
1546 nvme_set_irq_hints(dev);
1547}
1548
1549static void nvme_del_queue_end(struct request *req, int error) 1529static void nvme_del_queue_end(struct request *req, int error)
1550{ 1530{
1551 struct nvme_queue *nvmeq = req->end_io_data; 1531 struct nvme_queue *nvmeq = req->end_io_data;
@@ -1894,7 +1874,7 @@ static void nvme_reset_work(struct work_struct *work)
1894 } 1874 }
1895 1875
1896 if (dev->online_queues > 1) 1876 if (dev->online_queues > 1)
1897 nvme_queue_scan(dev); 1877 nvme_queue_scan(&dev->ctrl);
1898 return; 1878 return;
1899 1879
1900 out: 1880 out:
@@ -1954,6 +1934,7 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
1954 .reg_read64 = nvme_pci_reg_read64, 1934 .reg_read64 = nvme_pci_reg_read64,
1955 .reset_ctrl = nvme_pci_reset_ctrl, 1935 .reset_ctrl = nvme_pci_reset_ctrl,
1956 .free_ctrl = nvme_pci_free_ctrl, 1936 .free_ctrl = nvme_pci_free_ctrl,
1937 .post_scan = nvme_pci_post_scan,
1957}; 1938};
1958 1939
1959static int nvme_dev_map(struct nvme_dev *dev) 1940static int nvme_dev_map(struct nvme_dev *dev)
@@ -2005,7 +1986,6 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2005 if (result) 1986 if (result)
2006 goto free; 1987 goto free;
2007 1988
2008 INIT_WORK(&dev->scan_work, nvme_dev_scan);
2009 INIT_WORK(&dev->reset_work, nvme_reset_work); 1989 INIT_WORK(&dev->reset_work, nvme_reset_work);
2010 INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work); 1990 INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
2011 INIT_WORK(&dev->async_work, nvme_async_event_work); 1991 INIT_WORK(&dev->async_work, nvme_async_event_work);
@@ -2071,8 +2051,6 @@ static void nvme_remove(struct pci_dev *pdev)
2071 2051
2072 pci_set_drvdata(pdev, NULL); 2052 pci_set_drvdata(pdev, NULL);
2073 flush_work(&dev->async_work); 2053 flush_work(&dev->async_work);
2074 flush_work(&dev->scan_work);
2075 nvme_remove_namespaces(&dev->ctrl);
2076 nvme_uninit_ctrl(&dev->ctrl); 2054 nvme_uninit_ctrl(&dev->ctrl);
2077 nvme_dev_disable(dev, true); 2055 nvme_dev_disable(dev, true);
2078 flush_work(&dev->reset_work); 2056 flush_work(&dev->reset_work);