diff options
author | Christoph Hellwig <hch@lst.de> | 2016-04-26 07:51:59 -0400 |
---|---|---|
committer | Jens Axboe <axboe@fb.com> | 2016-05-02 11:09:24 -0400 |
commit | 5955be2144b3b56182e2175e7e3d2ddf27fb485d (patch) | |
tree | 32a52527211b8d0c2353f12bc53be31c8a76ed03 /drivers | |
parent | 92911a55d42084cd285250c275d9f238783638c2 (diff) |
nvme: move namespace scanning to core
Move the scan work item and surrounding code to the common code. For now
we need a new finish_scan method to allow the PCI driver to set the
irq affinity hints, but I have plans in the works to obsolete this as well.
Note that this moves the namespace scanning from nvme_wq to the system
workqueue, but as we don't rely on namespace scanning to finish from reset
or I/O this should be fine.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jon Derrick <jonathan.derrick@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/nvme/host/core.c | 30 | ||||
-rw-r--r-- | drivers/nvme/host/nvme.h | 4 | ||||
-rw-r--r-- | drivers/nvme/host/pci.c | 32 |
3 files changed, 34 insertions, 32 deletions
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index bd8f598d0c37..899bb4181495 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c | |||
@@ -1518,7 +1518,7 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn) | |||
1518 | return ret; | 1518 | return ret; |
1519 | } | 1519 | } |
1520 | 1520 | ||
1521 | static void __nvme_scan_namespaces(struct nvme_ctrl *ctrl, unsigned nn) | 1521 | static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn) |
1522 | { | 1522 | { |
1523 | struct nvme_ns *ns, *next; | 1523 | struct nvme_ns *ns, *next; |
1524 | unsigned i; | 1524 | unsigned i; |
@@ -1534,11 +1534,16 @@ static void __nvme_scan_namespaces(struct nvme_ctrl *ctrl, unsigned nn) | |||
1534 | } | 1534 | } |
1535 | } | 1535 | } |
1536 | 1536 | ||
1537 | void nvme_scan_namespaces(struct nvme_ctrl *ctrl) | 1537 | static void nvme_scan_work(struct work_struct *work) |
1538 | { | 1538 | { |
1539 | struct nvme_ctrl *ctrl = | ||
1540 | container_of(work, struct nvme_ctrl, scan_work); | ||
1539 | struct nvme_id_ctrl *id; | 1541 | struct nvme_id_ctrl *id; |
1540 | unsigned nn; | 1542 | unsigned nn; |
1541 | 1543 | ||
1544 | if (ctrl->state != NVME_CTRL_LIVE) | ||
1545 | return; | ||
1546 | |||
1542 | if (nvme_identify_ctrl(ctrl, &id)) | 1547 | if (nvme_identify_ctrl(ctrl, &id)) |
1543 | return; | 1548 | return; |
1544 | 1549 | ||
@@ -1549,13 +1554,26 @@ void nvme_scan_namespaces(struct nvme_ctrl *ctrl) | |||
1549 | if (!nvme_scan_ns_list(ctrl, nn)) | 1554 | if (!nvme_scan_ns_list(ctrl, nn)) |
1550 | goto done; | 1555 | goto done; |
1551 | } | 1556 | } |
1552 | __nvme_scan_namespaces(ctrl, le32_to_cpup(&id->nn)); | 1557 | nvme_scan_ns_sequential(ctrl, nn); |
1553 | done: | 1558 | done: |
1554 | list_sort(NULL, &ctrl->namespaces, ns_cmp); | 1559 | list_sort(NULL, &ctrl->namespaces, ns_cmp); |
1555 | mutex_unlock(&ctrl->namespaces_mutex); | 1560 | mutex_unlock(&ctrl->namespaces_mutex); |
1556 | kfree(id); | 1561 | kfree(id); |
1562 | |||
1563 | if (ctrl->ops->post_scan) | ||
1564 | ctrl->ops->post_scan(ctrl); | ||
1557 | } | 1565 | } |
1558 | EXPORT_SYMBOL_GPL(nvme_scan_namespaces); | 1566 | |
1567 | void nvme_queue_scan(struct nvme_ctrl *ctrl) | ||
1568 | { | ||
1569 | /* | ||
1570 | * Do not queue new scan work when a controller is reset during | ||
1571 | * removal. | ||
1572 | */ | ||
1573 | if (ctrl->state == NVME_CTRL_LIVE) | ||
1574 | schedule_work(&ctrl->scan_work); | ||
1575 | } | ||
1576 | EXPORT_SYMBOL_GPL(nvme_queue_scan); | ||
1559 | 1577 | ||
1560 | void nvme_remove_namespaces(struct nvme_ctrl *ctrl) | 1578 | void nvme_remove_namespaces(struct nvme_ctrl *ctrl) |
1561 | { | 1579 | { |
@@ -1597,6 +1615,9 @@ static void nvme_release_instance(struct nvme_ctrl *ctrl) | |||
1597 | 1615 | ||
1598 | void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) | 1616 | void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) |
1599 | { | 1617 | { |
1618 | flush_work(&ctrl->scan_work); | ||
1619 | nvme_remove_namespaces(ctrl); | ||
1620 | |||
1600 | device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance)); | 1621 | device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance)); |
1601 | 1622 | ||
1602 | spin_lock(&dev_list_lock); | 1623 | spin_lock(&dev_list_lock); |
@@ -1640,6 +1661,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, | |||
1640 | ctrl->dev = dev; | 1661 | ctrl->dev = dev; |
1641 | ctrl->ops = ops; | 1662 | ctrl->ops = ops; |
1642 | ctrl->quirks = quirks; | 1663 | ctrl->quirks = quirks; |
1664 | INIT_WORK(&ctrl->scan_work, nvme_scan_work); | ||
1643 | 1665 | ||
1644 | ret = nvme_set_instance(ctrl); | 1666 | ret = nvme_set_instance(ctrl); |
1645 | if (ret) | 1667 | if (ret) |
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 4135626a3d6f..9b63e719318a 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h | |||
@@ -108,6 +108,7 @@ struct nvme_ctrl { | |||
108 | u32 vs; | 108 | u32 vs; |
109 | bool subsystem; | 109 | bool subsystem; |
110 | unsigned long quirks; | 110 | unsigned long quirks; |
111 | struct work_struct scan_work; | ||
111 | }; | 112 | }; |
112 | 113 | ||
113 | /* | 114 | /* |
@@ -147,6 +148,7 @@ struct nvme_ctrl_ops { | |||
147 | int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val); | 148 | int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val); |
148 | int (*reset_ctrl)(struct nvme_ctrl *ctrl); | 149 | int (*reset_ctrl)(struct nvme_ctrl *ctrl); |
149 | void (*free_ctrl)(struct nvme_ctrl *ctrl); | 150 | void (*free_ctrl)(struct nvme_ctrl *ctrl); |
151 | void (*post_scan)(struct nvme_ctrl *ctrl); | ||
150 | }; | 152 | }; |
151 | 153 | ||
152 | static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl) | 154 | static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl) |
@@ -207,7 +209,7 @@ void nvme_uninit_ctrl(struct nvme_ctrl *ctrl); | |||
207 | void nvme_put_ctrl(struct nvme_ctrl *ctrl); | 209 | void nvme_put_ctrl(struct nvme_ctrl *ctrl); |
208 | int nvme_init_identify(struct nvme_ctrl *ctrl); | 210 | int nvme_init_identify(struct nvme_ctrl *ctrl); |
209 | 211 | ||
210 | void nvme_scan_namespaces(struct nvme_ctrl *ctrl); | 212 | void nvme_queue_scan(struct nvme_ctrl *ctrl); |
211 | void nvme_remove_namespaces(struct nvme_ctrl *ctrl); | 213 | void nvme_remove_namespaces(struct nvme_ctrl *ctrl); |
212 | 214 | ||
213 | void nvme_stop_queues(struct nvme_ctrl *ctrl); | 215 | void nvme_stop_queues(struct nvme_ctrl *ctrl); |
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 9b2deba0bc91..15bc33755324 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
@@ -92,7 +92,6 @@ struct nvme_dev { | |||
92 | struct msix_entry *entry; | 92 | struct msix_entry *entry; |
93 | void __iomem *bar; | 93 | void __iomem *bar; |
94 | struct work_struct reset_work; | 94 | struct work_struct reset_work; |
95 | struct work_struct scan_work; | ||
96 | struct work_struct remove_work; | 95 | struct work_struct remove_work; |
97 | struct work_struct async_work; | 96 | struct work_struct async_work; |
98 | struct timer_list watchdog_timer; | 97 | struct timer_list watchdog_timer; |
@@ -266,16 +265,6 @@ static int nvme_init_request(void *data, struct request *req, | |||
266 | return 0; | 265 | return 0; |
267 | } | 266 | } |
268 | 267 | ||
269 | static void nvme_queue_scan(struct nvme_dev *dev) | ||
270 | { | ||
271 | /* | ||
272 | * Do not queue new scan work when a controller is reset during | ||
273 | * removal. | ||
274 | */ | ||
275 | if (dev->ctrl.state == NVME_CTRL_LIVE) | ||
276 | queue_work(nvme_workq, &dev->scan_work); | ||
277 | } | ||
278 | |||
279 | static void nvme_complete_async_event(struct nvme_dev *dev, | 268 | static void nvme_complete_async_event(struct nvme_dev *dev, |
280 | struct nvme_completion *cqe) | 269 | struct nvme_completion *cqe) |
281 | { | 270 | { |
@@ -293,7 +282,7 @@ static void nvme_complete_async_event(struct nvme_dev *dev, | |||
293 | switch (result & 0xff07) { | 282 | switch (result & 0xff07) { |
294 | case NVME_AER_NOTICE_NS_CHANGED: | 283 | case NVME_AER_NOTICE_NS_CHANGED: |
295 | dev_info(dev->ctrl.device, "rescanning\n"); | 284 | dev_info(dev->ctrl.device, "rescanning\n"); |
296 | nvme_queue_scan(dev); | 285 | nvme_queue_scan(&dev->ctrl); |
297 | default: | 286 | default: |
298 | dev_warn(dev->ctrl.device, "async event result %08x\n", result); | 287 | dev_warn(dev->ctrl.device, "async event result %08x\n", result); |
299 | } | 288 | } |
@@ -1520,8 +1509,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) | |||
1520 | return result; | 1509 | return result; |
1521 | } | 1510 | } |
1522 | 1511 | ||
1523 | static void nvme_set_irq_hints(struct nvme_dev *dev) | 1512 | static void nvme_pci_post_scan(struct nvme_ctrl *ctrl) |
1524 | { | 1513 | { |
1514 | struct nvme_dev *dev = to_nvme_dev(ctrl); | ||
1525 | struct nvme_queue *nvmeq; | 1515 | struct nvme_queue *nvmeq; |
1526 | int i; | 1516 | int i; |
1527 | 1517 | ||
@@ -1536,16 +1526,6 @@ static void nvme_set_irq_hints(struct nvme_dev *dev) | |||
1536 | } | 1526 | } |
1537 | } | 1527 | } |
1538 | 1528 | ||
1539 | static void nvme_dev_scan(struct work_struct *work) | ||
1540 | { | ||
1541 | struct nvme_dev *dev = container_of(work, struct nvme_dev, scan_work); | ||
1542 | |||
1543 | if (!dev->tagset.tags) | ||
1544 | return; | ||
1545 | nvme_scan_namespaces(&dev->ctrl); | ||
1546 | nvme_set_irq_hints(dev); | ||
1547 | } | ||
1548 | |||
1549 | static void nvme_del_queue_end(struct request *req, int error) | 1529 | static void nvme_del_queue_end(struct request *req, int error) |
1550 | { | 1530 | { |
1551 | struct nvme_queue *nvmeq = req->end_io_data; | 1531 | struct nvme_queue *nvmeq = req->end_io_data; |
@@ -1894,7 +1874,7 @@ static void nvme_reset_work(struct work_struct *work) | |||
1894 | } | 1874 | } |
1895 | 1875 | ||
1896 | if (dev->online_queues > 1) | 1876 | if (dev->online_queues > 1) |
1897 | nvme_queue_scan(dev); | 1877 | nvme_queue_scan(&dev->ctrl); |
1898 | return; | 1878 | return; |
1899 | 1879 | ||
1900 | out: | 1880 | out: |
@@ -1954,6 +1934,7 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { | |||
1954 | .reg_read64 = nvme_pci_reg_read64, | 1934 | .reg_read64 = nvme_pci_reg_read64, |
1955 | .reset_ctrl = nvme_pci_reset_ctrl, | 1935 | .reset_ctrl = nvme_pci_reset_ctrl, |
1956 | .free_ctrl = nvme_pci_free_ctrl, | 1936 | .free_ctrl = nvme_pci_free_ctrl, |
1937 | .post_scan = nvme_pci_post_scan, | ||
1957 | }; | 1938 | }; |
1958 | 1939 | ||
1959 | static int nvme_dev_map(struct nvme_dev *dev) | 1940 | static int nvme_dev_map(struct nvme_dev *dev) |
@@ -2005,7 +1986,6 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
2005 | if (result) | 1986 | if (result) |
2006 | goto free; | 1987 | goto free; |
2007 | 1988 | ||
2008 | INIT_WORK(&dev->scan_work, nvme_dev_scan); | ||
2009 | INIT_WORK(&dev->reset_work, nvme_reset_work); | 1989 | INIT_WORK(&dev->reset_work, nvme_reset_work); |
2010 | INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work); | 1990 | INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work); |
2011 | INIT_WORK(&dev->async_work, nvme_async_event_work); | 1991 | INIT_WORK(&dev->async_work, nvme_async_event_work); |
@@ -2071,8 +2051,6 @@ static void nvme_remove(struct pci_dev *pdev) | |||
2071 | 2051 | ||
2072 | pci_set_drvdata(pdev, NULL); | 2052 | pci_set_drvdata(pdev, NULL); |
2073 | flush_work(&dev->async_work); | 2053 | flush_work(&dev->async_work); |
2074 | flush_work(&dev->scan_work); | ||
2075 | nvme_remove_namespaces(&dev->ctrl); | ||
2076 | nvme_uninit_ctrl(&dev->ctrl); | 2054 | nvme_uninit_ctrl(&dev->ctrl); |
2077 | nvme_dev_disable(dev, true); | 2055 | nvme_dev_disable(dev, true); |
2078 | flush_work(&dev->reset_work); | 2056 | flush_work(&dev->reset_work); |