aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/block/nvme-core.c
diff options
context:
space:
mode:
authorKeith Busch <keith.busch@intel.com>2013-12-10 15:10:37 -0500
committerMatthew Wilcox <matthew.r.wilcox@intel.com>2014-01-27 19:20:02 -0500
commitd4b4ff8e28b474fac0fbfa9cfc40f88b9e41e380 (patch)
tree356576d531535d11ebcf321f92131e0e4513ee32 /drivers/block/nvme-core.c
parent9a6b94584de1a0467d85b435df9c744c5c45a270 (diff)
NVMe: Schedule reset for failed controllers
Schedule a controller reset when the controller indicates a failed status. If the device does not become ready after the reset, the PCI device will be scheduled for removal.

Signed-off-by: Keith Busch <keith.busch@intel.com>
[fixed checkpatch issue]
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
Diffstat (limited to 'drivers/block/nvme-core.c')
-rw-r--r--  drivers/block/nvme-core.c | 21
1 file changed, 19 insertions(+), 2 deletions(-)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 000bca43c23b..2f5b9f5f5a21 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -60,6 +60,8 @@ static LIST_HEAD(dev_list);
 static struct task_struct *nvme_thread;
 static struct workqueue_struct *nvme_workq;
 
+static void nvme_reset_failed_dev(struct work_struct *ws);
+
 /*
  * An NVM Express queue. Each device has at least two (one for admin
  * commands and one for I/O commands).
@@ -1612,13 +1614,25 @@ static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
 
 static int nvme_kthread(void *data)
 {
-	struct nvme_dev *dev;
+	struct nvme_dev *dev, *next;
 
 	while (!kthread_should_stop()) {
 		set_current_state(TASK_INTERRUPTIBLE);
 		spin_lock(&dev_list_lock);
-		list_for_each_entry(dev, &dev_list, node) {
+		list_for_each_entry_safe(dev, next, &dev_list, node) {
 			int i;
+			if (readl(&dev->bar->csts) & NVME_CSTS_CFS &&
+							dev->initialized) {
+				if (work_busy(&dev->reset_work))
+					continue;
+				list_del_init(&dev->node);
+				dev_warn(&dev->pci_dev->dev,
+					"Failed status, reset controller\n");
+				INIT_WORK(&dev->reset_work,
+							nvme_reset_failed_dev);
+				queue_work(nvme_workq, &dev->reset_work);
+				continue;
+			}
 			for (i = 0; i < dev->queue_count; i++) {
 				struct nvme_queue *nvmeq = dev->queues[i];
 				if (!nvmeq)
@@ -2006,6 +2020,7 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
 {
 	int i;
 
+	dev->initialized = 0;
 	for (i = dev->queue_count - 1; i >= 0; i--)
 		nvme_disable_queue(dev, i);
 
@@ -2196,6 +2211,7 @@ static int nvme_dev_resume(struct nvme_dev *dev)
 		queue_work(nvme_workq, &dev->reset_work);
 		spin_unlock(&dev_list_lock);
 	}
+	dev->initialized = 1;
 	return 0;
 }
 
@@ -2269,6 +2285,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (result)
 		goto remove;
 
+	dev->initialized = 1;
 	kref_init(&dev->kref);
 	return 0;
 