author	Keith Busch <keith.busch@intel.com>	2015-01-07 20:55:52 -0500
committer	Jens Axboe <axboe@fb.com>	2015-01-08 11:02:20 -0500
commit	c9d3bf8810514b1d32b49254a8f3485f36380eed (patch)
tree	086d04b656a7aa59aae4e5b8915ea503eb54f27f /drivers/block
parent	cef6a948271d5437f96e731878f2e9cb8c9820b7 (diff)
NVMe: Start and stop h/w queues on reset
This freezes and stops all the queues on device shutdown and restarts
them on resume. This fixes hotplug and reset issues when the controller
is actively being used.

Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
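For orientation only (not part of the commit), a minimal sketch of the quiesce/restart ordering the patch establishes, assuming the nvme_dev_shutdown()/nvme_dev_resume() paths modified in the diff below; nvme_reset_example() is a hypothetical caller:

/*
 * Sketch only: illustrates the ordering this patch adds.
 * nvme_freeze_queues() and nvme_unfreeze_queues() are the helpers
 * introduced in the diff below.
 */
static void nvme_reset_example(struct nvme_dev *dev)
{
	/*
	 * Shutdown path: when the BAR is still mapped, nvme_dev_shutdown()
	 * freezes and stops every namespace queue so no new I/O reaches
	 * the controller while it is reset or removed.
	 */
	nvme_dev_shutdown(dev);		/* calls nvme_freeze_queues(dev) */

	/* ... controller is re-initialized here ... */

	/*
	 * Resume path: on a successful restart, nvme_dev_resume() clears
	 * the stopped flag, unfreezes the queues, restarts the hardware
	 * queues, and kicks any requests parked on the requeue list while
	 * the queue was stopped.
	 */
	nvme_dev_resume(dev);		/* calls nvme_unfreeze_queues(dev) */
}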
Diffstat (limited to 'drivers/block')
-rw-r--r--	drivers/block/nvme-core.c	44
1 file changed, 41 insertions(+), 3 deletions(-)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index ad9a9b61fc1d..ff4ff0999f02 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -432,8 +432,13 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
 	if (unlikely(status)) {
 		if (!(status & NVME_SC_DNR || blk_noretry_request(req))
 			&& (jiffies - req->start_time) < req->timeout) {
+			unsigned long flags;
+
 			blk_mq_requeue_request(req);
-			blk_mq_kick_requeue_list(req->q);
+			spin_lock_irqsave(req->q->queue_lock, flags);
+			if (!blk_queue_stopped(req->q))
+				blk_mq_kick_requeue_list(req->q);
+			spin_unlock_irqrestore(req->q->queue_lock, flags);
 			return;
 		}
 		req->errors = nvme_error_status(status);
@@ -2405,6 +2410,34 @@ static void nvme_dev_list_remove(struct nvme_dev *dev)
 		kthread_stop(tmp);
 }
 
+static void nvme_freeze_queues(struct nvme_dev *dev)
+{
+	struct nvme_ns *ns;
+
+	list_for_each_entry(ns, &dev->namespaces, list) {
+		blk_mq_freeze_queue_start(ns->queue);
+
+		spin_lock(ns->queue->queue_lock);
+		queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
+		spin_unlock(ns->queue->queue_lock);
+
+		blk_mq_cancel_requeue_work(ns->queue);
+		blk_mq_stop_hw_queues(ns->queue);
+	}
+}
+
+static void nvme_unfreeze_queues(struct nvme_dev *dev)
+{
+	struct nvme_ns *ns;
+
+	list_for_each_entry(ns, &dev->namespaces, list) {
+		queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
+		blk_mq_unfreeze_queue(ns->queue);
+		blk_mq_start_stopped_hw_queues(ns->queue, true);
+		blk_mq_kick_requeue_list(ns->queue);
+	}
+}
+
 static void nvme_dev_shutdown(struct nvme_dev *dev)
 {
 	int i;
@@ -2413,8 +2446,10 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
 	dev->initialized = 0;
 	nvme_dev_list_remove(dev);
 
-	if (dev->bar)
+	if (dev->bar) {
+		nvme_freeze_queues(dev);
 		csts = readl(&dev->bar->csts);
+	}
 	if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) {
 		for (i = dev->queue_count - 1; i >= 0; i--) {
 			struct nvme_queue *nvmeq = dev->queues[i];
@@ -2670,6 +2705,9 @@ static int nvme_dev_resume(struct nvme_dev *dev)
 		dev->reset_workfn = nvme_remove_disks;
 		queue_work(nvme_workq, &dev->reset_work);
 		spin_unlock(&dev_list_lock);
+	} else {
+		nvme_unfreeze_queues(dev);
+		nvme_set_irq_hints(dev);
 	}
 	dev->initialized = 1;
 	return 0;
@@ -2807,8 +2845,8 @@ static void nvme_remove(struct pci_dev *pdev)
 	pci_set_drvdata(pdev, NULL);
 	flush_work(&dev->reset_work);
 	misc_deregister(&dev->miscdev);
-	nvme_dev_remove(dev);
 	nvme_dev_shutdown(dev);
+	nvme_dev_remove(dev);
 	nvme_dev_remove_admin(dev);
 	nvme_free_queues(dev, 0);
 	nvme_release_prp_pools(dev);