about summary refs log tree commit diff stats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/nvme/host/core.c19
1 file changed, 2 insertions(+), 17 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index d70df1d0072d..48cafaa6fbc5 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -131,7 +131,7 @@ void nvme_complete_rq(struct request *req)
 {
 	if (unlikely(nvme_req(req)->status && nvme_req_needs_retry(req))) {
 		nvme_req(req)->retries++;
-		blk_mq_requeue_request(req, !blk_mq_queue_stopped(req->q));
+		blk_mq_requeue_request(req, true);
 		return;
 	}
 
@@ -2694,9 +2694,6 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
 	/* Forcibly unquiesce queues to avoid blocking dispatch */
 	blk_mq_unquiesce_queue(ctrl->admin_q);
 
-	/* Forcibly start all queues to avoid having stuck requests */
-	blk_mq_start_hw_queues(ctrl->admin_q);
-
 	list_for_each_entry(ns, &ctrl->namespaces, list) {
 		/*
 		 * Revalidating a dead namespace sets capacity to 0. This will
@@ -2709,16 +2706,6 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
 
 		/* Forcibly unquiesce queues to avoid blocking dispatch */
 		blk_mq_unquiesce_queue(ns->queue);
-
-		/*
-		 * Forcibly start all queues to avoid having stuck requests.
-		 * Note that we must ensure the queues are not stopped
-		 * when the final removal happens.
-		 */
-		blk_mq_start_hw_queues(ns->queue);
-
-		/* draining requests in requeue list */
-		blk_mq_kick_requeue_list(ns->queue);
 	}
 	mutex_unlock(&ctrl->namespaces_mutex);
 }
@@ -2787,10 +2774,8 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
 	struct nvme_ns *ns;
 
 	mutex_lock(&ctrl->namespaces_mutex);
-	list_for_each_entry(ns, &ctrl->namespaces, list) {
+	list_for_each_entry(ns, &ctrl->namespaces, list)
 		blk_mq_unquiesce_queue(ns->queue);
-		blk_mq_kick_requeue_list(ns->queue);
-	}
 	mutex_unlock(&ctrl->namespaces_mutex);
 }
 EXPORT_SYMBOL_GPL(nvme_start_queues);