-rw-r--r--	drivers/nvme/host/core.c	50
1 file changed, 26 insertions, 24 deletions
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 46df030b2c3f..e7668c4bb4dd 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -100,6 +100,22 @@ static struct class *nvme_subsys_class;
 static void nvme_ns_remove(struct nvme_ns *ns);
 static int nvme_revalidate_disk(struct gendisk *disk);
 static void nvme_put_subsystem(struct nvme_subsystem *subsys);
+static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
+					   unsigned nsid);
+
+static void nvme_set_queue_dying(struct nvme_ns *ns)
+{
+	/*
+	 * Revalidating a dead namespace sets capacity to 0. This will end
+	 * buffered writers dirtying pages that can't be synced.
+	 */
+	if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
+		return;
+	revalidate_disk(ns->disk);
+	blk_set_queue_dying(ns->queue);
+	/* Forcibly unquiesce queues to avoid blocking dispatch */
+	blk_mq_unquiesce_queue(ns->queue);
+}
 
 static void nvme_queue_scan(struct nvme_ctrl *ctrl)
 {
@@ -1151,19 +1167,15 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 
 static void nvme_update_formats(struct nvme_ctrl *ctrl)
 {
-	struct nvme_ns *ns, *next;
-	LIST_HEAD(rm_list);
+	struct nvme_ns *ns;
 
-	down_write(&ctrl->namespaces_rwsem);
-	list_for_each_entry(ns, &ctrl->namespaces, list) {
-		if (ns->disk && nvme_revalidate_disk(ns->disk)) {
-			list_move_tail(&ns->list, &rm_list);
-		}
-	}
-	up_write(&ctrl->namespaces_rwsem);
+	down_read(&ctrl->namespaces_rwsem);
+	list_for_each_entry(ns, &ctrl->namespaces, list)
+		if (ns->disk && nvme_revalidate_disk(ns->disk))
+			nvme_set_queue_dying(ns);
+	up_read(&ctrl->namespaces_rwsem);
 
-	list_for_each_entry_safe(ns, next, &rm_list, list)
-		nvme_ns_remove(ns);
+	nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
 }
 
 static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
@@ -3138,7 +3150,7 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
 
 	down_write(&ctrl->namespaces_rwsem);
 	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
-		if (ns->head->ns_id > nsid)
+		if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags))
 			list_move_tail(&ns->list, &rm_list);
 	}
 	up_write(&ctrl->namespaces_rwsem);
@@ -3542,19 +3554,9 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
 	if (ctrl->admin_q)
 		blk_mq_unquiesce_queue(ctrl->admin_q);
 
-	list_for_each_entry(ns, &ctrl->namespaces, list) {
-		/*
-		 * Revalidating a dead namespace sets capacity to 0. This will
-		 * end buffered writers dirtying pages that can't be synced.
-		 */
-		if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
-			continue;
-		revalidate_disk(ns->disk);
-		blk_set_queue_dying(ns->queue);
+	list_for_each_entry(ns, &ctrl->namespaces, list)
+		nvme_set_queue_dying(ns);
 
-		/* Forcibly unquiesce queues to avoid blocking dispatch */
-		blk_mq_unquiesce_queue(ns->queue);
-	}
 	up_read(&ctrl->namespaces_rwsem);
 }
 EXPORT_SYMBOL_GPL(nvme_kill_queues);