author	Keith Busch <keith.busch@intel.com>	2019-01-28 11:46:07 -0500
committer	Christoph Hellwig <hch@lst.de>	2019-02-06 10:35:33 -0500
commit	e7ad43c3eda6a1690c4c3c341f95dc1c6898da83 (patch)
tree	34ea98eb7f45c7adbed89fdeaa0e974ee50e675c
parent	ec51f8ee1e63498e9f521ec0e5a6d04622bb2c67 (diff)
nvme: lock NS list changes while handling command effects
If a controller supports the NS Change Notification, the namespace
scan_work is automatically triggered after attaching a new namespace.

Occasionally the namespace scan_work may append the new namespace to the
list before the admin command effects handling is completed. The effects
handling unfreezes namespaces, but if it unfreezes the newly attached
namespace, its request_queue freeze depth will be off and we'll hit the
warning in blk_mq_unfreeze_queue().

On the next namespace add, we will fail to freeze that queue due to the
previous bad accounting and deadlock waiting for frozen.

Fix that by preventing scan work from altering the namespace list while
command effects handling needs to pair freeze with unfreeze.

Reported-by: Wen Xiong <wenxiong@us.ibm.com>
Tested-by: Wen Xiong <wenxiong@us.ibm.com>
Signed-off-by: Keith Busch <keith.busch@intel.com>
Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
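To illustrate the accounting problem described above, here is a minimal
userspace model (written for this description, not driver code; the names
ns, freeze_all, unfreeze_all and handle_effects are invented for the
sketch). It mimics the request_queue freeze depth with a counter and shows
why the freeze/unfreeze pair and the list append must be serialized by the
same lock:

/*
 * Userspace sketch of the race: "freeze_depth" stands in for the
 * request_queue freeze depth, and the warning below corresponds to the
 * WARN hit in blk_mq_unfreeze_queue() when the depth goes negative.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ns {
	int freeze_depth;		/* models the request_queue freeze depth */
	struct ns *next;
};

static struct ns *ns_list;		/* models ctrl->namespaces */
static pthread_mutex_t scan_lock = PTHREAD_MUTEX_INITIALIZER;

static void freeze_all(void)		/* models nvme_start_freeze() */
{
	for (struct ns *n = ns_list; n; n = n->next)
		n->freeze_depth++;
}

static void unfreeze_all(void)		/* models nvme_unfreeze() */
{
	for (struct ns *n = ns_list; n; n = n->next)
		if (--n->freeze_depth < 0)
			fprintf(stderr, "WARN: unbalanced unfreeze\n");
}

/*
 * Command effects handling: the freeze and the matching unfreeze must
 * operate on the same set of namespaces, so the lock is held across both.
 */
static void handle_effects(void)
{
	pthread_mutex_lock(&scan_lock);
	freeze_all();
	/* ... passthrough command and format handling happen here ... */
	unfreeze_all();
	pthread_mutex_unlock(&scan_lock);
}

/*
 * Namespace scan: without scan_lock it could append a namespace between
 * freeze_all() and unfreeze_all(), and the unfreeze would then drive that
 * namespace's depth to -1 -- the bad accounting the patch prevents.
 */
static void scan_work(void)
{
	struct ns *n = calloc(1, sizeof(*n));

	if (!n)
		return;
	pthread_mutex_lock(&scan_lock);
	n->next = ns_list;
	ns_list = n;
	pthread_mutex_unlock(&scan_lock);
}

int main(void)
{
	scan_work();		/* initial namespace */
	handle_effects();	/* depth returns to 0: balanced */
	scan_work();		/* namespace attached by the command */
	handle_effects();
	return 0;
}

In the driver, the same pairing is achieved by taking ctrl->scan_lock in
nvme_passthru_start() and releasing it in nvme_passthru_end(), as the diff
below shows.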
-rw-r--r--	drivers/nvme/host/core.c	8
-rw-r--r--	drivers/nvme/host/nvme.h	1
2 files changed, 8 insertions(+), 1 deletion(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 150e49723c15..6a9dd68c0f4f 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1253,6 +1253,7 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	 * effects say only one namespace is affected.
 	 */
 	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
+		mutex_lock(&ctrl->scan_lock);
 		nvme_start_freeze(ctrl);
 		nvme_wait_freeze(ctrl);
 	}
@@ -1281,8 +1282,10 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
 	 */
 	if (effects & NVME_CMD_EFFECTS_LBCC)
 		nvme_update_formats(ctrl);
-	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK))
+	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
 		nvme_unfreeze(ctrl);
+		mutex_unlock(&ctrl->scan_lock);
+	}
 	if (effects & NVME_CMD_EFFECTS_CCC)
 		nvme_init_identify(ctrl);
 	if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC))
@@ -3401,6 +3404,7 @@ static void nvme_scan_work(struct work_struct *work)
 	if (nvme_identify_ctrl(ctrl, &id))
 		return;
 
+	mutex_lock(&ctrl->scan_lock);
 	nn = le32_to_cpu(id->nn);
 	if (ctrl->vs >= NVME_VS(1, 1, 0) &&
 	    !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
@@ -3409,6 +3413,7 @@ static void nvme_scan_work(struct work_struct *work)
 	}
 	nvme_scan_ns_sequential(ctrl, nn);
 out_free_id:
+	mutex_unlock(&ctrl->scan_lock);
 	kfree(id);
 	down_write(&ctrl->namespaces_rwsem);
 	list_sort(NULL, &ctrl->namespaces, ns_cmp);
@@ -3652,6 +3657,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
 
 	ctrl->state = NVME_CTRL_NEW;
 	spin_lock_init(&ctrl->lock);
+	mutex_init(&ctrl->scan_lock);
 	INIT_LIST_HEAD(&ctrl->namespaces);
 	init_rwsem(&ctrl->namespaces_rwsem);
 	ctrl->dev = dev;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index ab961bdeea89..c4a1bb41abf0 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -154,6 +154,7 @@ struct nvme_ctrl {
 	enum nvme_ctrl_state state;
 	bool identified;
 	spinlock_t lock;
+	struct mutex scan_lock;
 	const struct nvme_ctrl_ops *ops;
 	struct request_queue *admin_q;
 	struct request_queue *connect_q;