author		Keith Busch <keith.busch@intel.com>	2015-01-07 20:55:53 -0500
committer	Jens Axboe <axboe@fb.com>	2015-01-08 11:02:23 -0500
commit		7a509a6b07dd5a08d91f8a7e0cccb9a6438ce439 (patch)
tree		fe8efd2728b78c13ee0de12339ec98739ef5e34a
parent		c9d3bf8810514b1d32b49254a8f3485f36380eed (diff)
NVMe: Fix locking on abort handling
The queues and device need to be locked when messing with them.

Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
 drivers/block/nvme-core.c | 29 +++++++++++++++++++++----------
 1 file changed, 19 insertions(+), 10 deletions(-)
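For context, here is a minimal sketch of the locking rule this patch enforces: any manipulation of the shared device list must happen under dev_list_lock. The stub struct and helper below are hypothetical stand-ins, not driver source; only dev_list_lock, list_del_init(), and the irqsave calls mirror the patch.

#include <linux/list.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(dev_list_lock);	/* guards dev_list, as in the driver */
static LIST_HEAD(dev_list);

struct nvme_dev_stub {			/* hypothetical stand-in for struct nvme_dev */
	struct list_head node;
};

static void remove_from_dev_list(struct nvme_dev_stub *dev)
{
	unsigned long flags;

	/* irqsave variant: this path cannot assume IRQs are enabled */
	spin_lock_irqsave(&dev_list_lock, flags);
	list_del_init(&dev->node);	/* the list is only stable while locked */
	spin_unlock_irqrestore(&dev_list_lock, flags);
}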
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index ff4ff0999f02..cb529e9a82dd 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1021,14 +1021,19 @@ static void nvme_abort_req(struct request *req)
 	struct nvme_command cmd;
 
 	if (!nvmeq->qid || cmd_rq->aborted) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&dev_list_lock, flags);
 		if (work_busy(&dev->reset_work))
-			return;
+			goto out;
 		list_del_init(&dev->node);
 		dev_warn(&dev->pci_dev->dev,
			"I/O %d QID %d timeout, reset controller\n",
				req->tag, nvmeq->qid);
 		dev->reset_workfn = nvme_reset_failed_dev;
 		queue_work(nvme_workq, &dev->reset_work);
+ out:
+		spin_unlock_irqrestore(&dev_list_lock, flags);
 		return;
 	}
 
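Note that the hunk above also turns the early return into a goto: once the list access is locked, bailing out has to drop the lock first. A sketch of that single-unlock idiom, reusing the hypothetical stubs from the sketch above (reset_busy stands in for the work_busy() check):

static void reset_controller_sketch(struct nvme_dev_stub *dev, bool reset_busy)
{
	unsigned long flags;

	spin_lock_irqsave(&dev_list_lock, flags);
	if (reset_busy)			/* stands in for work_busy(&dev->reset_work) */
		goto out;		/* a bare return here would leak the lock */
	list_del_init(&dev->node);
	/* ... warn and schedule the reset work, as the real code does ... */
out:
	spin_unlock_irqrestore(&dev_list_lock, flags);
}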
@@ -1096,25 +1101,29 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 	struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
 	struct nvme_queue *nvmeq = cmd->nvmeq;
 
+	/*
+	 * The aborted req will be completed on receiving the abort req.
+	 * We enable the timer again. If hit twice, it'll cause a device reset,
+	 * as the device then is in a faulty state.
+	 */
+	int ret = BLK_EH_RESET_TIMER;
+
 	dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
			nvmeq->qid);
 
+	spin_lock_irq(&nvmeq->q_lock);
 	if (!nvmeq->dev->initialized) {
 		/*
 		 * Force cancelled command frees the request, which requires we
 		 * return BLK_EH_NOT_HANDLED.
 		 */
 		nvme_cancel_queue_ios(nvmeq->hctx, req, nvmeq, reserved);
-		return BLK_EH_NOT_HANDLED;
-	}
-	nvme_abort_req(req);
+		ret = BLK_EH_NOT_HANDLED;
+	} else
+		nvme_abort_req(req);
+	spin_unlock_irq(&nvmeq->q_lock);
 
-	/*
-	 * The aborted req will be completed on receiving the abort req.
-	 * We enable the timer again. If hit twice, it'll cause a device reset,
-	 * as the device then is in a faulty state.
-	 */
-	return BLK_EH_RESET_TIMER;
+	return ret;
 }
 
 static void nvme_free_queue(struct nvme_queue *nvmeq)
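The timeout hunk follows the same discipline with nvmeq->q_lock: rather than returning from inside the critical section, it records the verdict in ret and leaves through a single unlock. A sketch of that shape, again with hypothetical stub types (only q_lock, the initialized check, and the lock calls mirror the patch):

#include <linux/spinlock.h>

enum eh_verdict_stub { EH_NOT_HANDLED_STUB, EH_RESET_TIMER_STUB };

struct nvme_queue_stub {	/* hypothetical stand-in for struct nvme_queue */
	spinlock_t q_lock;
	bool initialized;
};

static enum eh_verdict_stub nvme_timeout_sketch(struct nvme_queue_stub *nvmeq)
{
	/* default: re-arm the timer and wait for the abort to complete */
	enum eh_verdict_stub ret = EH_RESET_TIMER_STUB;

	/* plain _irq (no flags): the timeout handler runs with IRQs enabled */
	spin_lock_irq(&nvmeq->q_lock);
	if (!nvmeq->initialized)
		ret = EH_NOT_HANDLED_STUB;	/* request was force-cancelled and freed */
	/* else: issue the abort command here, still under q_lock */
	spin_unlock_irq(&nvmeq->q_lock);

	return ret;
}

Holding q_lock across both the initialized check and the abort submission is what closes the race: a concurrent reset can no longer tear the queue down between the check and the abort.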