author	Keith Busch <keith.busch@intel.com>	2015-01-07 20:55:48 -0500
committer	Jens Axboe <axboe@fb.com>	2015-01-08 11:00:29 -0500
commit	c917dfe52834979610d45022226445d1dc7c67d8 (patch)
tree	75548167908bbd44b55631879c54840bde339dfc /drivers/block
parent	eb130dbfc40eabcd4e10797310bda6b9f6dd7e76 (diff)
NVMe: Start all requests
Once the nvme callback is set for a request, the driver can start it
and make it available for timeout handling. For timed out commands on
a device that is not initialized, this fixes potential deadlocks that
can occur on startup and shutdown when a device is unresponsive, since
they can now be cancelled.

Asynchronous requests do not have any expected timeout, so these use
the new REQ_NO_TIMEOUT request flag.

Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
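For context, the shape of nvme_set_info() after this patch can be sketched as
follows. This is a simplified reconstruction from the first hunk below, not
verbatim kernel source; the nvme_completion_fn type of the handler parameter
is assumed from the driver's completion-callback typedef:

static void nvme_set_info(struct nvme_cmd_info *cmd, void *ctx,
				nvme_completion_fn handler)
{
	cmd->fn = handler;
	cmd->ctx = ctx;
	cmd->aborted = 0;
	/* Arm blk-mq timeout handling as soon as the callback is set. */
	blk_mq_start_request(blk_mq_rq_from_pdu(cmd));
}

With the start moved here, nvme_queue_rq() no longer calls
blk_mq_start_request() itself (second hunk), and the async admin path only
has to flag its request with REQ_NO_TIMEOUT before nvme_set_info() runs
(third hunk).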
Diffstat (limited to 'drivers/block')
-rw-r--r--	drivers/block/nvme-core.c	16
1 file changed, 12 insertions, 4 deletions
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index f7d083bb3bd5..286fa4cfc937 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -215,6 +215,7 @@ static void nvme_set_info(struct nvme_cmd_info *cmd, void *ctx,
 	cmd->fn = handler;
 	cmd->ctx = ctx;
 	cmd->aborted = 0;
+	blk_mq_start_request(blk_mq_rq_from_pdu(cmd));
 }
 
 /* Special values must be less than 0x1000 */
@@ -664,8 +665,6 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 		}
 	}
 
-	blk_mq_start_request(req);
-
 	nvme_set_info(cmd, iod, req_completion);
 	spin_lock_irq(&nvmeq->q_lock);
 	if (req->cmd_flags & REQ_DISCARD)
@@ -835,6 +834,7 @@ static int nvme_submit_async_admin_req(struct nvme_dev *dev)
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
+	req->cmd_flags |= REQ_NO_TIMEOUT;
 	cmd_info = blk_mq_rq_to_pdu(req);
 	nvme_set_info(cmd_info, req, async_req_completion);
 
@@ -1086,8 +1086,16 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 
 	dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
 							nvmeq->qid);
-	if (nvmeq->dev->initialized)
-		nvme_abort_req(req);
+
+	if (!nvmeq->dev->initialized) {
+		/*
+		 * Force cancelled command frees the request, which requires we
+		 * return BLK_EH_NOT_HANDLED.
+		 */
+		nvme_cancel_queue_ios(nvmeq->hctx, req, nvmeq, reserved);
+		return BLK_EH_NOT_HANDLED;
+	}
+	nvme_abort_req(req);
 
 	/*
 	 * The aborted req will be completed on receiving the abort req.