aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSagi Grimberg <sagi@grimberg.me>2019-01-08 03:53:22 -0500
committerJens Axboe <axboe@kernel.dk>2019-01-23 19:16:59 -0500
commit4c174e6366746ae8d49f9cc409f728eebb7a9ac9 (patch)
treeec4a787d4180da6a704960e6d9ca6cb2b4d05435
parent7fc5854f8c6efae9e7624970ab49a1eac2faefb1 (diff)
nvme-rdma: fix timeout handler
Currently, we have several problems with the timeout handler: 1. If we timeout on the controller establishment flow, we will hang because we don't execute the error recovery (and we shouldn't because the create_ctrl flow needs to fail and cleanup on its own) 2. We might also hang if we get a disconnect on a queue while the controller is already deleting. This racy flow can cause the controller disable/shutdown admin command to hang. We cannot complete a timed out request from the timeout handler without mutual exclusion from the teardown flow (e.g. nvme_rdma_error_recovery_work). So we serialize it in the timeout handler and teardown io and admin queues to guarantee that no one races with us from completing the request. Reported-by: Jaesoo Lee <jalee@purestorage.com> Reviewed-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Sagi Grimberg <sagi@grimberg.me> Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--drivers/nvme/host/rdma.c26
1 file changed, 18 insertions, 8 deletions
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 0a2fd2949ad7..4101961feb44 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1689,18 +1689,28 @@ static enum blk_eh_timer_return
1689nvme_rdma_timeout(struct request *rq, bool reserved) 1689nvme_rdma_timeout(struct request *rq, bool reserved)
1690{ 1690{
1691 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); 1691 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
1692 struct nvme_rdma_queue *queue = req->queue;
1693 struct nvme_rdma_ctrl *ctrl = queue->ctrl;
1692 1694
1693 dev_warn(req->queue->ctrl->ctrl.device, 1695 dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
1694 "I/O %d QID %d timeout, reset controller\n", 1696 rq->tag, nvme_rdma_queue_idx(queue));
1695 rq->tag, nvme_rdma_queue_idx(req->queue));
1696 1697
1697 /* queue error recovery */ 1698 if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
1698 nvme_rdma_error_recovery(req->queue->ctrl); 1699 /*
1700 * Teardown immediately if controller times out while starting
1701 * or we are already started error recovery. all outstanding
1702 * requests are completed on shutdown, so we return BLK_EH_DONE.
1703 */
1704 flush_work(&ctrl->err_work);
1705 nvme_rdma_teardown_io_queues(ctrl, false);
1706 nvme_rdma_teardown_admin_queue(ctrl, false);
1707 return BLK_EH_DONE;
1708 }
1699 1709
1700 /* fail with DNR on cmd timeout */ 1710 dev_warn(ctrl->ctrl.device, "starting error recovery\n");
1701 nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR; 1711 nvme_rdma_error_recovery(ctrl);
1702 1712
1703 return BLK_EH_DONE; 1713 return BLK_EH_RESET_TIMER;
1704} 1714}
1705 1715
1706static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, 1716static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,