about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author: Christoph Hellwig <hch@lst.de> 2018-06-11 11:34:06 -0400
committer: Christoph Hellwig <hch@lst.de> 2018-06-15 05:21:00 -0400
commit 3bc32bb1186ccaf3177cbf29caa6cc14dc510b7b (patch)
tree 72b01daec4fd9aad31e6768ba86789e79f420f48
parent e6c3456aa897c9799de5423b28550efad14a51b0 (diff)
nvme-fabrics: refactor queue ready check
Move the is_connected check to the fibre channel transport, as it has no meaning for other transports. To facilitate this split out a new nvmf_fail_nonready_command helper that is called by the transport when it is asked to handle a command on a queue that is not ready. Also avoid a function call for the queue live fast path by inlining the check. Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: James Smart <james.smart@broadcom.com>
-rw-r--r--drivers/nvme/host/fabrics.c59
-rw-r--r--drivers/nvme/host/fabrics.h13
-rw-r--r--drivers/nvme/host/fc.c9
-rw-r--r--drivers/nvme/host/rdma.c7
-rw-r--r--drivers/nvme/target/loop.c7
5 files changed, 45 insertions(+), 50 deletions(-)
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index fa32c1216409..6b4e253b9347 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -536,38 +536,40 @@ static struct nvmf_transport_ops *nvmf_lookup_transport(
536 return NULL; 536 return NULL;
537} 537}
538 538
539blk_status_t nvmf_check_if_ready(struct nvme_ctrl *ctrl, struct request *rq, 539/*
540 bool queue_live, bool is_connected) 540 * For something we're not in a state to send to the device the default action
541 * is to busy it and retry it after the controller state is recovered. However,
542 * anything marked for failfast or nvme multipath is immediately failed.
543 *
544 * Note: commands used to initialize the controller will be marked for failfast.
545 * Note: nvme cli/ioctl commands are marked for failfast.
546 */
547blk_status_t nvmf_fail_nonready_command(struct request *rq)
541{ 548{
542 struct nvme_command *cmd = nvme_req(rq)->cmd; 549 if (!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
550 return BLK_STS_RESOURCE;
551 nvme_req(rq)->status = NVME_SC_ABORT_REQ;
552 return BLK_STS_IOERR;
553}
554EXPORT_SYMBOL_GPL(nvmf_fail_nonready_command);
543 555
544 if (likely(ctrl->state == NVME_CTRL_LIVE && is_connected)) 556bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
545 return BLK_STS_OK; 557 bool queue_live)
558{
559 struct nvme_command *cmd = nvme_req(rq)->cmd;
546 560
547 switch (ctrl->state) { 561 switch (ctrl->state) {
548 case NVME_CTRL_NEW: 562 case NVME_CTRL_NEW:
549 case NVME_CTRL_CONNECTING: 563 case NVME_CTRL_CONNECTING:
550 case NVME_CTRL_DELETING: 564 case NVME_CTRL_DELETING:
551 /* 565 /*
552 * This is the case of starting a new or deleting an association
553 * but connectivity was lost before it was fully created or torn
554 * down. We need to error the commands used to initialize the
555 * controller so the reconnect can go into a retry attempt. The
556 * commands should all be marked REQ_FAILFAST_DRIVER, which will
557 * hit the reject path below. Anything else will be queued while
558 * the state settles.
559 */
560 if (!is_connected)
561 break;
562
563 /*
564 * If queue is live, allow only commands that are internally 566 * If queue is live, allow only commands that are internally
565 * generated pass through. These are commands on the admin 567 * generated pass through. These are commands on the admin
566 * queue to initialize the controller. This will reject any 568 * queue to initialize the controller. This will reject any
567 * ioctl admin cmds received while initializing. 569 * ioctl admin cmds received while initializing.
568 */ 570 */
569 if (queue_live && !(nvme_req(rq)->flags & NVME_REQ_USERCMD)) 571 if (queue_live && !(nvme_req(rq)->flags & NVME_REQ_USERCMD))
570 return BLK_STS_OK; 572 return true;
571 573
572 /* 574 /*
573 * If the queue is not live, allow only a connect command. This 575 * If the queue is not live, allow only a connect command. This
@@ -577,26 +579,13 @@ blk_status_t nvmf_check_if_ready(struct nvme_ctrl *ctrl, struct request *rq,
577 if (!queue_live && blk_rq_is_passthrough(rq) && 579 if (!queue_live && blk_rq_is_passthrough(rq) &&
578 cmd->common.opcode == nvme_fabrics_command && 580 cmd->common.opcode == nvme_fabrics_command &&
579 cmd->fabrics.fctype == nvme_fabrics_type_connect) 581 cmd->fabrics.fctype == nvme_fabrics_type_connect)
580 return BLK_STS_OK; 582 return true;
581 break; 583 return false;
582 default: 584 default:
583 break; 585 return false;
584 } 586 }
585
586 /*
587 * Any other new io is something we're not in a state to send to the
588 * device. Default action is to busy it and retry it after the
589 * controller state is recovered. However, anything marked for failfast
590 * or nvme multipath is immediately failed. Note: commands used to
591 * initialize the controller will be marked for failfast.
592 * Note: nvme cli/ioctl commands are marked for failfast.
593 */
594 if (!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
595 return BLK_STS_RESOURCE;
596 nvme_req(rq)->status = NVME_SC_ABORT_REQ;
597 return BLK_STS_IOERR;
598} 587}
599EXPORT_SYMBOL_GPL(nvmf_check_if_ready); 588EXPORT_SYMBOL_GPL(__nvmf_check_ready);
600 589
601static const match_table_t opt_tokens = { 590static const match_table_t opt_tokens = {
602 { NVMF_OPT_TRANSPORT, "transport=%s" }, 591 { NVMF_OPT_TRANSPORT, "transport=%s" },
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 7491a0bbf711..2ea949a3868c 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -162,7 +162,16 @@ void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
162void nvmf_free_options(struct nvmf_ctrl_options *opts); 162void nvmf_free_options(struct nvmf_ctrl_options *opts);
163int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size); 163int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
164bool nvmf_should_reconnect(struct nvme_ctrl *ctrl); 164bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
165blk_status_t nvmf_check_if_ready(struct nvme_ctrl *ctrl, 165blk_status_t nvmf_fail_nonready_command(struct request *rq);
166 struct request *rq, bool queue_live, bool is_connected); 166bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
167 bool queue_live);
168
169static inline bool nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
170 bool queue_live)
171{
172 if (likely(ctrl->state == NVME_CTRL_LIVE))
173 return true;
174 return __nvmf_check_ready(ctrl, rq, queue_live);
175}
167 176
168#endif /* _NVME_FABRICS_H */ 177#endif /* _NVME_FABRICS_H */
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 318e827e74ec..b528a2f5826c 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2266,14 +2266,13 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
2266 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; 2266 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
2267 struct nvme_command *sqe = &cmdiu->sqe; 2267 struct nvme_command *sqe = &cmdiu->sqe;
2268 enum nvmefc_fcp_datadir io_dir; 2268 enum nvmefc_fcp_datadir io_dir;
2269 bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags);
2269 u32 data_len; 2270 u32 data_len;
2270 blk_status_t ret; 2271 blk_status_t ret;
2271 2272
2272 ret = nvmf_check_if_ready(&queue->ctrl->ctrl, rq, 2273 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
2273 test_bit(NVME_FC_Q_LIVE, &queue->flags), 2274 !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2274 ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE); 2275 return nvmf_fail_nonready_command(rq);
2275 if (unlikely(ret))
2276 return ret;
2277 2276
2278 ret = nvme_setup_cmd(ns, rq, sqe); 2277 ret = nvme_setup_cmd(ns, rq, sqe);
2279 if (ret) 2278 if (ret)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 7cd4199db225..c9424da0d23e 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1630,15 +1630,14 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
1630 struct nvme_rdma_qe *sqe = &req->sqe; 1630 struct nvme_rdma_qe *sqe = &req->sqe;
1631 struct nvme_command *c = sqe->data; 1631 struct nvme_command *c = sqe->data;
1632 struct ib_device *dev; 1632 struct ib_device *dev;
1633 bool queue_ready = test_bit(NVME_RDMA_Q_LIVE, &queue->flags);
1633 blk_status_t ret; 1634 blk_status_t ret;
1634 int err; 1635 int err;
1635 1636
1636 WARN_ON_ONCE(rq->tag < 0); 1637 WARN_ON_ONCE(rq->tag < 0);
1637 1638
1638 ret = nvmf_check_if_ready(&queue->ctrl->ctrl, rq, 1639 if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
1639 test_bit(NVME_RDMA_Q_LIVE, &queue->flags), true); 1640 return nvmf_fail_nonready_command(rq);
1640 if (unlikely(ret))
1641 return ret;
1642 1641
1643 dev = queue->device->dev; 1642 dev = queue->device->dev;
1644 ib_dma_sync_single_for_cpu(dev, sqe->dma, 1643 ib_dma_sync_single_for_cpu(dev, sqe->dma,
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 1304ec3a7ede..d8d91f04bd7e 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -158,12 +158,11 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
158 struct nvme_loop_queue *queue = hctx->driver_data; 158 struct nvme_loop_queue *queue = hctx->driver_data;
159 struct request *req = bd->rq; 159 struct request *req = bd->rq;
160 struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req); 160 struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
161 bool queue_ready = test_bit(NVME_LOOP_Q_LIVE, &queue->flags);
161 blk_status_t ret; 162 blk_status_t ret;
162 163
163 ret = nvmf_check_if_ready(&queue->ctrl->ctrl, req, 164 if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
164 test_bit(NVME_LOOP_Q_LIVE, &queue->flags), true); 165 return nvmf_fail_nonready_command(req);
165 if (unlikely(ret))
166 return ret;
167 166
168 ret = nvme_setup_cmd(ns, req, &iod->cmd); 167 ret = nvme_setup_cmd(ns, req, &iod->cmd);
169 if (ret) 168 if (ret)