 drivers/nvme/host/core.c     |  45
 drivers/nvme/host/fabrics.h  |   9
 drivers/nvme/host/fc.c       | 157
 drivers/nvme/host/nvme.h     |   3
 drivers/nvme/host/pci.c      |  39
 drivers/nvme/host/rdma.c     |  16
 drivers/nvme/target/io-cmd.c |   7
 7 files changed, 106 insertions(+), 170 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index f431c32774f3..0fe7ea35c221 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -120,8 +120,12 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
 	int ret;
 
 	ret = nvme_reset_ctrl(ctrl);
-	if (!ret)
+	if (!ret) {
 		flush_work(&ctrl->reset_work);
+		if (ctrl->state != NVME_CTRL_LIVE)
+			ret = -ENETRESET;
+	}
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);
@@ -265,7 +269,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 	switch (new_state) {
 	case NVME_CTRL_ADMIN_ONLY:
 		switch (old_state) {
-		case NVME_CTRL_RECONNECTING:
+		case NVME_CTRL_CONNECTING:
 			changed = true;
 			/* FALLTHRU */
 		default:
@@ -276,7 +280,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 		switch (old_state) {
 		case NVME_CTRL_NEW:
 		case NVME_CTRL_RESETTING:
-		case NVME_CTRL_RECONNECTING:
+		case NVME_CTRL_CONNECTING:
 			changed = true;
 			/* FALLTHRU */
 		default:
@@ -294,9 +298,9 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 			break;
 		}
 		break;
-	case NVME_CTRL_RECONNECTING:
+	case NVME_CTRL_CONNECTING:
 		switch (old_state) {
-		case NVME_CTRL_LIVE:
+		case NVME_CTRL_NEW:
 		case NVME_CTRL_RESETTING:
 			changed = true;
 			/* FALLTHRU */
@@ -309,7 +313,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 		case NVME_CTRL_LIVE:
 		case NVME_CTRL_ADMIN_ONLY:
 		case NVME_CTRL_RESETTING:
-		case NVME_CTRL_RECONNECTING:
+		case NVME_CTRL_CONNECTING:
 			changed = true;
 			/* FALLTHRU */
 		default:
@@ -518,9 +522,11 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
 		u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
 		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
 
-		range[n].cattr = cpu_to_le32(0);
-		range[n].nlb = cpu_to_le32(nlb);
-		range[n].slba = cpu_to_le64(slba);
+		if (n < segments) {
+			range[n].cattr = cpu_to_le32(0);
+			range[n].nlb = cpu_to_le32(nlb);
+			range[n].slba = cpu_to_le64(slba);
+		}
 		n++;
 	}
 
@@ -794,13 +800,9 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
 
 static int nvme_keep_alive(struct nvme_ctrl *ctrl)
 {
-	struct nvme_command c;
 	struct request *rq;
 
-	memset(&c, 0, sizeof(c));
-	c.common.opcode = nvme_admin_keep_alive;
-
-	rq = nvme_alloc_request(ctrl->admin_q, &c, BLK_MQ_REQ_RESERVED,
+	rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED,
 			NVME_QID_ANY);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
@@ -832,6 +834,8 @@ void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
 		return;
 
 	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
+	memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
+	ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
 	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
 }
 EXPORT_SYMBOL_GPL(nvme_start_keep_alive);
@@ -1117,14 +1121,19 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 
 static void nvme_update_formats(struct nvme_ctrl *ctrl)
 {
-	struct nvme_ns *ns;
+	struct nvme_ns *ns, *next;
+	LIST_HEAD(rm_list);
 
 	mutex_lock(&ctrl->namespaces_mutex);
 	list_for_each_entry(ns, &ctrl->namespaces, list) {
-		if (ns->disk && nvme_revalidate_disk(ns->disk))
-			nvme_ns_remove(ns);
+		if (ns->disk && nvme_revalidate_disk(ns->disk)) {
+			list_move_tail(&ns->list, &rm_list);
+		}
 	}
 	mutex_unlock(&ctrl->namespaces_mutex);
+
+	list_for_each_entry_safe(ns, next, &rm_list, list)
+		nvme_ns_remove(ns);
 }
 
 static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
@@ -2687,7 +2696,7 @@ static ssize_t nvme_sysfs_show_state(struct device *dev,
 		[NVME_CTRL_LIVE]	= "live",
 		[NVME_CTRL_ADMIN_ONLY]	= "only-admin",
 		[NVME_CTRL_RESETTING]	= "resetting",
-		[NVME_CTRL_RECONNECTING]= "reconnecting",
+		[NVME_CTRL_CONNECTING]	= "connecting",
 		[NVME_CTRL_DELETING]	= "deleting",
 		[NVME_CTRL_DEAD]	= "dead",
 	};
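The nvme_update_formats() hunk above is the subtle one: namespaces that fail revalidation are no longer torn down while namespaces_mutex is held (nvme_ns_remove() must itself take that mutex, so the old code could deadlock). Instead they are unlinked onto a private list under the lock and removed afterwards. A minimal sketch of this "collect under lock, destroy after unlock" pattern, with hypothetical my_dev/my_obj/obj_is_stale/obj_destroy names standing in for the nvme structures:

#include <linux/list.h>
#include <linux/mutex.h>

static void my_prune_stale(struct my_dev *dev)
{
	struct my_obj *obj, *next;
	LIST_HEAD(rm_list);	/* private list: needs no locking */

	mutex_lock(&dev->lock);
	list_for_each_entry_safe(obj, next, &dev->objs, list) {
		if (obj_is_stale(obj))
			/* unlink from the shared list while still locked */
			list_move_tail(&obj->list, &rm_list);
	}
	mutex_unlock(&dev->lock);

	/* destruction may sleep or re-take dev->lock; do it unlocked */
	list_for_each_entry_safe(obj, next, &rm_list, list)
		obj_destroy(obj);
}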
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 25b19f722f5b..a3145d90c1d2 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -171,13 +171,14 @@ static inline blk_status_t nvmf_check_init_req(struct nvme_ctrl *ctrl,
 	    cmd->common.opcode != nvme_fabrics_command ||
 	    cmd->fabrics.fctype != nvme_fabrics_type_connect) {
 		/*
-		 * Reconnecting state means transport disruption, which can take
-		 * a long time and even might fail permanently, fail fast to
-		 * give upper layers a chance to failover.
+		 * Connecting state means transport disruption or initial
+		 * establishment, which can take a long time and even might
+		 * fail permanently, fail fast to give upper layers a chance
+		 * to failover.
 		 * Deleting state means that the ctrl will never accept commands
 		 * again, fail it permanently.
 		 */
-		if (ctrl->state == NVME_CTRL_RECONNECTING ||
+		if (ctrl->state == NVME_CTRL_CONNECTING ||
 		    ctrl->state == NVME_CTRL_DELETING) {
 			nvme_req(rq)->status = NVME_SC_ABORT_REQ;
 			return BLK_STS_IOERR;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index b856d7c919d2..7f51f8414b97 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -55,9 +55,7 @@ struct nvme_fc_queue {
 
 enum nvme_fcop_flags {
 	FCOP_FLAGS_TERMIO	= (1 << 0),
-	FCOP_FLAGS_RELEASED	= (1 << 1),
-	FCOP_FLAGS_COMPLETE	= (1 << 2),
-	FCOP_FLAGS_AEN		= (1 << 3),
+	FCOP_FLAGS_AEN		= (1 << 1),
 };
 
 struct nvmefc_ls_req_op {
@@ -532,7 +530,7 @@ nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
 {
 	switch (ctrl->ctrl.state) {
 	case NVME_CTRL_NEW:
-	case NVME_CTRL_RECONNECTING:
+	case NVME_CTRL_CONNECTING:
 		/*
 		 * As all reconnects were suppressed, schedule a
 		 * connect.
@@ -777,7 +775,7 @@ nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
 		}
 		break;
 
-	case NVME_CTRL_RECONNECTING:
+	case NVME_CTRL_CONNECTING:
 		/*
 		 * The association has already been terminated and the
 		 * controller is attempting reconnects.  No need to do anything
@@ -1470,7 +1468,6 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
 
 /* *********************** NVME Ctrl Routines **************************** */
 
-static void __nvme_fc_final_op_cleanup(struct request *rq);
 static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
 
 static int
@@ -1512,13 +1509,19 @@ nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
 static int
 __nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
 {
-	int state;
+	unsigned long flags;
+	int opstate;
+
+	spin_lock_irqsave(&ctrl->lock, flags);
+	opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
+	if (opstate != FCPOP_STATE_ACTIVE)
+		atomic_set(&op->state, opstate);
+	else if (ctrl->flags & FCCTRL_TERMIO)
+		ctrl->iocnt++;
+	spin_unlock_irqrestore(&ctrl->lock, flags);
 
-	state = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
-	if (state != FCPOP_STATE_ACTIVE) {
-		atomic_set(&op->state, state);
+	if (opstate != FCPOP_STATE_ACTIVE)
 		return -ECANCELED;
-	}
 
 	ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
 					&ctrl->rport->remoteport,
@@ -1532,60 +1535,26 @@ static void
 nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
 {
 	struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
-	unsigned long flags;
-	int i, ret;
+	int i;
 
-	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
-		if (atomic_read(&aen_op->state) != FCPOP_STATE_ACTIVE)
-			continue;
-
-		spin_lock_irqsave(&ctrl->lock, flags);
-		if (ctrl->flags & FCCTRL_TERMIO) {
-			ctrl->iocnt++;
-			aen_op->flags |= FCOP_FLAGS_TERMIO;
-		}
-		spin_unlock_irqrestore(&ctrl->lock, flags);
-
-		ret = __nvme_fc_abort_op(ctrl, aen_op);
-		if (ret) {
-			/*
-			 * if __nvme_fc_abort_op failed the io wasn't
-			 * active. Thus this call path is running in
-			 * parallel to the io complete. Treat as non-error.
-			 */
-
-			/* back out the flags/counters */
-			spin_lock_irqsave(&ctrl->lock, flags);
-			if (ctrl->flags & FCCTRL_TERMIO)
-				ctrl->iocnt--;
-			aen_op->flags &= ~FCOP_FLAGS_TERMIO;
-			spin_unlock_irqrestore(&ctrl->lock, flags);
-			return;
-		}
-	}
+	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
+		__nvme_fc_abort_op(ctrl, aen_op);
 }
 
-static inline int
+static inline void
 __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
-		struct nvme_fc_fcp_op *op)
+		struct nvme_fc_fcp_op *op, int opstate)
 {
 	unsigned long flags;
-	bool complete_rq = false;
 
-	spin_lock_irqsave(&ctrl->lock, flags);
-	if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
-		if (ctrl->flags & FCCTRL_TERMIO) {
-			if (!--ctrl->iocnt)
-				wake_up(&ctrl->ioabort_wait);
-		}
+	if (opstate == FCPOP_STATE_ABORTED) {
+		spin_lock_irqsave(&ctrl->lock, flags);
+		if (ctrl->flags & FCCTRL_TERMIO) {
+			if (!--ctrl->iocnt)
+				wake_up(&ctrl->ioabort_wait);
+		}
+		spin_unlock_irqrestore(&ctrl->lock, flags);
 	}
-	if (op->flags & FCOP_FLAGS_RELEASED)
-		complete_rq = true;
-	else
-		op->flags |= FCOP_FLAGS_COMPLETE;
-	spin_unlock_irqrestore(&ctrl->lock, flags);
-
-	return complete_rq;
 }
 
 static void
@@ -1601,6 +1570,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 	__le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
 	union nvme_result result;
 	bool terminate_assoc = true;
+	int opstate;
 
 	/*
 	 * WARNING:
@@ -1639,11 +1609,12 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 	 * association to be terminated.
 	 */
 
+	opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
+
 	fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
 				sizeof(op->rsp_iu), DMA_FROM_DEVICE);
 
-	if (atomic_read(&op->state) == FCPOP_STATE_ABORTED ||
-			op->flags & FCOP_FLAGS_TERMIO)
+	if (opstate == FCPOP_STATE_ABORTED)
 		status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
 	else if (freq->status)
 		status = cpu_to_le16(NVME_SC_INTERNAL << 1);
@@ -1708,7 +1679,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 done:
 	if (op->flags & FCOP_FLAGS_AEN) {
 		nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
-		__nvme_fc_fcpop_chk_teardowns(ctrl, op);
+		__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
 		atomic_set(&op->state, FCPOP_STATE_IDLE);
 		op->flags = FCOP_FLAGS_AEN;	/* clear other flags */
 		nvme_fc_ctrl_put(ctrl);
@@ -1722,13 +1693,11 @@ done:
 	if (status &&
 	    (blk_queue_dying(rq->q) ||
 	     ctrl->ctrl.state == NVME_CTRL_NEW ||
-	     ctrl->ctrl.state == NVME_CTRL_RECONNECTING))
+	     ctrl->ctrl.state == NVME_CTRL_CONNECTING))
 		status |= cpu_to_le16(NVME_SC_DNR << 1);
 
-	if (__nvme_fc_fcpop_chk_teardowns(ctrl, op))
-		__nvme_fc_final_op_cleanup(rq);
-	else
-		nvme_end_request(rq, status, result);
+	__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
+	nvme_end_request(rq, status, result);
 
 check_error:
 	if (terminate_assoc)
@@ -2415,46 +2384,16 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg)
 }
 
 static void
-__nvme_fc_final_op_cleanup(struct request *rq)
+nvme_fc_complete_rq(struct request *rq)
 {
 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
 	struct nvme_fc_ctrl *ctrl = op->ctrl;
 
 	atomic_set(&op->state, FCPOP_STATE_IDLE);
-	op->flags &= ~(FCOP_FLAGS_TERMIO | FCOP_FLAGS_RELEASED |
-			FCOP_FLAGS_COMPLETE);
 
 	nvme_fc_unmap_data(ctrl, rq, op);
 	nvme_complete_rq(rq);
 	nvme_fc_ctrl_put(ctrl);
-
-}
-
-static void
-nvme_fc_complete_rq(struct request *rq)
-{
-	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
-	struct nvme_fc_ctrl *ctrl = op->ctrl;
-	unsigned long flags;
-	bool completed = false;
-
-	/*
-	 * the core layer, on controller resets after calling
-	 * nvme_shutdown_ctrl(), calls complete_rq without our
-	 * calling blk_mq_complete_request(), thus there may still
-	 * be live i/o outstanding with the LLDD. Means transport has
-	 * to track complete calls vs fcpio_done calls to know what
-	 * path to take on completes and dones.
-	 */
-	spin_lock_irqsave(&ctrl->lock, flags);
-	if (op->flags & FCOP_FLAGS_COMPLETE)
-		completed = true;
-	else
-		op->flags |= FCOP_FLAGS_RELEASED;
-	spin_unlock_irqrestore(&ctrl->lock, flags);
-
-	if (completed)
-		__nvme_fc_final_op_cleanup(rq);
 }
 
 /*
@@ -2476,35 +2415,11 @@ nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
 	struct nvme_ctrl *nctrl = data;
 	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
-	unsigned long flags;
-	int status;
 
 	if (!blk_mq_request_started(req))
 		return;
 
-	spin_lock_irqsave(&ctrl->lock, flags);
-	if (ctrl->flags & FCCTRL_TERMIO) {
-		ctrl->iocnt++;
-		op->flags |= FCOP_FLAGS_TERMIO;
-	}
-	spin_unlock_irqrestore(&ctrl->lock, flags);
-
-	status = __nvme_fc_abort_op(ctrl, op);
-	if (status) {
-		/*
-		 * if __nvme_fc_abort_op failed the io wasn't
-		 * active. Thus this call path is running in
-		 * parallel to the io complete. Treat as non-error.
-		 */
-
-		/* back out the flags/counters */
-		spin_lock_irqsave(&ctrl->lock, flags);
-		if (ctrl->flags & FCCTRL_TERMIO)
-			ctrl->iocnt--;
-		op->flags &= ~FCOP_FLAGS_TERMIO;
-		spin_unlock_irqrestore(&ctrl->lock, flags);
-		return;
-	}
+	__nvme_fc_abort_op(ctrl, op);
 }
 
 
@@ -2943,7 +2858,7 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
 	unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
 	bool recon = true;
 
-	if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING)
+	if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
 		return;
 
 	if (portptr->port_state == FC_OBJSTATE_ONLINE)
@@ -2991,10 +2906,10 @@ nvme_fc_reset_ctrl_work(struct work_struct *work)
 	/* will block will waiting for io to terminate */
 	nvme_fc_delete_association(ctrl);
 
-	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
 		dev_err(ctrl->ctrl.device,
 			"NVME-FC{%d}: error_recovery: Couldn't change state "
-			"to RECONNECTING\n", ctrl->cnum);
+			"to CONNECTING\n", ctrl->cnum);
 		return;
 	}
 
@@ -3195,7 +3110,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 	 * transport errors (frame drop, LS failure) inherently must kill
 	 * the association. The transport is coded so that any command used
 	 * to create the association (prior to a LIVE state transition
-	 * while NEW or RECONNECTING) will fail if it completes in error or
+	 * while NEW or CONNECTING) will fail if it completes in error or
 	 * times out.
 	 *
 	 * As such: as the connect request was mostly likely due to a
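The FC rework above drops the RELEASED/COMPLETE flag juggling in favour of a single ownership hand-off on op->state: whichever of the abort and completion paths swaps the state first owns the op, and the loser backs off. A self-contained userspace sketch of the idiom, using C11 stdatomic in place of the kernel's atomic_t (in the driver the exchange and the back-off additionally run under ctrl->lock):

#include <stdatomic.h>
#include <stdio.h>

enum { ST_IDLE, ST_ACTIVE, ST_ABORTED, ST_COMPLETE };

static _Atomic int op_state = ST_ACTIVE;

static int try_abort(void)
{
	int prev = atomic_exchange(&op_state, ST_ABORTED);

	if (prev != ST_ACTIVE) {
		/* lost the race: put back whatever state won */
		atomic_store(&op_state, prev);
		return -1;	/* -ECANCELED in the driver */
	}
	return 0;		/* we own the op; go issue fcp_abort */
}

int main(void)
{
	printf("abort %s\n", try_abort() ? "lost the race" : "owns the op");
	return 0;
}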
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 8e4550fa08f8..0521e4707d1c 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -123,7 +123,7 @@ enum nvme_ctrl_state {
 	NVME_CTRL_LIVE,
 	NVME_CTRL_ADMIN_ONLY,	/* Only admin queue live */
 	NVME_CTRL_RESETTING,
-	NVME_CTRL_RECONNECTING,
+	NVME_CTRL_CONNECTING,
 	NVME_CTRL_DELETING,
 	NVME_CTRL_DEAD,
 };
@@ -183,6 +183,7 @@ struct nvme_ctrl {
 	struct work_struct scan_work;
 	struct work_struct async_event_work;
 	struct delayed_work ka_work;
+	struct nvme_command ka_cmd;
 	struct work_struct fw_act_work;
 
 	/* Power saving configuration */
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 6fe7af00a1f4..73036d2fbbd5 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1141,7 +1141,7 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
 	/* If there is a reset/reinit ongoing, we shouldn't reset again. */
 	switch (dev->ctrl.state) {
 	case NVME_CTRL_RESETTING:
-	case NVME_CTRL_RECONNECTING:
+	case NVME_CTRL_CONNECTING:
 		return false;
 	default:
 		break;
@@ -1215,13 +1215,17 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 	 * cancellation error. All outstanding requests are completed on
 	 * shutdown, so we return BLK_EH_HANDLED.
 	 */
-	if (dev->ctrl.state == NVME_CTRL_RESETTING) {
+	switch (dev->ctrl.state) {
+	case NVME_CTRL_CONNECTING:
+	case NVME_CTRL_RESETTING:
 		dev_warn(dev->ctrl.device,
 			 "I/O %d QID %d timeout, disable controller\n",
 			 req->tag, nvmeq->qid);
 		nvme_dev_disable(dev, false);
 		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
 		return BLK_EH_HANDLED;
+	default:
+		break;
 	}
 
 	/*
@@ -1364,18 +1368,14 @@ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
 static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
 				int qid, int depth)
 {
-	if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
-		unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
-						      dev->ctrl.page_size);
-		nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
-		nvmeq->sq_cmds_io = dev->cmb + offset;
-	} else {
-		nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
-					&nvmeq->sq_dma_addr, GFP_KERNEL);
-		if (!nvmeq->sq_cmds)
-			return -ENOMEM;
-	}
+	/* CMB SQEs will be mapped before creation */
+	if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS))
+		return 0;
 
+	nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
+					    &nvmeq->sq_dma_addr, GFP_KERNEL);
+	if (!nvmeq->sq_cmds)
+		return -ENOMEM;
 	return 0;
 }
 
@@ -1449,6 +1449,13 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
 	struct nvme_dev *dev = nvmeq->dev;
 	int result;
 
+	if (dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
+		unsigned offset = (qid - 1) * roundup(SQ_SIZE(nvmeq->q_depth),
+						      dev->ctrl.page_size);
+		nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
+		nvmeq->sq_cmds_io = dev->cmb + offset;
+	}
+
 	nvmeq->cq_vector = qid - 1;
 	result = adapter_alloc_cq(dev, qid, nvmeq);
 	if (result < 0)
@@ -2288,12 +2295,12 @@ static void nvme_reset_work(struct work_struct *work)
 		nvme_dev_disable(dev, false);
 
 	/*
-	 * Introduce RECONNECTING state from nvme-fc/rdma transports to mark the
+	 * Introduce CONNECTING state from nvme-fc/rdma transports to mark the
 	 * initializing procedure here.
 	 */
-	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RECONNECTING)) {
+	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
 		dev_warn(dev->ctrl.device,
-			"failed to mark controller RECONNECTING\n");
+			"failed to mark controller CONNECTING\n");
 		goto out;
 	}
 
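The two CMB hunks move the mapping of SQ entries in the controller memory buffer from nvme_alloc_sq_cmds(), which runs once, into nvme_create_queue(), which runs on every reset, so the mapping is recomputed each time the controller comes back up. The placement math itself is unchanged; as a standalone illustration (the 64-byte SQE size is sizeof(struct nvme_command); the 4 KiB page size is an assumption):

#include <stdio.h>

#define SQE_BYTES	64u	/* sizeof(struct nvme_command) */

static unsigned int roundup_to(unsigned int v, unsigned int step)
{
	return ((v + step - 1) / step) * step;
}

/* byte offset of queue qid's SQ inside the CMB (qid >= 1) */
static unsigned int cmb_sq_offset(int qid, int depth, unsigned int page_size)
{
	return (qid - 1) * roundup_to(depth * SQE_BYTES, page_size);
}

int main(void)
{
	/* depth 1024 -> 64 KiB per queue, already page aligned */
	printf("qid 3 SQ offset: %u\n", cmb_sq_offset(3, 1024, 4096));
	return 0;
}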
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 2bc059f7d73c..3a51ed50eff2 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -887,7 +887,7 @@ free_ctrl:
 static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
 {
 	/* If we are resetting/deleting then do nothing */
-	if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING) {
+	if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) {
 		WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW ||
 			ctrl->ctrl.state == NVME_CTRL_LIVE);
 		return;
@@ -973,7 +973,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
 	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
 	nvme_start_queues(&ctrl->ctrl);
 
-	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
 		/* state change failure should never happen */
 		WARN_ON_ONCE(1);
 		return;
@@ -1756,7 +1756,7 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
 	nvme_stop_ctrl(&ctrl->ctrl);
 	nvme_rdma_shutdown_ctrl(ctrl, false);
 
-	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
 		/* state change failure should never happen */
 		WARN_ON_ONCE(1);
 		return;
@@ -1784,11 +1784,8 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
 	return;
 
 out_fail:
-	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
-	nvme_remove_namespaces(&ctrl->ctrl);
-	nvme_rdma_shutdown_ctrl(ctrl, true);
-	nvme_uninit_ctrl(&ctrl->ctrl);
-	nvme_put_ctrl(&ctrl->ctrl);
+	++ctrl->ctrl.nr_reconnects;
+	nvme_rdma_reconnect_or_remove(ctrl);
 }
 
 static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
@@ -1942,6 +1939,9 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
 	if (!ctrl->queues)
 		goto out_uninit_ctrl;
 
+	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
+	WARN_ON_ONCE(!changed);
+
 	ret = nvme_rdma_configure_admin_queue(ctrl, true);
 	if (ret)
 		goto out_kfree_queues;
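With the last two rdma.c hunks, a failed reset no longer removes the controller on the spot: the ctrl is parked in CONNECTING before the attempt, and on failure the reset path bumps nr_reconnects and falls into the same nvme_rdma_reconnect_or_remove() used after transport errors. Roughly, reconstructed from the surrounding driver context (a sketch, not part of the patch; the nvmf_should_reconnect()/reconnect_work details are assumptions about that era of the driver):

static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
{
	/* only a controller parked in CONNECTING may retry */
	if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
		return;

	if (nvmf_should_reconnect(&ctrl->ctrl))	/* reconnect budget left? */
		queue_delayed_work(nvme_wq, &ctrl->reconnect_work,
				ctrl->ctrl.opts->reconnect_delay * HZ);
	else
		nvme_delete_ctrl(&ctrl->ctrl);
}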
diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c
index 0a4372a016f2..28bbdff4a88b 100644
--- a/drivers/nvme/target/io-cmd.c
+++ b/drivers/nvme/target/io-cmd.c
@@ -105,10 +105,13 @@ static void nvmet_execute_flush(struct nvmet_req *req)
 static u16 nvmet_discard_range(struct nvmet_ns *ns,
 		struct nvme_dsm_range *range, struct bio **bio)
 {
-	if (__blkdev_issue_discard(ns->bdev,
+	int ret;
+
+	ret = __blkdev_issue_discard(ns->bdev,
 			le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
 			le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
-			GFP_KERNEL, 0, bio))
+			GFP_KERNEL, 0, bio);
+	if (ret && ret != -EOPNOTSUPP)
 		return NVME_SC_INTERNAL | NVME_SC_DNR;
 	return 0;
 }
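The target-side fix distinguishes "backend cannot discard" from a genuine failure: __blkdev_issue_discard() returns -EOPNOTSUPP when the underlying queue lacks discard support, and since DSM deallocate is advisory that case is now reported as success rather than NVME_SC_INTERNAL. The resulting status mapping, as a hypothetical helper:

/* hypothetical helper mirroring the check above */
static u16 discard_status(int ret)
{
	if (ret == 0 || ret == -EOPNOTSUPP)
		return NVME_SC_SUCCESS;		/* advisory op: not an error */
	return NVME_SC_INTERNAL | NVME_SC_DNR;	/* real backend failure */
}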