author	Jens Axboe <axboe@fb.com>	2017-04-27 13:33:01 -0400
committer	Jens Axboe <axboe@fb.com>	2017-04-27 13:33:01 -0400
commit	b06e13c38dbd5a03e945ce711f6909c91888f507 (patch)
tree	5d4514f49463e014d6b40c646796a88ba13d9c09
parent	c35e30b4727b390ce7a6dd7ead31335320c2b83e (diff)
parent	7569b90a228ed7dfdc1f92f2c98d7a1b041f22eb (diff)
Merge branch 'nvme-4.12' of git://git.infradead.org/nvme into for-4.12/post-merge
Christoph writes:
"A couple more updates for 4.12. The biggest pile is fc and lpfc
updates from James, but there are various small fixes and cleanups as
well."
Fixes up a few merge issues, and also a warning in
lpfc_nvmet_rcv_unsol_abort() if CONFIG_NVME_TARGET_FC isn't enabled.
Signed-off-by: Jens Axboe <axboe@fb.com>
27 files changed, 1576 insertions, 719 deletions
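Regarding the lpfc_nvmet_rcv_unsol_abort() warning noted in the message: warnings of this kind typically appear when a function's callers are compiled out by Kconfig. A common shape for such a fix is a preprocessor guard; the following is a sketch only, with a hypothetical helper name and signature, not the exact hunk applied in this merge:

	/* sketch: build the nvmet-side handler only when the FC NVME
	 * target is configured in; the empty stub keeps callers
	 * compiling (lpfc_nvmet_handle_abort is a hypothetical name)
	 */
	#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	static void lpfc_nvmet_handle_abort(struct lpfc_hba *phba)
	{
		/* process the unsolicited FC ABTS */
	}
	#else
	static void lpfc_nvmet_handle_abort(struct lpfc_hba *phba) { }
	#endif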
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index ecc1048de837..4976db56e351 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -19,6 +19,7 @@
 #include <linux/parser.h>
 #include <uapi/scsi/fc/fc_fs.h>
 #include <uapi/scsi/fc/fc_els.h>
+#include <linux/delay.h>
 
 #include "nvme.h"
 #include "fabrics.h"
@@ -44,6 +45,8 @@ enum nvme_fc_queue_flags {
 
 #define NVMEFC_QUEUE_DELAY	3		/* ms units */
 
+#define NVME_FC_MAX_CONNECT_ATTEMPTS	1
+
 struct nvme_fc_queue {
 	struct nvme_fc_ctrl	*ctrl;
 	struct device		*dev;
@@ -65,6 +68,7 @@ enum nvme_fcop_flags {
 	FCOP_FLAGS_TERMIO	= (1 << 0),
 	FCOP_FLAGS_RELEASED	= (1 << 1),
 	FCOP_FLAGS_COMPLETE	= (1 << 2),
+	FCOP_FLAGS_AEN		= (1 << 3),
 };
 
 struct nvmefc_ls_req_op {
@@ -86,6 +90,7 @@ enum nvme_fcpop_state {
 	FCPOP_STATE_IDLE	= 1,
 	FCPOP_STATE_ACTIVE	= 2,
 	FCPOP_STATE_ABORTED	= 3,
+	FCPOP_STATE_COMPLETE	= 4,
 };
 
 struct nvme_fc_fcp_op {
@@ -104,6 +109,7 @@ struct nvme_fc_fcp_op {
 	struct request		*rq;
 
 	atomic_t		state;
+	u32			flags;
 	u32			rqno;
 	u32			nents;
 
@@ -134,19 +140,17 @@ struct nvme_fc_rport {
 	struct kref		ref;
 } __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
 
-enum nvme_fcctrl_state {
-	FCCTRL_INIT		= 0,
-	FCCTRL_ACTIVE		= 1,
+enum nvme_fcctrl_flags {
+	FCCTRL_TERMIO		= (1 << 0),
 };
 
 struct nvme_fc_ctrl {
 	spinlock_t		lock;
 	struct nvme_fc_queue	*queues;
-	u32			queue_count;
-
 	struct device		*dev;
 	struct nvme_fc_lport	*lport;
 	struct nvme_fc_rport	*rport;
+	u32			queue_count;
 	u32			cnum;
 
 	u64			association_id;
@@ -159,8 +163,14 @@ struct nvme_fc_ctrl {
 	struct blk_mq_tag_set	tag_set;
 
 	struct work_struct	delete_work;
+	struct work_struct	reset_work;
+	struct delayed_work	connect_work;
+	int			reconnect_delay;
+	int			connect_attempts;
+
 	struct kref		ref;
-	int			state;
+	u32			flags;
+	u32			iocnt;
 
 	struct nvme_fc_fcp_op	aen_ops[NVME_FC_NR_AEN_COMMANDS];
 
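The new flags/iocnt pair in struct nvme_fc_ctrl carries the termination accounting the rest of this patch relies on. Condensed from the hunks that follow (a sketch of the pattern, not a separate API): while FCCTRL_TERMIO is set on the controller, every io marked for termination bumps iocnt under ctrl->lock, and the association teardown later waits for the count to drain:

	spin_lock_irqsave(&ctrl->lock, flags);
	if (ctrl->flags & FCCTRL_TERMIO) {
		ctrl->iocnt++;			/* one more io to wait on */
		op->flags |= FCOP_FLAGS_TERMIO;	/* mark the op itself */
	}
	spin_unlock_irqrestore(&ctrl->lock, flags);

The matching decrement happens in __nvme_fc_fcpop_chk_teardowns() when the io completes, and nvme_fc_delete_association() polls iocnt until it reaches zero.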
@@ -1132,6 +1142,7 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
 
 /* *********************** NVME Ctrl Routines **************************** */
 
+static void __nvme_fc_final_op_cleanup(struct request *rq);
 
 static int
 nvme_fc_reinit_request(void *data, struct request *rq)
@@ -1169,21 +1180,84 @@ nvme_fc_exit_request(void *data, struct request *rq,
 	return __nvme_fc_exit_request(data, op);
 }
 
+static int
+__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
+{
+	int state;
+
+	state = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
+	if (state != FCPOP_STATE_ACTIVE) {
+		atomic_set(&op->state, state);
+		return -ECANCELED;
+	}
+
+	ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
+					&ctrl->rport->remoteport,
+					op->queue->lldd_handle,
+					&op->fcp_req);
+
+	return 0;
+}
+
 static void
-nvme_fc_exit_aen_ops(struct nvme_fc_ctrl *ctrl)
+nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
 {
 	struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
-	int i;
+	unsigned long flags;
+	int i, ret;
 
 	for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
-		if (atomic_read(&aen_op->state) == FCPOP_STATE_UNINIT)
+		if (atomic_read(&aen_op->state) != FCPOP_STATE_ACTIVE)
 			continue;
-		__nvme_fc_exit_request(ctrl, aen_op);
-		nvme_fc_ctrl_put(ctrl);
+
+		spin_lock_irqsave(&ctrl->lock, flags);
+		if (ctrl->flags & FCCTRL_TERMIO) {
+			ctrl->iocnt++;
+			aen_op->flags |= FCOP_FLAGS_TERMIO;
+		}
+		spin_unlock_irqrestore(&ctrl->lock, flags);
+
+		ret = __nvme_fc_abort_op(ctrl, aen_op);
+		if (ret) {
+			/*
+			 * if __nvme_fc_abort_op failed the io wasn't
+			 * active. Thus this call path is running in
+			 * parallel to the io complete. Treat as non-error.
+			 */
+
+			/* back out the flags/counters */
+			spin_lock_irqsave(&ctrl->lock, flags);
+			if (ctrl->flags & FCCTRL_TERMIO)
+				ctrl->iocnt--;
+			aen_op->flags &= ~FCOP_FLAGS_TERMIO;
+			spin_unlock_irqrestore(&ctrl->lock, flags);
+			return;
+		}
+	}
+}
+
+static inline int
+__nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
+		struct nvme_fc_fcp_op *op)
+{
+	unsigned long flags;
+	bool complete_rq = false;
+
+	spin_lock_irqsave(&ctrl->lock, flags);
+	if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
+		if (ctrl->flags & FCCTRL_TERMIO)
+			ctrl->iocnt--;
 	}
+	if (op->flags & FCOP_FLAGS_RELEASED)
+		complete_rq = true;
+	else
+		op->flags |= FCOP_FLAGS_COMPLETE;
+	spin_unlock_irqrestore(&ctrl->lock, flags);
+
+	return complete_rq;
 }
 
-void
+static void
 nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 {
 	struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
@@ -1192,8 +1266,10 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 	struct nvme_fc_ctrl *ctrl = op->ctrl;
 	struct nvme_fc_queue *queue = op->queue;
 	struct nvme_completion *cqe = &op->rsp_iu.cqe;
+	struct nvme_command *sqe = &op->cmd_iu.sqe;
 	__le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
 	union nvme_result result;
+	bool complete_rq;
 
 	/*
 	 * WARNING:
@@ -1274,7 +1350,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 		    be32_to_cpu(op->rsp_iu.xfrd_len) !=
 					freq->transferred_length ||
 		    op->rsp_iu.status_code ||
-		    op->rqno != le16_to_cpu(cqe->command_id))) {
+		    sqe->common.command_id != cqe->command_id)) {
 			status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
 			goto done;
 		}
@@ -1288,13 +1364,25 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 	}
 
 done:
-	if (!queue->qnum && op->rqno >= AEN_CMDID_BASE) {
+	if (op->flags & FCOP_FLAGS_AEN) {
 		nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
+		complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op);
+		atomic_set(&op->state, FCPOP_STATE_IDLE);
+		op->flags = FCOP_FLAGS_AEN;	/* clear other flags */
 		nvme_fc_ctrl_put(ctrl);
 		return;
 	}
 
-	nvme_end_request(rq, status, result);
+	complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op);
+	if (!complete_rq) {
+		if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
+			status = cpu_to_le16(NVME_SC_ABORT_REQ);
+			if (blk_queue_dying(rq->q))
+				status |= cpu_to_le16(NVME_SC_DNR);
+		}
+		nvme_end_request(rq, status, result);
+	} else
+		__nvme_fc_final_op_cleanup(rq);
 }
 
 static int
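Note the validation change in nvme_fc_fcpio_done() above: since the core layer may overwrite sqe.command_id (see the comment added in nvme_fc_init_aen_ops() below), the transport now cross-checks the wire completion against the sqe it actually sent instead of against its internal rqno. In condensed form (a sketch of the check, not additional code):

	struct nvme_command *sqe = &op->cmd_iu.sqe;
	struct nvme_completion *cqe = &op->rsp_iu.cqe;

	/* mismatch => treat the io as a transport error */
	if (sqe->common.command_id != cqe->command_id)
		status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);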
@@ -1375,25 +1463,55 @@ nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
 	struct nvme_fc_fcp_op *aen_op;
 	struct nvme_fc_cmd_iu *cmdiu;
 	struct nvme_command *sqe;
+	void *private;
 	int i, ret;
 
 	aen_op = ctrl->aen_ops;
 	for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
+		private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
+						GFP_KERNEL);
+		if (!private)
+			return -ENOMEM;
+
 		cmdiu = &aen_op->cmd_iu;
 		sqe = &cmdiu->sqe;
 		ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
 				aen_op, (struct request *)NULL,
 				(AEN_CMDID_BASE + i));
-		if (ret)
+		if (ret) {
+			kfree(private);
 			return ret;
+		}
+
+		aen_op->flags = FCOP_FLAGS_AEN;
+		aen_op->fcp_req.first_sgl = NULL; /* no sg list */
+		aen_op->fcp_req.private = private;
 
 		memset(sqe, 0, sizeof(*sqe));
 		sqe->common.opcode = nvme_admin_async_event;
+		/* Note: core layer may overwrite the sqe.command_id value */
 		sqe->common.command_id = AEN_CMDID_BASE + i;
 	}
 	return 0;
 }
 
+static void
+nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
+{
+	struct nvme_fc_fcp_op *aen_op;
+	int i;
+
+	aen_op = ctrl->aen_ops;
+	for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
+		if (!aen_op->fcp_req.private)
+			continue;
+
+		__nvme_fc_exit_request(ctrl, aen_op);
+
+		kfree(aen_op->fcp_req.private);
+		aen_op->fcp_req.private = NULL;
+	}
+}
 
 static inline void
 __nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
@@ -1493,15 +1611,6 @@ __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
 }
 
 static void
-nvme_fc_destroy_admin_queue(struct nvme_fc_ctrl *ctrl)
-{
-	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
-	blk_cleanup_queue(ctrl->ctrl.admin_q);
-	blk_mq_free_tag_set(&ctrl->admin_tag_set);
-	nvme_fc_free_queue(&ctrl->queues[0]);
-}
-
-static void
 nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
 {
 	int i;
@@ -1588,19 +1697,27 @@ nvme_fc_ctrl_free(struct kref *ref)
 		container_of(ref, struct nvme_fc_ctrl, ref);
 	unsigned long flags;
 
-	if (ctrl->state != FCCTRL_INIT) {
-		/* remove from rport list */
-		spin_lock_irqsave(&ctrl->rport->lock, flags);
-		list_del(&ctrl->ctrl_list);
-		spin_unlock_irqrestore(&ctrl->rport->lock, flags);
+	if (ctrl->ctrl.tagset) {
+		blk_cleanup_queue(ctrl->ctrl.connect_q);
+		blk_mq_free_tag_set(&ctrl->tag_set);
 	}
 
+	/* remove from rport list */
+	spin_lock_irqsave(&ctrl->rport->lock, flags);
+	list_del(&ctrl->ctrl_list);
+	spin_unlock_irqrestore(&ctrl->rport->lock, flags);
+
+	blk_cleanup_queue(ctrl->ctrl.admin_q);
+	blk_mq_free_tag_set(&ctrl->admin_tag_set);
+
+	kfree(ctrl->queues);
+
 	put_device(ctrl->dev);
 	nvme_fc_rport_put(ctrl->rport);
 
-	kfree(ctrl->queues);
 	ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
-	nvmf_free_options(ctrl->ctrl.opts);
+	if (ctrl->ctrl.opts)
+		nvmf_free_options(ctrl->ctrl.opts);
 	kfree(ctrl);
 }
 
@@ -1621,57 +1738,38 @@ nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
  * controller. Called after last nvme_put_ctrl() call
  */
 static void
-nvme_fc_free_nvme_ctrl(struct nvme_ctrl *nctrl)
+nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
 {
 	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
 
 	WARN_ON(nctrl != &ctrl->ctrl);
 
-	/*
-	 * Tear down the association, which will generate link
-	 * traffic to terminate connections
-	 */
-
-	if (ctrl->state != FCCTRL_INIT) {
-		/* send a Disconnect(association) LS to fc-nvme target */
-		nvme_fc_xmt_disconnect_assoc(ctrl);
-
-		if (ctrl->ctrl.tagset) {
-			blk_cleanup_queue(ctrl->ctrl.connect_q);
-			blk_mq_free_tag_set(&ctrl->tag_set);
-			nvme_fc_delete_hw_io_queues(ctrl);
-			nvme_fc_free_io_queues(ctrl);
-		}
-
-		nvme_fc_exit_aen_ops(ctrl);
-
-		nvme_fc_destroy_admin_queue(ctrl);
-	}
-
 	nvme_fc_ctrl_put(ctrl);
 }
 
-
-static int
-__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
+static void
+nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
 {
-	int state;
+	dev_warn(ctrl->ctrl.device,
+		"NVME-FC{%d}: transport association error detected: %s\n",
+		ctrl->cnum, errmsg);
+	dev_info(ctrl->ctrl.device,
+		"NVME-FC{%d}: resetting controller\n", ctrl->cnum);
 
-	state = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
-	if (state != FCPOP_STATE_ACTIVE) {
-		atomic_set(&op->state, state);
-		return -ECANCELED; /* fail */
+	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+		dev_err(ctrl->ctrl.device,
+			"NVME-FC{%d}: error_recovery: Couldn't change state "
+			"to RECONNECTING\n", ctrl->cnum);
+		return;
 	}
 
-	ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
-					&ctrl->rport->remoteport,
-					op->queue->lldd_handle,
-					&op->fcp_req);
-
-	return 0;
+	if (!queue_work(nvme_fc_wq, &ctrl->reset_work))
+		dev_err(ctrl->ctrl.device,
+			"NVME-FC{%d}: error_recovery: Failed to schedule "
+			"reset work\n", ctrl->cnum);
 }
 
-enum blk_eh_timer_return
+static enum blk_eh_timer_return
 nvme_fc_timeout(struct request *rq, bool reserved)
 {
 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
@@ -1687,11 +1785,13 @@ nvme_fc_timeout(struct request *rq, bool reserved)
 		return BLK_EH_HANDLED;
 
 	/*
-	 * TODO: force a controller reset
-	 * when that happens, queues will be torn down and outstanding
-	 * ios will be terminated, and the above abort, on a single io
-	 * will no longer be needed.
+	 * we can't individually ABTS an io without affecting the queue,
+	 * thus killing the queue, and thus the association.
+	 * So resolve by performing a controller reset, which will stop
+	 * the host/io stack, terminate the association on the link,
+	 * and recreate an association on the link.
 	 */
+	nvme_fc_error_recovery(ctrl, "io timeout error");
 
 	return BLK_EH_HANDLED;
 }
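With this hunk, io timeouts no longer try to abort a single exchange; they funnel into the shared recovery path. The resulting call flow, condensed from the functions added in this patch (a sketch, not additional code):

	/*
	 * nvme_fc_timeout()
	 *   -> nvme_fc_error_recovery(ctrl, "io timeout error")
	 *        -> nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)
	 *        -> queue_work(nvme_fc_wq, &ctrl->reset_work)
	 *             -> nvme_fc_reset_ctrl_work()
	 *                  -> nvme_fc_delete_association(ctrl)  // link-side teardown
	 *                  -> nvme_fc_create_association(ctrl)  // rebuild admin + io queues
	 */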
@@ -1785,6 +1885,13 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 	u32 csn;
 	int ret;
 
+	/*
+	 * before attempting to send the io, check to see if we believe
+	 * the target device is present
+	 */
+	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
+		return BLK_MQ_RQ_QUEUE_ERROR;
+
 	if (!nvme_fc_ctrl_get(ctrl))
 		return BLK_MQ_RQ_QUEUE_ERROR;
 
@@ -1829,14 +1936,9 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 	sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
 	sqe->rw.dptr.sgl.addr = 0;
 
-	/* odd that we set the command_id - should come from nvme-fabrics */
-	WARN_ON_ONCE(sqe->common.command_id != cpu_to_le16(op->rqno));
-
-	if (op->rq) {			/* skipped on aens */
+	if (!(op->flags & FCOP_FLAGS_AEN)) {
 		ret = nvme_fc_map_data(ctrl, op->rq, op);
 		if (ret < 0) {
-			dev_err(queue->ctrl->ctrl.device,
-				"Failed to map data (%d)\n", ret);
 			nvme_cleanup_cmd(op->rq);
 			nvme_fc_ctrl_put(ctrl);
 			return (ret == -ENOMEM || ret == -EAGAIN) ?
@@ -1849,7 +1951,7 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 
 	atomic_set(&op->state, FCPOP_STATE_ACTIVE);
 
-	if (op->rq)
+	if (!(op->flags & FCOP_FLAGS_AEN))
 		blk_mq_start_request(op->rq);
 
 	ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
@@ -1857,9 +1959,6 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 					queue->lldd_handle, &op->fcp_req);
 
 	if (ret) {
-		dev_err(ctrl->dev,
-			"Send nvme command failed - lldd returned %d.\n", ret);
-
 		if (op->rq) {			/* normal request */
 			nvme_fc_unmap_data(ctrl, op->rq, op);
 			nvme_cleanup_cmd(op->rq);
@@ -1929,12 +2028,8 @@ nvme_fc_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
 	struct nvme_fc_fcp_op *op;
 
 	req = blk_mq_tag_to_rq(nvme_fc_tagset(queue), tag);
-	if (!req) {
-		dev_err(queue->ctrl->ctrl.device,
-			"tag 0x%x on QNum %#x not found\n",
-			tag, queue->qnum);
+	if (!req)
 		return 0;
-	}
 
 	op = blk_mq_rq_to_pdu(req);
 
@@ -1951,11 +2046,21 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
 {
 	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
 	struct nvme_fc_fcp_op *aen_op;
+	unsigned long flags;
+	bool terminating = false;
 	int ret;
 
 	if (aer_idx > NVME_FC_NR_AEN_COMMANDS)
 		return;
 
+	spin_lock_irqsave(&ctrl->lock, flags);
+	if (ctrl->flags & FCCTRL_TERMIO)
+		terminating = true;
+	spin_unlock_irqrestore(&ctrl->lock, flags);
+
+	if (terminating)
+		return;
+
 	aen_op = &ctrl->aen_ops[aer_idx];
 
 	ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
@@ -1966,13 +2071,14 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
 }
 
 static void
-nvme_fc_complete_rq(struct request *rq)
+__nvme_fc_final_op_cleanup(struct request *rq)
 {
 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
 	struct nvme_fc_ctrl *ctrl = op->ctrl;
-	int state;
 
-	state = atomic_xchg(&op->state, FCPOP_STATE_IDLE);
+	atomic_set(&op->state, FCPOP_STATE_IDLE);
+	op->flags &= ~(FCOP_FLAGS_TERMIO | FCOP_FLAGS_RELEASED |
+			FCOP_FLAGS_COMPLETE);
 
 	nvme_cleanup_cmd(rq);
 	nvme_fc_unmap_data(ctrl, rq, op);
@@ -1981,6 +2087,84 @@ nvme_fc_complete_rq(struct request *rq)
 
 }
 
+static void
+nvme_fc_complete_rq(struct request *rq)
+{
+	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+	struct nvme_fc_ctrl *ctrl = op->ctrl;
+	unsigned long flags;
+	bool completed = false;
+
+	/*
+	 * the core layer, on controller resets after calling
+	 * nvme_shutdown_ctrl(), calls complete_rq without our
+	 * calling blk_mq_complete_request(), thus there may still
+	 * be live i/o outstanding with the LLDD. Means transport has
+	 * to track complete calls vs fcpio_done calls to know what
+	 * path to take on completes and dones.
+	 */
+	spin_lock_irqsave(&ctrl->lock, flags);
+	if (op->flags & FCOP_FLAGS_COMPLETE)
+		completed = true;
+	else
+		op->flags |= FCOP_FLAGS_RELEASED;
+	spin_unlock_irqrestore(&ctrl->lock, flags);
+
+	if (completed)
+		__nvme_fc_final_op_cleanup(rq);
+}
+
+/*
+ * This routine is used by the transport when it needs to find active
+ * io on a queue that is to be terminated. The transport uses
+ * blk_mq_tagset_busy_iter() to find the busy requests, which then invoke
+ * this routine to kill them on a 1 by 1 basis.
+ *
+ * As FC allocates FC exchange for each io, the transport must contact
+ * the LLDD to terminate the exchange, thus releasing the FC exchange.
+ * After terminating the exchange the LLDD will call the transport's
+ * normal io done path for the request, but it will have an aborted
+ * status. The done path will return the io request back to the block
+ * layer with an error status.
+ */
+static void
+nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
+{
+	struct nvme_ctrl *nctrl = data;
+	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
+	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
+	unsigned long flags;
+	int status;
+
+	if (!blk_mq_request_started(req))
+		return;
+
+	spin_lock_irqsave(&ctrl->lock, flags);
+	if (ctrl->flags & FCCTRL_TERMIO) {
+		ctrl->iocnt++;
+		op->flags |= FCOP_FLAGS_TERMIO;
+	}
+	spin_unlock_irqrestore(&ctrl->lock, flags);
+
+	status = __nvme_fc_abort_op(ctrl, op);
+	if (status) {
+		/*
+		 * if __nvme_fc_abort_op failed the io wasn't
+		 * active. Thus this call path is running in
+		 * parallel to the io complete. Treat as non-error.
+		 */
+
+		/* back out the flags/counters */
+		spin_lock_irqsave(&ctrl->lock, flags);
+		if (ctrl->flags & FCCTRL_TERMIO)
+			ctrl->iocnt--;
+		op->flags &= ~FCOP_FLAGS_TERMIO;
+		spin_unlock_irqrestore(&ctrl->lock, flags);
+		return;
+	}
+}
+
+
 static const struct blk_mq_ops nvme_fc_mq_ops = {
 	.queue_rq	= nvme_fc_queue_rq,
 	.complete	= nvme_fc_complete_rq,
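The FCOP_FLAGS_RELEASED/FCOP_FLAGS_COMPLETE pair above implements a small two-party handshake under ctrl->lock: the block layer's ->complete callback and the LLDD's done callback can race during a reset, and whichever side runs second performs the final cleanup. Schematically (a sketch of the logic, not new code):

	/*
	 * nvme_fc_fcpio_done() side:        nvme_fc_complete_rq() side:
	 *   if (RELEASED already set)         if (COMPLETE already set)
	 *       __nvme_fc_final_op_cleanup()      __nvme_fc_final_op_cleanup()
	 *   else                              else
	 *       set COMPLETE                      set RELEASED
	 */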
@@ -1992,145 +2176,275 @@ static const struct blk_mq_ops nvme_fc_mq_ops = {
 	.timeout	= nvme_fc_timeout,
 };
 
-static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
-	.queue_rq	= nvme_fc_queue_rq,
-	.complete	= nvme_fc_complete_rq,
-	.init_request	= nvme_fc_init_admin_request,
-	.exit_request	= nvme_fc_exit_request,
-	.reinit_request	= nvme_fc_reinit_request,
-	.init_hctx	= nvme_fc_init_admin_hctx,
-	.timeout	= nvme_fc_timeout,
-};
-
 static int
-nvme_fc_configure_admin_queue(struct nvme_fc_ctrl *ctrl)
+nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
 {
-	u32 segs;
-	int error;
+	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+	int ret;
 
-	nvme_fc_init_queue(ctrl, 0, NVME_FC_AQ_BLKMQ_DEPTH);
+	ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
+	if (ret) {
+		dev_info(ctrl->ctrl.device,
+			"set_queue_count failed: %d\n", ret);
+		return ret;
+	}
 
-	error = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
-				NVME_FC_AQ_BLKMQ_DEPTH,
-				(NVME_FC_AQ_BLKMQ_DEPTH / 4));
-	if (error)
-		return error;
+	ctrl->queue_count = opts->nr_io_queues + 1;
+	if (!opts->nr_io_queues)
+		return 0;
 
-	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
-	ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
-	ctrl->admin_tag_set.queue_depth = NVME_FC_AQ_BLKMQ_DEPTH;
-	ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
-	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
-	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
+	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
+			opts->nr_io_queues);
+
+	nvme_fc_init_io_queues(ctrl);
+
+	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
+	ctrl->tag_set.ops = &nvme_fc_mq_ops;
+	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
+	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
+	ctrl->tag_set.numa_node = NUMA_NO_NODE;
+	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+	ctrl->tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
 					(SG_CHUNK_SIZE *
 						sizeof(struct scatterlist)) +
 					ctrl->lport->ops->fcprqst_priv_sz;
-	ctrl->admin_tag_set.driver_data = ctrl;
-	ctrl->admin_tag_set.nr_hw_queues = 1;
-	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
+	ctrl->tag_set.driver_data = ctrl;
+	ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
+	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
 
-	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
-	if (error)
-		goto out_free_queue;
+	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
+	if (ret)
+		return ret;
 
-	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
-	if (IS_ERR(ctrl->ctrl.admin_q)) {
-		error = PTR_ERR(ctrl->ctrl.admin_q);
-		goto out_free_tagset;
+	ctrl->ctrl.tagset = &ctrl->tag_set;
+
+	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
+	if (IS_ERR(ctrl->ctrl.connect_q)) {
+		ret = PTR_ERR(ctrl->ctrl.connect_q);
+		goto out_free_tag_set;
+	}
+
+	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+	if (ret)
+		goto out_cleanup_blk_queue;
+
+	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+	if (ret)
+		goto out_delete_hw_queues;
+
+	return 0;
+
+out_delete_hw_queues:
+	nvme_fc_delete_hw_io_queues(ctrl);
+out_cleanup_blk_queue:
+	nvme_stop_keep_alive(&ctrl->ctrl);
+	blk_cleanup_queue(ctrl->ctrl.connect_q);
+out_free_tag_set:
+	blk_mq_free_tag_set(&ctrl->tag_set);
+	nvme_fc_free_io_queues(ctrl);
+
+	/* force put free routine to ignore io queues */
+	ctrl->ctrl.tagset = NULL;
+
+	return ret;
+}
+
+static int
+nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
+{
+	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+	int ret;
+
+	ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
+	if (ret) {
+		dev_info(ctrl->ctrl.device,
+			"set_queue_count failed: %d\n", ret);
+		return ret;
 	}
 
-	error = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
+	/* check for io queues existing */
+	if (ctrl->queue_count == 1)
+		return 0;
+
+	dev_info(ctrl->ctrl.device, "Recreating %d I/O queues.\n",
+			opts->nr_io_queues);
+
+	nvme_fc_init_io_queues(ctrl);
+
+	ret = blk_mq_reinit_tagset(&ctrl->tag_set);
+	if (ret)
+		goto out_free_io_queues;
+
+	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+	if (ret)
+		goto out_free_io_queues;
+
+	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+	if (ret)
+		goto out_delete_hw_queues;
+
+	return 0;
+
+out_delete_hw_queues:
+	nvme_fc_delete_hw_io_queues(ctrl);
+out_free_io_queues:
+	nvme_fc_free_io_queues(ctrl);
+	return ret;
+}
+
+/*
+ * This routine restarts the controller on the host side, and
+ * on the link side, recreates the controller association.
+ */
+static int
+nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
+{
+	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+	u32 segs;
+	int ret;
+	bool changed;
+
+	ctrl->connect_attempts++;
+
+	/*
+	 * Create the admin queue
+	 */
+
+	nvme_fc_init_queue(ctrl, 0, NVME_FC_AQ_BLKMQ_DEPTH);
+
+	ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
 				NVME_FC_AQ_BLKMQ_DEPTH);
-	if (error)
-		goto out_cleanup_queue;
+	if (ret)
+		goto out_free_queue;
 
-	error = nvmf_connect_admin_queue(&ctrl->ctrl);
-	if (error)
+	ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
+				NVME_FC_AQ_BLKMQ_DEPTH,
+				(NVME_FC_AQ_BLKMQ_DEPTH / 4));
+	if (ret)
 		goto out_delete_hw_queue;
 
-	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
-	if (error) {
+	if (ctrl->ctrl.state != NVME_CTRL_NEW)
+		blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true);
+
+	ret = nvmf_connect_admin_queue(&ctrl->ctrl);
+	if (ret)
+		goto out_disconnect_admin_queue;
+
+	/*
+	 * Check controller capabilities
+	 *
+	 * todo:- add code to check if ctrl attributes changed from
+	 * prior connection values
+	 */
+
+	ret = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
+	if (ret) {
 		dev_err(ctrl->ctrl.device,
 			"prop_get NVME_REG_CAP failed\n");
-		goto out_delete_hw_queue;
+		goto out_disconnect_admin_queue;
 	}
 
 	ctrl->ctrl.sqsize =
-		min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
+		min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
 
-	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
-	if (error)
-		goto out_delete_hw_queue;
+	ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
+	if (ret)
+		goto out_disconnect_admin_queue;
 
 	segs = min_t(u32, NVME_FC_MAX_SEGMENTS,
 			ctrl->lport->ops->max_sgl_segments);
 	ctrl->ctrl.max_hw_sectors = (segs - 1) << (PAGE_SHIFT - 9);
 
-	error = nvme_init_identify(&ctrl->ctrl);
-	if (error)
-		goto out_delete_hw_queue;
+	ret = nvme_init_identify(&ctrl->ctrl);
+	if (ret)
+		goto out_disconnect_admin_queue;
+
+	/* sanity checks */
+
+	/* FC-NVME does not have other data in the capsule */
+	if (ctrl->ctrl.icdoff) {
+		dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
+				ctrl->ctrl.icdoff);
+		goto out_disconnect_admin_queue;
+	}
 
 	nvme_start_keep_alive(&ctrl->ctrl);
 
-	return 0;
+	/* FC-NVME supports normal SGL Data Block Descriptors */
+
+	if (opts->queue_size > ctrl->ctrl.maxcmd) {
+		/* warn if maxcmd is lower than queue_size */
+		dev_warn(ctrl->ctrl.device,
+			"queue_size %zu > ctrl maxcmd %u, reducing "
+			"to queue_size\n",
+			opts->queue_size, ctrl->ctrl.maxcmd);
+		opts->queue_size = ctrl->ctrl.maxcmd;
+	}
+
+	ret = nvme_fc_init_aen_ops(ctrl);
+	if (ret)
+		goto out_term_aen_ops;
+
+	/*
+	 * Create the io queues
+	 */
+
+	if (ctrl->queue_count > 1) {
+		if (ctrl->ctrl.state == NVME_CTRL_NEW)
+			ret = nvme_fc_create_io_queues(ctrl);
+		else
+			ret = nvme_fc_reinit_io_queues(ctrl);
+		if (ret)
+			goto out_term_aen_ops;
+	}
 
+	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
+	WARN_ON_ONCE(!changed);
+
+	ctrl->connect_attempts = 0;
+
+	kref_get(&ctrl->ctrl.kref);
+
+	if (ctrl->queue_count > 1) {
+		nvme_start_queues(&ctrl->ctrl);
+		nvme_queue_scan(&ctrl->ctrl);
+		nvme_queue_async_events(&ctrl->ctrl);
+	}
+
+	return 0;	/* Success */
+
+out_term_aen_ops:
+	nvme_fc_term_aen_ops(ctrl);
+	nvme_stop_keep_alive(&ctrl->ctrl);
+out_disconnect_admin_queue:
+	/* send a Disconnect(association) LS to fc-nvme target */
+	nvme_fc_xmt_disconnect_assoc(ctrl);
 out_delete_hw_queue:
 	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
-out_cleanup_queue:
-	blk_cleanup_queue(ctrl->ctrl.admin_q);
-out_free_tagset:
-	blk_mq_free_tag_set(&ctrl->admin_tag_set);
 out_free_queue:
 	nvme_fc_free_queue(&ctrl->queues[0]);
-	return error;
+
+	return ret;
 }
 
 /*
- * This routine is used by the transport when it needs to find active
- * io on a queue that is to be terminated. The transport uses
- * blk_mq_tagset_busy_itr() to find the busy requests, which then invoke
- * this routine to kill them on a 1 by 1 basis.
- *
- * As FC allocates FC exchange for each io, the transport must contact
- * the LLDD to terminate the exchange, thus releasing the FC exchange.
- * After terminating the exchange the LLDD will call the transport's
- * normal io done path for the request, but it will have an aborted
- * status. The done path will return the io request back to the block
- * layer with an error status.
+ * This routine stops operation of the controller on the host side.
+ * On the host os stack side: Admin and IO queues are stopped,
+ *   outstanding ios on them terminated via FC ABTS.
+ * On the link side: the association is terminated.
  */
 static void
-nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
+nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
 {
-	struct nvme_ctrl *nctrl = data;
-	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
-	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
-	int status;
-
-	if (!blk_mq_request_started(req))
-		return;
+	unsigned long flags;
 
-	/* this performs an ABTS-LS on the FC exchange for the io */
-	status = __nvme_fc_abort_op(ctrl, op);
-	/*
-	 * if __nvme_fc_abort_op failed: io wasn't active to abort
-	 * consider it done. Assume completion path already completing
-	 * in parallel
-	 */
-	if (status)
-		/* io wasn't active to abort consider it done */
-		/* assume completion path already completing in parallel */
-		return;
-}
+	nvme_stop_keep_alive(&ctrl->ctrl);
 
+	spin_lock_irqsave(&ctrl->lock, flags);
+	ctrl->flags |= FCCTRL_TERMIO;
+	ctrl->iocnt = 0;
+	spin_unlock_irqrestore(&ctrl->lock, flags);
 
-/*
- * This routine stops operation of the controller. Admin and IO queues
- * are stopped, outstanding ios on them terminated, and the nvme ctrl
- * is shutdown.
- */
-static void
-nvme_fc_shutdown_ctrl(struct nvme_fc_ctrl *ctrl)
-{
 	/*
 	 * If io queues are present, stop them and terminate all outstanding
 	 * ios on them. As FC allocates FC exchange for each io, the
@@ -2149,35 +2463,79 @@ nvme_fc_shutdown_ctrl(struct nvme_fc_ctrl *ctrl)
 				nvme_fc_terminate_exchange, &ctrl->ctrl);
 	}
 
-	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
-		nvme_shutdown_ctrl(&ctrl->ctrl);
+	/*
+	 * Other transports, which don't have link-level contexts bound
+	 * to sqe's, would try to gracefully shutdown the controller by
+	 * writing the registers for shutdown and polling (call
+	 * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially
+	 * just aborted and we will wait on those contexts, and given
+	 * there was no indication of how live the controller is on the
+	 * link, don't send more io to create more contexts for the
+	 * shutdown. Let the controller fail via keepalive failure if
+	 * it's still present.
+	 */
 
 	/*
-	 * now clean up the admin queue. Same thing as above.
-	 * use blk_mq_tagset_busy_itr() and the transport routine to
+	 * clean up the admin queue. Same thing as above.
+	 * use blk_mq_tagset_busy_iter() and the transport routine to
 	 * terminate the exchanges.
 	 */
 	blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
 	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
 				nvme_fc_terminate_exchange, &ctrl->ctrl);
+
+	/* kill the aens as they are a separate path */
+	nvme_fc_abort_aen_ops(ctrl);
+
+	/* wait for all io that had to be aborted */
+	spin_lock_irqsave(&ctrl->lock, flags);
+	while (ctrl->iocnt) {
+		spin_unlock_irqrestore(&ctrl->lock, flags);
+		msleep(1000);
+		spin_lock_irqsave(&ctrl->lock, flags);
+	}
+	ctrl->flags &= ~FCCTRL_TERMIO;
+	spin_unlock_irqrestore(&ctrl->lock, flags);
+
+	nvme_fc_term_aen_ops(ctrl);
+
+	/*
+	 * send a Disconnect(association) LS to fc-nvme target
+	 * Note: could have been sent at top of process, but
+	 * cleaner on link traffic if after the aborts complete.
+	 * Note: if association doesn't exist, association_id will be 0
+	 */
+	if (ctrl->association_id)
+		nvme_fc_xmt_disconnect_assoc(ctrl);
+
+	if (ctrl->ctrl.tagset) {
+		nvme_fc_delete_hw_io_queues(ctrl);
+		nvme_fc_free_io_queues(ctrl);
+	}
+
+	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
+	nvme_fc_free_queue(&ctrl->queues[0]);
 }
 
-/*
- * Called to teardown an association.
- * May be called with association fully in place or partially in place.
- */
 static void
-__nvme_fc_remove_ctrl(struct nvme_fc_ctrl *ctrl)
+nvme_fc_delete_ctrl_work(struct work_struct *work)
 {
-	nvme_stop_keep_alive(&ctrl->ctrl);
+	struct nvme_fc_ctrl *ctrl =
+		container_of(work, struct nvme_fc_ctrl, delete_work);
 
-	/* stop and terminate ios on admin and io queues */
-	nvme_fc_shutdown_ctrl(ctrl);
+	cancel_work_sync(&ctrl->reset_work);
+	cancel_delayed_work_sync(&ctrl->connect_work);
+
+	/*
+	 * kill the association on the link side. this will block
+	 * waiting for io to terminate
+	 */
+	nvme_fc_delete_association(ctrl);
 
 	/*
 	 * tear down the controller
 	 * This will result in the last reference on the nvme ctrl to
-	 * expire, calling the transport nvme_fc_free_nvme_ctrl() callback.
+	 * expire, calling the transport nvme_fc_nvme_ctrl_freed() callback.
 	 * From there, the transport will tear down its logical queues and
 	 * association.
 	 */
@@ -2186,15 +2544,6 @@ __nvme_fc_remove_ctrl(struct nvme_fc_ctrl *ctrl)
 	nvme_put_ctrl(&ctrl->ctrl);
 }
 
-static void
-nvme_fc_del_ctrl_work(struct work_struct *work)
-{
-	struct nvme_fc_ctrl *ctrl =
-		container_of(work, struct nvme_fc_ctrl, delete_work);
-
-	__nvme_fc_remove_ctrl(ctrl);
-}
-
 static int
 __nvme_fc_del_ctrl(struct nvme_fc_ctrl *ctrl)
 {
@@ -2214,25 +2563,85 @@ static int
 nvme_fc_del_nvme_ctrl(struct nvme_ctrl *nctrl)
 {
 	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
-	struct nvme_fc_rport *rport = ctrl->rport;
-	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(&rport->lock, flags);
+	if (!kref_get_unless_zero(&ctrl->ctrl.kref))
+		return -EBUSY;
+
 	ret = __nvme_fc_del_ctrl(ctrl);
-	spin_unlock_irqrestore(&rport->lock, flags);
-	if (ret)
-		return ret;
 
-	flush_work(&ctrl->delete_work);
+	if (!ret)
+		flush_workqueue(nvme_fc_wq);
 
-	return 0;
+	nvme_put_ctrl(&ctrl->ctrl);
+
+	return ret;
+}
+
+static void
+nvme_fc_reset_ctrl_work(struct work_struct *work)
+{
+	struct nvme_fc_ctrl *ctrl =
+		container_of(work, struct nvme_fc_ctrl, reset_work);
+	int ret;
+
+	/* will block waiting for io to terminate */
+	nvme_fc_delete_association(ctrl);
+
+	ret = nvme_fc_create_association(ctrl);
+	if (ret) {
+		dev_warn(ctrl->ctrl.device,
+			"NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
+			ctrl->cnum, ret);
+		if (ctrl->connect_attempts >= NVME_FC_MAX_CONNECT_ATTEMPTS) {
+			dev_warn(ctrl->ctrl.device,
+				"NVME-FC{%d}: Max reconnect attempts (%d) "
+				"reached. Removing controller\n",
+				ctrl->cnum, ctrl->connect_attempts);
+
+			if (!nvme_change_ctrl_state(&ctrl->ctrl,
+				NVME_CTRL_DELETING)) {
+				dev_err(ctrl->ctrl.device,
+					"NVME-FC{%d}: failed to change state "
+					"to DELETING\n", ctrl->cnum);
+				return;
+			}
+
+			WARN_ON(!queue_work(nvme_fc_wq, &ctrl->delete_work));
+			return;
+		}
+
+		dev_warn(ctrl->ctrl.device,
+			"NVME-FC{%d}: Reconnect attempt in %d seconds.\n",
+			ctrl->cnum, ctrl->reconnect_delay);
+		queue_delayed_work(nvme_fc_wq, &ctrl->connect_work,
+				ctrl->reconnect_delay * HZ);
+	} else
+		dev_info(ctrl->ctrl.device,
+			"NVME-FC{%d}: controller reset complete\n", ctrl->cnum);
 }
 
+/*
+ * called by the nvme core layer, for sysfs interface that requests
+ * a reset of the nvme controller
+ */
 static int
 nvme_fc_reset_nvme_ctrl(struct nvme_ctrl *nctrl)
 {
-	return -EIO;
+	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
+
+	dev_warn(ctrl->ctrl.device,
+		"NVME-FC{%d}: admin requested controller reset\n", ctrl->cnum);
+
+	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
+		return -EBUSY;
+
+	if (!queue_work(nvme_fc_wq, &ctrl->reset_work))
+		return -EBUSY;
+
+	flush_work(&ctrl->reset_work);
+
+	return 0;
 }
 
 static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
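With nvme_fc_reset_nvme_ctrl() wired up, an administrator-requested reset becomes usable end to end. A condensed view of the path (a sketch; the sysfs node name and controller instance are assumptions, and the messages correspond to the dev_warn()/dev_info() strings added above):

	/* sysfs reset path (sketch):
	 *   echo 1 > /sys/class/nvme/nvmeN/reset_controller   (nvmeN assumed)
	 *     -> nvme core ->reset_ctrl()
	 *       -> nvme_fc_reset_nvme_ctrl()
	 *            state -> NVME_CTRL_RESETTING
	 *            queue_work(nvme_fc_wq, &ctrl->reset_work)
	 *            flush_work(&ctrl->reset_work)   // synchronous to the caller
	 */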
@@ -2243,95 +2652,75 @@ static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
 	.reg_read64		= nvmf_reg_read64,
 	.reg_write32		= nvmf_reg_write32,
 	.reset_ctrl		= nvme_fc_reset_nvme_ctrl,
-	.free_ctrl		= nvme_fc_free_nvme_ctrl,
+	.free_ctrl		= nvme_fc_nvme_ctrl_freed,
 	.submit_async_event	= nvme_fc_submit_async_event,
 	.delete_ctrl		= nvme_fc_del_nvme_ctrl,
 	.get_subsysnqn		= nvmf_get_subsysnqn,
 	.get_address		= nvmf_get_address,
 };
 
-static int
-nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
+static void
+nvme_fc_connect_ctrl_work(struct work_struct *work)
 {
-	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
 	int ret;
 
-	ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
-	if (ret) {
-		dev_info(ctrl->ctrl.device,
-			"set_queue_count failed: %d\n", ret);
-		return ret;
-	}
-
-	ctrl->queue_count = opts->nr_io_queues + 1;
-	if (!opts->nr_io_queues)
-		return 0;
-
-	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
-			opts->nr_io_queues);
-
-	nvme_fc_init_io_queues(ctrl);
-
-	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
-	ctrl->tag_set.ops = &nvme_fc_mq_ops;
-	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
-	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
-	ctrl->tag_set.numa_node = NUMA_NO_NODE;
-	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
-	ctrl->tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
-					(SG_CHUNK_SIZE *
-						sizeof(struct scatterlist)) +
-					ctrl->lport->ops->fcprqst_priv_sz;
-	ctrl->tag_set.driver_data = ctrl;
-	ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
-	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
-
-	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
-	if (ret)
-		return ret;
-
-	ctrl->ctrl.tagset = &ctrl->tag_set;
-
-	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
-	if (IS_ERR(ctrl->ctrl.connect_q)) {
-		ret = PTR_ERR(ctrl->ctrl.connect_q);
-		goto out_free_tag_set;
-	}
-
-	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
-	if (ret)
-		goto out_cleanup_blk_queue;
+	struct nvme_fc_ctrl *ctrl =
+			container_of(to_delayed_work(work),
+				struct nvme_fc_ctrl, connect_work);
 
-	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
-	if (ret)
-		goto out_delete_hw_queues;
+	ret = nvme_fc_create_association(ctrl);
+	if (ret) {
+		dev_warn(ctrl->ctrl.device,
+			"NVME-FC{%d}: Reconnect attempt failed (%d)\n",
+			ctrl->cnum, ret);
+		if (ctrl->connect_attempts >= NVME_FC_MAX_CONNECT_ATTEMPTS) {
+			dev_warn(ctrl->ctrl.device,
+				"NVME-FC{%d}: Max reconnect attempts (%d) "
+				"reached. Removing controller\n",
+				ctrl->cnum, ctrl->connect_attempts);
+
+			if (!nvme_change_ctrl_state(&ctrl->ctrl,
+				NVME_CTRL_DELETING)) {
+				dev_err(ctrl->ctrl.device,
+					"NVME-FC{%d}: failed to change state "
+					"to DELETING\n", ctrl->cnum);
+				return;
+			}
 
-	return 0;
+			WARN_ON(!queue_work(nvme_fc_wq, &ctrl->delete_work));
+			return;
+		}
 
-out_delete_hw_queues:
-	nvme_fc_delete_hw_io_queues(ctrl);
-out_cleanup_blk_queue:
-	nvme_stop_keep_alive(&ctrl->ctrl);
-	blk_cleanup_queue(ctrl->ctrl.connect_q);
-out_free_tag_set:
-	blk_mq_free_tag_set(&ctrl->tag_set);
-	nvme_fc_free_io_queues(ctrl);
+		dev_warn(ctrl->ctrl.device,
+			"NVME-FC{%d}: Reconnect attempt in %d seconds.\n",
+			ctrl->cnum, ctrl->reconnect_delay);
+		queue_delayed_work(nvme_fc_wq, &ctrl->connect_work,
+				ctrl->reconnect_delay * HZ);
+	} else
+		dev_info(ctrl->ctrl.device,
+			"NVME-FC{%d}: controller reconnect complete\n",
+			ctrl->cnum);
+}
 
-	/* force put free routine to ignore io queues */
-	ctrl->ctrl.tagset = NULL;
 
-	return ret;
-}
+static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
+	.queue_rq	= nvme_fc_queue_rq,
+	.complete	= nvme_fc_complete_rq,
+	.init_request	= nvme_fc_init_admin_request,
+	.exit_request	= nvme_fc_exit_request,
+	.reinit_request	= nvme_fc_reinit_request,
+	.init_hctx	= nvme_fc_init_admin_hctx,
+	.timeout	= nvme_fc_timeout,
+};
 
 
 static struct nvme_ctrl *
-__nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
+nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 	struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
 {
 	struct nvme_fc_ctrl *ctrl;
 	unsigned long flags;
 	int ret, idx;
-	bool changed;
 
 	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
 	if (!ctrl) {
@@ -2350,17 +2739,15 @@ __nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, | |||
2350 | ctrl->lport = lport; | 2739 | ctrl->lport = lport; |
2351 | ctrl->rport = rport; | 2740 | ctrl->rport = rport; |
2352 | ctrl->dev = lport->dev; | 2741 | ctrl->dev = lport->dev; |
2353 | ctrl->state = FCCTRL_INIT; | ||
2354 | ctrl->cnum = idx; | 2742 | ctrl->cnum = idx; |
2355 | 2743 | ||
2356 | ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0); | ||
2357 | if (ret) | ||
2358 | goto out_free_ida; | ||
2359 | |||
2360 | get_device(ctrl->dev); | 2744 | get_device(ctrl->dev); |
2361 | kref_init(&ctrl->ref); | 2745 | kref_init(&ctrl->ref); |
2362 | 2746 | ||
2363 | INIT_WORK(&ctrl->delete_work, nvme_fc_del_ctrl_work); | 2747 | INIT_WORK(&ctrl->delete_work, nvme_fc_delete_ctrl_work); |
2748 | INIT_WORK(&ctrl->reset_work, nvme_fc_reset_ctrl_work); | ||
2749 | INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work); | ||
2750 | ctrl->reconnect_delay = opts->reconnect_delay; | ||
2364 | spin_lock_init(&ctrl->lock); | 2751 | spin_lock_init(&ctrl->lock); |
2365 | 2752 | ||
2366 | /* io queue count */ | 2753 | /* io queue count */ |
@@ -2377,87 +2764,87 @@ __nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, | |||
2377 | ctrl->queues = kcalloc(ctrl->queue_count, sizeof(struct nvme_fc_queue), | 2764 | ctrl->queues = kcalloc(ctrl->queue_count, sizeof(struct nvme_fc_queue), |
2378 | GFP_KERNEL); | 2765 | GFP_KERNEL); |
2379 | if (!ctrl->queues) | 2766 | if (!ctrl->queues) |
2380 | goto out_uninit_ctrl; | 2767 | goto out_free_ida; |
2381 | |||
2382 | ret = nvme_fc_configure_admin_queue(ctrl); | ||
2383 | if (ret) | ||
2384 | goto out_uninit_ctrl; | ||
2385 | |||
2386 | /* sanity checks */ | ||
2387 | |||
2388 | /* FC-NVME does not have other data in the capsule */ | ||
2389 | if (ctrl->ctrl.icdoff) { | ||
2390 | dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n", | ||
2391 | ctrl->ctrl.icdoff); | ||
2392 | goto out_remove_admin_queue; | ||
2393 | } | ||
2394 | |||
2395 | /* FC-NVME supports normal SGL Data Block Descriptors */ | ||
2396 | 2768 | ||
2397 | if (opts->queue_size > ctrl->ctrl.maxcmd) { | 2769 | memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set)); |
2398 | /* warn if maxcmd is lower than queue_size */ | 2770 | ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops; |
2399 | dev_warn(ctrl->ctrl.device, | 2771 | ctrl->admin_tag_set.queue_depth = NVME_FC_AQ_BLKMQ_DEPTH; |
2400 | "queue_size %zu > ctrl maxcmd %u, reducing " | 2772 | ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */ |
2401 | "to queue_size\n", | 2773 | ctrl->admin_tag_set.numa_node = NUMA_NO_NODE; |
2402 | opts->queue_size, ctrl->ctrl.maxcmd); | 2774 | ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) + |
2403 | opts->queue_size = ctrl->ctrl.maxcmd; | 2775 | (SG_CHUNK_SIZE * |
2404 | } | 2776 | sizeof(struct scatterlist)) + |
2777 | ctrl->lport->ops->fcprqst_priv_sz; | ||
2778 | ctrl->admin_tag_set.driver_data = ctrl; | ||
2779 | ctrl->admin_tag_set.nr_hw_queues = 1; | ||
2780 | ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT; | ||
2405 | 2781 | ||
2406 | ret = nvme_fc_init_aen_ops(ctrl); | 2782 | ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set); |
2407 | if (ret) | 2783 | if (ret) |
2408 | goto out_exit_aen_ops; | 2784 | goto out_free_queues; |
2409 | 2785 | ||
2410 | if (ctrl->queue_count > 1) { | 2786 | ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set); |
2411 | ret = nvme_fc_create_io_queues(ctrl); | 2787 | if (IS_ERR(ctrl->ctrl.admin_q)) { |
2412 | if (ret) | 2788 | ret = PTR_ERR(ctrl->ctrl.admin_q); |
2413 | goto out_exit_aen_ops; | 2789 | goto out_free_admin_tag_set; |
2414 | } | 2790 | } |
2415 | 2791 | ||
2416 | spin_lock_irqsave(&ctrl->lock, flags); | 2792 | /* |
2417 | ctrl->state = FCCTRL_ACTIVE; | 2793 | * Would have been nice to init io queues tag set as well. |
2418 | spin_unlock_irqrestore(&ctrl->lock, flags); | 2794 | * However, we require interaction from the controller |
2419 | 2795 | * for max io queue count before we can do so. | |
2420 | changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); | 2796 | * Defer this to the connect path. |
2421 | WARN_ON_ONCE(!changed); | 2797 | */ |
2422 | 2798 | ||
2423 | dev_info(ctrl->ctrl.device, | 2799 | ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0); |
2424 | "NVME-FC{%d}: new ctrl: NQN \"%s\"\n", | 2800 | if (ret) |
2425 | ctrl->cnum, ctrl->ctrl.opts->subsysnqn); | 2801 | goto out_cleanup_admin_q; |
2426 | 2802 | ||
2427 | kref_get(&ctrl->ctrl.kref); | 2803 | /* at this point, teardown path changes to ref counting on nvme ctrl */ |
2428 | 2804 | ||
2429 | spin_lock_irqsave(&rport->lock, flags); | 2805 | spin_lock_irqsave(&rport->lock, flags); |
2430 | list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list); | 2806 | list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list); |
2431 | spin_unlock_irqrestore(&rport->lock, flags); | 2807 | spin_unlock_irqrestore(&rport->lock, flags); |
2432 | 2808 | ||
2433 | if (opts->nr_io_queues) { | 2809 | ret = nvme_fc_create_association(ctrl); |
2434 | nvme_queue_scan(&ctrl->ctrl); | 2810 | if (ret) { |
2435 | nvme_queue_async_events(&ctrl->ctrl); | 2811 | ctrl->ctrl.opts = NULL; |
2812 | /* initiate nvme ctrl ref counting teardown */ | ||
2813 | nvme_uninit_ctrl(&ctrl->ctrl); | ||
2814 | nvme_put_ctrl(&ctrl->ctrl); | ||
2815 | |||
2816 | /* as we're past the point where we transition to the ref | ||
2817 | * counting teardown path, if we return a bad pointer here, | ||
2818 | * the calling routine, thinking it's prior to the | ||
2819 | * transition, will do an rport put. Since the teardown | ||
2820 | * path also does a rport put, we do an extra get here | ||
2821 | * so that proper order/teardown happens. | ||
2822 | */ | ||
2823 | nvme_fc_rport_get(rport); | ||
2824 | |||
2825 | if (ret > 0) | ||
2826 | ret = -EIO; | ||
2827 | return ERR_PTR(ret); | ||
2436 | } | 2828 | } |
2437 | 2829 | ||
2438 | return &ctrl->ctrl; | 2830 | dev_info(ctrl->ctrl.device, |
2831 | "NVME-FC{%d}: new ctrl: NQN \"%s\"\n", | ||
2832 | ctrl->cnum, ctrl->ctrl.opts->subsysnqn); | ||
2439 | 2833 | ||
2440 | out_exit_aen_ops: | 2834 | return &ctrl->ctrl; |
2441 | nvme_fc_exit_aen_ops(ctrl); | ||
2442 | out_remove_admin_queue: | ||
2443 | /* send a Disconnect(association) LS to fc-nvme target */ | ||
2444 | nvme_fc_xmt_disconnect_assoc(ctrl); | ||
2445 | nvme_stop_keep_alive(&ctrl->ctrl); | ||
2446 | nvme_fc_destroy_admin_queue(ctrl); | ||
2447 | out_uninit_ctrl: | ||
2448 | nvme_uninit_ctrl(&ctrl->ctrl); | ||
2449 | nvme_put_ctrl(&ctrl->ctrl); | ||
2450 | if (ret > 0) | ||
2451 | ret = -EIO; | ||
2452 | /* exit via here will follow ctlr ref point callbacks to free */ | ||
2453 | return ERR_PTR(ret); | ||
2454 | 2835 | ||
2836 | out_cleanup_admin_q: | ||
2837 | blk_cleanup_queue(ctrl->ctrl.admin_q); | ||
2838 | out_free_admin_tag_set: | ||
2839 | blk_mq_free_tag_set(&ctrl->admin_tag_set); | ||
2840 | out_free_queues: | ||
2841 | kfree(ctrl->queues); | ||
2455 | out_free_ida: | 2842 | out_free_ida: |
2843 | put_device(ctrl->dev); | ||
2456 | ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum); | 2844 | ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum); |
2457 | out_free_ctrl: | 2845 | out_free_ctrl: |
2458 | kfree(ctrl); | 2846 | kfree(ctrl); |
2459 | out_fail: | 2847 | out_fail: |
2460 | nvme_fc_rport_put(rport); | ||
2461 | /* exit via here doesn't follow ctlr ref points */ | 2848 | /* exit via here doesn't follow ctlr ref points */ |
2462 | return ERR_PTR(ret); | 2849 | return ERR_PTR(ret); |
2463 | } | 2850 | } |
@@ -2529,6 +2916,7 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts) | |||
2529 | { | 2916 | { |
2530 | struct nvme_fc_lport *lport; | 2917 | struct nvme_fc_lport *lport; |
2531 | struct nvme_fc_rport *rport; | 2918 | struct nvme_fc_rport *rport; |
2919 | struct nvme_ctrl *ctrl; | ||
2532 | struct nvmet_fc_traddr laddr = { 0L, 0L }; | 2920 | struct nvmet_fc_traddr laddr = { 0L, 0L }; |
2533 | struct nvmet_fc_traddr raddr = { 0L, 0L }; | 2921 | struct nvmet_fc_traddr raddr = { 0L, 0L }; |
2534 | unsigned long flags; | 2922 | unsigned long flags; |
@@ -2560,7 +2948,10 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts) | |||
2560 | 2948 | ||
2561 | spin_unlock_irqrestore(&nvme_fc_lock, flags); | 2949 | spin_unlock_irqrestore(&nvme_fc_lock, flags); |
2562 | 2950 | ||
2563 | return __nvme_fc_create_ctrl(dev, opts, lport, rport); | 2951 | ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport); |
2952 | if (IS_ERR(ctrl)) | ||
2953 | nvme_fc_rport_put(rport); | ||
2954 | return ctrl; | ||
2564 | } | 2955 | } |
2565 | } | 2956 | } |
2566 | spin_unlock_irqrestore(&nvme_fc_lock, flags); | 2957 | spin_unlock_irqrestore(&nvme_fc_lock, flags); |
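The fc.c changes above replace the old create-then-activate flow with a reconnect worker: nvme_fc_connect_ctrl_work() retries nvme_fc_create_association() on a delayed workqueue and, after NVME_FC_MAX_CONNECT_ATTEMPTS failures, gives up and queues delete_work instead. A minimal userspace sketch of that capped-retry pattern follows; try_connect(), MAX_ATTEMPTS, and RETRY_DELAY_SECS are illustrative stand-ins, not the driver's interfaces.

/* Capped-retry sketch mirroring nvme_fc_connect_ctrl_work():
 * retry a connect a bounded number of times, sleeping between
 * tries, then give up and tear down. All names are hypothetical.
 */
#include <stdio.h>
#include <unistd.h>

#define MAX_ATTEMPTS     1   /* mirrors NVME_FC_MAX_CONNECT_ATTEMPTS */
#define RETRY_DELAY_SECS 2   /* mirrors opts->reconnect_delay */

static int try_connect(void)
{
	/* stand-in for nvme_fc_create_association(); always fails
	 * here so the give-up path below is exercised */
	return -1;
}

int main(void)
{
	int attempts = 0;

	for (;;) {
		if (try_connect() == 0) {
			printf("reconnect complete\n");
			return 0;
		}
		if (++attempts >= MAX_ATTEMPTS) {
			/* the driver queues delete_work at this point */
			printf("max attempts (%d) reached, removing controller\n",
			       attempts);
			return 1;
		}
		/* the driver uses queue_delayed_work() instead of sleep() */
		printf("retrying in %d seconds\n", RETRY_DELAY_SECS);
		sleep(RETRY_DELAY_SECS);
	}
}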
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index de61a4a03d78..e4e4e60b1224 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c | |||
@@ -483,7 +483,7 @@ static void nvme_nvm_end_io(struct request *rq, int error) | |||
483 | { | 483 | { |
484 | struct nvm_rq *rqd = rq->end_io_data; | 484 | struct nvm_rq *rqd = rq->end_io_data; |
485 | 485 | ||
486 | rqd->ppa_status = nvme_req(rq)->result.u64; | 486 | rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64); |
487 | rqd->error = nvme_req(rq)->status; | 487 | rqd->error = nvme_req(rq)->status; |
488 | nvm_end_io(rqd); | 488 | nvm_end_io(rqd); |
489 | 489 | ||
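The one-line lightnvm fix matters because nvme_req(rq)->result.u64 is a little-endian wire value; using it raw happens to work on x86 but misreads the field on big-endian hosts. A hedged standalone illustration of what le64_to_cpu() accomplishes, decoding the wire bytes explicitly:

/* Endian-safe decode of a little-endian 64-bit wire value, which is
 * effectively what le64_to_cpu() does on a big-endian host. Values
 * here are made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t le64_decode(const uint8_t *p)
{
	uint64_t v = 0;
	for (int i = 7; i >= 0; i--)
		v = (v << 8) | p[i];   /* byte 0 is least significant */
	return v;
}

int main(void)
{
	/* 0x0123456789abcdef as it would sit in a little-endian CQE */
	uint8_t wire[8] = { 0xef, 0xcd, 0xab, 0x89, 0x67, 0x45, 0x23, 0x01 };

	printf("decoded: 0x%016llx\n",
	       (unsigned long long)le64_decode(wire));
	return 0;
}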
diff --git a/drivers/nvme/host/scsi.c b/drivers/nvme/host/scsi.c index f49ae2758bb7..1f7671e631dd 100644 --- a/drivers/nvme/host/scsi.c +++ b/drivers/nvme/host/scsi.c | |||
@@ -1609,7 +1609,7 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr, | |||
1609 | struct nvme_command c; | 1609 | struct nvme_command c; |
1610 | u8 opcode = (is_write ? nvme_cmd_write : nvme_cmd_read); | 1610 | u8 opcode = (is_write ? nvme_cmd_write : nvme_cmd_read); |
1611 | u16 control; | 1611 | u16 control; |
1612 | u32 max_blocks = queue_max_hw_sectors(ns->queue); | 1612 | u32 max_blocks = queue_max_hw_sectors(ns->queue) >> (ns->lba_shift - 9); |
1613 | 1613 | ||
1614 | num_cmds = nvme_trans_io_get_num_cmds(hdr, cdb_info, max_blocks); | 1614 | num_cmds = nvme_trans_io_get_num_cmds(hdr, cdb_info, max_blocks); |
1615 | 1615 | ||
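The max_blocks change above is a unit conversion: queue_max_hw_sectors() reports a limit in 512-byte sectors, while the SCSI translation layer counts LBAs, so the value must be scaled down by (lba_shift - 9). A worked example with made-up numbers:

/* Worked example of the max_blocks fix. With 4096-byte LBAs
 * (lba_shift = 12), 256 hardware sectors of 512 bytes hold only
 * 32 LBAs: 256 >> (12 - 9) == 32.
 */
#include <stdio.h>

int main(void)
{
	unsigned int max_hw_sectors = 256; /* in 512-byte units */
	unsigned int lba_shift = 12;       /* 4096-byte LBAs */
	unsigned int max_blocks = max_hw_sectors >> (lba_shift - 9);

	printf("max_blocks = %u\n", max_blocks); /* prints 32 */
	return 0;
}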
@@ -2138,15 +2138,6 @@ static int nvme_trans_request_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr, | |||
2138 | return res; | 2138 | return res; |
2139 | } | 2139 | } |
2140 | 2140 | ||
2141 | static int nvme_trans_security_protocol(struct nvme_ns *ns, | ||
2142 | struct sg_io_hdr *hdr, | ||
2143 | u8 *cmd) | ||
2144 | { | ||
2145 | return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, | ||
2146 | ILLEGAL_REQUEST, SCSI_ASC_ILLEGAL_COMMAND, | ||
2147 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); | ||
2148 | } | ||
2149 | |||
2150 | static int nvme_trans_synchronize_cache(struct nvme_ns *ns, | 2141 | static int nvme_trans_synchronize_cache(struct nvme_ns *ns, |
2151 | struct sg_io_hdr *hdr) | 2142 | struct sg_io_hdr *hdr) |
2152 | { | 2143 | { |
@@ -2414,10 +2405,6 @@ static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr) | |||
2414 | case REQUEST_SENSE: | 2405 | case REQUEST_SENSE: |
2415 | retcode = nvme_trans_request_sense(ns, hdr, cmd); | 2406 | retcode = nvme_trans_request_sense(ns, hdr, cmd); |
2416 | break; | 2407 | break; |
2417 | case SECURITY_PROTOCOL_IN: | ||
2418 | case SECURITY_PROTOCOL_OUT: | ||
2419 | retcode = nvme_trans_security_protocol(ns, hdr, cmd); | ||
2420 | break; | ||
2421 | case SYNCHRONIZE_CACHE: | 2408 | case SYNCHRONIZE_CACHE: |
2422 | retcode = nvme_trans_synchronize_cache(ns, hdr); | 2409 | retcode = nvme_trans_synchronize_cache(ns, hdr); |
2423 | break; | 2410 | break; |
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index 074bd3743b5f..62eba29c85fb 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c | |||
@@ -119,7 +119,7 @@ struct nvmet_fc_tgt_queue { | |||
119 | u16 qid; | 119 | u16 qid; |
120 | u16 sqsize; | 120 | u16 sqsize; |
121 | u16 ersp_ratio; | 121 | u16 ersp_ratio; |
122 | u16 sqhd; | 122 | __le16 sqhd; |
123 | int cpu; | 123 | int cpu; |
124 | atomic_t connected; | 124 | atomic_t connected; |
125 | atomic_t sqtail; | 125 | atomic_t sqtail; |
@@ -1058,7 +1058,7 @@ EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport); | |||
1058 | 1058 | ||
1059 | 1059 | ||
1060 | static void | 1060 | static void |
1061 | nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, u32 desc_len, u8 rqst_ls_cmd) | 1061 | nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, __be32 desc_len, u8 rqst_ls_cmd) |
1062 | { | 1062 | { |
1063 | struct fcnvme_ls_acc_hdr *acc = buf; | 1063 | struct fcnvme_ls_acc_hdr *acc = buf; |
1064 | 1064 | ||
@@ -1700,7 +1700,7 @@ nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport, | |||
1700 | xfr_length != fod->total_length || | 1700 | xfr_length != fod->total_length || |
1701 | (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] || | 1701 | (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] || |
1702 | (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) || | 1702 | (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) || |
1703 | queue_90percent_full(fod->queue, cqe->sq_head)) | 1703 | queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head))) |
1704 | send_ersp = true; | 1704 | send_ersp = true; |
1705 | 1705 | ||
1706 | /* re-set the fields */ | 1706 | /* re-set the fields */ |
@@ -2055,7 +2055,7 @@ nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req) | |||
2055 | /* | 2055 | /* |
2056 | * Actual processing routine for received FC-NVME LS Requests from the LLD | 2056 | * Actual processing routine for received FC-NVME LS Requests from the LLD |
2057 | */ | 2057 | */ |
2058 | void | 2058 | static void |
2059 | nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, | 2059 | nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, |
2060 | struct nvmet_fc_fcp_iod *fod) | 2060 | struct nvmet_fc_fcp_iod *fod) |
2061 | { | 2061 | { |
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index aaa3dbe22bd5..15551ef79c8c 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c | |||
@@ -666,7 +666,7 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport) | |||
666 | #define FCLOOP_SGL_SEGS 256 | 666 | #define FCLOOP_SGL_SEGS 256 |
667 | #define FCLOOP_DMABOUND_4G 0xFFFFFFFF | 667 | #define FCLOOP_DMABOUND_4G 0xFFFFFFFF |
668 | 668 | ||
669 | struct nvme_fc_port_template fctemplate = { | 669 | static struct nvme_fc_port_template fctemplate = { |
670 | .localport_delete = fcloop_localport_delete, | 670 | .localport_delete = fcloop_localport_delete, |
671 | .remoteport_delete = fcloop_remoteport_delete, | 671 | .remoteport_delete = fcloop_remoteport_delete, |
672 | .create_queue = fcloop_create_queue, | 672 | .create_queue = fcloop_create_queue, |
@@ -686,7 +686,7 @@ struct nvme_fc_port_template fctemplate = { | |||
686 | .fcprqst_priv_sz = sizeof(struct fcloop_ini_fcpreq), | 686 | .fcprqst_priv_sz = sizeof(struct fcloop_ini_fcpreq), |
687 | }; | 687 | }; |
688 | 688 | ||
689 | struct nvmet_fc_target_template tgttemplate = { | 689 | static struct nvmet_fc_target_template tgttemplate = { |
690 | .targetport_delete = fcloop_targetport_delete, | 690 | .targetport_delete = fcloop_targetport_delete, |
691 | .xmt_ls_rsp = fcloop_xmt_ls_rsp, | 691 | .xmt_ls_rsp = fcloop_xmt_ls_rsp, |
692 | .fcp_op = fcloop_fcp_op, | 692 | .fcp_op = fcloop_fcp_op, |
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 257bbdd0f0b8..6d7840b096e6 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h | |||
@@ -56,7 +56,7 @@ struct lpfc_sli2_slim; | |||
56 | #define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */ | 56 | #define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */ |
57 | #define LPFC_MAX_SGL_SEG_CNT 512 /* SGL element count per scsi cmnd */ | 57 | #define LPFC_MAX_SGL_SEG_CNT 512 /* SGL element count per scsi cmnd */ |
58 | #define LPFC_MAX_BPL_SEG_CNT 4096 /* BPL element count per scsi cmnd */ | 58 | #define LPFC_MAX_BPL_SEG_CNT 4096 /* BPL element count per scsi cmnd */ |
59 | #define LPFC_MIN_NVME_SEG_CNT 254 | 59 | #define LPFC_MAX_NVME_SEG_CNT 128 /* max SGL element cnt per NVME cmnd */ |
60 | 60 | ||
61 | #define LPFC_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */ | 61 | #define LPFC_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */ |
62 | #define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */ | 62 | #define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */ |
@@ -474,6 +474,8 @@ struct lpfc_vport { | |||
474 | unsigned long rcv_buffer_time_stamp; | 474 | unsigned long rcv_buffer_time_stamp; |
475 | uint32_t vport_flag; | 475 | uint32_t vport_flag; |
476 | #define STATIC_VPORT 1 | 476 | #define STATIC_VPORT 1 |
477 | #define FAWWPN_SET 2 | ||
478 | #define FAWWPN_PARAM_CHG 4 | ||
477 | 479 | ||
478 | uint16_t fdmi_num_disc; | 480 | uint16_t fdmi_num_disc; |
479 | uint32_t fdmi_hba_mask; | 481 | uint32_t fdmi_hba_mask; |
@@ -781,6 +783,7 @@ struct lpfc_hba { | |||
781 | uint32_t cfg_nvmet_fb_size; | 783 | uint32_t cfg_nvmet_fb_size; |
782 | uint32_t cfg_total_seg_cnt; | 784 | uint32_t cfg_total_seg_cnt; |
783 | uint32_t cfg_sg_seg_cnt; | 785 | uint32_t cfg_sg_seg_cnt; |
786 | uint32_t cfg_nvme_seg_cnt; | ||
784 | uint32_t cfg_sg_dma_buf_size; | 787 | uint32_t cfg_sg_dma_buf_size; |
785 | uint64_t cfg_soft_wwnn; | 788 | uint64_t cfg_soft_wwnn; |
786 | uint64_t cfg_soft_wwpn; | 789 | uint64_t cfg_soft_wwpn; |
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 22819afbaef5..513fd07715cd 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c | |||
@@ -2292,6 +2292,8 @@ lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr, | |||
2292 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; | 2292 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; |
2293 | struct lpfc_hba *phba = vport->phba; | 2293 | struct lpfc_hba *phba = vport->phba; |
2294 | unsigned int cnt = count; | 2294 | unsigned int cnt = count; |
2295 | uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level; | ||
2296 | u32 *fawwpn_key = (uint32_t *)&vport->fc_sparam.un.vendorVersion[0]; | ||
2295 | 2297 | ||
2296 | /* | 2298 | /* |
2297 | * We're doing a simple sanity check for soft_wwpn setting. | 2299 | * We're doing a simple sanity check for soft_wwpn setting. |
@@ -2305,6 +2307,12 @@ lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr, | |||
2305 | * here. The intent is to protect against the random user or | 2307 | * here. The intent is to protect against the random user or |
2306 | * application that is just writing attributes. | 2308 | * application that is just writing attributes. |
2307 | */ | 2309 | */ |
2310 | if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) { | ||
2311 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
2312 | "0051 "LPFC_DRIVER_NAME" soft wwpn can not" | ||
2313 | " be enabled: fawwpn is enabled\n"); | ||
2314 | return -EINVAL; | ||
2315 | } | ||
2308 | 2316 | ||
2309 | /* count may include a LF at end of string */ | 2317 | /* count may include a LF at end of string */ |
2310 | if (buf[cnt-1] == '\n') | 2318 | if (buf[cnt-1] == '\n') |
@@ -3335,7 +3343,7 @@ LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_FCP, | |||
3335 | * percentage will go to NVME. | 3343 | * percentage will go to NVME. |
3336 | */ | 3344 | */ |
3337 | LPFC_ATTR_R(xri_split, 50, 10, 90, | 3345 | LPFC_ATTR_R(xri_split, 50, 10, 90, |
3338 | "Division of XRI resources between SCSI and NVME"); | 3346 | "Division of XRI resources between SCSI and NVME"); |
3339 | 3347 | ||
3340 | /* | 3348 | /* |
3341 | # lpfc_log_verbose: Only turn this flag on if you are willing to risk being | 3349 | # lpfc_log_verbose: Only turn this flag on if you are willing to risk being |
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 18157d2840a3..a1686c2d863c 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c | |||
@@ -2486,6 +2486,10 @@ static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi) | |||
2486 | mbox, *rpi); | 2486 | mbox, *rpi); |
2487 | else { | 2487 | else { |
2488 | *rpi = lpfc_sli4_alloc_rpi(phba); | 2488 | *rpi = lpfc_sli4_alloc_rpi(phba); |
2489 | if (*rpi == LPFC_RPI_ALLOC_ERROR) { | ||
2490 | mempool_free(mbox, phba->mbox_mem_pool); | ||
2491 | return -EBUSY; | ||
2492 | } | ||
2489 | status = lpfc_reg_rpi(phba, phba->pport->vpi, | 2493 | status = lpfc_reg_rpi(phba, phba->pport->vpi, |
2490 | phba->pport->fc_myDID, | 2494 | phba->pport->fc_myDID, |
2491 | (uint8_t *)&phba->pport->fc_sparam, | 2495 | (uint8_t *)&phba->pport->fc_sparam, |
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 54e6ac42fbcd..944b32ca4931 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h | |||
@@ -24,6 +24,7 @@ typedef int (*node_filter)(struct lpfc_nodelist *, void *); | |||
24 | 24 | ||
25 | struct fc_rport; | 25 | struct fc_rport; |
26 | struct fc_frame_header; | 26 | struct fc_frame_header; |
27 | struct lpfc_nvmet_rcv_ctx; | ||
27 | void lpfc_down_link(struct lpfc_hba *, LPFC_MBOXQ_t *); | 28 | void lpfc_down_link(struct lpfc_hba *, LPFC_MBOXQ_t *); |
28 | void lpfc_sli_read_link_ste(struct lpfc_hba *); | 29 | void lpfc_sli_read_link_ste(struct lpfc_hba *); |
29 | void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t, uint16_t); | 30 | void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t, uint16_t); |
@@ -99,7 +100,7 @@ void lpfc_issue_reg_vpi(struct lpfc_hba *, struct lpfc_vport *); | |||
99 | 100 | ||
100 | int lpfc_check_sli_ndlp(struct lpfc_hba *, struct lpfc_sli_ring *, | 101 | int lpfc_check_sli_ndlp(struct lpfc_hba *, struct lpfc_sli_ring *, |
101 | struct lpfc_iocbq *, struct lpfc_nodelist *); | 102 | struct lpfc_iocbq *, struct lpfc_nodelist *); |
102 | void lpfc_nlp_init(struct lpfc_vport *, struct lpfc_nodelist *, uint32_t); | 103 | struct lpfc_nodelist *lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did); |
103 | struct lpfc_nodelist *lpfc_nlp_get(struct lpfc_nodelist *); | 104 | struct lpfc_nodelist *lpfc_nlp_get(struct lpfc_nodelist *); |
104 | int lpfc_nlp_put(struct lpfc_nodelist *); | 105 | int lpfc_nlp_put(struct lpfc_nodelist *); |
105 | int lpfc_nlp_not_used(struct lpfc_nodelist *ndlp); | 106 | int lpfc_nlp_not_used(struct lpfc_nodelist *ndlp); |
@@ -245,6 +246,10 @@ struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *); | |||
245 | void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *); | 246 | void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *); |
246 | struct rqb_dmabuf *lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba); | 247 | struct rqb_dmabuf *lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba); |
247 | void lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab); | 248 | void lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab); |
249 | void lpfc_nvmet_rq_post(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp, | ||
250 | struct lpfc_dmabuf *mp); | ||
251 | int lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport, | ||
252 | struct fc_frame_header *fc_hdr); | ||
248 | void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *, | 253 | void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *, |
249 | uint16_t); | 254 | uint16_t); |
250 | int lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, | 255 | int lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, |
@@ -302,6 +307,8 @@ int lpfc_sli_check_eratt(struct lpfc_hba *); | |||
302 | void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, | 307 | void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, |
303 | struct lpfc_sli_ring *, uint32_t); | 308 | struct lpfc_sli_ring *, uint32_t); |
304 | void lpfc_sli4_handle_received_buffer(struct lpfc_hba *, struct hbq_dmabuf *); | 309 | void lpfc_sli4_handle_received_buffer(struct lpfc_hba *, struct hbq_dmabuf *); |
310 | void lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport, | ||
311 | struct fc_frame_header *fc_hdr, bool aborted); | ||
305 | void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); | 312 | void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); |
306 | void lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *, LPFC_MBOXQ_t *); | 313 | void lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *, LPFC_MBOXQ_t *); |
307 | int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t, | 314 | int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t, |
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index d3e9af983015..1487406aea77 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c | |||
@@ -537,19 +537,53 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type) | |||
537 | } | 537 | } |
538 | } | 538 | } |
539 | 539 | ||
540 | static void | ||
541 | lpfc_ns_rsp_audit_did(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type) | ||
542 | { | ||
543 | struct lpfc_hba *phba = vport->phba; | ||
544 | struct lpfc_nodelist *ndlp = NULL; | ||
545 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | ||
546 | |||
547 | /* | ||
548 | * To conserve RPIs, filter out addresses for other | ||
549 | * vports on the same physical HBAs. | ||
550 | */ | ||
551 | if (Did != vport->fc_myDID && | ||
552 | (!lpfc_find_vport_by_did(phba, Did) || | ||
553 | vport->cfg_peer_port_login)) { | ||
554 | if (!phba->nvmet_support) { | ||
555 | /* FCPI/NVMEI path. Process Did */ | ||
556 | lpfc_prep_node_fc4type(vport, Did, fc4_type); | ||
557 | return; | ||
558 | } | ||
559 | /* NVMET path. NVMET only cares about NVMEI nodes. */ | ||
560 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { | ||
561 | if (ndlp->nlp_type != NLP_NVME_INITIATOR || | ||
562 | ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) | ||
563 | continue; | ||
564 | spin_lock_irq(shost->host_lock); | ||
565 | if (ndlp->nlp_DID == Did) | ||
566 | ndlp->nlp_flag &= ~NLP_NVMET_RECOV; | ||
567 | else | ||
568 | ndlp->nlp_flag |= NLP_NVMET_RECOV; | ||
569 | spin_unlock_irq(shost->host_lock); | ||
570 | } | ||
571 | } | ||
572 | } | ||
573 | |||
540 | static int | 574 | static int |
541 | lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint8_t fc4_type, | 575 | lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint8_t fc4_type, |
542 | uint32_t Size) | 576 | uint32_t Size) |
543 | { | 577 | { |
544 | struct lpfc_hba *phba = vport->phba; | ||
545 | struct lpfc_sli_ct_request *Response = | 578 | struct lpfc_sli_ct_request *Response = |
546 | (struct lpfc_sli_ct_request *) mp->virt; | 579 | (struct lpfc_sli_ct_request *) mp->virt; |
547 | struct lpfc_nodelist *ndlp = NULL; | ||
548 | struct lpfc_dmabuf *mlast, *next_mp; | 580 | struct lpfc_dmabuf *mlast, *next_mp; |
549 | uint32_t *ctptr = (uint32_t *) & Response->un.gid.PortType; | 581 | uint32_t *ctptr = (uint32_t *) & Response->un.gid.PortType; |
550 | uint32_t Did, CTentry; | 582 | uint32_t Did, CTentry; |
551 | int Cnt; | 583 | int Cnt; |
552 | struct list_head head; | 584 | struct list_head head; |
585 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | ||
586 | struct lpfc_nodelist *ndlp = NULL; | ||
553 | 587 | ||
554 | lpfc_set_disctmo(vport); | 588 | lpfc_set_disctmo(vport); |
555 | vport->num_disc_nodes = 0; | 589 | vport->num_disc_nodes = 0; |
@@ -574,19 +608,7 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint8_t fc4_type, | |||
574 | /* Get next DID from NameServer List */ | 608 | /* Get next DID from NameServer List */ |
575 | CTentry = *ctptr++; | 609 | CTentry = *ctptr++; |
576 | Did = ((be32_to_cpu(CTentry)) & Mask_DID); | 610 | Did = ((be32_to_cpu(CTentry)) & Mask_DID); |
577 | 611 | lpfc_ns_rsp_audit_did(vport, Did, fc4_type); | |
578 | ndlp = NULL; | ||
579 | |||
580 | /* | ||
581 | * Check for rscn processing or not | ||
582 | * To conserve rpi's, filter out addresses for other | ||
583 | * vports on the same physical HBAs. | ||
584 | */ | ||
585 | if ((Did != vport->fc_myDID) && | ||
586 | ((lpfc_find_vport_by_did(phba, Did) == NULL) || | ||
587 | vport->cfg_peer_port_login)) | ||
588 | lpfc_prep_node_fc4type(vport, Did, fc4_type); | ||
589 | |||
590 | if (CTentry & (cpu_to_be32(SLI_CT_LAST_ENTRY))) | 612 | if (CTentry & (cpu_to_be32(SLI_CT_LAST_ENTRY))) |
591 | goto nsout1; | 613 | goto nsout1; |
592 | 614 | ||
@@ -596,6 +618,22 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint8_t fc4_type, | |||
596 | 618 | ||
597 | } | 619 | } |
598 | 620 | ||
621 | /* All GID_FT entries processed. If the driver is running | ||
622 | * in target mode, put impacted nodes into recovery and drop | ||
623 | * the RPI to flush outstanding IO. | ||
624 | */ | ||
625 | if (vport->phba->nvmet_support) { | ||
626 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { | ||
627 | if (!(ndlp->nlp_flag & NLP_NVMET_RECOV)) | ||
628 | continue; | ||
629 | lpfc_disc_state_machine(vport, ndlp, NULL, | ||
630 | NLP_EVT_DEVICE_RECOVERY); | ||
631 | spin_lock_irq(shost->host_lock); | ||
632 | ndlp->nlp_flag &= ~NLP_NVMET_RECOV; | ||
633 | spin_unlock_irq(shost->host_lock); | ||
634 | } | ||
635 | } | ||
636 | |||
599 | nsout1: | 637 | nsout1: |
600 | list_del(&head); | 638 | list_del(&head); |
601 | return 0; | 639 | return 0; |
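The new lpfc_ns_rsp_audit_did()/sweep pair amounts to a mark-and-sweep over the vport's node list when running as an NVME target: initiator nodes are flagged NLP_NVMET_RECOV while the GID_FT response is audited, the flag is cleared for any DID the name server still reports, and whatever stays flagged is pushed through NLP_EVT_DEVICE_RECOVERY. A simplified, hedged sketch of that intent; plain arrays stand in for the lpfc node list and the exact per-DID audit logic:

/* Mark-and-sweep sketch of the NVMET recovery audit. Not lpfc
 * structures; DIDs are made up for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

struct node { unsigned int did; bool recov; };

int main(void)
{
	/* start with every known initiator marked as a recovery candidate */
	struct node nodes[] = { { 0x10100, true }, { 0x10200, true } };
	unsigned int gidft[] = { 0x10100 };  /* DIDs still on the fabric */
	int nnodes = 2, ndids = 1;

	/* pass 1: clear the mark for every DID the name server reports */
	for (int j = 0; j < ndids; j++)
		for (int i = 0; i < nnodes; i++)
			if (nodes[i].did == gidft[j])
				nodes[i].recov = false;

	/* pass 2: sweep - still-marked nodes dropped off the fabric and
	 * get recovered (NLP_EVT_DEVICE_RECOVERY in the driver) */
	for (int i = 0; i < nnodes; i++)
		if (nodes[i].recov)
			printf("recover node 0x%x\n", nodes[i].did);
	return 0;
}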
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 913eed822cb8..fce549a91911 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c | |||
@@ -745,73 +745,102 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) | |||
745 | { | 745 | { |
746 | struct lpfc_hba *phba = vport->phba; | 746 | struct lpfc_hba *phba = vport->phba; |
747 | struct lpfc_nvmet_tgtport *tgtp; | 747 | struct lpfc_nvmet_tgtport *tgtp; |
748 | struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp; | ||
748 | int len = 0; | 749 | int len = 0; |
750 | int cnt; | ||
749 | 751 | ||
750 | if (phba->nvmet_support) { | 752 | if (phba->nvmet_support) { |
751 | if (!phba->targetport) | 753 | if (!phba->targetport) |
752 | return len; | 754 | return len; |
753 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; | 755 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; |
754 | len += snprintf(buf+len, size-len, | 756 | len += snprintf(buf + len, size - len, |
755 | "\nNVME Targetport Statistics\n"); | 757 | "\nNVME Targetport Statistics\n"); |
756 | 758 | ||
757 | len += snprintf(buf+len, size-len, | 759 | len += snprintf(buf + len, size - len, |
758 | "LS: Rcv %08x Drop %08x Abort %08x\n", | 760 | "LS: Rcv %08x Drop %08x Abort %08x\n", |
759 | atomic_read(&tgtp->rcv_ls_req_in), | 761 | atomic_read(&tgtp->rcv_ls_req_in), |
760 | atomic_read(&tgtp->rcv_ls_req_drop), | 762 | atomic_read(&tgtp->rcv_ls_req_drop), |
761 | atomic_read(&tgtp->xmt_ls_abort)); | 763 | atomic_read(&tgtp->xmt_ls_abort)); |
762 | if (atomic_read(&tgtp->rcv_ls_req_in) != | 764 | if (atomic_read(&tgtp->rcv_ls_req_in) != |
763 | atomic_read(&tgtp->rcv_ls_req_out)) { | 765 | atomic_read(&tgtp->rcv_ls_req_out)) { |
764 | len += snprintf(buf+len, size-len, | 766 | len += snprintf(buf + len, size - len, |
765 | "Rcv LS: in %08x != out %08x\n", | 767 | "Rcv LS: in %08x != out %08x\n", |
766 | atomic_read(&tgtp->rcv_ls_req_in), | 768 | atomic_read(&tgtp->rcv_ls_req_in), |
767 | atomic_read(&tgtp->rcv_ls_req_out)); | 769 | atomic_read(&tgtp->rcv_ls_req_out)); |
768 | } | 770 | } |
769 | 771 | ||
770 | len += snprintf(buf+len, size-len, | 772 | len += snprintf(buf + len, size - len, |
771 | "LS: Xmt %08x Drop %08x Cmpl %08x Err %08x\n", | 773 | "LS: Xmt %08x Drop %08x Cmpl %08x Err %08x\n", |
772 | atomic_read(&tgtp->xmt_ls_rsp), | 774 | atomic_read(&tgtp->xmt_ls_rsp), |
773 | atomic_read(&tgtp->xmt_ls_drop), | 775 | atomic_read(&tgtp->xmt_ls_drop), |
774 | atomic_read(&tgtp->xmt_ls_rsp_cmpl), | 776 | atomic_read(&tgtp->xmt_ls_rsp_cmpl), |
775 | atomic_read(&tgtp->xmt_ls_rsp_error)); | 777 | atomic_read(&tgtp->xmt_ls_rsp_error)); |
776 | 778 | ||
777 | len += snprintf(buf+len, size-len, | 779 | len += snprintf(buf + len, size - len, |
778 | "FCP: Rcv %08x Drop %08x\n", | 780 | "FCP: Rcv %08x Drop %08x\n", |
779 | atomic_read(&tgtp->rcv_fcp_cmd_in), | 781 | atomic_read(&tgtp->rcv_fcp_cmd_in), |
780 | atomic_read(&tgtp->rcv_fcp_cmd_drop)); | 782 | atomic_read(&tgtp->rcv_fcp_cmd_drop)); |
781 | 783 | ||
782 | if (atomic_read(&tgtp->rcv_fcp_cmd_in) != | 784 | if (atomic_read(&tgtp->rcv_fcp_cmd_in) != |
783 | atomic_read(&tgtp->rcv_fcp_cmd_out)) { | 785 | atomic_read(&tgtp->rcv_fcp_cmd_out)) { |
784 | len += snprintf(buf+len, size-len, | 786 | len += snprintf(buf + len, size - len, |
785 | "Rcv FCP: in %08x != out %08x\n", | 787 | "Rcv FCP: in %08x != out %08x\n", |
786 | atomic_read(&tgtp->rcv_fcp_cmd_in), | 788 | atomic_read(&tgtp->rcv_fcp_cmd_in), |
787 | atomic_read(&tgtp->rcv_fcp_cmd_out)); | 789 | atomic_read(&tgtp->rcv_fcp_cmd_out)); |
788 | } | 790 | } |
789 | 791 | ||
790 | len += snprintf(buf+len, size-len, | 792 | len += snprintf(buf + len, size - len, |
791 | "FCP Rsp: read %08x readrsp %08x write %08x rsp %08x\n", | 793 | "FCP Rsp: read %08x readrsp %08x " |
794 | "write %08x rsp %08x\n", | ||
792 | atomic_read(&tgtp->xmt_fcp_read), | 795 | atomic_read(&tgtp->xmt_fcp_read), |
793 | atomic_read(&tgtp->xmt_fcp_read_rsp), | 796 | atomic_read(&tgtp->xmt_fcp_read_rsp), |
794 | atomic_read(&tgtp->xmt_fcp_write), | 797 | atomic_read(&tgtp->xmt_fcp_write), |
795 | atomic_read(&tgtp->xmt_fcp_rsp)); | 798 | atomic_read(&tgtp->xmt_fcp_rsp)); |
796 | 799 | ||
797 | len += snprintf(buf+len, size-len, | 800 | len += snprintf(buf + len, size - len, |
798 | "FCP Rsp: abort %08x drop %08x\n", | 801 | "FCP Rsp: abort %08x drop %08x\n", |
799 | atomic_read(&tgtp->xmt_fcp_abort), | 802 | atomic_read(&tgtp->xmt_fcp_abort), |
800 | atomic_read(&tgtp->xmt_fcp_drop)); | 803 | atomic_read(&tgtp->xmt_fcp_drop)); |
801 | 804 | ||
802 | len += snprintf(buf+len, size-len, | 805 | len += snprintf(buf + len, size - len, |
803 | "FCP Rsp Cmpl: %08x err %08x drop %08x\n", | 806 | "FCP Rsp Cmpl: %08x err %08x drop %08x\n", |
804 | atomic_read(&tgtp->xmt_fcp_rsp_cmpl), | 807 | atomic_read(&tgtp->xmt_fcp_rsp_cmpl), |
805 | atomic_read(&tgtp->xmt_fcp_rsp_error), | 808 | atomic_read(&tgtp->xmt_fcp_rsp_error), |
806 | atomic_read(&tgtp->xmt_fcp_rsp_drop)); | 809 | atomic_read(&tgtp->xmt_fcp_rsp_drop)); |
807 | 810 | ||
808 | len += snprintf(buf+len, size-len, | 811 | len += snprintf(buf + len, size - len, |
809 | "ABORT: Xmt %08x Err %08x Cmpl %08x", | 812 | "ABORT: Xmt %08x Err %08x Cmpl %08x", |
810 | atomic_read(&tgtp->xmt_abort_rsp), | 813 | atomic_read(&tgtp->xmt_abort_rsp), |
811 | atomic_read(&tgtp->xmt_abort_rsp_error), | 814 | atomic_read(&tgtp->xmt_abort_rsp_error), |
812 | atomic_read(&tgtp->xmt_abort_cmpl)); | 815 | atomic_read(&tgtp->xmt_abort_cmpl)); |
813 | 816 | ||
814 | len += snprintf(buf+len, size-len, "\n"); | 817 | len += snprintf(buf + len, size - len, "\n"); |
818 | |||
819 | cnt = 0; | ||
820 | spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); | ||
821 | list_for_each_entry_safe(ctxp, next_ctxp, | ||
822 | &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, | ||
823 | list) { | ||
824 | cnt++; | ||
825 | } | ||
826 | spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); | ||
827 | if (cnt) { | ||
828 | len += snprintf(buf + len, size - len, | ||
829 | "ABORT: %d ctx entries\n", cnt); | ||
830 | spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); | ||
831 | list_for_each_entry_safe(ctxp, next_ctxp, | ||
832 | &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, | ||
833 | list) { | ||
834 | if (len >= (size - LPFC_DEBUG_OUT_LINE_SZ)) | ||
835 | break; | ||
836 | len += snprintf(buf + len, size - len, | ||
837 | "Entry: oxid %x state %x " | ||
838 | "flag %x\n", | ||
839 | ctxp->oxid, ctxp->state, | ||
840 | ctxp->flag); | ||
841 | } | ||
842 | spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); | ||
843 | } | ||
815 | } else { | 844 | } else { |
816 | if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) | 845 | if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) |
817 | return len; | 846 | return len; |
@@ -3128,8 +3157,6 @@ __lpfc_idiag_print_rqpair(struct lpfc_queue *qp, struct lpfc_queue *datqp, | |||
3128 | datqp->queue_id, datqp->entry_count, | 3157 | datqp->queue_id, datqp->entry_count, |
3129 | datqp->entry_size, datqp->host_index, | 3158 | datqp->entry_size, datqp->host_index, |
3130 | datqp->hba_index); | 3159 | datqp->hba_index); |
3131 | len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); | ||
3132 | |||
3133 | return len; | 3160 | return len; |
3134 | } | 3161 | } |
3135 | 3162 | ||
@@ -5700,10 +5727,8 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport) | |||
5700 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS | 5727 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
5701 | struct lpfc_hba *phba = vport->phba; | 5728 | struct lpfc_hba *phba = vport->phba; |
5702 | 5729 | ||
5703 | if (vport->disc_trc) { | 5730 | kfree(vport->disc_trc); |
5704 | kfree(vport->disc_trc); | 5731 | vport->disc_trc = NULL; |
5705 | vport->disc_trc = NULL; | ||
5706 | } | ||
5707 | 5732 | ||
5708 | debugfs_remove(vport->debug_disc_trc); /* discovery_trace */ | 5733 | debugfs_remove(vport->debug_disc_trc); /* discovery_trace */ |
5709 | vport->debug_disc_trc = NULL; | 5734 | vport->debug_disc_trc = NULL; |
@@ -5770,10 +5795,8 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport) | |||
5770 | debugfs_remove(phba->debug_readRef); /* readRef */ | 5795 | debugfs_remove(phba->debug_readRef); /* readRef */ |
5771 | phba->debug_readRef = NULL; | 5796 | phba->debug_readRef = NULL; |
5772 | 5797 | ||
5773 | if (phba->slow_ring_trc) { | 5798 | kfree(phba->slow_ring_trc); |
5774 | kfree(phba->slow_ring_trc); | 5799 | phba->slow_ring_trc = NULL; |
5775 | phba->slow_ring_trc = NULL; | ||
5776 | } | ||
5777 | 5800 | ||
5778 | /* slow_ring_trace */ | 5801 | /* slow_ring_trace */ |
5779 | debugfs_remove(phba->debug_slow_ring_trc); | 5802 | debugfs_remove(phba->debug_slow_ring_trc); |
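The debugfs teardown cleanups above rely on kfree(NULL) being a defined no-op, so the surrounding "if (ptr)" checks were pure noise. The same idiom holds for free() in userspace C, as this small hedged sketch shows (trace_teardown() and struct trace_buf are hypothetical):

/* free(NULL) is a no-op, just like kfree(NULL), so unconditional
 * free-then-NULL is the cleaner teardown pattern.
 */
#include <stdlib.h>

struct trace_buf { char *data; };

static void trace_teardown(struct trace_buf *t)
{
	free(t->data);   /* safe even if data was never allocated */
	t->data = NULL;  /* avoid a dangling pointer on repeat teardown */
}

int main(void)
{
	struct trace_buf t = { 0 };

	trace_teardown(&t);  /* no allocation yet - still safe */
	t.data = malloc(32);
	trace_teardown(&t);
	return 0;
}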
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index f4ff99d95db3..9d5a379f4b15 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h | |||
@@ -157,6 +157,7 @@ struct lpfc_node_rrq { | |||
157 | #define NLP_LOGO_SND 0x00000100 /* sent LOGO request for this entry */ | 157 | #define NLP_LOGO_SND 0x00000100 /* sent LOGO request for this entry */ |
158 | #define NLP_RNID_SND 0x00000400 /* sent RNID request for this entry */ | 158 | #define NLP_RNID_SND 0x00000400 /* sent RNID request for this entry */ |
159 | #define NLP_ELS_SND_MASK 0x000007e0 /* sent ELS request for this entry */ | 159 | #define NLP_ELS_SND_MASK 0x000007e0 /* sent ELS request for this entry */ |
160 | #define NLP_NVMET_RECOV 0x00001000 /* NVMET auditing node for recovery. */ | ||
160 | #define NLP_DEFER_RM 0x00010000 /* Remove this ndlp if no longer used */ | 161 | #define NLP_DEFER_RM 0x00010000 /* Remove this ndlp if no longer used */ |
161 | #define NLP_DELAY_TMO 0x00020000 /* delay timeout is running for node */ | 162 | #define NLP_DELAY_TMO 0x00020000 /* delay timeout is running for node */ |
162 | #define NLP_NPR_2B_DISC 0x00040000 /* node is included in num_disc_nodes */ | 163 | #define NLP_NPR_2B_DISC 0x00040000 /* node is included in num_disc_nodes */ |
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index a5ca37e45fb6..67827e397431 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c | |||
@@ -603,9 +603,11 @@ lpfc_check_clean_addr_bit(struct lpfc_vport *vport, | |||
603 | memcmp(&vport->fabric_portname, &sp->portName, | 603 | memcmp(&vport->fabric_portname, &sp->portName, |
604 | sizeof(struct lpfc_name)) || | 604 | sizeof(struct lpfc_name)) || |
605 | memcmp(&vport->fabric_nodename, &sp->nodeName, | 605 | memcmp(&vport->fabric_nodename, &sp->nodeName, |
606 | sizeof(struct lpfc_name))) | 606 | sizeof(struct lpfc_name)) || |
607 | (vport->vport_flag & FAWWPN_PARAM_CHG)) { | ||
607 | fabric_param_changed = 1; | 608 | fabric_param_changed = 1; |
608 | 609 | vport->vport_flag &= ~FAWWPN_PARAM_CHG; | |
610 | } | ||
609 | /* | 611 | /* |
610 | * Word 1 Bit 31 in common service parameter is overloaded. | 612 | * Word 1 Bit 31 in common service parameter is overloaded. |
611 | * Word 1 Bit 31 in FLOGI request is multiple NPort request | 613 | * Word 1 Bit 31 in FLOGI request is multiple NPort request |
@@ -895,10 +897,9 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
895 | * Cannot find existing Fabric ndlp, so allocate a | 897 | * Cannot find existing Fabric ndlp, so allocate a |
896 | * new one | 898 | * new one |
897 | */ | 899 | */ |
898 | ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); | 900 | ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID); |
899 | if (!ndlp) | 901 | if (!ndlp) |
900 | goto fail; | 902 | goto fail; |
901 | lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID); | ||
902 | } else if (!NLP_CHK_NODE_ACT(ndlp)) { | 903 | } else if (!NLP_CHK_NODE_ACT(ndlp)) { |
903 | ndlp = lpfc_enable_node(vport, ndlp, | 904 | ndlp = lpfc_enable_node(vport, ndlp, |
904 | NLP_STE_UNUSED_NODE); | 905 | NLP_STE_UNUSED_NODE); |
@@ -1364,7 +1365,6 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba) | |||
1364 | int | 1365 | int |
1365 | lpfc_initial_flogi(struct lpfc_vport *vport) | 1366 | lpfc_initial_flogi(struct lpfc_vport *vport) |
1366 | { | 1367 | { |
1367 | struct lpfc_hba *phba = vport->phba; | ||
1368 | struct lpfc_nodelist *ndlp; | 1368 | struct lpfc_nodelist *ndlp; |
1369 | 1369 | ||
1370 | vport->port_state = LPFC_FLOGI; | 1370 | vport->port_state = LPFC_FLOGI; |
@@ -1374,10 +1374,9 @@ lpfc_initial_flogi(struct lpfc_vport *vport) | |||
1374 | ndlp = lpfc_findnode_did(vport, Fabric_DID); | 1374 | ndlp = lpfc_findnode_did(vport, Fabric_DID); |
1375 | if (!ndlp) { | 1375 | if (!ndlp) { |
1376 | /* Cannot find existing Fabric ndlp, so allocate a new one */ | 1376 | /* Cannot find existing Fabric ndlp, so allocate a new one */ |
1377 | ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); | 1377 | ndlp = lpfc_nlp_init(vport, Fabric_DID); |
1378 | if (!ndlp) | 1378 | if (!ndlp) |
1379 | return 0; | 1379 | return 0; |
1380 | lpfc_nlp_init(vport, ndlp, Fabric_DID); | ||
1381 | /* Set the node type */ | 1380 | /* Set the node type */ |
1382 | ndlp->nlp_type |= NLP_FABRIC; | 1381 | ndlp->nlp_type |= NLP_FABRIC; |
1383 | /* Put ndlp onto node list */ | 1382 | /* Put ndlp onto node list */ |
@@ -1418,17 +1417,15 @@ lpfc_initial_flogi(struct lpfc_vport *vport) | |||
1418 | int | 1417 | int |
1419 | lpfc_initial_fdisc(struct lpfc_vport *vport) | 1418 | lpfc_initial_fdisc(struct lpfc_vport *vport) |
1420 | { | 1419 | { |
1421 | struct lpfc_hba *phba = vport->phba; | ||
1422 | struct lpfc_nodelist *ndlp; | 1420 | struct lpfc_nodelist *ndlp; |
1423 | 1421 | ||
1424 | /* First look for the Fabric ndlp */ | 1422 | /* First look for the Fabric ndlp */ |
1425 | ndlp = lpfc_findnode_did(vport, Fabric_DID); | 1423 | ndlp = lpfc_findnode_did(vport, Fabric_DID); |
1426 | if (!ndlp) { | 1424 | if (!ndlp) { |
1427 | /* Cannot find existing Fabric ndlp, so allocate a new one */ | 1425 | /* Cannot find existing Fabric ndlp, so allocate a new one */ |
1428 | ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); | 1426 | ndlp = lpfc_nlp_init(vport, Fabric_DID); |
1429 | if (!ndlp) | 1427 | if (!ndlp) |
1430 | return 0; | 1428 | return 0; |
1431 | lpfc_nlp_init(vport, ndlp, Fabric_DID); | ||
1432 | /* Put ndlp onto node list */ | 1429 | /* Put ndlp onto node list */ |
1433 | lpfc_enqueue_node(vport, ndlp); | 1430 | lpfc_enqueue_node(vport, ndlp); |
1434 | } else if (!NLP_CHK_NODE_ACT(ndlp)) { | 1431 | } else if (!NLP_CHK_NODE_ACT(ndlp)) { |
@@ -1564,14 +1561,13 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, | |||
1564 | phba->active_rrq_pool); | 1561 | phba->active_rrq_pool); |
1565 | return ndlp; | 1562 | return ndlp; |
1566 | } | 1563 | } |
1567 | new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC); | 1564 | new_ndlp = lpfc_nlp_init(vport, ndlp->nlp_DID); |
1568 | if (!new_ndlp) { | 1565 | if (!new_ndlp) { |
1569 | if (active_rrqs_xri_bitmap) | 1566 | if (active_rrqs_xri_bitmap) |
1570 | mempool_free(active_rrqs_xri_bitmap, | 1567 | mempool_free(active_rrqs_xri_bitmap, |
1571 | phba->active_rrq_pool); | 1568 | phba->active_rrq_pool); |
1572 | return ndlp; | 1569 | return ndlp; |
1573 | } | 1570 | } |
1574 | lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID); | ||
1575 | } else if (!NLP_CHK_NODE_ACT(new_ndlp)) { | 1571 | } else if (!NLP_CHK_NODE_ACT(new_ndlp)) { |
1576 | rc = memcmp(&ndlp->nlp_portname, name, | 1572 | rc = memcmp(&ndlp->nlp_portname, name, |
1577 | sizeof(struct lpfc_name)); | 1573 | sizeof(struct lpfc_name)); |
@@ -2845,10 +2841,9 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) | |||
2845 | 2841 | ||
2846 | ndlp = lpfc_findnode_did(vport, nportid); | 2842 | ndlp = lpfc_findnode_did(vport, nportid); |
2847 | if (!ndlp) { | 2843 | if (!ndlp) { |
2848 | ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); | 2844 | ndlp = lpfc_nlp_init(vport, nportid); |
2849 | if (!ndlp) | 2845 | if (!ndlp) |
2850 | return 1; | 2846 | return 1; |
2851 | lpfc_nlp_init(vport, ndlp, nportid); | ||
2852 | lpfc_enqueue_node(vport, ndlp); | 2847 | lpfc_enqueue_node(vport, ndlp); |
2853 | } else if (!NLP_CHK_NODE_ACT(ndlp)) { | 2848 | } else if (!NLP_CHK_NODE_ACT(ndlp)) { |
2854 | ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); | 2849 | ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); |
@@ -2938,10 +2933,9 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) | |||
2938 | 2933 | ||
2939 | ndlp = lpfc_findnode_did(vport, nportid); | 2934 | ndlp = lpfc_findnode_did(vport, nportid); |
2940 | if (!ndlp) { | 2935 | if (!ndlp) { |
2941 | ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); | 2936 | ndlp = lpfc_nlp_init(vport, nportid); |
2942 | if (!ndlp) | 2937 | if (!ndlp) |
2943 | return 1; | 2938 | return 1; |
2944 | lpfc_nlp_init(vport, ndlp, nportid); | ||
2945 | lpfc_enqueue_node(vport, ndlp); | 2939 | lpfc_enqueue_node(vport, ndlp); |
2946 | } else if (!NLP_CHK_NODE_ACT(ndlp)) { | 2940 | } else if (!NLP_CHK_NODE_ACT(ndlp)) { |
2947 | ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); | 2941 | ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); |
@@ -4403,7 +4397,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, | |||
4403 | pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); | 4397 | pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); |
4404 | memset(pcmd, 0, cmdsize); | 4398 | memset(pcmd, 0, cmdsize); |
4405 | 4399 | ||
4406 | *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)); | 4400 | *((uint32_t *)(pcmd)) = elsrspcmd; |
4407 | pcmd += sizeof(uint32_t); | 4401 | pcmd += sizeof(uint32_t); |
4408 | 4402 | ||
4409 | /* For PRLI, remainder of payload is PRLI parameter page */ | 4403 | /* For PRLI, remainder of payload is PRLI parameter page */ |
@@ -5867,8 +5861,11 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport) | |||
5867 | (ndlp->nlp_state == NLP_STE_UNUSED_NODE) || | 5861 | (ndlp->nlp_state == NLP_STE_UNUSED_NODE) || |
5868 | !lpfc_rscn_payload_check(vport, ndlp->nlp_DID)) | 5862 | !lpfc_rscn_payload_check(vport, ndlp->nlp_DID)) |
5869 | continue; | 5863 | continue; |
5864 | |||
5865 | /* NVME Target mode does not do RSCN Recovery. */ | ||
5870 | if (vport->phba->nvmet_support) | 5866 | if (vport->phba->nvmet_support) |
5871 | continue; | 5867 | continue; |
5868 | |||
5872 | lpfc_disc_state_machine(vport, ndlp, NULL, | 5869 | lpfc_disc_state_machine(vport, ndlp, NULL, |
5873 | NLP_EVT_DEVICE_RECOVERY); | 5870 | NLP_EVT_DEVICE_RECOVERY); |
5874 | lpfc_cancel_retry_delay_tmo(vport, ndlp); | 5871 | lpfc_cancel_retry_delay_tmo(vport, ndlp); |
@@ -6133,7 +6130,6 @@ int | |||
6133 | lpfc_els_handle_rscn(struct lpfc_vport *vport) | 6130 | lpfc_els_handle_rscn(struct lpfc_vport *vport) |
6134 | { | 6131 | { |
6135 | struct lpfc_nodelist *ndlp; | 6132 | struct lpfc_nodelist *ndlp; |
6136 | struct lpfc_hba *phba = vport->phba; | ||
6137 | 6133 | ||
6138 | /* Ignore RSCN if the port is being torn down. */ | 6134 | /* Ignore RSCN if the port is being torn down. */ |
6139 | if (vport->load_flag & FC_UNLOADING) { | 6135 | if (vport->load_flag & FC_UNLOADING) { |
@@ -6157,22 +6153,16 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport) | |||
6157 | ndlp = lpfc_findnode_did(vport, NameServer_DID); | 6153 | ndlp = lpfc_findnode_did(vport, NameServer_DID); |
6158 | if (ndlp && NLP_CHK_NODE_ACT(ndlp) | 6154 | if (ndlp && NLP_CHK_NODE_ACT(ndlp) |
6159 | && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { | 6155 | && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { |
6160 | /* Good ndlp, issue CT Request to NameServer */ | 6156 | /* Good ndlp, issue CT Request to NameServer. Need to |
6157 | * know how many gidfts were issued. If none, then just | ||
6158 | * flush the RSCN. Otherwise, the outstanding requests | ||
6159 | * need to complete. | ||
6160 | */ | ||
6161 | vport->gidft_inp = 0; | 6161 | vport->gidft_inp = 0; |
6162 | if (lpfc_issue_gidft(vport) == 0) | 6162 | if (lpfc_issue_gidft(vport) > 0) |
6163 | /* Wait for NameServer query cmpl before we can | ||
6164 | * continue | ||
6165 | */ | ||
6166 | return 1; | 6163 | return 1; |
6167 | } else { | 6164 | } else { |
6168 | /* If login to NameServer does not exist, issue one */ | 6165 | /* Nameserver login in question. Revalidate. */ |
6169 | /* Good status, issue PLOGI to NameServer */ | ||
6170 | ndlp = lpfc_findnode_did(vport, NameServer_DID); | ||
6171 | if (ndlp && NLP_CHK_NODE_ACT(ndlp)) | ||
6172 | /* Wait for NameServer login cmpl before we can | ||
6173 | continue */ | ||
6174 | return 1; | ||
6175 | |||
6176 | if (ndlp) { | 6166 | if (ndlp) { |
6177 | ndlp = lpfc_enable_node(vport, ndlp, | 6167 | ndlp = lpfc_enable_node(vport, ndlp, |
6178 | NLP_STE_PLOGI_ISSUE); | 6168 | NLP_STE_PLOGI_ISSUE); |
@@ -6182,12 +6172,11 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport) | |||
6182 | } | 6172 | } |
6183 | ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; | 6173 | ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; |
6184 | } else { | 6174 | } else { |
6185 | ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); | 6175 | ndlp = lpfc_nlp_init(vport, NameServer_DID); |
6186 | if (!ndlp) { | 6176 | if (!ndlp) { |
6187 | lpfc_els_flush_rscn(vport); | 6177 | lpfc_els_flush_rscn(vport); |
6188 | return 0; | 6178 | return 0; |
6189 | } | 6179 | } |
6190 | lpfc_nlp_init(vport, ndlp, NameServer_DID); | ||
6191 | ndlp->nlp_prev_state = ndlp->nlp_state; | 6180 | ndlp->nlp_prev_state = ndlp->nlp_state; |
6192 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); | 6181 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); |
6193 | } | 6182 | } |
@@ -7746,11 +7735,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
7746 | ndlp = lpfc_findnode_did(vport, did); | 7735 | ndlp = lpfc_findnode_did(vport, did); |
7747 | if (!ndlp) { | 7736 | if (!ndlp) { |
7748 | /* Cannot find existing Fabric ndlp, so allocate a new one */ | 7737 | /* Cannot find existing Fabric ndlp, so allocate a new one */ |
7749 | ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); | 7738 | ndlp = lpfc_nlp_init(vport, did); |
7750 | if (!ndlp) | 7739 | if (!ndlp) |
7751 | goto dropit; | 7740 | goto dropit; |
7752 | |||
7753 | lpfc_nlp_init(vport, ndlp, did); | ||
7754 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); | 7741 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); |
7755 | newnode = 1; | 7742 | newnode = 1; |
7756 | if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) | 7743 | if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) |
@@ -8193,7 +8180,6 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
8193 | static void | 8180 | static void |
8194 | lpfc_start_fdmi(struct lpfc_vport *vport) | 8181 | lpfc_start_fdmi(struct lpfc_vport *vport) |
8195 | { | 8182 | { |
8196 | struct lpfc_hba *phba = vport->phba; | ||
8197 | struct lpfc_nodelist *ndlp; | 8183 | struct lpfc_nodelist *ndlp; |
8198 | 8184 | ||
8199 | /* If this is the first time, allocate an ndlp and initialize | 8185 | /* If this is the first time, allocate an ndlp and initialize |
@@ -8202,9 +8188,8 @@ lpfc_start_fdmi(struct lpfc_vport *vport) | |||
8202 | */ | 8188 | */ |
8203 | ndlp = lpfc_findnode_did(vport, FDMI_DID); | 8189 | ndlp = lpfc_findnode_did(vport, FDMI_DID); |
8204 | if (!ndlp) { | 8190 | if (!ndlp) { |
8205 | ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); | 8191 | ndlp = lpfc_nlp_init(vport, FDMI_DID); |
8206 | if (ndlp) { | 8192 | if (ndlp) { |
8207 | lpfc_nlp_init(vport, ndlp, FDMI_DID); | ||
8208 | ndlp->nlp_type |= NLP_FABRIC; | 8193 | ndlp->nlp_type |= NLP_FABRIC; |
8209 | } else { | 8194 | } else { |
8210 | return; | 8195 | return; |
@@ -8257,7 +8242,7 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) | |||
8257 | 8242 | ||
8258 | ndlp = lpfc_findnode_did(vport, NameServer_DID); | 8243 | ndlp = lpfc_findnode_did(vport, NameServer_DID); |
8259 | if (!ndlp) { | 8244 | if (!ndlp) { |
8260 | ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); | 8245 | ndlp = lpfc_nlp_init(vport, NameServer_DID); |
8261 | if (!ndlp) { | 8246 | if (!ndlp) { |
8262 | if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { | 8247 | if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { |
8263 | lpfc_disc_start(vport); | 8248 | lpfc_disc_start(vport); |
@@ -8268,7 +8253,6 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) | |||
8268 | "0251 NameServer login: no memory\n"); | 8253 | "0251 NameServer login: no memory\n"); |
8269 | return; | 8254 | return; |
8270 | } | 8255 | } |
8271 | lpfc_nlp_init(vport, ndlp, NameServer_DID); | ||
8272 | } else if (!NLP_CHK_NODE_ACT(ndlp)) { | 8256 | } else if (!NLP_CHK_NODE_ACT(ndlp)) { |
8273 | ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); | 8257 | ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); |
8274 | if (!ndlp) { | 8258 | if (!ndlp) { |
@@ -8771,7 +8755,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
8771 | pcmd += sizeof(uint32_t); /* Node Name */ | 8755 | pcmd += sizeof(uint32_t); /* Node Name */ |
8772 | pcmd += sizeof(uint32_t); /* Node Name */ | 8756 | pcmd += sizeof(uint32_t); /* Node Name */ |
8773 | memcpy(pcmd, &vport->fc_nodename, 8); | 8757 | memcpy(pcmd, &vport->fc_nodename, 8); |
8774 | 8758 | memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); | |
8775 | lpfc_set_disctmo(vport); | 8759 | lpfc_set_disctmo(vport); |
8776 | 8760 | ||
8777 | phba->fc_stat.elsXmitFDISC++; | 8761 | phba->fc_stat.elsXmitFDISC++; |
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 180b072beef6..0482c5580331 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c | |||
@@ -3002,6 +3002,7 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
3002 | MAILBOX_t *mb = &pmb->u.mb; | 3002 | MAILBOX_t *mb = &pmb->u.mb; |
3003 | struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1; | 3003 | struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1; |
3004 | struct lpfc_vport *vport = pmb->vport; | 3004 | struct lpfc_vport *vport = pmb->vport; |
3005 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | ||
3005 | struct serv_parm *sp = &vport->fc_sparam; | 3006 | struct serv_parm *sp = &vport->fc_sparam; |
3006 | uint32_t ed_tov; | 3007 | uint32_t ed_tov; |
3007 | 3008 | ||
@@ -3031,6 +3032,7 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
3031 | } | 3032 | } |
3032 | 3033 | ||
3033 | lpfc_update_vport_wwn(vport); | 3034 | lpfc_update_vport_wwn(vport); |
3035 | fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); | ||
3034 | if (vport->port_type == LPFC_PHYSICAL_PORT) { | 3036 | if (vport->port_type == LPFC_PHYSICAL_PORT) { |
3035 | memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn)); | 3037 | memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn)); |
3036 | memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn)); | 3038 | memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn)); |
@@ -3309,6 +3311,7 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
3309 | struct lpfc_sli_ring *pring; | 3311 | struct lpfc_sli_ring *pring; |
3310 | MAILBOX_t *mb = &pmb->u.mb; | 3312 | MAILBOX_t *mb = &pmb->u.mb; |
3311 | struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); | 3313 | struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); |
3314 | uint8_t attn_type; | ||
3312 | 3315 | ||
3313 | /* Unblock ELS traffic */ | 3316 | /* Unblock ELS traffic */ |
3314 | pring = lpfc_phba_elsring(phba); | 3317 | pring = lpfc_phba_elsring(phba); |
@@ -3325,6 +3328,7 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
3325 | } | 3328 | } |
3326 | 3329 | ||
3327 | la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop; | 3330 | la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop; |
3331 | attn_type = bf_get(lpfc_mbx_read_top_att_type, la); | ||
3328 | 3332 | ||
3329 | memcpy(&phba->alpa_map[0], mp->virt, 128); | 3333 | memcpy(&phba->alpa_map[0], mp->virt, 128); |
3330 | 3334 | ||
@@ -3337,7 +3341,7 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
3337 | 3341 | ||
3338 | if (phba->fc_eventTag <= la->eventTag) { | 3342 | if (phba->fc_eventTag <= la->eventTag) { |
3339 | phba->fc_stat.LinkMultiEvent++; | 3343 | phba->fc_stat.LinkMultiEvent++; |
3340 | if (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP) | 3344 | if (attn_type == LPFC_ATT_LINK_UP) |
3341 | if (phba->fc_eventTag != 0) | 3345 | if (phba->fc_eventTag != 0) |
3342 | lpfc_linkdown(phba); | 3346 | lpfc_linkdown(phba); |
3343 | } | 3347 | } |
@@ -3353,7 +3357,7 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
3353 | } | 3357 | } |
3354 | 3358 | ||
3355 | phba->link_events++; | 3359 | phba->link_events++; |
3356 | if ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP) && | 3360 | if ((attn_type == LPFC_ATT_LINK_UP) && |
3357 | !(phba->sli.sli_flag & LPFC_MENLO_MAINT)) { | 3361 | !(phba->sli.sli_flag & LPFC_MENLO_MAINT)) { |
3358 | phba->fc_stat.LinkUp++; | 3362 | phba->fc_stat.LinkUp++; |
3359 | if (phba->link_flag & LS_LOOPBACK_MODE) { | 3363 | if (phba->link_flag & LS_LOOPBACK_MODE) { |
@@ -3379,8 +3383,8 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
3379 | phba->wait_4_mlo_maint_flg); | 3383 | phba->wait_4_mlo_maint_flg); |
3380 | } | 3384 | } |
3381 | lpfc_mbx_process_link_up(phba, la); | 3385 | lpfc_mbx_process_link_up(phba, la); |
3382 | } else if (bf_get(lpfc_mbx_read_top_att_type, la) == | 3386 | } else if (attn_type == LPFC_ATT_LINK_DOWN || |
3383 | LPFC_ATT_LINK_DOWN) { | 3387 | attn_type == LPFC_ATT_UNEXP_WWPN) { |
3384 | phba->fc_stat.LinkDown++; | 3388 | phba->fc_stat.LinkDown++; |
3385 | if (phba->link_flag & LS_LOOPBACK_MODE) | 3389 | if (phba->link_flag & LS_LOOPBACK_MODE) |
3386 | lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, | 3390 | lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, |
@@ -3389,6 +3393,14 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
3389 | "Data: x%x x%x x%x\n", | 3393 | "Data: x%x x%x x%x\n", |
3390 | la->eventTag, phba->fc_eventTag, | 3394 | la->eventTag, phba->fc_eventTag, |
3391 | phba->pport->port_state, vport->fc_flag); | 3395 | phba->pport->port_state, vport->fc_flag); |
3396 | else if (attn_type == LPFC_ATT_UNEXP_WWPN) | ||
3397 | lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, | ||
3398 | "1313 Link Down UNEXP WWPN Event x%x received " | ||
3399 | "Data: x%x x%x x%x x%x x%x\n", | ||
3400 | la->eventTag, phba->fc_eventTag, | ||
3401 | phba->pport->port_state, vport->fc_flag, | ||
3402 | bf_get(lpfc_mbx_read_top_mm, la), | ||
3403 | bf_get(lpfc_mbx_read_top_fa, la)); | ||
3392 | else | 3404 | else |
3393 | lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, | 3405 | lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, |
3394 | "1305 Link Down Event x%x received " | 3406 | "1305 Link Down Event x%x received " |
@@ -3399,8 +3411,8 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
3399 | bf_get(lpfc_mbx_read_top_fa, la)); | 3411 | bf_get(lpfc_mbx_read_top_fa, la)); |
3400 | lpfc_mbx_issue_link_down(phba); | 3412 | lpfc_mbx_issue_link_down(phba); |
3401 | } | 3413 | } |
3402 | if ((phba->sli.sli_flag & LPFC_MENLO_MAINT) && | 3414 | if (phba->sli.sli_flag & LPFC_MENLO_MAINT && |
3403 | ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP))) { | 3415 | attn_type == LPFC_ATT_LINK_UP) { |
3404 | if (phba->link_state != LPFC_LINK_DOWN) { | 3416 | if (phba->link_state != LPFC_LINK_DOWN) { |
3405 | phba->fc_stat.LinkDown++; | 3417 | phba->fc_stat.LinkDown++; |
3406 | lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, | 3418 | lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, |
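The rework above hoists the repeated bf_get(lpfc_mbx_read_top_att_type, la) decode into a local attn_type, which shortens the conditions and guarantees every branch compares the same decoded value; it also lets the new LPFC_ATT_UNEXP_WWPN attention share the link-down path. In miniature (a sketch; handle_link_up() and handle_link_down() are hypothetical stand-ins for the real branch bodies):

    uint8_t attn_type = bf_get(lpfc_mbx_read_top_att_type, la);

    if (attn_type == LPFC_ATT_LINK_UP)
            handle_link_up();                       /* hypothetical */
    else if (attn_type == LPFC_ATT_LINK_DOWN ||
             attn_type == LPFC_ATT_UNEXP_WWPN)
            handle_link_down();                     /* hypothetical */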
@@ -4136,7 +4148,6 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
4136 | int old_state, int new_state) | 4148 | int old_state, int new_state) |
4137 | { | 4149 | { |
4138 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | 4150 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
4139 | struct lpfc_hba *phba = vport->phba; | ||
4140 | 4151 | ||
4141 | if (new_state == NLP_STE_UNMAPPED_NODE) { | 4152 | if (new_state == NLP_STE_UNMAPPED_NODE) { |
4142 | ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; | 4153 | ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; |
@@ -4155,14 +4166,14 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
4155 | lpfc_unregister_remote_port(ndlp); | 4166 | lpfc_unregister_remote_port(ndlp); |
4156 | } | 4167 | } |
4157 | 4168 | ||
4158 | /* Notify the NVME transport of this rport's loss */ | 4169 | /* Notify the NVME transport of this rport's loss on the |
4159 | if (((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || | 4170 | * Initiator. For NVME Target, should upcall transport |
4160 | (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) && | 4171 | * in the else clause when API available. |
4161 | (vport->phba->nvmet_support == 0) && | 4172 | */ |
4162 | ((ndlp->nlp_fc4_type & NLP_FC4_NVME) || | 4173 | if (ndlp->nlp_fc4_type & NLP_FC4_NVME) { |
4163 | (ndlp->nlp_DID == Fabric_DID))) { | ||
4164 | vport->phba->nport_event_cnt++; | 4174 | vport->phba->nport_event_cnt++; |
4165 | lpfc_nvme_unregister_port(vport, ndlp); | 4175 | if (vport->phba->nvmet_support == 0) |
4176 | lpfc_nvme_unregister_port(vport, ndlp); | ||
4166 | } | 4177 | } |
4167 | } | 4178 | } |
4168 | 4179 | ||
@@ -4368,10 +4379,17 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
4368 | uint32_t did; | 4379 | uint32_t did; |
4369 | unsigned long flags; | 4380 | unsigned long flags; |
4370 | unsigned long *active_rrqs_xri_bitmap = NULL; | 4381 | unsigned long *active_rrqs_xri_bitmap = NULL; |
4382 | int rpi = LPFC_RPI_ALLOC_ERROR; | ||
4371 | 4383 | ||
4372 | if (!ndlp) | 4384 | if (!ndlp) |
4373 | return NULL; | 4385 | return NULL; |
4374 | 4386 | ||
4387 | if (phba->sli_rev == LPFC_SLI_REV4) { | ||
4388 | rpi = lpfc_sli4_alloc_rpi(vport->phba); | ||
4389 | if (rpi == LPFC_RPI_ALLOC_ERROR) | ||
4390 | return NULL; | ||
4391 | } | ||
4392 | |||
4375 | spin_lock_irqsave(&phba->ndlp_lock, flags); | 4393 | spin_lock_irqsave(&phba->ndlp_lock, flags); |
4376 | /* The ndlp should not be in memory free mode */ | 4394 | /* The ndlp should not be in memory free mode */ |
4377 | if (NLP_CHK_FREE_REQ(ndlp)) { | 4395 | if (NLP_CHK_FREE_REQ(ndlp)) { |
@@ -4381,7 +4399,7 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
4381 | "usgmap:x%x refcnt:%d\n", | 4399 | "usgmap:x%x refcnt:%d\n", |
4382 | (void *)ndlp, ndlp->nlp_usg_map, | 4400 | (void *)ndlp, ndlp->nlp_usg_map, |
4383 | kref_read(&ndlp->kref)); | 4401 | kref_read(&ndlp->kref)); |
4384 | return NULL; | 4402 | goto free_rpi; |
4385 | } | 4403 | } |
4386 | /* The ndlp should not already be in active mode */ | 4404 | /* The ndlp should not already be in active mode */ |
4387 | if (NLP_CHK_NODE_ACT(ndlp)) { | 4405 | if (NLP_CHK_NODE_ACT(ndlp)) { |
@@ -4391,7 +4409,7 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
4391 | "usgmap:x%x refcnt:%d\n", | 4409 | "usgmap:x%x refcnt:%d\n", |
4392 | (void *)ndlp, ndlp->nlp_usg_map, | 4410 | (void *)ndlp, ndlp->nlp_usg_map, |
4393 | kref_read(&ndlp->kref)); | 4411 | kref_read(&ndlp->kref)); |
4394 | return NULL; | 4412 | goto free_rpi; |
4395 | } | 4413 | } |
4396 | 4414 | ||
4397 | /* Keep the original DID */ | 4415 | /* Keep the original DID */ |
@@ -4409,7 +4427,7 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
4409 | 4427 | ||
4410 | spin_unlock_irqrestore(&phba->ndlp_lock, flags); | 4428 | spin_unlock_irqrestore(&phba->ndlp_lock, flags); |
4411 | if (vport->phba->sli_rev == LPFC_SLI_REV4) { | 4429 | if (vport->phba->sli_rev == LPFC_SLI_REV4) { |
4412 | ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba); | 4430 | ndlp->nlp_rpi = rpi; |
4413 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, | 4431 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, |
4414 | "0008 rpi:%x DID:%x flg:%x refcnt:%d " | 4432 | "0008 rpi:%x DID:%x flg:%x refcnt:%d " |
4415 | "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID, | 4433 | "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID, |
@@ -4426,6 +4444,11 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
4426 | "node enable: did:x%x", | 4444 | "node enable: did:x%x", |
4427 | ndlp->nlp_DID, 0, 0); | 4445 | ndlp->nlp_DID, 0, 0); |
4428 | return ndlp; | 4446 | return ndlp; |
4447 | |||
4448 | free_rpi: | ||
4449 | if (phba->sli_rev == LPFC_SLI_REV4) | ||
4450 | lpfc_sli4_free_rpi(vport->phba, rpi); | ||
4451 | return NULL; | ||
4429 | } | 4452 | } |
4430 | 4453 | ||
4431 | void | 4454 | void |
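lpfc_enable_node() now acquires the SLI-4 RPI before taking ndlp_lock, so an allocation failure can bail out before the node is touched, and the two usage-map error paths unwind it through a shared free_rpi label. The control flow reduced to a skeleton (a sketch; the two checks are collapsed into one and the logging is elided):

    int rpi = LPFC_RPI_ALLOC_ERROR;

    if (phba->sli_rev == LPFC_SLI_REV4) {
            rpi = lpfc_sli4_alloc_rpi(vport->phba);
            if (rpi == LPFC_RPI_ALLOC_ERROR)
                    return NULL;
    }

    spin_lock_irqsave(&phba->ndlp_lock, flags);
    if (NLP_CHK_FREE_REQ(ndlp) || NLP_CHK_NODE_ACT(ndlp)) {
            spin_unlock_irqrestore(&phba->ndlp_lock, flags);
            goto free_rpi;          /* node unusable: undo the RPI */
    }
    /* ... node re-initialization, as in the hunk above ... */
    spin_unlock_irqrestore(&phba->ndlp_lock, flags);
    if (vport->phba->sli_rev == LPFC_SLI_REV4)
            ndlp->nlp_rpi = rpi;    /* consume the pre-allocated RPI */
    return ndlp;

    free_rpi:
    if (phba->sli_rev == LPFC_SLI_REV4)
            lpfc_sli4_free_rpi(vport->phba, rpi);
    return NULL;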
@@ -5104,65 +5127,82 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) | |||
5104 | 5127 | ||
5105 | ndlp = lpfc_findnode_did(vport, did); | 5128 | ndlp = lpfc_findnode_did(vport, did); |
5106 | if (!ndlp) { | 5129 | if (!ndlp) { |
5130 | if (vport->phba->nvmet_support) | ||
5131 | return NULL; | ||
5107 | if ((vport->fc_flag & FC_RSCN_MODE) != 0 && | 5132 | if ((vport->fc_flag & FC_RSCN_MODE) != 0 && |
5108 | lpfc_rscn_payload_check(vport, did) == 0) | 5133 | lpfc_rscn_payload_check(vport, did) == 0) |
5109 | return NULL; | 5134 | return NULL; |
5110 | ndlp = (struct lpfc_nodelist *) | 5135 | ndlp = lpfc_nlp_init(vport, did); |
5111 | mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL); | ||
5112 | if (!ndlp) | 5136 | if (!ndlp) |
5113 | return NULL; | 5137 | return NULL; |
5114 | lpfc_nlp_init(vport, ndlp, did); | ||
5115 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); | 5138 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); |
5116 | if (vport->phba->nvmet_support) | ||
5117 | return ndlp; | ||
5118 | spin_lock_irq(shost->host_lock); | 5139 | spin_lock_irq(shost->host_lock); |
5119 | ndlp->nlp_flag |= NLP_NPR_2B_DISC; | 5140 | ndlp->nlp_flag |= NLP_NPR_2B_DISC; |
5120 | spin_unlock_irq(shost->host_lock); | 5141 | spin_unlock_irq(shost->host_lock); |
5121 | return ndlp; | 5142 | return ndlp; |
5122 | } else if (!NLP_CHK_NODE_ACT(ndlp)) { | 5143 | } else if (!NLP_CHK_NODE_ACT(ndlp)) { |
5144 | if (vport->phba->nvmet_support) | ||
5145 | return NULL; | ||
5123 | ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE); | 5146 | ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE); |
5124 | if (!ndlp) | 5147 | if (!ndlp) |
5125 | return NULL; | 5148 | return NULL; |
5126 | if (vport->phba->nvmet_support) | ||
5127 | return ndlp; | ||
5128 | spin_lock_irq(shost->host_lock); | 5149 | spin_lock_irq(shost->host_lock); |
5129 | ndlp->nlp_flag |= NLP_NPR_2B_DISC; | 5150 | ndlp->nlp_flag |= NLP_NPR_2B_DISC; |
5130 | spin_unlock_irq(shost->host_lock); | 5151 | spin_unlock_irq(shost->host_lock); |
5131 | return ndlp; | 5152 | return ndlp; |
5132 | } | 5153 | } |
5133 | 5154 | ||
5155 | /* The NVME Target does not want to actively manage an rport. | ||
5156 | * The goal is to allow the target to reset its state and clear | ||
5157 | * pending IO in preparation for the initiator to recover. | ||
5158 | */ | ||
5134 | if ((vport->fc_flag & FC_RSCN_MODE) && | 5159 | if ((vport->fc_flag & FC_RSCN_MODE) && |
5135 | !(vport->fc_flag & FC_NDISC_ACTIVE)) { | 5160 | !(vport->fc_flag & FC_NDISC_ACTIVE)) { |
5136 | if (lpfc_rscn_payload_check(vport, did)) { | 5161 | if (lpfc_rscn_payload_check(vport, did)) { |
5137 | /* If we've already received a PLOGI from this NPort | ||
5138 | * we don't need to try to discover it again. | ||
5139 | */ | ||
5140 | if (ndlp->nlp_flag & NLP_RCV_PLOGI) | ||
5141 | return NULL; | ||
5142 | 5162 | ||
5143 | /* Since this node is marked for discovery, | 5163 | /* Since this node is marked for discovery, |
5144 | * delay timeout is not needed. | 5164 | * delay timeout is not needed. |
5145 | */ | 5165 | */ |
5146 | lpfc_cancel_retry_delay_tmo(vport, ndlp); | 5166 | lpfc_cancel_retry_delay_tmo(vport, ndlp); |
5167 | |||
5168 | /* NVME Target mode waits until rport is known to be | ||
5169 | * impacted by the RSCN before it transitions. No | ||
5170 | * active management - just go to NPR provided the | ||
5171 | * node had a valid login. | ||
5172 | */ | ||
5147 | if (vport->phba->nvmet_support) | 5173 | if (vport->phba->nvmet_support) |
5148 | return ndlp; | 5174 | return ndlp; |
5175 | |||
5176 | /* If we've already received a PLOGI from this NPort | ||
5177 | * we don't need to try to discover it again. | ||
5178 | */ | ||
5179 | if (ndlp->nlp_flag & NLP_RCV_PLOGI) | ||
5180 | return NULL; | ||
5181 | |||
5149 | spin_lock_irq(shost->host_lock); | 5182 | spin_lock_irq(shost->host_lock); |
5150 | ndlp->nlp_flag |= NLP_NPR_2B_DISC; | 5183 | ndlp->nlp_flag |= NLP_NPR_2B_DISC; |
5151 | spin_unlock_irq(shost->host_lock); | 5184 | spin_unlock_irq(shost->host_lock); |
5152 | } else | 5185 | } else |
5153 | ndlp = NULL; | 5186 | ndlp = NULL; |
5154 | } else { | 5187 | } else { |
5155 | /* If we've already received a PLOGI from this NPort, | 5188 | /* If the initiator received a PLOGI from this NPort or if the |
5156 | * or we are already in the process of discovery on it, | 5189 | * initiator is already in the process of discovery on it, |
5157 | * we don't need to try to discover it again. | 5190 | * there's no need to try to discover it again. |
5158 | */ | 5191 | */ |
5159 | if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE || | 5192 | if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE || |
5160 | ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || | 5193 | ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || |
5161 | ndlp->nlp_flag & NLP_RCV_PLOGI) | 5194 | (!vport->phba->nvmet_support && |
5195 | ndlp->nlp_flag & NLP_RCV_PLOGI)) | ||
5162 | return NULL; | 5196 | return NULL; |
5163 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); | 5197 | |
5164 | if (vport->phba->nvmet_support) | 5198 | if (vport->phba->nvmet_support) |
5165 | return ndlp; | 5199 | return ndlp; |
5200 | |||
5201 | /* Moving to NPR state clears unsolicited flags and | ||
5202 | * allows for rediscovery | ||
5203 | */ | ||
5204 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); | ||
5205 | |||
5166 | spin_lock_irq(shost->host_lock); | 5206 | spin_lock_irq(shost->host_lock); |
5167 | ndlp->nlp_flag |= NLP_NPR_2B_DISC; | 5207 | ndlp->nlp_flag |= NLP_NPR_2B_DISC; |
5168 | spin_unlock_irq(shost->host_lock); | 5208 | spin_unlock_irq(shost->host_lock); |
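lpfc_setup_disc_node() now front-loads its nvmet_support checks: a target-mode port neither creates nor re-enables nodes from discovery, and for a node it already knows it returns early, before any of the initiator-side NLP_NPR_2B_DISC bookkeeping. A condensed sketch of the gating (the RSCN handling above is folded into the else branch here):

    ndlp = lpfc_findnode_did(vport, did);
    if (!ndlp) {
            /* Target mode never instantiates a node from discovery. */
            if (vport->phba->nvmet_support)
                    return NULL;
            ndlp = lpfc_nlp_init(vport, did);
            if (!ndlp)
                    return NULL;
            lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
    } else if (vport->phba->nvmet_support) {
            /* Known node: hand it back untouched; the target does no
             * active rport management and sets no rediscovery flags.
             */
            return ndlp;
    }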
@@ -5887,16 +5927,31 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi) | |||
5887 | return NULL; | 5927 | return NULL; |
5888 | } | 5928 | } |
5889 | 5929 | ||
5890 | void | 5930 | struct lpfc_nodelist * |
5891 | lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | 5931 | lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did) |
5892 | uint32_t did) | ||
5893 | { | 5932 | { |
5933 | struct lpfc_nodelist *ndlp; | ||
5934 | int rpi = LPFC_RPI_ALLOC_ERROR; | ||
5935 | |||
5936 | if (vport->phba->sli_rev == LPFC_SLI_REV4) { | ||
5937 | rpi = lpfc_sli4_alloc_rpi(vport->phba); | ||
5938 | if (rpi == LPFC_RPI_ALLOC_ERROR) | ||
5939 | return NULL; | ||
5940 | } | ||
5941 | |||
5942 | ndlp = mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL); | ||
5943 | if (!ndlp) { | ||
5944 | if (vport->phba->sli_rev == LPFC_SLI_REV4) | ||
5945 | lpfc_sli4_free_rpi(vport->phba, rpi); | ||
5946 | return NULL; | ||
5947 | } | ||
5948 | |||
5894 | memset(ndlp, 0, sizeof (struct lpfc_nodelist)); | 5949 | memset(ndlp, 0, sizeof (struct lpfc_nodelist)); |
5895 | 5950 | ||
5896 | lpfc_initialize_node(vport, ndlp, did); | 5951 | lpfc_initialize_node(vport, ndlp, did); |
5897 | INIT_LIST_HEAD(&ndlp->nlp_listp); | 5952 | INIT_LIST_HEAD(&ndlp->nlp_listp); |
5898 | if (vport->phba->sli_rev == LPFC_SLI_REV4) { | 5953 | if (vport->phba->sli_rev == LPFC_SLI_REV4) { |
5899 | ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba); | 5954 | ndlp->nlp_rpi = rpi; |
5900 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, | 5955 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, |
5901 | "0007 rpi:%x DID:%x flg:%x refcnt:%d " | 5956 | "0007 rpi:%x DID:%x flg:%x refcnt:%d " |
5902 | "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID, | 5957 | "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID, |
@@ -5918,7 +5973,7 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
5918 | "node init: did:x%x", | 5973 | "node init: did:x%x", |
5919 | ndlp->nlp_DID, 0, 0); | 5974 | ndlp->nlp_DID, 0, 0); |
5920 | 5975 | ||
5921 | return; | 5976 | return ndlp; |
5922 | } | 5977 | } |
5923 | 5978 | ||
5924 | /* This routine releases all resources associated with a specific NPort's ndlp | 5979 |
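The reworked lpfc_nlp_init() above fixes the ordering its callers rely on: the RPI, the resource that can fail independently, is acquired first and rolled back if the mempool allocation fails, so no node ever carries LPFC_RPI_ALLOC_ERROR as a live rpi. The acquire/rollback idiom in isolation (sketch, names as in the hunk):

    struct lpfc_nodelist *ndlp;
    int rpi = LPFC_RPI_ALLOC_ERROR;

    if (vport->phba->sli_rev == LPFC_SLI_REV4) {
            rpi = lpfc_sli4_alloc_rpi(vport->phba);
            if (rpi == LPFC_RPI_ALLOC_ERROR)
                    return NULL;            /* nothing to undo yet */
    }

    ndlp = mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
    if (!ndlp) {
            /* Second allocation failed: release the first. */
            if (vport->phba->sli_rev == LPFC_SLI_REV4)
                    lpfc_sli4_free_rpi(vport->phba, rpi);
            return NULL;
    }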
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 15ca21484150..26a5647e057e 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h | |||
@@ -509,6 +509,8 @@ struct class_parms { | |||
509 | uint8_t word3Reserved2; /* Fc Word 3, bit 0: 7 */ | 509 | uint8_t word3Reserved2; /* Fc Word 3, bit 0: 7 */ |
510 | }; | 510 | }; |
511 | 511 | ||
512 | #define FAPWWN_KEY_VENDOR 0x42524344 /* valid vendor version fawwpn key */ ||
513 | |||
512 | struct serv_parm { /* Structure is in Big Endian format */ | 514 | struct serv_parm { /* Structure is in Big Endian format */ |
513 | struct csp cmn; | 515 | struct csp cmn; |
514 | struct lpfc_name portName; | 516 | struct lpfc_name portName; |
@@ -2885,6 +2887,7 @@ struct lpfc_mbx_read_top { | |||
2885 | #define LPFC_ATT_RESERVED 0x00 /* Reserved - attType */ | 2887 | #define LPFC_ATT_RESERVED 0x00 /* Reserved - attType */ |
2886 | #define LPFC_ATT_LINK_UP 0x01 /* Link is up */ | 2888 | #define LPFC_ATT_LINK_UP 0x01 /* Link is up */ |
2887 | #define LPFC_ATT_LINK_DOWN 0x02 /* Link is down */ | 2889 | #define LPFC_ATT_LINK_DOWN 0x02 /* Link is down */ |
2890 | #define LPFC_ATT_UNEXP_WWPN 0x06 /* Link is down, Unexpected WWPN */ ||
2888 | uint32_t word3; | 2891 | uint32_t word3; |
2889 | #define lpfc_mbx_read_top_alpa_granted_SHIFT 24 | 2892 | #define lpfc_mbx_read_top_alpa_granted_SHIFT 24 |
2890 | #define lpfc_mbx_read_top_alpa_granted_MASK 0x000000FF | 2893 | #define lpfc_mbx_read_top_alpa_granted_MASK 0x000000FF |
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 15277705cb6b..1d12f2be36bc 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h | |||
@@ -2720,6 +2720,9 @@ struct lpfc_mbx_request_features { | |||
2720 | #define lpfc_mbx_rq_ftr_rq_ifip_SHIFT 7 | 2720 | #define lpfc_mbx_rq_ftr_rq_ifip_SHIFT 7 |
2721 | #define lpfc_mbx_rq_ftr_rq_ifip_MASK 0x00000001 | 2721 | #define lpfc_mbx_rq_ftr_rq_ifip_MASK 0x00000001 |
2722 | #define lpfc_mbx_rq_ftr_rq_ifip_WORD word2 | 2722 | #define lpfc_mbx_rq_ftr_rq_ifip_WORD word2 |
2723 | #define lpfc_mbx_rq_ftr_rq_iaar_SHIFT 9 | ||
2724 | #define lpfc_mbx_rq_ftr_rq_iaar_MASK 0x00000001 | ||
2725 | #define lpfc_mbx_rq_ftr_rq_iaar_WORD word2 | ||
2723 | #define lpfc_mbx_rq_ftr_rq_perfh_SHIFT 11 | 2726 | #define lpfc_mbx_rq_ftr_rq_perfh_SHIFT 11 |
2724 | #define lpfc_mbx_rq_ftr_rq_perfh_MASK 0x00000001 | 2727 | #define lpfc_mbx_rq_ftr_rq_perfh_MASK 0x00000001 |
2725 | #define lpfc_mbx_rq_ftr_rq_perfh_WORD word2 | 2728 | #define lpfc_mbx_rq_ftr_rq_perfh_WORD word2 |
@@ -3853,6 +3856,7 @@ struct lpfc_acqe_fc_la { | |||
3853 | #define LPFC_FC_LA_TYPE_NO_HARD_ALPA 0x3 | 3856 | #define LPFC_FC_LA_TYPE_NO_HARD_ALPA 0x3 |
3854 | #define LPFC_FC_LA_TYPE_MDS_LINK_DOWN 0x4 | 3857 | #define LPFC_FC_LA_TYPE_MDS_LINK_DOWN 0x4 |
3855 | #define LPFC_FC_LA_TYPE_MDS_LOOPBACK 0x5 | 3858 | #define LPFC_FC_LA_TYPE_MDS_LOOPBACK 0x5 |
3859 | #define LPFC_FC_LA_TYPE_UNEXP_WWPN 0x6 | ||
3856 | #define lpfc_acqe_fc_la_port_type_SHIFT 6 | 3860 | #define lpfc_acqe_fc_la_port_type_SHIFT 6 |
3857 | #define lpfc_acqe_fc_la_port_type_MASK 0x00000003 | 3861 | #define lpfc_acqe_fc_la_port_type_MASK 0x00000003 |
3858 | #define lpfc_acqe_fc_la_port_type_WORD word0 | 3862 | #define lpfc_acqe_fc_la_port_type_WORD word0 |
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 6cc561b04211..90ae354a9c45 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c | |||
@@ -42,6 +42,10 @@ | |||
42 | #include <scsi/scsi_device.h> | 42 | #include <scsi/scsi_device.h> |
43 | #include <scsi/scsi_host.h> | 43 | #include <scsi/scsi_host.h> |
44 | #include <scsi/scsi_transport_fc.h> | 44 | #include <scsi/scsi_transport_fc.h> |
45 | #include <scsi/scsi_tcq.h> | ||
46 | #include <scsi/fc/fc_fs.h> | ||
47 | |||
48 | #include <linux/nvme-fc-driver.h> | ||
45 | 49 | ||
46 | #include "lpfc_hw4.h" | 50 | #include "lpfc_hw4.h" |
47 | #include "lpfc_hw.h" | 51 | #include "lpfc_hw.h" |
@@ -52,6 +56,7 @@ | |||
52 | #include "lpfc.h" | 56 | #include "lpfc.h" |
53 | #include "lpfc_scsi.h" | 57 | #include "lpfc_scsi.h" |
54 | #include "lpfc_nvme.h" | 58 | #include "lpfc_nvme.h" |
59 | #include "lpfc_nvmet.h" | ||
55 | #include "lpfc_logmsg.h" | 60 | #include "lpfc_logmsg.h" |
56 | #include "lpfc_crtn.h" | 61 | #include "lpfc_crtn.h" |
57 | #include "lpfc_vport.h" | 62 | #include "lpfc_vport.h" |
@@ -335,6 +340,9 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) | |||
335 | void | 340 | void |
336 | lpfc_update_vport_wwn(struct lpfc_vport *vport) | 341 | lpfc_update_vport_wwn(struct lpfc_vport *vport) |
337 | { | 342 | { |
343 | uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level; | ||
344 | u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0]; | ||
345 | |||
338 | /* If the soft name exists then update it using the service params */ | 346 | /* If the soft name exists then update it using the service params */ |
339 | if (vport->phba->cfg_soft_wwnn) | 347 | if (vport->phba->cfg_soft_wwnn) |
340 | u64_to_wwn(vport->phba->cfg_soft_wwnn, | 348 | u64_to_wwn(vport->phba->cfg_soft_wwnn, |
@@ -354,9 +362,25 @@ lpfc_update_vport_wwn(struct lpfc_vport *vport) | |||
354 | memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename, | 362 | memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename, |
355 | sizeof(struct lpfc_name)); | 363 | sizeof(struct lpfc_name)); |
356 | 364 | ||
357 | if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn) | 365 | /* |
366 | * If the port name has changed, then set the Param changes flag | ||
367 | * to unreg the login | ||
368 | */ | ||
369 | if (vport->fc_portname.u.wwn[0] != 0 && | ||
370 | memcmp(&vport->fc_portname, &vport->fc_sparam.portName, | ||
371 | sizeof(struct lpfc_name))) | ||
372 | vport->vport_flag |= FAWWPN_PARAM_CHG; | ||
373 | |||
374 | if (vport->fc_portname.u.wwn[0] == 0 || | ||
375 | vport->phba->cfg_soft_wwpn || | ||
376 | (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) || | ||
377 | vport->vport_flag & FAWWPN_SET) { | ||
358 | memcpy(&vport->fc_portname, &vport->fc_sparam.portName, | 378 | memcpy(&vport->fc_portname, &vport->fc_sparam.portName, |
359 | sizeof(struct lpfc_name)); | 379 | sizeof(struct lpfc_name)); |
380 | vport->vport_flag &= ~FAWWPN_SET; | ||
381 | if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) | ||
382 | vport->vport_flag |= FAWWPN_SET; | ||
383 | } | ||
360 | else | 384 | else |
361 | memcpy(&vport->fc_sparam.portName, &vport->fc_portname, | 385 | memcpy(&vport->fc_sparam.portName, &vport->fc_portname, |
362 | sizeof(struct lpfc_name)); | 386 | sizeof(struct lpfc_name)); |
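lpfc_update_vport_wwn() now recognizes a fabric-assigned WWPN (FA-WWPN): the service parameters must carry valid_vendor_ver_level == 1 and the FAPWWN_KEY_VENDOR key in the first word of vendorVersion (the same field the lpfc_els.c FDISC hunk above zeroes before transmit). The detection in isolation (sketch; the key word is big-endian on the wire, so cpu_to_be32() of the raw word makes the comparison endian-safe):

    uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
    u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];

    if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
            vport->vport_flag |= FAWWPN_SET;    /* adopt the fabric WWPN */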
@@ -1003,8 +1027,10 @@ static int | |||
1003 | lpfc_hba_down_post_s4(struct lpfc_hba *phba) | 1027 | lpfc_hba_down_post_s4(struct lpfc_hba *phba) |
1004 | { | 1028 | { |
1005 | struct lpfc_scsi_buf *psb, *psb_next; | 1029 | struct lpfc_scsi_buf *psb, *psb_next; |
1030 | struct lpfc_nvmet_rcv_ctx *ctxp, *ctxp_next; | ||
1006 | LIST_HEAD(aborts); | 1031 | LIST_HEAD(aborts); |
1007 | LIST_HEAD(nvme_aborts); | 1032 | LIST_HEAD(nvme_aborts); |
1033 | LIST_HEAD(nvmet_aborts); | ||
1008 | unsigned long iflag = 0; | 1034 | unsigned long iflag = 0; |
1009 | struct lpfc_sglq *sglq_entry = NULL; | 1035 | struct lpfc_sglq *sglq_entry = NULL; |
1010 | 1036 | ||
@@ -1027,16 +1053,10 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba) | |||
1027 | list_for_each_entry(sglq_entry, | 1053 | list_for_each_entry(sglq_entry, |
1028 | &phba->sli4_hba.lpfc_abts_els_sgl_list, list) | 1054 | &phba->sli4_hba.lpfc_abts_els_sgl_list, list) |
1029 | sglq_entry->state = SGL_FREED; | 1055 | sglq_entry->state = SGL_FREED; |
1030 | list_for_each_entry(sglq_entry, | ||
1031 | &phba->sli4_hba.lpfc_abts_nvmet_sgl_list, list) | ||
1032 | sglq_entry->state = SGL_FREED; | ||
1033 | 1056 | ||
1034 | list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, | 1057 | list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, |
1035 | &phba->sli4_hba.lpfc_els_sgl_list); | 1058 | &phba->sli4_hba.lpfc_els_sgl_list); |
1036 | 1059 | ||
1037 | if (phba->sli4_hba.nvme_wq) | ||
1038 | list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_sgl_list, | ||
1039 | &phba->sli4_hba.lpfc_nvmet_sgl_list); | ||
1040 | 1060 | ||
1041 | spin_unlock(&phba->sli4_hba.sgl_list_lock); | 1061 | spin_unlock(&phba->sli4_hba.sgl_list_lock); |
1042 | /* abts_scsi_buf_list_lock required because worker thread uses this | 1062 | /* abts_scsi_buf_list_lock required because worker thread uses this |
@@ -1053,6 +1073,8 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba) | |||
1053 | spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); | 1073 | spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); |
1054 | list_splice_init(&phba->sli4_hba.lpfc_abts_nvme_buf_list, | 1074 | list_splice_init(&phba->sli4_hba.lpfc_abts_nvme_buf_list, |
1055 | &nvme_aborts); | 1075 | &nvme_aborts); |
1076 | list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list, | ||
1077 | &nvmet_aborts); | ||
1056 | spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); | 1078 | spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); |
1057 | } | 1079 | } |
1058 | 1080 | ||
@@ -1066,13 +1088,20 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba) | |||
1066 | list_splice(&aborts, &phba->lpfc_scsi_buf_list_put); | 1088 | list_splice(&aborts, &phba->lpfc_scsi_buf_list_put); |
1067 | spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); | 1089 | spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); |
1068 | 1090 | ||
1069 | list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) { | 1091 | if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { |
1070 | psb->pCmd = NULL; | 1092 | list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) { |
1071 | psb->status = IOSTAT_SUCCESS; | 1093 | psb->pCmd = NULL; |
1094 | psb->status = IOSTAT_SUCCESS; | ||
1095 | } | ||
1096 | spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag); | ||
1097 | list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put); | ||
1098 | spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag); | ||
1099 | |||
1100 | list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) { | ||
1101 | ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP); | ||
1102 | lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf); | ||
1103 | } | ||
1072 | } | 1104 | } |
1073 | spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag); | ||
1074 | list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put); | ||
1075 | spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag); | ||
1076 | 1105 | ||
1077 | lpfc_sli4_free_sp_events(phba); | 1106 | lpfc_sli4_free_sp_events(phba); |
1078 | return 0; | 1107 | return 0; |
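The SLI-4 down-post path uses the splice-then-walk idiom: under abts_nvme_buf_list_lock the aborted NVME buffers and the new NVMET contexts are moved wholesale onto private list heads, so the per-entry repair runs without the lock held, and NVMET contexts are reposted to the receive queue rather than being treated as SGL entries. The idiom in isolation (sketch; declarations as in the hunk):

    LIST_HEAD(nvme_aborts);
    LIST_HEAD(nvmet_aborts);

    spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
    list_splice_init(&phba->sli4_hba.lpfc_abts_nvme_buf_list, &nvme_aborts);
    list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list, &nvmet_aborts);
    spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);

    /* Lock dropped: each context can be repaired and reposted. */
    list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
            ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
            lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
    }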
@@ -2874,34 +2903,38 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba) | |||
2874 | { | 2903 | { |
2875 | struct lpfc_nodelist *ndlp, *next_ndlp; | 2904 | struct lpfc_nodelist *ndlp, *next_ndlp; |
2876 | struct lpfc_vport **vports; | 2905 | struct lpfc_vport **vports; |
2877 | int i; | 2906 | int i, rpi; |
2907 | unsigned long flags; | ||
2878 | 2908 | ||
2879 | if (phba->sli_rev != LPFC_SLI_REV4) | 2909 | if (phba->sli_rev != LPFC_SLI_REV4) |
2880 | return; | 2910 | return; |
2881 | 2911 | ||
2882 | vports = lpfc_create_vport_work_array(phba); | 2912 | vports = lpfc_create_vport_work_array(phba); |
2883 | if (vports != NULL) { | 2913 | if (vports == NULL) |
2884 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { | 2914 | return; |
2885 | if (vports[i]->load_flag & FC_UNLOADING) | ||
2886 | continue; | ||
2887 | 2915 | ||
2888 | list_for_each_entry_safe(ndlp, next_ndlp, | 2916 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { |
2889 | &vports[i]->fc_nodes, | 2917 | if (vports[i]->load_flag & FC_UNLOADING) |
2890 | nlp_listp) { | 2918 | continue; |
2891 | if (NLP_CHK_NODE_ACT(ndlp)) { | 2919 | |
2892 | ndlp->nlp_rpi = | 2920 | list_for_each_entry_safe(ndlp, next_ndlp, |
2893 | lpfc_sli4_alloc_rpi(phba); | 2921 | &vports[i]->fc_nodes, |
2894 | lpfc_printf_vlog(ndlp->vport, KERN_INFO, | 2922 | nlp_listp) { |
2895 | LOG_NODE, | 2923 | if (!NLP_CHK_NODE_ACT(ndlp)) |
2896 | "0009 rpi:%x DID:%x " | 2924 | continue; |
2897 | "flg:%x map:%x %p\n", | 2925 | rpi = lpfc_sli4_alloc_rpi(phba); |
2898 | ndlp->nlp_rpi, | 2926 | if (rpi == LPFC_RPI_ALLOC_ERROR) { |
2899 | ndlp->nlp_DID, | 2927 | spin_lock_irqsave(&phba->ndlp_lock, flags); |
2900 | ndlp->nlp_flag, | 2928 | NLP_CLR_NODE_ACT(ndlp); |
2901 | ndlp->nlp_usg_map, | 2929 | spin_unlock_irqrestore(&phba->ndlp_lock, flags); |
2902 | ndlp); | 2930 | continue; |
2903 | } | ||
2904 | } | 2931 | } |
2932 | ndlp->nlp_rpi = rpi; | ||
2933 | lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, | ||
2934 | "0009 rpi:%x DID:%x " | ||
2935 | "flg:%x map:%x %p\n", ndlp->nlp_rpi, | ||
2936 | ndlp->nlp_DID, ndlp->nlp_flag, | ||
2937 | ndlp->nlp_usg_map, ndlp); | ||
2905 | } | 2938 | } |
2906 | } | 2939 | } |
2907 | lpfc_destroy_vport_work_array(phba, vports); | 2940 | lpfc_destroy_vport_work_array(phba, vports); |
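Besides flattening two levels of nesting with an early return and continue, the lpfc_sli4_node_prep() rewrite gains a real failure path: a node whose RPI cannot be allocated is deactivated instead of silently being assigned LPFC_RPI_ALLOC_ERROR. The per-node body (sketch; this runs inside the vport/node loops shown above):

    rpi = lpfc_sli4_alloc_rpi(phba);
    if (rpi == LPFC_RPI_ALLOC_ERROR) {
            /* Deactivate the node rather than record a bogus RPI. */
            spin_lock_irqsave(&phba->ndlp_lock, flags);
            NLP_CLR_NODE_ACT(ndlp);
            spin_unlock_irqrestore(&phba->ndlp_lock, flags);
            continue;
    }
    ndlp->nlp_rpi = rpi;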
@@ -3508,6 +3541,12 @@ lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba) | |||
3508 | spin_unlock(&phba->scsi_buf_list_put_lock); | 3541 | spin_unlock(&phba->scsi_buf_list_put_lock); |
3509 | spin_unlock_irq(&phba->scsi_buf_list_get_lock); | 3542 | spin_unlock_irq(&phba->scsi_buf_list_get_lock); |
3510 | 3543 | ||
3544 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, | ||
3545 | "6060 Current allocated SCSI xri-sgl count:%d, " | ||
3546 | "maximum SCSI xri count:%d (split:%d)\n", | ||
3547 | phba->sli4_hba.scsi_xri_cnt, | ||
3548 | phba->sli4_hba.scsi_xri_max, phba->cfg_xri_split); | ||
3549 | |||
3511 | if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) { | 3550 | if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) { |
3512 | /* max scsi xri shrinked below the allocated scsi buffers */ | 3551 | /* max scsi xri shrinked below the allocated scsi buffers */ |
3513 | scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt - | 3552 | scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt - |
@@ -4508,9 +4547,15 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) | |||
4508 | /* Parse and translate link attention fields */ | 4547 | /* Parse and translate link attention fields */ |
4509 | la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop; | 4548 | la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop; |
4510 | la->eventTag = acqe_fc->event_tag; | 4549 | la->eventTag = acqe_fc->event_tag; |
4511 | bf_set(lpfc_mbx_read_top_att_type, la, | ||
4512 | LPFC_FC_LA_TYPE_LINK_DOWN); | ||
4513 | 4550 | ||
4551 | if (phba->sli4_hba.link_state.status == | ||
4552 | LPFC_FC_LA_TYPE_UNEXP_WWPN) { | ||
4553 | bf_set(lpfc_mbx_read_top_att_type, la, | ||
4554 | LPFC_FC_LA_TYPE_UNEXP_WWPN); | ||
4555 | } else { | ||
4556 | bf_set(lpfc_mbx_read_top_att_type, la, | ||
4557 | LPFC_FC_LA_TYPE_LINK_DOWN); | ||
4558 | } | ||
4514 | /* Invoke the mailbox command callback function */ | 4559 | /* Invoke the mailbox command callback function */ |
4515 | lpfc_mbx_cmpl_read_topology(phba, pmb); | 4560 | lpfc_mbx_cmpl_read_topology(phba, pmb); |
4516 | 4561 | ||
@@ -4716,10 +4761,9 @@ lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport) | |||
4716 | ndlp = lpfc_findnode_did(vport, Fabric_DID); | 4761 | ndlp = lpfc_findnode_did(vport, Fabric_DID); |
4717 | if (!ndlp) { | 4762 | if (!ndlp) { |
4718 | /* Cannot find existing Fabric ndlp, so allocate a new one */ | 4763 | /* Cannot find existing Fabric ndlp, so allocate a new one */ |
4719 | ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); | 4764 | ndlp = lpfc_nlp_init(vport, Fabric_DID); |
4720 | if (!ndlp) | 4765 | if (!ndlp) |
4721 | return 0; | 4766 | return 0; |
4722 | lpfc_nlp_init(vport, ndlp, Fabric_DID); | ||
4723 | /* Set the node type */ | 4767 | /* Set the node type */ |
4724 | ndlp->nlp_type |= NLP_FABRIC; | 4768 | ndlp->nlp_type |= NLP_FABRIC; |
4725 | /* Put ndlp onto node list */ | 4769 | /* Put ndlp onto node list */ |
@@ -5778,6 +5822,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) | |||
5778 | /* Initialize the Abort nvme buffer list used by driver */ | 5822 | /* Initialize the Abort nvme buffer list used by driver */ |
5779 | spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock); | 5823 | spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock); |
5780 | INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list); | 5824 | INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list); |
5825 | INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); | ||
5781 | /* Fast-path XRI aborted CQ Event work queue list */ | 5826 | /* Fast-path XRI aborted CQ Event work queue list */ |
5782 | INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue); | 5827 | INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue); |
5783 | } | 5828 | } |
@@ -5809,6 +5854,12 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) | |||
5809 | INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list); | 5854 | INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list); |
5810 | INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list); | 5855 | INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list); |
5811 | 5856 | ||
5857 | /* Initialize mboxq lists. If the early init routines fail | ||
5858 | * these lists need to be correctly initialized. | ||
5859 | */ | ||
5860 | INIT_LIST_HEAD(&phba->sli.mboxq); | ||
5861 | INIT_LIST_HEAD(&phba->sli.mboxq_cmpl); | ||
5862 | |||
5812 | /* initialize optic_state to 0xFF */ | 5863 | /* initialize optic_state to 0xFF */ |
5813 | phba->sli4_hba.lnk_info.optic_state = 0xff; | 5864 | phba->sli4_hba.lnk_info.optic_state = 0xff; |
5814 | 5865 | ||
@@ -5874,6 +5925,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) | |||
5874 | "READ_NV, mbxStatus x%x\n", | 5925 | "READ_NV, mbxStatus x%x\n", |
5875 | bf_get(lpfc_mqe_command, &mboxq->u.mqe), | 5926 | bf_get(lpfc_mqe_command, &mboxq->u.mqe), |
5876 | bf_get(lpfc_mqe_status, &mboxq->u.mqe)); | 5927 | bf_get(lpfc_mqe_status, &mboxq->u.mqe)); |
5928 | mempool_free(mboxq, phba->mbox_mem_pool); | ||
5877 | rc = -EIO; | 5929 | rc = -EIO; |
5878 | goto out_free_bsmbx; | 5930 | goto out_free_bsmbx; |
5879 | } | 5931 | } |
@@ -6398,7 +6450,7 @@ lpfc_init_sgl_list(struct lpfc_hba *phba) | |||
6398 | INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list); | 6450 | INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list); |
6399 | INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); | 6451 | INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); |
6400 | INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list); | 6452 | INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list); |
6401 | INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_sgl_list); | 6453 | INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); |
6402 | 6454 | ||
6403 | /* els xri-sgl book keeping */ | 6455 | /* els xri-sgl book keeping */ |
6404 | phba->sli4_hba.els_xri_cnt = 0; | 6456 | phba->sli4_hba.els_xri_cnt = 0; |
@@ -7799,7 +7851,7 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx) | |||
7799 | 7851 | ||
7800 | /* Create Fast Path FCP WQs */ | 7852 | /* Create Fast Path FCP WQs */ |
7801 | wqesize = (phba->fcp_embed_io) ? | 7853 | wqesize = (phba->fcp_embed_io) ? |
7802 | LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; | 7854 | LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; |
7803 | qdesc = lpfc_sli4_queue_alloc(phba, wqesize, phba->sli4_hba.wq_ecount); | 7855 | qdesc = lpfc_sli4_queue_alloc(phba, wqesize, phba->sli4_hba.wq_ecount); |
7804 | if (!qdesc) { | 7856 | if (!qdesc) { |
7805 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 7857 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
@@ -7830,7 +7882,7 @@ int | |||
7830 | lpfc_sli4_queue_create(struct lpfc_hba *phba) | 7882 | lpfc_sli4_queue_create(struct lpfc_hba *phba) |
7831 | { | 7883 | { |
7832 | struct lpfc_queue *qdesc; | 7884 | struct lpfc_queue *qdesc; |
7833 | int idx, io_channel, max; | 7885 | int idx, io_channel; |
7834 | 7886 | ||
7835 | /* | 7887 | /* |
7836 | * Create HBA Record arrays. | 7888 | * Create HBA Record arrays. |
@@ -7991,15 +8043,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) | |||
7991 | if (lpfc_alloc_nvme_wq_cq(phba, idx)) | 8043 | if (lpfc_alloc_nvme_wq_cq(phba, idx)) |
7992 | goto out_error; | 8044 | goto out_error; |
7993 | 8045 | ||
7994 | /* allocate MRQ CQs */ | ||
7995 | max = phba->cfg_nvme_io_channel; | ||
7996 | if (max < phba->cfg_nvmet_mrq) | ||
7997 | max = phba->cfg_nvmet_mrq; | ||
7998 | |||
7999 | for (idx = 0; idx < max; idx++) | ||
8000 | if (lpfc_alloc_nvme_wq_cq(phba, idx)) | ||
8001 | goto out_error; | ||
8002 | |||
8003 | if (phba->nvmet_support) { | 8046 | if (phba->nvmet_support) { |
8004 | for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { | 8047 | for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { |
8005 | qdesc = lpfc_sli4_queue_alloc(phba, | 8048 | qdesc = lpfc_sli4_queue_alloc(phba, |
@@ -8221,11 +8264,11 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba) | |||
8221 | 8264 | ||
8222 | /* Release FCP cqs */ | 8265 | /* Release FCP cqs */ |
8223 | lpfc_sli4_release_queues(&phba->sli4_hba.fcp_cq, | 8266 | lpfc_sli4_release_queues(&phba->sli4_hba.fcp_cq, |
8224 | phba->cfg_fcp_io_channel); | 8267 | phba->cfg_fcp_io_channel); |
8225 | 8268 | ||
8226 | /* Release FCP wqs */ | 8269 | /* Release FCP wqs */ |
8227 | lpfc_sli4_release_queues(&phba->sli4_hba.fcp_wq, | 8270 | lpfc_sli4_release_queues(&phba->sli4_hba.fcp_wq, |
8228 | phba->cfg_fcp_io_channel); | 8271 | phba->cfg_fcp_io_channel); |
8229 | 8272 | ||
8230 | /* Release FCP CQ mapping array */ | 8273 | /* Release FCP CQ mapping array */ |
8231 | lpfc_sli4_release_queue_map(&phba->sli4_hba.fcp_cq_map); | 8274 | lpfc_sli4_release_queue_map(&phba->sli4_hba.fcp_cq_map); |
@@ -8571,15 +8614,15 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) | |||
8571 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 8614 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
8572 | "0528 %s not allocated\n", | 8615 | "0528 %s not allocated\n", |
8573 | phba->sli4_hba.mbx_cq ? | 8616 | phba->sli4_hba.mbx_cq ? |
8574 | "Mailbox WQ" : "Mailbox CQ"); | 8617 | "Mailbox WQ" : "Mailbox CQ"); |
8575 | rc = -ENOMEM; | 8618 | rc = -ENOMEM; |
8576 | goto out_destroy; | 8619 | goto out_destroy; |
8577 | } | 8620 | } |
8578 | 8621 | ||
8579 | rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0], | 8622 | rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0], |
8580 | phba->sli4_hba.mbx_cq, | 8623 | phba->sli4_hba.mbx_cq, |
8581 | phba->sli4_hba.mbx_wq, | 8624 | phba->sli4_hba.mbx_wq, |
8582 | NULL, 0, LPFC_MBOX); | 8625 | NULL, 0, LPFC_MBOX); |
8583 | if (rc) { | 8626 | if (rc) { |
8584 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 8627 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
8585 | "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n", | 8628 | "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n", |
@@ -9934,17 +9977,19 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) | |||
9934 | { | 9977 | { |
9935 | int wait_time = 0; | 9978 | int wait_time = 0; |
9936 | int nvme_xri_cmpl = 1; | 9979 | int nvme_xri_cmpl = 1; |
9980 | int nvmet_xri_cmpl = 1; | ||
9937 | int fcp_xri_cmpl = 1; | 9981 | int fcp_xri_cmpl = 1; |
9938 | int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); | 9982 | int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); |
9939 | int nvmet_xri_cmpl = | ||
9940 | list_empty(&phba->sli4_hba.lpfc_abts_nvmet_sgl_list); | ||
9941 | 9983 | ||
9942 | if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) | 9984 | if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) |
9943 | fcp_xri_cmpl = | 9985 | fcp_xri_cmpl = |
9944 | list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list); | 9986 | list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list); |
9945 | if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) | 9987 | if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { |
9946 | nvme_xri_cmpl = | 9988 | nvme_xri_cmpl = |
9947 | list_empty(&phba->sli4_hba.lpfc_abts_nvme_buf_list); | 9989 | list_empty(&phba->sli4_hba.lpfc_abts_nvme_buf_list); |
9990 | nvmet_xri_cmpl = | ||
9991 | list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); | ||
9992 | } | ||
9948 | 9993 | ||
9949 | while (!fcp_xri_cmpl || !els_xri_cmpl || !nvme_xri_cmpl || | 9994 | while (!fcp_xri_cmpl || !els_xri_cmpl || !nvme_xri_cmpl || |
9950 | !nvmet_xri_cmpl) { | 9995 | !nvmet_xri_cmpl) { |
@@ -9970,9 +10015,12 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) | |||
9970 | msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); | 10015 | msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); |
9971 | wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1; | 10016 | wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1; |
9972 | } | 10017 | } |
9973 | if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) | 10018 | if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { |
9974 | nvme_xri_cmpl = list_empty( | 10019 | nvme_xri_cmpl = list_empty( |
9975 | &phba->sli4_hba.lpfc_abts_nvme_buf_list); | 10020 | &phba->sli4_hba.lpfc_abts_nvme_buf_list); |
10021 | nvmet_xri_cmpl = list_empty( | ||
10022 | &phba->sli4_hba.lpfc_abts_nvmet_ctx_list); | ||
10023 | } | ||
9976 | 10024 | ||
9977 | if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) | 10025 | if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) |
9978 | fcp_xri_cmpl = list_empty( | 10026 | fcp_xri_cmpl = list_empty( |
@@ -9981,8 +10029,6 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) | |||
9981 | els_xri_cmpl = | 10029 | els_xri_cmpl = |
9982 | list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); | 10030 | list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); |
9983 | 10031 | ||
9984 | nvmet_xri_cmpl = | ||
9985 | list_empty(&phba->sli4_hba.lpfc_abts_nvmet_sgl_list); | ||
9986 | } | 10032 | } |
9987 | } | 10033 | } |
9988 | 10034 | ||
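lpfc_sli4_xri_exchange_busy_wait() folds the NVMET abort list into the same poll scheme as the others: each *_cmpl flag starts at 1 (nothing to wait for), is lowered only when the matching FC4 type is enabled, and the loop re-samples list_empty() after each sleep. The shape of the loop (sketch; timeout logging and the FCP/ELS legs elided):

    int nvme_xri_cmpl = 1, nvmet_xri_cmpl = 1;

    if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
            nvme_xri_cmpl =
                    list_empty(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
            nvmet_xri_cmpl =
                    list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
    }

    while (!nvme_xri_cmpl || !nvmet_xri_cmpl) {
            msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
            if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
                    nvme_xri_cmpl = list_empty(
                            &phba->sli4_hba.lpfc_abts_nvme_buf_list);
                    nvmet_xri_cmpl = list_empty(
                            &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
            }
    }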
@@ -10048,9 +10094,14 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba) | |||
10048 | /* Stop kthread signal shall trigger work_done one more time */ | 10094 | /* Stop kthread signal shall trigger work_done one more time */ |
10049 | kthread_stop(phba->worker_thread); | 10095 | kthread_stop(phba->worker_thread); |
10050 | 10096 | ||
10097 | /* Unset the queues shared with the hardware then release all | ||
10098 | * allocated resources. | ||
10099 | */ | ||
10100 | lpfc_sli4_queue_unset(phba); | ||
10101 | lpfc_sli4_queue_destroy(phba); | ||
10102 | |||
10051 | /* Reset SLI4 HBA FCoE function */ | 10103 | /* Reset SLI4 HBA FCoE function */ |
10052 | lpfc_pci_function_reset(phba); | 10104 | lpfc_pci_function_reset(phba); |
10053 | lpfc_sli4_queue_destroy(phba); | ||
10054 | 10105 | ||
10055 | /* Stop the SLI4 device port */ | 10106 | /* Stop the SLI4 device port */ |
10056 | phba->pport->work_port_events = 0; | 10107 | phba->pport->work_port_events = 0; |
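The unset path above also reorders teardown: the queues are unregistered from the hardware and freed before the PCI function reset, instead of being destroyed afterwards. The resulting sequence (sketch):

    lpfc_sli4_queue_unset(phba);    /* detach queues from the HW */
    lpfc_sli4_queue_destroy(phba);  /* then free their memory */
    lpfc_pci_function_reset(phba);  /* reset only after teardown */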
@@ -10306,6 +10357,7 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
10306 | } | 10357 | } |
10307 | 10358 | ||
10308 | /* Initialize and populate the iocb list per host */ | 10359 | /* Initialize and populate the iocb list per host */ |
10360 | |||
10309 | error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); | 10361 | error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); |
10310 | if (error) { | 10362 | if (error) { |
10311 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 10363 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
@@ -11051,7 +11103,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
11051 | struct lpfc_hba *phba; | 11103 | struct lpfc_hba *phba; |
11052 | struct lpfc_vport *vport = NULL; | 11104 | struct lpfc_vport *vport = NULL; |
11053 | struct Scsi_Host *shost = NULL; | 11105 | struct Scsi_Host *shost = NULL; |
11054 | int error; | 11106 | int error, cnt; |
11055 | uint32_t cfg_mode, intr_mode; | 11107 | uint32_t cfg_mode, intr_mode; |
11056 | 11108 | ||
11057 | /* Allocate memory for HBA structure */ | 11109 | /* Allocate memory for HBA structure */ |
@@ -11085,12 +11137,15 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
11085 | goto out_unset_pci_mem_s4; | 11137 | goto out_unset_pci_mem_s4; |
11086 | } | 11138 | } |
11087 | 11139 | ||
11088 | /* Initialize and populate the iocb list per host */ | 11140 | cnt = phba->cfg_iocb_cnt * 1024; |
11141 | if (phba->nvmet_support) | ||
11142 | cnt += phba->cfg_nvmet_mrq_post * phba->cfg_nvmet_mrq; | ||
11089 | 11143 | ||
11144 | /* Initialize and populate the iocb list per host */ | ||
11090 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | 11145 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
11091 | "2821 initialize iocb list %d.\n", | 11146 | "2821 initialize iocb list %d total %d\n", |
11092 | phba->cfg_iocb_cnt*1024); | 11147 | phba->cfg_iocb_cnt, cnt); |
11093 | error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024); | 11148 | error = lpfc_init_iocb_list(phba, cnt); |
11094 | 11149 | ||
11095 | if (error) { | 11150 | if (error) { |
11096 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 11151 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
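The SLI-4 probe now sizes the iocb list up front: the base cfg_iocb_cnt (in units of 1024 entries) plus, in target mode, one entry for every receive-queue posting across all MRQs. The arithmetic (sketch):

    int cnt = phba->cfg_iocb_cnt * 1024;

    /* Target mode posts cfg_nvmet_mrq_post buffers on each of the
     * cfg_nvmet_mrq receive queues; reserve an iocb for each.
     */
    if (phba->nvmet_support)
            cnt += phba->cfg_nvmet_mrq_post * phba->cfg_nvmet_mrq;

    error = lpfc_init_iocb_list(phba, cnt);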
@@ -11177,7 +11232,9 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
11177 | if ((phba->nvmet_support == 0) && | 11232 | if ((phba->nvmet_support == 0) && |
11178 | (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) { | 11233 | (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) { |
11179 | /* Create NVME binding with nvme_fc_transport. This | 11234 | /* Create NVME binding with nvme_fc_transport. This |
11180 | * ensures the vport is initialized. | 11235 | * ensures the vport is initialized. If the localport |
11236 | * create fails, it should not unload the driver to | ||
11237 | * support field issues. | ||
11181 | */ | 11238 | */ |
11182 | error = lpfc_nvme_create_localport(vport); | 11239 | error = lpfc_nvme_create_localport(vport); |
11183 | if (error) { | 11240 | if (error) { |
@@ -11185,7 +11242,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
11185 | "6004 NVME registration failed, " | 11242 | "6004 NVME registration failed, " |
11186 | "error x%x\n", | 11243 | "error x%x\n", |
11187 | error); | 11244 | error); |
11188 | goto out_disable_intr; | ||
11189 | } | 11245 | } |
11190 | } | 11246 | } |
11191 | 11247 | ||
@@ -11984,6 +12040,7 @@ int | |||
11984 | lpfc_fof_queue_create(struct lpfc_hba *phba) | 12040 | lpfc_fof_queue_create(struct lpfc_hba *phba) |
11985 | { | 12041 | { |
11986 | struct lpfc_queue *qdesc; | 12042 | struct lpfc_queue *qdesc; |
12043 | uint32_t wqesize; | ||
11987 | 12044 | ||
11988 | /* Create FOF EQ */ | 12045 | /* Create FOF EQ */ |
11989 | qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, | 12046 | qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, |
@@ -12004,8 +12061,11 @@ lpfc_fof_queue_create(struct lpfc_hba *phba) | |||
12004 | phba->sli4_hba.oas_cq = qdesc; | 12061 | phba->sli4_hba.oas_cq = qdesc; |
12005 | 12062 | ||
12006 | /* Create OAS WQ */ | 12063 | /* Create OAS WQ */ |
12007 | qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, | 12064 | wqesize = (phba->fcp_embed_io) ? |
12065 | LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; | ||
12066 | qdesc = lpfc_sli4_queue_alloc(phba, wqesize, | ||
12008 | phba->sli4_hba.wq_ecount); | 12067 | phba->sli4_hba.wq_ecount); |
12068 | |||
12009 | if (!qdesc) | 12069 | if (!qdesc) |
12010 | goto out_error; | 12070 | goto out_error; |
12011 | 12071 | ||
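As with the fast-path FCP WQs earlier in this patch, the OAS work queue now derives its entry size from fcp_embed_io: embedding the FCP command into the WQE requires the 128-byte format. The selection (sketch):

    uint32_t wqesize = phba->fcp_embed_io ?
                       LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;

    qdesc = lpfc_sli4_queue_alloc(phba, wqesize,
                                  phba->sli4_hba.wq_ecount);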
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index a928f5187fa4..ce25a18367b5 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c | |||
@@ -2083,9 +2083,12 @@ lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq) | |||
2083 | if (phba->max_vpi && phba->cfg_enable_npiv) | 2083 | if (phba->max_vpi && phba->cfg_enable_npiv) |
2084 | bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1); | 2084 | bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1); |
2085 | 2085 | ||
2086 | if (phba->nvmet_support) | 2086 | if (phba->nvmet_support) { |
2087 | bf_set(lpfc_mbx_rq_ftr_rq_mrqp, &mboxq->u.mqe.un.req_ftrs, 1); | 2087 | bf_set(lpfc_mbx_rq_ftr_rq_mrqp, &mboxq->u.mqe.un.req_ftrs, 1); |
2088 | 2088 | /* iaab/iaar NOT set for now */ | |
2089 | bf_set(lpfc_mbx_rq_ftr_rq_iaab, &mboxq->u.mqe.un.req_ftrs, 0); | ||
2090 | bf_set(lpfc_mbx_rq_ftr_rq_iaar, &mboxq->u.mqe.un.req_ftrs, 0); | ||
2091 | } | ||
2089 | return; | 2092 | return; |
2090 | } | 2093 | } |
2091 | 2094 | ||
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 061626bdf701..8777c2d5f50d 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c | |||
@@ -361,8 +361,12 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
361 | case NLP_STE_PRLI_ISSUE: | 361 | case NLP_STE_PRLI_ISSUE: |
362 | case NLP_STE_UNMAPPED_NODE: | 362 | case NLP_STE_UNMAPPED_NODE: |
363 | case NLP_STE_MAPPED_NODE: | 363 | case NLP_STE_MAPPED_NODE: |
364 | /* lpfc_plogi_confirm_nport skips fabric did, handle it here */ | 364 | /* For initiators, lpfc_plogi_confirm_nport skips fabric did. |
365 | if (!(ndlp->nlp_type & NLP_FABRIC)) { | 365 | * For target mode, execute implicit logo. |
366 | * Fabric nodes go into NPR. | ||
367 | */ | ||
368 | if (!(ndlp->nlp_type & NLP_FABRIC) && | ||
369 | !(phba->nvmet_support)) { | ||
366 | lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, | 370 | lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, |
367 | ndlp, NULL); | 371 | ndlp, NULL); |
368 | return 1; | 372 | return 1; |
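In lpfc_rcv_plogi(), a PLOGI received while already logged in is now handled by role: only an initiator-side non-fabric node is ACC'd in place; target mode falls through to the implicit-LOGO handling, and fabric nodes go to NPR, per the updated comment. The gate (sketch; phba is the vport's hba, as in the surrounding function):

    if (!(ndlp->nlp_type & NLP_FABRIC) && !phba->nvmet_support) {
            /* Initiator: ACC the PLOGI and confirm the nport. */
            lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
            return 1;
    }
    /* Target mode and fabric nodes fall through: implicit LOGO / NPR. */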
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index 0024de1c6c1f..8008c8205fb6 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c | |||
@@ -401,6 +401,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport, | |||
401 | struct lpfc_nodelist *ndlp; | 401 | struct lpfc_nodelist *ndlp; |
402 | struct ulp_bde64 *bpl; | 402 | struct ulp_bde64 *bpl; |
403 | struct lpfc_dmabuf *bmp; | 403 | struct lpfc_dmabuf *bmp; |
404 | uint16_t ntype, nstate; | ||
404 | 405 | ||
405 | /* there are two dma buf in the request, actually there is one and | 406 | /* there are two dma buf in the request, actually there is one and |
406 | * the second one is just the start address + cmd size. | 407 | * the second one is just the start address + cmd size. |
@@ -417,11 +418,26 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport, | |||
417 | vport = lport->vport; | 418 | vport = lport->vport; |
418 | 419 | ||
419 | ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id); | 420 | ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id); |
420 | if (!ndlp) { | 421 | if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { |
421 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, | 422 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR, |
422 | "6043 Could not find node for DID %x\n", | 423 | "6051 DID x%06x not an active rport.\n", |
423 | pnvme_rport->port_id); | 424 | pnvme_rport->port_id); |
424 | return 1; | 425 | return -ENODEV; |
426 | } | ||
427 | |||
428 | /* The remote node has to be a mapped nvme target or an | ||
429 | * unmapped nvme initiator or it's an error. | ||
430 | */ | ||
431 | ntype = ndlp->nlp_type; | ||
432 | nstate = ndlp->nlp_state; | ||
433 | if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) || | ||
434 | (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) { | ||
435 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR, | ||
436 | "6088 DID x%06x not ready for " | ||
437 | "IO. State x%x, Type x%x\n", | ||
438 | pnvme_rport->port_id, | ||
439 | ndlp->nlp_state, ndlp->nlp_type); | ||
440 | return -ENODEV; | ||
425 | } | 441 | } |
426 | bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); | 442 | bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); |
427 | if (!bmp) { | 443 | if (!bmp) { |
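lpfc_nvme_ls_req() now validates the remote node before building the LS request: it must exist, be active, and be in the state matching its role (mapped for an NVME target, unmapped for an NVME initiator); failures return -ENODEV rather than the old bare 1, which the nvme-fc transport can interpret. The role/state check in isolation (sketch):

    ntype = ndlp->nlp_type;
    nstate = ndlp->nlp_state;

    /* A target must be fully mapped; an initiator stays unmapped.
     * Any other combination is not ready for LS traffic.
     */
    if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) ||
        (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE))
            return -ENODEV;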
@@ -456,7 +472,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport, | |||
456 | 472 | ||
457 | /* Expand print to include key fields. */ | 473 | /* Expand print to include key fields. */ |
458 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, | 474 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, |
459 | "6051 ENTER. lport %p, rport %p lsreq%p rqstlen:%d " | 475 | "6149 ENTER. lport %p, rport %p lsreq%p rqstlen:%d " |
460 | "rsplen:%d %pad %pad\n", | 476 | "rsplen:%d %pad %pad\n", |
461 | pnvme_lport, pnvme_rport, | 477 | pnvme_lport, pnvme_rport, |
462 | pnvme_lsreq, pnvme_lsreq->rqstlen, | 478 | pnvme_lsreq, pnvme_lsreq->rqstlen, |
@@ -745,6 +761,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, | |||
745 | struct nvme_fc_cmd_iu *cp; | 761 | struct nvme_fc_cmd_iu *cp; |
746 | struct lpfc_nvme_rport *rport; | 762 | struct lpfc_nvme_rport *rport; |
747 | struct lpfc_nodelist *ndlp; | 763 | struct lpfc_nodelist *ndlp; |
764 | struct lpfc_nvme_fcpreq_priv *freqpriv; | ||
748 | unsigned long flags; | 765 | unsigned long flags; |
749 | uint32_t code; | 766 | uint32_t code; |
750 | uint16_t cid, sqhd, data; | 767 | uint16_t cid, sqhd, data; |
@@ -772,9 +789,8 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, | |||
772 | ndlp = rport->ndlp; | 789 | ndlp = rport->ndlp; |
773 | if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { | 790 | if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { |
774 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR, | 791 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR, |
775 | "6061 rport %p, ndlp %p, DID x%06x ndlp " | 792 | "6061 rport %p, DID x%06x node not ready.\n", |
776 | "not ready.\n", | 793 | rport, rport->remoteport->port_id); |
777 | rport, ndlp, rport->remoteport->port_id); | ||
778 | 794 | ||
779 | ndlp = lpfc_findnode_did(vport, rport->remoteport->port_id); | 795 | ndlp = lpfc_findnode_did(vport, rport->remoteport->port_id); |
780 | if (!ndlp) { | 796 | if (!ndlp) { |
@@ -853,15 +869,18 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, | |||
853 | break; | 869 | break; |
854 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, | 870 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, |
855 | "6081 NVME Completion Protocol Error: " | 871 | "6081 NVME Completion Protocol Error: " |
856 | "status x%x result x%x placed x%x\n", | 872 | "xri %x status x%x result x%x " |
873 | "placed x%x\n", | ||
874 | lpfc_ncmd->cur_iocbq.sli4_xritag, | ||
857 | lpfc_ncmd->status, lpfc_ncmd->result, | 875 | lpfc_ncmd->status, lpfc_ncmd->result, |
858 | wcqe->total_data_placed); | 876 | wcqe->total_data_placed); |
859 | break; | 877 | break; |
860 | default: | 878 | default: |
861 | out_err: | 879 | out_err: |
862 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, | 880 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, |
863 | "6072 NVME Completion Error: " | 881 | "6072 NVME Completion Error: xri %x " |
864 | "status x%x result x%x placed x%x\n", | 882 | "status x%x result x%x placed x%x\n", |
883 | lpfc_ncmd->cur_iocbq.sli4_xritag, | ||
865 | lpfc_ncmd->status, lpfc_ncmd->result, | 884 | lpfc_ncmd->status, lpfc_ncmd->result, |
866 | wcqe->total_data_placed); | 885 | wcqe->total_data_placed); |
867 | nCmd->transferred_length = 0; | 886 | nCmd->transferred_length = 0; |
@@ -900,6 +919,8 @@ out_err: | |||
900 | phba->cpucheck_cmpl_io[lpfc_ncmd->cpu]++; | 919 | phba->cpucheck_cmpl_io[lpfc_ncmd->cpu]++; |
901 | } | 920 | } |
902 | #endif | 921 | #endif |
922 | freqpriv = nCmd->private; | ||
923 | freqpriv->nvme_buf = NULL; | ||
903 | nCmd->done(nCmd); | 924 | nCmd->done(nCmd); |
904 | 925 | ||
905 | spin_lock_irqsave(&phba->hbalock, flags); | 926 | spin_lock_irqsave(&phba->hbalock, flags); |
@@ -1099,12 +1120,12 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport, | |||
1099 | 1120 | ||
1100 | first_data_sgl = sgl; | 1121 | first_data_sgl = sgl; |
1101 | lpfc_ncmd->seg_cnt = nCmd->sg_cnt; | 1122 | lpfc_ncmd->seg_cnt = nCmd->sg_cnt; |
1102 | if (lpfc_ncmd->seg_cnt > phba->cfg_sg_seg_cnt) { | 1123 | if (lpfc_ncmd->seg_cnt > phba->cfg_nvme_seg_cnt) { |
1103 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, | 1124 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, |
1104 | "6058 Too many sg segments from " | 1125 | "6058 Too many sg segments from " |
1105 | "NVME Transport. Max %d, " | 1126 | "NVME Transport. Max %d, " |
1106 | "nvmeIO sg_cnt %d\n", | 1127 | "nvmeIO sg_cnt %d\n", |
1107 | phba->cfg_sg_seg_cnt, | 1128 | phba->cfg_nvme_seg_cnt, |
1108 | lpfc_ncmd->seg_cnt); | 1129 | lpfc_ncmd->seg_cnt); |
1109 | lpfc_ncmd->seg_cnt = 0; | 1130 | lpfc_ncmd->seg_cnt = 0; |
1110 | return 1; | 1131 | return 1; |
@@ -1196,6 +1217,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, | |||
1196 | struct lpfc_nvme_buf *lpfc_ncmd; | 1217 | struct lpfc_nvme_buf *lpfc_ncmd; |
1197 | struct lpfc_nvme_rport *rport; | 1218 | struct lpfc_nvme_rport *rport; |
1198 | struct lpfc_nvme_qhandle *lpfc_queue_info; | 1219 | struct lpfc_nvme_qhandle *lpfc_queue_info; |
1220 | struct lpfc_nvme_fcpreq_priv *freqpriv = pnvme_fcreq->private; | ||
1199 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS | 1221 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
1200 | uint64_t start = 0; | 1222 | uint64_t start = 0; |
1201 | #endif | 1223 | #endif |
@@ -1274,7 +1296,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, | |||
1274 | * Do not let the IO hang out forever. There is no midlayer issuing | 1296 | * Do not let the IO hang out forever. There is no midlayer issuing |
1275 | * an abort so inform the FW of the maximum IO pending time. | 1297 | * an abort so inform the FW of the maximum IO pending time. |
1276 | */ | 1298 | */ |
1277 | pnvme_fcreq->private = (void *)lpfc_ncmd; | 1299 | freqpriv->nvme_buf = lpfc_ncmd; |
1278 | lpfc_ncmd->nvmeCmd = pnvme_fcreq; | 1300 | lpfc_ncmd->nvmeCmd = pnvme_fcreq; |
1279 | lpfc_ncmd->nrport = rport; | 1301 | lpfc_ncmd->nrport = rport; |
1280 | lpfc_ncmd->ndlp = ndlp; | 1302 | lpfc_ncmd->ndlp = ndlp; |
@@ -1404,6 +1426,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, | |||
1404 | struct lpfc_nvme_buf *lpfc_nbuf; | 1426 | struct lpfc_nvme_buf *lpfc_nbuf; |
1405 | struct lpfc_iocbq *abts_buf; | 1427 | struct lpfc_iocbq *abts_buf; |
1406 | struct lpfc_iocbq *nvmereq_wqe; | 1428 | struct lpfc_iocbq *nvmereq_wqe; |
1429 | struct lpfc_nvme_fcpreq_priv *freqpriv = pnvme_fcreq->private; | ||
1407 | union lpfc_wqe *abts_wqe; | 1430 | union lpfc_wqe *abts_wqe; |
1408 | unsigned long flags; | 1431 | unsigned long flags; |
1409 | int ret_val; | 1432 | int ret_val; |
@@ -1414,7 +1437,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, | |||
1414 | phba = vport->phba; | 1437 | phba = vport->phba; |
1415 | 1438 | ||
1416 | /* Announce entry to new IO submit field. */ | 1439 | /* Announce entry to new IO submit field. */ |
1417 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, | 1440 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS, |
1418 | "6002 Abort Request to rport DID x%06x " | 1441 | "6002 Abort Request to rport DID x%06x " |
1419 | "for nvme_fc_req %p\n", | 1442 | "for nvme_fc_req %p\n", |
1420 | pnvme_rport->port_id, | 1443 | pnvme_rport->port_id, |
@@ -1444,7 +1467,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, | |||
1444 | /* The remote node has to be ready to send an abort. */ | 1467 | /* The remote node has to be ready to send an abort. */ |
1445 | if ((ndlp->nlp_state != NLP_STE_MAPPED_NODE) && | 1468 | if ((ndlp->nlp_state != NLP_STE_MAPPED_NODE) && |
1446 | !(ndlp->nlp_type & NLP_NVME_TARGET)) { | 1469 | !(ndlp->nlp_type & NLP_NVME_TARGET)) { |
1447 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_ABTS, | 1470 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, |
1448 | "6048 rport %p, DID x%06x not ready for " | 1471 | "6048 rport %p, DID x%06x not ready for " |
1449 | "IO. State x%x, Type x%x\n", | 1472 | "IO. State x%x, Type x%x\n", |
1450 | rport, pnvme_rport->port_id, | 1473 | rport, pnvme_rport->port_id, |
@@ -1459,27 +1482,28 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, | |||
1459 | /* driver queued commands are in process of being flushed */ | 1482 | /* driver queued commands are in process of being flushed */ |
1460 | if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) { | 1483 | if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) { |
1461 | spin_unlock_irqrestore(&phba->hbalock, flags); | 1484 | spin_unlock_irqrestore(&phba->hbalock, flags); |
1462 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, | 1485 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, |
1463 | "6139 Driver in reset cleanup - flushing " | 1486 | "6139 Driver in reset cleanup - flushing " |
1464 | "NVME Req now. hba_flag x%x\n", | 1487 | "NVME Req now. hba_flag x%x\n", |
1465 | phba->hba_flag); | 1488 | phba->hba_flag); |
1466 | return; | 1489 | return; |
1467 | } | 1490 | } |
1468 | 1491 | ||
1469 | lpfc_nbuf = (struct lpfc_nvme_buf *)pnvme_fcreq->private; | 1492 | lpfc_nbuf = freqpriv->nvme_buf; |
1470 | if (!lpfc_nbuf) { | 1493 | if (!lpfc_nbuf) { |
1471 | spin_unlock_irqrestore(&phba->hbalock, flags); | 1494 | spin_unlock_irqrestore(&phba->hbalock, flags); |
1472 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, | 1495 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, |
1473 | "6140 NVME IO req has no matching lpfc nvme " | 1496 | "6140 NVME IO req has no matching lpfc nvme " |
1474 | "io buffer. Skipping abort req.\n"); | 1497 | "io buffer. Skipping abort req.\n"); |
1475 | return; | 1498 | return; |
1476 | } else if (!lpfc_nbuf->nvmeCmd) { | 1499 | } else if (!lpfc_nbuf->nvmeCmd) { |
1477 | spin_unlock_irqrestore(&phba->hbalock, flags); | 1500 | spin_unlock_irqrestore(&phba->hbalock, flags); |
1478 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, | 1501 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, |
1479 | "6141 lpfc NVME IO req has no nvme_fcreq " | 1502 | "6141 lpfc NVME IO req has no nvme_fcreq " |
1480 | "io buffer. Skipping abort req.\n"); | 1503 | "io buffer. Skipping abort req.\n"); |
1481 | return; | 1504 | return; |
1482 | } | 1505 | } |
1506 | nvmereq_wqe = &lpfc_nbuf->cur_iocbq; | ||
1483 | 1507 | ||
1484 | /* | 1508 | /* |
1485 | * The lpfc_nbuf and the mapped nvme_fcreq in the driver's | 1509 | * The lpfc_nbuf and the mapped nvme_fcreq in the driver's |
@@ -1490,23 +1514,22 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, | |||
1490 | */ | 1514 | */ |
1491 | if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) { | 1515 | if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) { |
1492 | spin_unlock_irqrestore(&phba->hbalock, flags); | 1516 | spin_unlock_irqrestore(&phba->hbalock, flags); |
1493 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, | 1517 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, |
1494 | "6143 NVME req mismatch: " | 1518 | "6143 NVME req mismatch: " |
1495 | "lpfc_nbuf %p nvmeCmd %p, " | 1519 | "lpfc_nbuf %p nvmeCmd %p, " |
1496 | "pnvme_fcreq %p. Skipping Abort\n", | 1520 | "pnvme_fcreq %p. Skipping Abort xri x%x\n", |
1497 | lpfc_nbuf, lpfc_nbuf->nvmeCmd, | 1521 | lpfc_nbuf, lpfc_nbuf->nvmeCmd, |
1498 | pnvme_fcreq); | 1522 | pnvme_fcreq, nvmereq_wqe->sli4_xritag); |
1499 | return; | 1523 | return; |
1500 | } | 1524 | } |
1501 | 1525 | ||
1502 | /* Don't abort IOs no longer on the pending queue. */ | 1526 | /* Don't abort IOs no longer on the pending queue. */ |
1503 | nvmereq_wqe = &lpfc_nbuf->cur_iocbq; | ||
1504 | if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) { | 1527 | if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) { |
1505 | spin_unlock_irqrestore(&phba->hbalock, flags); | 1528 | spin_unlock_irqrestore(&phba->hbalock, flags); |
1506 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, | 1529 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, |
1507 | "6142 NVME IO req %p not queued - skipping " | 1530 | "6142 NVME IO req %p not queued - skipping " |
1508 | "abort req\n", | 1531 | "abort req xri x%x\n", |
1509 | pnvme_fcreq); | 1532 | pnvme_fcreq, nvmereq_wqe->sli4_xritag); |
1510 | return; | 1533 | return; |
1511 | } | 1534 | } |
1512 | 1535 | ||
@@ -1517,21 +1540,22 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, | |||
1517 | /* Outstanding abort is in progress */ | 1540 | /* Outstanding abort is in progress */ |
1518 | if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) { | 1541 | if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) { |
1519 | spin_unlock_irqrestore(&phba->hbalock, flags); | 1542 | spin_unlock_irqrestore(&phba->hbalock, flags); |
1520 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, | 1543 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, |
1521 | "6144 Outstanding NVME I/O Abort Request " | 1544 | "6144 Outstanding NVME I/O Abort Request " |
1522 | "still pending on nvme_fcreq %p, " | 1545 | "still pending on nvme_fcreq %p, " |
1523 | "lpfc_ncmd %p\n", | 1546 | "lpfc_ncmd %p xri x%x\n", |
1524 | pnvme_fcreq, lpfc_nbuf); | 1547 | pnvme_fcreq, lpfc_nbuf, |
1548 | nvmereq_wqe->sli4_xritag); | ||
1525 | return; | 1549 | return; |
1526 | } | 1550 | } |
1527 | 1551 | ||
1528 | abts_buf = __lpfc_sli_get_iocbq(phba); | 1552 | abts_buf = __lpfc_sli_get_iocbq(phba); |
1529 | if (!abts_buf) { | 1553 | if (!abts_buf) { |
1530 | spin_unlock_irqrestore(&phba->hbalock, flags); | 1554 | spin_unlock_irqrestore(&phba->hbalock, flags); |
1531 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, | 1555 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, |
1532 | "6136 No available abort wqes. Skipping " | 1556 | "6136 No available abort wqes. Skipping " |
1533 | "Abts req for nvme_fcreq %p.\n", | 1557 | "Abts req for nvme_fcreq %p xri x%x\n", |
1534 | pnvme_fcreq); | 1558 | pnvme_fcreq, nvmereq_wqe->sli4_xritag); |
1535 | return; | 1559 | return; |
1536 | } | 1560 | } |
1537 | 1561 | ||
@@ -1580,7 +1604,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, | |||
1580 | ret_val = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_buf); | 1604 | ret_val = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_buf); |
1581 | spin_unlock_irqrestore(&phba->hbalock, flags); | 1605 | spin_unlock_irqrestore(&phba->hbalock, flags); |
1582 | if (ret_val == IOCB_ERROR) { | 1606 | if (ret_val == IOCB_ERROR) { |
1583 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, | 1607 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, |
1584 | "6137 Failed abts issue_wqe with status x%x " | 1608 | "6137 Failed abts issue_wqe with status x%x " |
1585 | "for nvme_fcreq %p.\n", | 1609 | "for nvme_fcreq %p.\n", |
1586 | ret_val, pnvme_fcreq); | 1610 | ret_val, pnvme_fcreq); |
@@ -1588,8 +1612,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, | |||
1588 | return; | 1612 | return; |
1589 | } | 1613 | } |
1590 | 1614 | ||
1591 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, | 1615 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS, |
1592 | "6138 Transport Abort NVME Request Issued for\n" | 1616 | "6138 Transport Abort NVME Request Issued for " |
1593 | "ox_id x%x on reqtag x%x\n", | 1617 | "ox_id x%x on reqtag x%x\n", |
1594 | nvmereq_wqe->sli4_xritag, | 1618 | nvmereq_wqe->sli4_xritag, |
1595 | abts_buf->iotag); | 1619 | abts_buf->iotag); |
@@ -1618,7 +1642,7 @@ static struct nvme_fc_port_template lpfc_nvme_template = { | |||
1618 | .local_priv_sz = sizeof(struct lpfc_nvme_lport), | 1642 | .local_priv_sz = sizeof(struct lpfc_nvme_lport), |
1619 | .remote_priv_sz = sizeof(struct lpfc_nvme_rport), | 1643 | .remote_priv_sz = sizeof(struct lpfc_nvme_rport), |
1620 | .lsrqst_priv_sz = 0, | 1644 | .lsrqst_priv_sz = 0, |
1621 | .fcprqst_priv_sz = 0, | 1645 | .fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv), |
1622 | }; | 1646 | }; |
1623 | 1647 | ||
1624 | /** | 1648 | /** |
@@ -2049,7 +2073,7 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) | |||
2049 | if (lpfc_test_rrq_active(phba, ndlp, | 2073 | if (lpfc_test_rrq_active(phba, ndlp, |
2050 | lpfc_ncmd->cur_iocbq.sli4_lxritag)) | 2074 | lpfc_ncmd->cur_iocbq.sli4_lxritag)) |
2051 | continue; | 2075 | continue; |
2052 | list_del(&lpfc_ncmd->list); | 2076 | list_del_init(&lpfc_ncmd->list); |
2053 | found = 1; | 2077 | found = 1; |
2054 | break; | 2078 | break; |
2055 | } | 2079 | } |
@@ -2064,7 +2088,7 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) | |||
2064 | if (lpfc_test_rrq_active( | 2088 | if (lpfc_test_rrq_active( |
2065 | phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag)) | 2089 | phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag)) |
2066 | continue; | 2090 | continue; |
2067 | list_del(&lpfc_ncmd->list); | 2091 | list_del_init(&lpfc_ncmd->list); |
2068 | found = 1; | 2092 | found = 1; |
2069 | break; | 2093 | break; |
2070 | } | 2094 | } |
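
The two hunks above switch list_del() to list_del_init() when pulling a buffer off the free list. The distinction matters because these entries can be examined again later: list_del() leaves poison pointers in the entry, while list_del_init() leaves it as a valid empty node, so a subsequent list_empty() test or re-add is well defined. A minimal sketch of the pattern, using generic names rather than the lpfc structures:

#include <linux/list.h>

struct demo_buf {
	struct list_head list;
	int tag;
};

/* Pop one buffer; list_del_init() (not list_del()) keeps buf->list a
 * valid empty node, so list_empty(&buf->list) stays meaningful and a
 * later list_add_tail() needs no re-initialization.
 */
static struct demo_buf *demo_get(struct list_head *pool)
{
	struct demo_buf *buf;

	if (list_empty(pool))
		return NULL;
	buf = list_first_entry(pool, struct demo_buf, list);
	list_del_init(&buf->list);
	return buf;
}
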
@@ -2092,6 +2116,12 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd) | |||
2092 | 2116 | ||
2093 | lpfc_ncmd->nonsg_phys = 0; | 2117 | lpfc_ncmd->nonsg_phys = 0; |
2094 | if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) { | 2118 | if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) { |
2119 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, | ||
2120 | "6310 XB release deferred for " | ||
2121 | "ox_id x%x on reqtag x%x\n", | ||
2122 | lpfc_ncmd->cur_iocbq.sli4_xritag, | ||
2123 | lpfc_ncmd->cur_iocbq.iotag); | ||
2124 | |||
2095 | spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, | 2125 | spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, |
2096 | iflag); | 2126 | iflag); |
2097 | lpfc_ncmd->nvmeCmd = NULL; | 2127 | lpfc_ncmd->nvmeCmd = NULL; |
@@ -2142,8 +2172,18 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport) | |||
2142 | nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn); | 2172 | nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn); |
2143 | nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn); | 2173 | nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn); |
2144 | 2174 | ||
2145 | /* For now need + 1 to get around NVME transport logic */ | 2175 | /* Limit to LPFC_MAX_NVME_SEG_CNT. |
2146 | lpfc_nvme_template.max_sgl_segments = phba->cfg_sg_seg_cnt + 1; | 2176 | * For now need + 1 to get around NVME transport logic. |
2177 | */ | ||
2178 | if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) { | ||
2179 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_INIT, | ||
2180 | "6300 Reducing sg segment cnt to %d\n", | ||
2181 | LPFC_MAX_NVME_SEG_CNT); | ||
2182 | phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT; | ||
2183 | } else { | ||
2184 | phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt; | ||
2185 | } | ||
2186 | lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; | ||
2147 | lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel; | 2187 | lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel; |
2148 | 2188 | ||
2149 | /* localport is allocated from the stack, but the registration | 2189 | /* localport is allocated from the stack, but the registration |
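
The clamp introduced in this hunk is a bounded assignment, split into an if/else only so the reduction can be logged; functionally it is a min plus the transport workaround. A hedged one-function sketch of the same logic (the helper name is illustrative, not part of the patch):

#include <linux/kernel.h>	/* min_t() */

/* Never advertise more SGL segments to the NVME transport than the
 * driver limit; the "+ 1" works around current NVME transport
 * accounting, per the comment in the hunk above.
 */
static inline uint32_t demo_nvme_sgl_limit(uint32_t cfg_sg_seg_cnt,
					   uint32_t max_nvme_seg_cnt)
{
	return min_t(uint32_t, cfg_sg_seg_cnt, max_nvme_seg_cnt) + 1;
}
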
@@ -2249,12 +2289,23 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport) | |||
2249 | void | 2289 | void |
2250 | lpfc_nvme_update_localport(struct lpfc_vport *vport) | 2290 | lpfc_nvme_update_localport(struct lpfc_vport *vport) |
2251 | { | 2291 | { |
2292 | #if (IS_ENABLED(CONFIG_NVME_FC)) | ||
2252 | struct nvme_fc_local_port *localport; | 2293 | struct nvme_fc_local_port *localport; |
2253 | struct lpfc_nvme_lport *lport; | 2294 | struct lpfc_nvme_lport *lport; |
2254 | 2295 | ||
2255 | localport = vport->localport; | 2296 | localport = vport->localport; |
2297 | if (!localport) { | ||
2298 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME, | ||
2299 | "6710 Update NVME fail. No localport\n"); | ||
2300 | return; | ||
2301 | } | ||
2256 | lport = (struct lpfc_nvme_lport *)localport->private; | 2302 | lport = (struct lpfc_nvme_lport *)localport->private; |
2257 | 2303 | if (!lport) { | |
2304 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME, | ||
2305 | "6171 Update NVME fail. localP %p, No lport\n", | ||
2306 | localport); | ||
2307 | return; | ||
2308 | } | ||
2258 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, | 2309 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, |
2259 | "6012 Update NVME lport %p did x%x\n", | 2310 | "6012 Update NVME lport %p did x%x\n", |
2260 | localport, vport->fc_myDID); | 2311 | localport, vport->fc_myDID); |
@@ -2268,7 +2319,7 @@ lpfc_nvme_update_localport(struct lpfc_vport *vport) | |||
2268 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, | 2319 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, |
2269 | "6030 bound lport %p to DID x%06x\n", | 2320 | "6030 bound lport %p to DID x%06x\n", |
2270 | lport, localport->port_id); | 2321 | lport, localport->port_id); |
2271 | 2322 | #endif | |
2272 | } | 2323 | } |
2273 | 2324 | ||
2274 | int | 2325 | int |
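
Wrapping the body of lpfc_nvme_update_localport() in IS_ENABLED(CONFIG_NVME_FC) makes the function compile to an empty stub when the NVME FC transport is configured out, so callers need no #ifdef of their own. A minimal sketch of the pattern with hypothetical names:

#include <linux/kconfig.h>	/* IS_ENABLED() */

struct demo_vport;
void demo_do_update(struct demo_vport *vport);

void demo_update_port(struct demo_vport *vport)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	/* Real work, compiled only when CONFIG_NVME_FC is y or m. */
	demo_do_update(vport);
#endif
	/* With the option off, this is an empty function body. */
}
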
@@ -2409,6 +2460,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
2409 | struct lpfc_nvme_lport *lport; | 2460 | struct lpfc_nvme_lport *lport; |
2410 | struct lpfc_nvme_rport *rport; | 2461 | struct lpfc_nvme_rport *rport; |
2411 | struct nvme_fc_remote_port *remoteport; | 2462 | struct nvme_fc_remote_port *remoteport; |
2463 | unsigned long wait_tmo; | ||
2412 | 2464 | ||
2413 | localport = vport->localport; | 2465 | localport = vport->localport; |
2414 | 2466 | ||
@@ -2451,11 +2503,12 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
2451 | * before proceeding. This guarantees the transport and driver | 2503 | * before proceeding. This guarantees the transport and driver |
2452 | * have completed the unreg process. | 2504 | * have completed the unreg process. |
2453 | */ | 2505 | */ |
2454 | ret = wait_for_completion_timeout(&rport->rport_unreg_done, 5); | 2506 | wait_tmo = msecs_to_jiffies(5000); |
2507 | ret = wait_for_completion_timeout(&rport->rport_unreg_done, | ||
2508 | wait_tmo); | ||
2455 | if (ret == 0) { | 2509 | if (ret == 0) { |
2456 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, | 2510 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, |
2457 | "6169 Unreg nvme wait failed %d\n", | 2511 | "6169 Unreg nvme wait timeout\n"); |
2458 | ret); | ||
2459 | } | 2512 | } |
2460 | } | 2513 | } |
2461 | return; | 2514 | return; |
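
The timeout change above fixes a units bug: wait_for_completion_timeout() takes jiffies, so the old literal 5 waited five timer ticks (5-50 ms depending on CONFIG_HZ), not the intended five seconds. A short sketch of the corrected call pattern; the wrapper name is illustrative:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static int demo_wait_unreg(struct completion *unreg_done)
{
	unsigned long tmo = msecs_to_jiffies(5000);	/* 5 s, HZ-independent */

	/* Returns 0 on timeout, remaining jiffies on completion. */
	if (!wait_for_completion_timeout(unreg_done, tmo))
		return -ETIMEDOUT;
	return 0;
}
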
@@ -2463,7 +2516,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
2463 | input_err: | 2516 | input_err: |
2464 | #endif | 2517 | #endif |
2465 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, | 2518 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, |
2466 | "6168: State error: lport %p, rport%p FCID x%06x\n", | 2519 | "6168 State error: lport %p, rport%p FCID x%06x\n", |
2467 | vport->localport, ndlp->rport, ndlp->nlp_DID); | 2520 | vport->localport, ndlp->rport, ndlp->nlp_DID); |
2468 | } | 2521 | } |
2469 | 2522 | ||
@@ -2494,7 +2547,7 @@ lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba, | |||
2494 | &phba->sli4_hba.lpfc_abts_nvme_buf_list, | 2547 | &phba->sli4_hba.lpfc_abts_nvme_buf_list, |
2495 | list) { | 2548 | list) { |
2496 | if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) { | 2549 | if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) { |
2497 | list_del(&lpfc_ncmd->list); | 2550 | list_del_init(&lpfc_ncmd->list); |
2498 | lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY; | 2551 | lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY; |
2499 | lpfc_ncmd->status = IOSTAT_SUCCESS; | 2552 | lpfc_ncmd->status = IOSTAT_SUCCESS; |
2500 | spin_unlock( | 2553 | spin_unlock( |
@@ -2510,6 +2563,12 @@ lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba, | |||
2510 | rxid, 1); | 2563 | rxid, 1); |
2511 | lpfc_sli4_abts_err_handler(phba, ndlp, axri); | 2564 | lpfc_sli4_abts_err_handler(phba, ndlp, axri); |
2512 | } | 2565 | } |
2566 | |||
2567 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, | ||
2568 | "6311 XRI Aborted xri x%x tag x%x " | ||
2569 | "released\n", | ||
2570 | xri, lpfc_ncmd->cur_iocbq.iotag); | ||
2571 | |||
2513 | lpfc_release_nvme_buf(phba, lpfc_ncmd); | 2572 | lpfc_release_nvme_buf(phba, lpfc_ncmd); |
2514 | if (rrq_empty) | 2573 | if (rrq_empty) |
2515 | lpfc_worker_wake_up(phba); | 2574 | lpfc_worker_wake_up(phba); |
@@ -2518,4 +2577,8 @@ lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba, | |||
2518 | } | 2577 | } |
2519 | spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); | 2578 | spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); |
2520 | spin_unlock_irqrestore(&phba->hbalock, iflag); | 2579 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
2580 | |||
2581 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, | ||
2582 | "6312 XRI Aborted xri x%x not found\n", xri); | ||
2583 | |||
2521 | } | 2584 | } |
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h index 1347deb8dd6c..ec32f45daa66 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.h +++ b/drivers/scsi/lpfc/lpfc_nvme.h | |||
@@ -21,12 +21,7 @@ | |||
21 | * included with this package. * | 21 | * included with this package. * |
22 | ********************************************************************/ | 22 | ********************************************************************/ |
23 | 23 | ||
24 | #define LPFC_NVME_MIN_SEGS 16 | 24 | #define LPFC_NVME_DEFAULT_SEGS (64 + 1) /* 256K IOs */ |
25 | #define LPFC_NVME_DEFAULT_SEGS 66 /* 256K IOs - 64 + 2 */ | ||
26 | #define LPFC_NVME_MAX_SEGS 510 | ||
27 | #define LPFC_NVMET_MIN_POSTBUF 16 | ||
28 | #define LPFC_NVMET_DEFAULT_POSTBUF 1024 | ||
29 | #define LPFC_NVMET_MAX_POSTBUF 4096 | ||
30 | #define LPFC_NVME_WQSIZE 256 | 25 | #define LPFC_NVME_WQSIZE 256 |
31 | 26 | ||
32 | #define LPFC_NVME_ERSP_LEN 0x20 | 27 | #define LPFC_NVME_ERSP_LEN 0x20 |
@@ -102,3 +97,7 @@ struct lpfc_nvme_buf { | |||
102 | uint64_t ts_data_nvme; | 97 | uint64_t ts_data_nvme; |
103 | #endif | 98 | #endif |
104 | }; | 99 | }; |
100 | |||
101 | struct lpfc_nvme_fcpreq_priv { | ||
102 | struct lpfc_nvme_buf *nvme_buf; | ||
103 | }; | ||
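
With fcprqst_priv_sz set to sizeof(struct lpfc_nvme_fcpreq_priv), the nvme-fc transport now allocates this structure alongside every FCP request and hands it back through the request's private pointer, replacing the earlier trick of storing the driver buffer in pnvme_fcreq->private directly. A hedged sketch of both sides of the exchange, using the field names from this patch:

#include <linux/nvme-fc-driver.h>

/* Submit side: park the driver buffer in the transport-allocated
 * per-request private area.
 */
static void demo_stash_buf(struct nvmefc_fcp_req *pnvme_fcreq,
			   struct lpfc_nvme_buf *lpfc_ncmd)
{
	struct lpfc_nvme_fcpreq_priv *freqpriv = pnvme_fcreq->private;

	freqpriv->nvme_buf = lpfc_ncmd;
}

/* Abort/completion side: recover it the same way; a NULL nvme_buf
 * means the IO already completed (the completion handler clears it).
 */
static struct lpfc_nvme_buf *demo_find_buf(struct nvmefc_fcp_req *pnvme_fcreq)
{
	struct lpfc_nvme_fcpreq_priv *freqpriv = pnvme_fcreq->private;

	return freqpriv->nvme_buf;
}
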
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index b2333b3889c7..94434e621c33 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c | |||
@@ -71,6 +71,26 @@ static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *, | |||
71 | struct lpfc_nvmet_rcv_ctx *, | 71 | struct lpfc_nvmet_rcv_ctx *, |
72 | uint32_t, uint16_t); | 72 | uint32_t, uint16_t); |
73 | 73 | ||
74 | void | ||
75 | lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp) | ||
76 | { | ||
77 | unsigned long iflag; | ||
78 | |||
79 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, | ||
80 | "6313 NVMET Defer ctx release xri x%x flg x%x\n", | ||
81 | ctxp->oxid, ctxp->flag); | ||
82 | |||
83 | spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag); | ||
84 | if (ctxp->flag & LPFC_NVMET_CTX_RLS) { | ||
85 | spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, | ||
86 | iflag); | ||
87 | return; | ||
88 | } | ||
89 | ctxp->flag |= LPFC_NVMET_CTX_RLS; | ||
90 | list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list); | ||
91 | spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag); | ||
92 | } | ||
93 | |||
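
lpfc_nvmet_defer_release() is deliberately idempotent: the test and set of LPFC_NVMET_CTX_RLS and the list add happen under one lock, so whichever of the transport-release or abort-completion paths arrives second sees the flag and backs off, and the context is queued for deferred release exactly once. A condensed sketch of the guard with generic parameters (the real function is directly above):

#include <linux/list.h>
#include <linux/spinlock.h>

static void demo_defer_release(spinlock_t *lock, uint16_t *flag,
			       struct list_head *entry,
			       struct list_head *abts_list)
{
	unsigned long iflag;

	spin_lock_irqsave(lock, iflag);
	if (*flag & LPFC_NVMET_CTX_RLS) {	/* already queued */
		spin_unlock_irqrestore(lock, iflag);
		return;
	}
	*flag |= LPFC_NVMET_CTX_RLS;		/* first caller wins */
	list_add_tail(entry, abts_list);
	spin_unlock_irqrestore(lock, iflag);
}
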
74 | /** | 94 | /** |
75 | * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response | 95 | * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response |
76 | * @phba: Pointer to HBA context object. | 96 | * @phba: Pointer to HBA context object. |
@@ -139,6 +159,11 @@ lpfc_nvmet_rq_post(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp, | |||
139 | struct lpfc_dmabuf *mp) | 159 | struct lpfc_dmabuf *mp) |
140 | { | 160 | { |
141 | if (ctxp) { | 161 | if (ctxp) { |
162 | if (ctxp->flag) | ||
163 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, | ||
164 | "6314 rq_post ctx xri x%x flag x%x\n", | ||
165 | ctxp->oxid, ctxp->flag); | ||
166 | |||
142 | if (ctxp->txrdy) { | 167 | if (ctxp->txrdy) { |
143 | pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy, | 168 | pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy, |
144 | ctxp->txrdy_phys); | 169 | ctxp->txrdy_phys); |
@@ -337,39 +362,55 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, | |||
337 | #endif | 362 | #endif |
338 | 363 | ||
339 | ctxp = cmdwqe->context2; | 364 | ctxp = cmdwqe->context2; |
365 | ctxp->flag &= ~LPFC_NVMET_IO_INP; | ||
366 | |||
340 | rsp = &ctxp->ctx.fcp_req; | 367 | rsp = &ctxp->ctx.fcp_req; |
341 | op = rsp->op; | 368 | op = rsp->op; |
342 | ctxp->flag &= ~LPFC_NVMET_IO_INP; | ||
343 | 369 | ||
344 | status = bf_get(lpfc_wcqe_c_status, wcqe); | 370 | status = bf_get(lpfc_wcqe_c_status, wcqe); |
345 | result = wcqe->parameter; | 371 | result = wcqe->parameter; |
346 | 372 | ||
347 | if (!phba->targetport) | 373 | if (phba->targetport) |
348 | goto out; | 374 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; |
375 | else | ||
376 | tgtp = NULL; | ||
349 | 377 | ||
350 | lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n", | 378 | lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n", |
351 | ctxp->oxid, op, status); | 379 | ctxp->oxid, op, status); |
352 | 380 | ||
353 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; | ||
354 | if (status) { | 381 | if (status) { |
355 | rsp->fcp_error = NVME_SC_DATA_XFER_ERROR; | 382 | rsp->fcp_error = NVME_SC_DATA_XFER_ERROR; |
356 | rsp->transferred_length = 0; | 383 | rsp->transferred_length = 0; |
357 | atomic_inc(&tgtp->xmt_fcp_rsp_error); | 384 | if (tgtp) |
385 | atomic_inc(&tgtp->xmt_fcp_rsp_error); | ||
386 | |||
387 | /* pick up SLI4 exchange busy condition */ | ||
388 | if (bf_get(lpfc_wcqe_c_xb, wcqe)) { | ||
389 | ctxp->flag |= LPFC_NVMET_XBUSY; | ||
390 | |||
391 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, | ||
392 | "6315 IO Cmpl XBUSY: xri x%x: %x/%x\n", | ||
393 | ctxp->oxid, status, result); | ||
394 | } else { | ||
395 | ctxp->flag &= ~LPFC_NVMET_XBUSY; | ||
396 | } | ||
397 | |||
358 | } else { | 398 | } else { |
359 | rsp->fcp_error = NVME_SC_SUCCESS; | 399 | rsp->fcp_error = NVME_SC_SUCCESS; |
360 | if (op == NVMET_FCOP_RSP) | 400 | if (op == NVMET_FCOP_RSP) |
361 | rsp->transferred_length = rsp->rsplen; | 401 | rsp->transferred_length = rsp->rsplen; |
362 | else | 402 | else |
363 | rsp->transferred_length = rsp->transfer_length; | 403 | rsp->transferred_length = rsp->transfer_length; |
364 | atomic_inc(&tgtp->xmt_fcp_rsp_cmpl); | 404 | if (tgtp) |
405 | atomic_inc(&tgtp->xmt_fcp_rsp_cmpl); | ||
365 | } | 406 | } |
366 | 407 | ||
367 | out: | ||
368 | if ((op == NVMET_FCOP_READDATA_RSP) || | 408 | if ((op == NVMET_FCOP_READDATA_RSP) || |
369 | (op == NVMET_FCOP_RSP)) { | 409 | (op == NVMET_FCOP_RSP)) { |
370 | /* Sanity check */ | 410 | /* Sanity check */ |
371 | ctxp->state = LPFC_NVMET_STE_DONE; | 411 | ctxp->state = LPFC_NVMET_STE_DONE; |
372 | ctxp->entry_cnt++; | 412 | ctxp->entry_cnt++; |
413 | |||
373 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS | 414 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
374 | if (phba->ktime_on) { | 415 | if (phba->ktime_on) { |
375 | if (rsp->op == NVMET_FCOP_READDATA_RSP) { | 416 | if (rsp->op == NVMET_FCOP_READDATA_RSP) { |
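
The XBUSY handling added in this hunk starts a small deferred-free lifecycle: when the WCQE carries the XB (exchange busy) bit, the firmware still owns the exchange, so the context is flagged and its repost is postponed until the XRI_ABORTED CQE is processed by lpfc_sli4_nvmet_xri_aborted() later in this patch. A simplified sketch of the ordering, with a release predicate condensed from the two completion paths (this is a simplification, not the exact test either site uses):

#include <linux/types.h>

/* Lifecycle (names from this patch, control flow simplified):
 *   IO cmpl, XB set   -> ctxp->flag |= LPFC_NVMET_XBUSY
 *   transport release -> lpfc_nvmet_defer_release() sets CTX_RLS and
 *                        parks ctxp on lpfc_abts_nvmet_ctx_list
 *   XRI_ABORTED CQE   -> XBUSY cleared; ctx unlinked and reposted
 */
static bool demo_can_repost(uint16_t flag)
{
	/* Repost only once a release was requested and neither an
	 * abort nor a busy exchange is still outstanding.
	 */
	return (flag & LPFC_NVMET_CTX_RLS) &&
	       !(flag & (LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP));
}
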
@@ -517,7 +558,6 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport, | |||
517 | container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req); | 558 | container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req); |
518 | struct lpfc_hba *phba = ctxp->phba; | 559 | struct lpfc_hba *phba = ctxp->phba; |
519 | struct lpfc_iocbq *nvmewqeq; | 560 | struct lpfc_iocbq *nvmewqeq; |
520 | unsigned long iflags; | ||
521 | int rc; | 561 | int rc; |
522 | 562 | ||
523 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS | 563 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
@@ -543,10 +583,11 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport, | |||
543 | #endif | 583 | #endif |
544 | 584 | ||
545 | /* Sanity check */ | 585 | /* Sanity check */ |
546 | if (ctxp->state == LPFC_NVMET_STE_ABORT) { | 586 | if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) || |
587 | (ctxp->state == LPFC_NVMET_STE_ABORT)) { | ||
547 | atomic_inc(&lpfc_nvmep->xmt_fcp_drop); | 588 | atomic_inc(&lpfc_nvmep->xmt_fcp_drop); |
548 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, | 589 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, |
549 | "6102 Bad state IO x%x aborted\n", | 590 | "6102 IO xri x%x aborted\n", |
550 | ctxp->oxid); | 591 | ctxp->oxid); |
551 | rc = -ENXIO; | 592 | rc = -ENXIO; |
552 | goto aerr; | 593 | goto aerr; |
@@ -571,10 +612,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport, | |||
571 | lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n", | 612 | lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n", |
572 | ctxp->oxid, rsp->op, rsp->rsplen); | 613 | ctxp->oxid, rsp->op, rsp->rsplen); |
573 | 614 | ||
574 | /* For now we take hbalock */ | ||
575 | spin_lock_irqsave(&phba->hbalock, iflags); | ||
576 | rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq); | 615 | rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq); |
577 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
578 | if (rc == WQE_SUCCESS) { | 616 | if (rc == WQE_SUCCESS) { |
579 | ctxp->flag |= LPFC_NVMET_IO_INP; | 617 | ctxp->flag |= LPFC_NVMET_IO_INP; |
580 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS | 618 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
@@ -619,16 +657,27 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport, | |||
619 | struct lpfc_nvmet_rcv_ctx *ctxp = | 657 | struct lpfc_nvmet_rcv_ctx *ctxp = |
620 | container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req); | 658 | container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req); |
621 | struct lpfc_hba *phba = ctxp->phba; | 659 | struct lpfc_hba *phba = ctxp->phba; |
660 | unsigned long flags; | ||
622 | 661 | ||
623 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, | 662 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, |
624 | "6103 Abort op: oxri x%x %d cnt %d\n", | 663 | "6103 Abort op: oxri x%x flg x%x cnt %d\n", |
625 | ctxp->oxid, ctxp->state, ctxp->entry_cnt); | 664 | ctxp->oxid, ctxp->flag, ctxp->entry_cnt); |
626 | 665 | ||
627 | lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x state x%x cnt x%x\n", | 666 | lpfc_nvmeio_data(phba, "NVMET FCP ABRT: " |
628 | ctxp->oxid, ctxp->state, ctxp->entry_cnt); | 667 | "xri x%x flg x%x cnt x%x\n", |
668 | ctxp->oxid, ctxp->flag, ctxp->entry_cnt); | ||
629 | 669 | ||
630 | atomic_inc(&lpfc_nvmep->xmt_fcp_abort); | 670 | atomic_inc(&lpfc_nvmep->xmt_fcp_abort); |
631 | ctxp->entry_cnt++; | 671 | ctxp->entry_cnt++; |
672 | spin_lock_irqsave(&ctxp->ctxlock, flags); | ||
673 | |||
674 | /* Since iaab/iaar are NOT set, we need to check | ||
675 | * if the firmware is in the process of aborting the IO | ||
676 | */ | ||
677 | if (ctxp->flag & LPFC_NVMET_XBUSY) { | ||
678 | spin_unlock_irqrestore(&ctxp->ctxlock, flags); | ||
679 | return; | ||
680 | } | ||
632 | ctxp->flag |= LPFC_NVMET_ABORT_OP; | 681 | ctxp->flag |= LPFC_NVMET_ABORT_OP; |
633 | if (ctxp->flag & LPFC_NVMET_IO_INP) | 682 | if (ctxp->flag & LPFC_NVMET_IO_INP) |
634 | lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid, | 683 | lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid, |
@@ -636,13 +685,13 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport, | |||
636 | else | 685 | else |
637 | lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, | 686 | lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, |
638 | ctxp->oxid); | 687 | ctxp->oxid); |
688 | spin_unlock_irqrestore(&ctxp->ctxlock, flags); | ||
639 | } | 689 | } |
640 | 690 | ||
641 | static void | 691 | static void |
642 | lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport, | 692 | lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport, |
643 | struct nvmefc_tgt_fcp_req *rsp) | 693 | struct nvmefc_tgt_fcp_req *rsp) |
644 | { | 694 | { |
645 | struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private; | ||
646 | struct lpfc_nvmet_rcv_ctx *ctxp = | 695 | struct lpfc_nvmet_rcv_ctx *ctxp = |
647 | container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req); | 696 | container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req); |
648 | struct lpfc_hba *phba = ctxp->phba; | 697 | struct lpfc_hba *phba = ctxp->phba; |
@@ -650,27 +699,20 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport, | |||
650 | bool aborting = false; | 699 | bool aborting = false; |
651 | 700 | ||
652 | spin_lock_irqsave(&ctxp->ctxlock, flags); | 701 | spin_lock_irqsave(&ctxp->ctxlock, flags); |
653 | if (ctxp->flag & LPFC_NVMET_ABORT_OP) { | 702 | if ((ctxp->flag & LPFC_NVMET_ABORT_OP) || |
703 | (ctxp->flag & LPFC_NVMET_XBUSY)) { | ||
654 | aborting = true; | 704 | aborting = true; |
655 | ctxp->flag |= LPFC_NVMET_CTX_RLS; | ||
656 | } | ||
657 | spin_unlock_irqrestore(&ctxp->ctxlock, flags); | ||
658 | |||
659 | if (aborting) | ||
660 | /* let the abort path do the real release */ | 705 | /* let the abort path do the real release */ |
661 | return; | 706 | lpfc_nvmet_defer_release(phba, ctxp); |
662 | |||
663 | /* Sanity check */ | ||
664 | if (ctxp->state != LPFC_NVMET_STE_DONE) { | ||
665 | atomic_inc(&lpfc_nvmep->xmt_fcp_drop); | ||
666 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, | ||
667 | "6117 Bad state IO x%x aborted\n", | ||
668 | ctxp->oxid); | ||
669 | } | 707 | } |
708 | spin_unlock_irqrestore(&ctxp->ctxlock, flags); | ||
670 | 709 | ||
671 | lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d\n", ctxp->oxid, | 710 | lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d\n", ctxp->oxid, |
672 | ctxp->state, 0); | 711 | ctxp->state, 0); |
673 | 712 | ||
713 | if (aborting) | ||
714 | return; | ||
715 | |||
674 | lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf); | 716 | lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf); |
675 | } | 717 | } |
676 | 718 | ||
@@ -708,8 +750,19 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba) | |||
708 | pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn); | 750 | pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn); |
709 | pinfo.port_id = vport->fc_myDID; | 751 | pinfo.port_id = vport->fc_myDID; |
710 | 752 | ||
753 | /* Limit to LPFC_MAX_NVME_SEG_CNT. | ||
754 | * For now need + 1 to get around NVME transport logic. | ||
755 | */ | ||
756 | if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) { | ||
757 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, | ||
758 | "6400 Reducing sg segment cnt to %d\n", | ||
759 | LPFC_MAX_NVME_SEG_CNT); | ||
760 | phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT; | ||
761 | } else { | ||
762 | phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt; | ||
763 | } | ||
764 | lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; | ||
711 | lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel; | 765 | lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel; |
712 | lpfc_tgttemplate.max_sgl_segments = phba->cfg_sg_seg_cnt; | ||
713 | lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP | | 766 | lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP | |
714 | NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED | | 767 | NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED | |
715 | NVMET_FCTGTFEAT_CMD_IN_ISR | | 768 | NVMET_FCTGTFEAT_CMD_IN_ISR | |
@@ -794,7 +847,120 @@ void | |||
794 | lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, | 847 | lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, |
795 | struct sli4_wcqe_xri_aborted *axri) | 848 | struct sli4_wcqe_xri_aborted *axri) |
796 | { | 849 | { |
797 | /* TODO: work in progress */ | 850 | uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); |
851 | uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); | ||
852 | struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp; | ||
853 | struct lpfc_nodelist *ndlp; | ||
854 | unsigned long iflag = 0; | ||
855 | int rrq_empty = 0; | ||
856 | bool released = false; | ||
857 | |||
858 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, | ||
859 | "6317 XB aborted xri x%x rxid x%x\n", xri, rxid); | ||
860 | |||
861 | if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) | ||
862 | return; | ||
863 | spin_lock_irqsave(&phba->hbalock, iflag); | ||
864 | spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); | ||
865 | list_for_each_entry_safe(ctxp, next_ctxp, | ||
866 | &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, | ||
867 | list) { | ||
868 | if (ctxp->rqb_buffer->sglq->sli4_xritag != xri) | ||
869 | continue; | ||
870 | |||
871 | /* Check if we already received a free context call | ||
872 | * and we have completed processing an abort situation. | ||
873 | */ | ||
874 | if (ctxp->flag & LPFC_NVMET_CTX_RLS && | ||
875 | !(ctxp->flag & LPFC_NVMET_ABORT_OP)) { | ||
876 | list_del(&ctxp->list); | ||
877 | released = true; | ||
878 | } | ||
879 | ctxp->flag &= ~LPFC_NVMET_XBUSY; | ||
880 | spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); | ||
881 | |||
882 | rrq_empty = list_empty(&phba->active_rrq_list); | ||
883 | spin_unlock_irqrestore(&phba->hbalock, iflag); | ||
884 | ndlp = lpfc_findnode_did(phba->pport, ctxp->sid); | ||
885 | if (ndlp && NLP_CHK_NODE_ACT(ndlp) && | ||
886 | (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE || | ||
887 | ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { | ||
888 | lpfc_set_rrq_active(phba, ndlp, | ||
889 | ctxp->rqb_buffer->sglq->sli4_lxritag, | ||
890 | rxid, 1); | ||
891 | lpfc_sli4_abts_err_handler(phba, ndlp, axri); | ||
892 | } | ||
893 | |||
894 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, | ||
895 | "6318 XB aborted %x flg x%x (%x)\n", | ||
896 | ctxp->oxid, ctxp->flag, released); | ||
897 | if (released) | ||
898 | lpfc_nvmet_rq_post(phba, ctxp, | ||
899 | &ctxp->rqb_buffer->hbuf); | ||
900 | if (rrq_empty) | ||
901 | lpfc_worker_wake_up(phba); | ||
902 | return; | ||
903 | } | ||
904 | spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); | ||
905 | spin_unlock_irqrestore(&phba->hbalock, iflag); | ||
906 | } | ||
907 | |||
908 | int | ||
909 | lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport, | ||
910 | struct fc_frame_header *fc_hdr) | ||
911 | |||
912 | { | ||
913 | #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) | ||
914 | struct lpfc_hba *phba = vport->phba; | ||
915 | struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp; | ||
916 | struct nvmefc_tgt_fcp_req *rsp; | ||
917 | uint16_t xri; | ||
918 | unsigned long iflag = 0; | ||
919 | |||
920 | xri = be16_to_cpu(fc_hdr->fh_ox_id); | ||
921 | |||
922 | spin_lock_irqsave(&phba->hbalock, iflag); | ||
923 | spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); | ||
924 | list_for_each_entry_safe(ctxp, next_ctxp, | ||
925 | &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, | ||
926 | list) { | ||
927 | if (ctxp->rqb_buffer->sglq->sli4_xritag != xri) | ||
928 | continue; | ||
929 | |||
930 | spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); | ||
931 | spin_unlock_irqrestore(&phba->hbalock, iflag); | ||
932 | |||
933 | spin_lock_irqsave(&ctxp->ctxlock, iflag); | ||
934 | ctxp->flag |= LPFC_NVMET_ABTS_RCV; | ||
935 | spin_unlock_irqrestore(&ctxp->ctxlock, iflag); | ||
936 | |||
937 | lpfc_nvmeio_data(phba, | ||
938 | "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n", | ||
939 | xri, smp_processor_id(), 0); | ||
940 | |||
941 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, | ||
942 | "6319 NVMET Rcv ABTS:acc xri x%x\n", xri); | ||
943 | |||
944 | rsp = &ctxp->ctx.fcp_req; | ||
945 | nvmet_fc_rcv_fcp_abort(phba->targetport, rsp); | ||
946 | |||
947 | /* Respond with BA_ACC accordingly */ | ||
948 | lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1); | ||
949 | return 0; | ||
950 | } | ||
951 | spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); | ||
952 | spin_unlock_irqrestore(&phba->hbalock, iflag); | ||
953 | |||
954 | lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n", | ||
955 | xri, smp_processor_id(), 1); | ||
956 | |||
957 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, | ||
958 | "6320 NVMET Rcv ABTS:rjt xri x%x\n", xri); | ||
959 | |||
960 | /* Respond with BA_RJT accordingly */ | ||
961 | lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0); | ||
962 | #endif | ||
963 | return 0; | ||
798 | } | 964 | } |
799 | 965 | ||
800 | void | 966 | void |
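
The new lpfc_nvmet_rcv_unsol_abort() keys its context lookup on the OX_ID of the received ABTS frame; fh_ox_id is big-endian on the wire, hence the be16_to_cpu(). The final argument to lpfc_sli4_seq_abort_rsp() then encodes the outcome: 1 for BA_ACC when a matching exchange was found and the transport notified, 0 for BA_RJT when there was nothing to abort. A minimal sketch of the extraction (helper name hypothetical):

#include <scsi/fc/fc_fs.h>	/* struct fc_frame_header */
#include <asm/byteorder.h>	/* be16_to_cpu() */

/* OX_ID of a received ABTS: wire format is big-endian. */
static uint16_t demo_abts_oxid(const struct fc_frame_header *fc_hdr)
{
	return be16_to_cpu(fc_hdr->fh_ox_id);
}
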
@@ -876,7 +1042,6 @@ dropit: | |||
876 | ctxp->wqeq = NULL; | 1042 | ctxp->wqeq = NULL; |
877 | ctxp->state = LPFC_NVMET_STE_RCV; | 1043 | ctxp->state = LPFC_NVMET_STE_RCV; |
878 | ctxp->rqb_buffer = (void *)nvmebuf; | 1044 | ctxp->rqb_buffer = (void *)nvmebuf; |
879 | spin_lock_init(&ctxp->ctxlock); | ||
880 | 1045 | ||
881 | lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n", | 1046 | lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n", |
882 | oxid, size, sid); | 1047 | oxid, size, sid); |
@@ -985,6 +1150,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, | |||
985 | ctxp->rqb_buffer = nvmebuf; | 1150 | ctxp->rqb_buffer = nvmebuf; |
986 | ctxp->entry_cnt = 1; | 1151 | ctxp->entry_cnt = 1; |
987 | ctxp->flag = 0; | 1152 | ctxp->flag = 0; |
1153 | spin_lock_init(&ctxp->ctxlock); | ||
988 | 1154 | ||
989 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS | 1155 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
990 | if (phba->ktime_on) { | 1156 | if (phba->ktime_on) { |
@@ -1007,8 +1173,8 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, | |||
1007 | } | 1173 | } |
1008 | #endif | 1174 | #endif |
1009 | 1175 | ||
1010 | lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d from %06x\n", | 1176 | lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n", |
1011 | oxid, size, sid); | 1177 | oxid, size, smp_processor_id()); |
1012 | 1178 | ||
1013 | atomic_inc(&tgtp->rcv_fcp_cmd_in); | 1179 | atomic_inc(&tgtp->rcv_fcp_cmd_in); |
1014 | /* | 1180 | /* |
@@ -1282,11 +1448,11 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, | |||
1282 | return NULL; | 1448 | return NULL; |
1283 | } | 1449 | } |
1284 | 1450 | ||
1285 | if (rsp->sg_cnt > phba->cfg_sg_seg_cnt) { | 1451 | if (rsp->sg_cnt > phba->cfg_nvme_seg_cnt) { |
1286 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, | 1452 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, |
1287 | "6109 lpfc_nvmet_prep_fcp_wqe: seg cnt err: " | 1453 | "6109 lpfc_nvmet_prep_fcp_wqe: seg cnt err: " |
1288 | "NPORT x%x oxid:x%x\n", | 1454 | "NPORT x%x oxid:x%x cnt %d\n", |
1289 | ctxp->sid, ctxp->oxid); | 1455 | ctxp->sid, ctxp->oxid, phba->cfg_nvme_seg_cnt); |
1290 | return NULL; | 1456 | return NULL; |
1291 | } | 1457 | } |
1292 | 1458 | ||
@@ -1648,18 +1814,27 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, | |||
1648 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; | 1814 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; |
1649 | atomic_inc(&tgtp->xmt_abort_cmpl); | 1815 | atomic_inc(&tgtp->xmt_abort_cmpl); |
1650 | 1816 | ||
1651 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, | ||
1652 | "6165 Abort cmpl: xri x%x WCQE: %08x %08x %08x %08x\n", | ||
1653 | ctxp->oxid, wcqe->word0, wcqe->total_data_placed, | ||
1654 | result, wcqe->word3); | ||
1655 | |||
1656 | ctxp->state = LPFC_NVMET_STE_DONE; | 1817 | ctxp->state = LPFC_NVMET_STE_DONE; |
1818 | |||
1819 | /* Check if we already received a free context call | ||
1820 | * and we have completed processing an abort situation. | ||
1821 | */ | ||
1657 | spin_lock_irqsave(&ctxp->ctxlock, flags); | 1822 | spin_lock_irqsave(&ctxp->ctxlock, flags); |
1658 | if (ctxp->flag & LPFC_NVMET_CTX_RLS) | 1823 | if ((ctxp->flag & LPFC_NVMET_CTX_RLS) && |
1824 | !(ctxp->flag & LPFC_NVMET_XBUSY)) { | ||
1825 | list_del(&ctxp->list); | ||
1659 | released = true; | 1826 | released = true; |
1827 | } | ||
1660 | ctxp->flag &= ~LPFC_NVMET_ABORT_OP; | 1828 | ctxp->flag &= ~LPFC_NVMET_ABORT_OP; |
1661 | spin_unlock_irqrestore(&ctxp->ctxlock, flags); | 1829 | spin_unlock_irqrestore(&ctxp->ctxlock, flags); |
1662 | 1830 | ||
1831 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, | ||
1832 | "6165 ABORT cmpl: xri x%x flg x%x (%d) " | ||
1833 | "WCQE: %08x %08x %08x %08x\n", | ||
1834 | ctxp->oxid, ctxp->flag, released, | ||
1835 | wcqe->word0, wcqe->total_data_placed, | ||
1836 | result, wcqe->word3); | ||
1837 | |||
1663 | /* | 1838 | /* |
1664 | * if transport has released ctx, then can reuse it. Otherwise, | 1839 | * if transport has released ctx, then can reuse it. Otherwise, |
1665 | * will be recycled by transport release call. | 1840 | * will be recycled by transport release call. |
@@ -1670,10 +1845,15 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, | |||
1670 | cmdwqe->context2 = NULL; | 1845 | cmdwqe->context2 = NULL; |
1671 | cmdwqe->context3 = NULL; | 1846 | cmdwqe->context3 = NULL; |
1672 | lpfc_sli_release_iocbq(phba, cmdwqe); | 1847 | lpfc_sli_release_iocbq(phba, cmdwqe); |
1848 | |||
1849 | /* Since iaab/iaar are NOT set, there is no work left. | ||
1850 | * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted | ||
1851 | * should have been called already. | ||
1852 | */ | ||
1673 | } | 1853 | } |
1674 | 1854 | ||
1675 | /** | 1855 | /** |
1676 | * lpfc_nvmet_xmt_fcp_abort_cmp - Completion handler for ABTS | 1856 | * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS |
1677 | * @phba: Pointer to HBA context object. | 1857 | * @phba: Pointer to HBA context object. |
1678 | * @cmdwqe: Pointer to driver command WQE object. | 1858 | * @cmdwqe: Pointer to driver command WQE object. |
1679 | * @wcqe: Pointer to driver response CQE object. | 1859 | * @wcqe: Pointer to driver response CQE object. |
@@ -1683,8 +1863,8 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, | |||
1683 | * The function frees memory resources used for the NVME commands. | 1863 | * The function frees memory resources used for the NVME commands. |
1684 | **/ | 1864 | **/ |
1685 | static void | 1865 | static void |
1686 | lpfc_nvmet_xmt_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, | 1866 | lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, |
1687 | struct lpfc_wcqe_complete *wcqe) | 1867 | struct lpfc_wcqe_complete *wcqe) |
1688 | { | 1868 | { |
1689 | struct lpfc_nvmet_rcv_ctx *ctxp; | 1869 | struct lpfc_nvmet_rcv_ctx *ctxp; |
1690 | struct lpfc_nvmet_tgtport *tgtp; | 1870 | struct lpfc_nvmet_tgtport *tgtp; |
@@ -1699,35 +1879,55 @@ lpfc_nvmet_xmt_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, | |||
1699 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; | 1879 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; |
1700 | atomic_inc(&tgtp->xmt_abort_cmpl); | 1880 | atomic_inc(&tgtp->xmt_abort_cmpl); |
1701 | 1881 | ||
1882 | if (!ctxp) { | ||
1883 | /* if context is clear, related IO already completed */ | ||
1884 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, | ||
1885 | "6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n", | ||
1886 | wcqe->word0, wcqe->total_data_placed, | ||
1887 | result, wcqe->word3); | ||
1888 | return; | ||
1889 | } | ||
1890 | |||
1891 | /* Sanity check */ | ||
1892 | if (ctxp->state != LPFC_NVMET_STE_ABORT) { | ||
1893 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, | ||
1894 | "6112 ABTS Wrong state:%d oxid x%x\n", | ||
1895 | ctxp->state, ctxp->oxid); | ||
1896 | } | ||
1897 | |||
1898 | /* Check if we already received a free context call | ||
1899 | * and we have completed processing an abort situation. | ||
1900 | */ | ||
1901 | ctxp->state = LPFC_NVMET_STE_DONE; | ||
1902 | spin_lock_irqsave(&ctxp->ctxlock, flags); | ||
1903 | if ((ctxp->flag & LPFC_NVMET_CTX_RLS) && | ||
1904 | !(ctxp->flag & LPFC_NVMET_XBUSY)) { | ||
1905 | list_del(&ctxp->list); | ||
1906 | released = true; | ||
1907 | } | ||
1908 | ctxp->flag &= ~LPFC_NVMET_ABORT_OP; | ||
1909 | spin_unlock_irqrestore(&ctxp->ctxlock, flags); | ||
1910 | |||
1702 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, | 1911 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, |
1703 | "6070 Abort cmpl: ctx %p WCQE: %08x %08x %08x %08x\n", | 1912 | "6316 ABTS cmpl xri x%x flg x%x (%x) " |
1704 | ctxp, wcqe->word0, wcqe->total_data_placed, | 1913 | "WCQE: %08x %08x %08x %08x\n", |
1914 | ctxp->oxid, ctxp->flag, released, | ||
1915 | wcqe->word0, wcqe->total_data_placed, | ||
1705 | result, wcqe->word3); | 1916 | result, wcqe->word3); |
1917 | /* | ||
1918 | * if transport has released ctx, then can reuse it. Otherwise, | ||
1919 | * will be recycled by transport release call. | ||
1920 | */ | ||
1921 | if (released) | ||
1922 | lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf); | ||
1706 | 1923 | ||
1707 | if (ctxp) { | 1924 | cmdwqe->context2 = NULL; |
1708 | /* Sanity check */ | 1925 | cmdwqe->context3 = NULL; |
1709 | if (ctxp->state != LPFC_NVMET_STE_ABORT) { | ||
1710 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, | ||
1711 | "6112 ABORT Wrong state:%d oxid x%x\n", | ||
1712 | ctxp->state, ctxp->oxid); | ||
1713 | } | ||
1714 | ctxp->state = LPFC_NVMET_STE_DONE; | ||
1715 | spin_lock_irqsave(&ctxp->ctxlock, flags); | ||
1716 | if (ctxp->flag & LPFC_NVMET_CTX_RLS) | ||
1717 | released = true; | ||
1718 | ctxp->flag &= ~LPFC_NVMET_ABORT_OP; | ||
1719 | spin_unlock_irqrestore(&ctxp->ctxlock, flags); | ||
1720 | |||
1721 | /* | ||
1722 | * if transport has released ctx, then can reuse it. Otherwise, | ||
1723 | * will be recycled by transport release call. | ||
1724 | */ | ||
1725 | if (released) | ||
1726 | lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf); | ||
1727 | 1926 | ||
1728 | cmdwqe->context2 = NULL; | 1927 | /* Since iaab/iaar are NOT set, there is no work left. |
1729 | cmdwqe->context3 = NULL; | 1928 | * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted |
1730 | } | 1929 | * should have been called already. |
1930 | */ | ||
1731 | } | 1931 | } |
1732 | 1932 | ||
1733 | /** | 1933 | /** |
@@ -1780,10 +1980,14 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba, | |||
1780 | struct lpfc_nodelist *ndlp; | 1980 | struct lpfc_nodelist *ndlp; |
1781 | 1981 | ||
1782 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, | 1982 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, |
1783 | "6067 Abort: sid %x xri x%x/x%x\n", | 1983 | "6067 ABTS: sid %x xri x%x/x%x\n", |
1784 | sid, xri, ctxp->wqeq->sli4_xritag); | 1984 | sid, xri, ctxp->wqeq->sli4_xritag); |
1785 | 1985 | ||
1786 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; | 1986 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; |
1987 | if (!ctxp->wqeq) { | ||
1988 | ctxp->wqeq = ctxp->rqb_buffer->iocbq; | ||
1989 | ctxp->wqeq->hba_wqidx = 0; | ||
1990 | } | ||
1787 | 1991 | ||
1788 | ndlp = lpfc_findnode_did(phba->pport, sid); | 1992 | ndlp = lpfc_findnode_did(phba->pport, sid); |
1789 | if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || | 1993 | if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || |
@@ -1889,10 +2093,11 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, | |||
1889 | (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { | 2093 | (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { |
1890 | atomic_inc(&tgtp->xmt_abort_rsp_error); | 2094 | atomic_inc(&tgtp->xmt_abort_rsp_error); |
1891 | lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, | 2095 | lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, |
1892 | "6160 Drop ABTS - wrong NDLP state x%x.\n", | 2096 | "6160 Drop ABORT - wrong NDLP state x%x.\n", |
1893 | (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE); | 2097 | (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE); |
1894 | 2098 | ||
1895 | /* No failure to an ABTS request. */ | 2099 | /* No failure to an ABTS request. */ |
2100 | ctxp->flag &= ~LPFC_NVMET_ABORT_OP; | ||
1896 | return 0; | 2101 | return 0; |
1897 | } | 2102 | } |
1898 | 2103 | ||
@@ -1900,9 +2105,10 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, | |||
1900 | ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba); | 2105 | ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba); |
1901 | if (!ctxp->abort_wqeq) { | 2106 | if (!ctxp->abort_wqeq) { |
1902 | lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, | 2107 | lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, |
1903 | "6161 Abort failed: No wqeqs: " | 2108 | "6161 ABORT failed: No wqeqs: " |
1904 | "xri: x%x\n", ctxp->oxid); | 2109 | "xri: x%x\n", ctxp->oxid); |
1905 | /* No failure to an ABTS request. */ | 2110 | /* No failure to an ABTS request. */ |
2111 | ctxp->flag &= ~LPFC_NVMET_ABORT_OP; | ||
1906 | return 0; | 2112 | return 0; |
1907 | } | 2113 | } |
1908 | abts_wqeq = ctxp->abort_wqeq; | 2114 | abts_wqeq = ctxp->abort_wqeq; |
@@ -1910,8 +2116,8 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, | |||
1910 | ctxp->state = LPFC_NVMET_STE_ABORT; | 2116 | ctxp->state = LPFC_NVMET_STE_ABORT; |
1911 | 2117 | ||
1912 | /* Announce entry to new IO submit field. */ | 2118 | /* Announce entry to new IO submit field. */ |
1913 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, | 2119 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, |
1914 | "6162 Abort Request to rport DID x%06x " | 2120 | "6162 ABORT Request to rport DID x%06x " |
1915 | "for xri x%x x%x\n", | 2121 | "for xri x%x x%x\n", |
1916 | ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag); | 2122 | ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag); |
1917 | 2123 | ||
@@ -1927,6 +2133,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, | |||
1927 | "NVME Req now. hba_flag x%x oxid x%x\n", | 2133 | "NVME Req now. hba_flag x%x oxid x%x\n", |
1928 | phba->hba_flag, ctxp->oxid); | 2134 | phba->hba_flag, ctxp->oxid); |
1929 | lpfc_sli_release_iocbq(phba, abts_wqeq); | 2135 | lpfc_sli_release_iocbq(phba, abts_wqeq); |
2136 | ctxp->flag &= ~LPFC_NVMET_ABORT_OP; | ||
1930 | return 0; | 2137 | return 0; |
1931 | } | 2138 | } |
1932 | 2139 | ||
@@ -1938,6 +2145,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, | |||
1938 | "still pending on oxid x%x\n", | 2145 | "still pending on oxid x%x\n", |
1939 | ctxp->oxid); | 2146 | ctxp->oxid); |
1940 | lpfc_sli_release_iocbq(phba, abts_wqeq); | 2147 | lpfc_sli_release_iocbq(phba, abts_wqeq); |
2148 | ctxp->flag &= ~LPFC_NVMET_ABORT_OP; | ||
1941 | return 0; | 2149 | return 0; |
1942 | } | 2150 | } |
1943 | 2151 | ||
@@ -1985,9 +2193,10 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, | |||
1985 | if (rc == WQE_SUCCESS) | 2193 | if (rc == WQE_SUCCESS) |
1986 | return 0; | 2194 | return 0; |
1987 | 2195 | ||
2196 | ctxp->flag &= ~LPFC_NVMET_ABORT_OP; | ||
1988 | lpfc_sli_release_iocbq(phba, abts_wqeq); | 2197 | lpfc_sli_release_iocbq(phba, abts_wqeq); |
1989 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME, | 2198 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, |
1990 | "6166 Failed abts issue_wqe with status x%x " | 2199 | "6166 Failed ABORT issue_wqe with status x%x " |
1991 | "for oxid x%x.\n", | 2200 | "for oxid x%x.\n", |
1992 | rc, ctxp->oxid); | 2201 | rc, ctxp->oxid); |
1993 | return 1; | 2202 | return 1; |
@@ -2016,8 +2225,8 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba, | |||
2016 | 2225 | ||
2017 | spin_lock_irqsave(&phba->hbalock, flags); | 2226 | spin_lock_irqsave(&phba->hbalock, flags); |
2018 | abts_wqeq = ctxp->wqeq; | 2227 | abts_wqeq = ctxp->wqeq; |
2019 | abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_abort_cmp; | 2228 | abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp; |
2020 | abts_wqeq->iocb_cmpl = 0; | 2229 | abts_wqeq->iocb_cmpl = NULL; |
2021 | abts_wqeq->iocb_flag |= LPFC_IO_NVMET; | 2230 | abts_wqeq->iocb_flag |= LPFC_IO_NVMET; |
2022 | rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq); | 2231 | rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq); |
2023 | spin_unlock_irqrestore(&phba->hbalock, flags); | 2232 | spin_unlock_irqrestore(&phba->hbalock, flags); |
@@ -2027,7 +2236,7 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba, | |||
2027 | } | 2236 | } |
2028 | 2237 | ||
2029 | aerr: | 2238 | aerr: |
2030 | lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf); | 2239 | ctxp->flag &= ~LPFC_NVMET_ABORT_OP; |
2031 | atomic_inc(&tgtp->xmt_abort_rsp_error); | 2240 | atomic_inc(&tgtp->xmt_abort_rsp_error); |
2032 | lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, | 2241 | lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, |
2033 | "6135 Failed to Issue ABTS for oxid x%x. Status x%x\n", | 2242 | "6135 Failed to Issue ABTS for oxid x%x. Status x%x\n", |
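The recurring fix in the lpfc_nvmet.c hunks above is error-path flag hygiene. Both lpfc_nvmet_sol_fcp_issue_abort() and lpfc_nvmet_unsol_fcp_issue_abort() set LPFC_NVMET_ABORT_OP to mean that an abort WQE is in flight on the exchange; previously the early returns (no iocbq available, link down, abort already pending, issue_wqe failure) left the bit set even though nothing was ever issued, and the unsol error path reposted the receive buffer directly instead of letting the flag-driven release logic run. Below is a minimal standalone model of that pattern, not driver code: struct exchange, issue_abort() and the main() harness are hypothetical, while the flag value matches lpfc_nvmet.h.

/* Userspace model of "clear the in-flight flag on every failure exit"
 * as applied in the hunks above. */
#include <stdio.h>

#define LPFC_NVMET_ABORT_OP 0x2 /* abort WQE issued on exchange */

struct exchange {
	unsigned int flag;
};

/* Returns 1 if the abort WQE was really issued, 0 otherwise. */
static int issue_abort(struct exchange *xchg, int have_wqeq, int link_up)
{
	xchg->flag |= LPFC_NVMET_ABORT_OP;

	if (!have_wqeq) {
		/* Nothing was issued: clear the bit, or the completion
		 * side would wait forever for an abort that never went out. */
		xchg->flag &= ~LPFC_NVMET_ABORT_OP;
		return 0;
	}
	if (!link_up) {
		xchg->flag &= ~LPFC_NVMET_ABORT_OP;
		return 0;
	}
	/* Success: the bit stays set until the abort completion clears it. */
	return 1;
}

int main(void)
{
	struct exchange x = { 0 };

	issue_abort(&x, 0, 1);
	printf("flag after failed issue: 0x%x (expect 0x0)\n", x.flag);
	issue_abort(&x, 1, 1);
	printf("flag after good issue:   0x%x (expect 0x2)\n", x.flag);
	return 0;
}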
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h index 02735fc6fd41..128759fe6650 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.h +++ b/drivers/scsi/lpfc/lpfc_nvmet.h | |||
@@ -21,9 +21,7 @@ | |||
21 | * included with this package. * | 21 | * included with this package. * |
22 | ********************************************************************/ | 22 | ********************************************************************/ |
23 | 23 | ||
24 | #define LPFC_NVMET_MIN_SEGS 16 | 24 | #define LPFC_NVMET_DEFAULT_SEGS (64 + 1) /* 256K IOs */ |
25 | #define LPFC_NVMET_DEFAULT_SEGS 64 /* 256K IOs */ | ||
26 | #define LPFC_NVMET_MAX_SEGS 510 | ||
27 | #define LPFC_NVMET_SUCCESS_LEN 12 | 25 | #define LPFC_NVMET_SUCCESS_LEN 12 |
28 | 26 | ||
29 | /* Used for NVME Target */ | 27 | /* Used for NVME Target */ |
@@ -77,6 +75,7 @@ struct lpfc_nvmet_rcv_ctx { | |||
77 | struct nvmefc_tgt_ls_req ls_req; | 75 | struct nvmefc_tgt_ls_req ls_req; |
78 | struct nvmefc_tgt_fcp_req fcp_req; | 76 | struct nvmefc_tgt_fcp_req fcp_req; |
79 | } ctx; | 77 | } ctx; |
78 | struct list_head list; | ||
80 | struct lpfc_hba *phba; | 79 | struct lpfc_hba *phba; |
81 | struct lpfc_iocbq *wqeq; | 80 | struct lpfc_iocbq *wqeq; |
82 | struct lpfc_iocbq *abort_wqeq; | 81 | struct lpfc_iocbq *abort_wqeq; |
@@ -98,10 +97,11 @@ struct lpfc_nvmet_rcv_ctx { | |||
98 | #define LPFC_NVMET_STE_RSP 4 | 97 | #define LPFC_NVMET_STE_RSP 4 |
99 | #define LPFC_NVMET_STE_DONE 5 | 98 | #define LPFC_NVMET_STE_DONE 5 |
100 | uint16_t flag; | 99 | uint16_t flag; |
101 | #define LPFC_NVMET_IO_INP 0x1 | 100 | #define LPFC_NVMET_IO_INP 0x1 /* IO is in progress on exchange */ |
102 | #define LPFC_NVMET_ABORT_OP 0x2 | 101 | #define LPFC_NVMET_ABORT_OP 0x2 /* Abort WQE issued on exchange */ |
103 | #define LPFC_NVMET_CTX_RLS 0x4 | 102 | #define LPFC_NVMET_XBUSY 0x4 /* XB bit set on IO cmpl */ |
104 | 103 | #define LPFC_NVMET_CTX_RLS 0x8 /* ctx free requested */ | |
104 | #define LPFC_NVMET_ABTS_RCV 0x10 /* ABTS received on exchange */ | ||
105 | struct rqb_dmabuf *rqb_buffer; | 105 | struct rqb_dmabuf *rqb_buffer; |
106 | 106 | ||
107 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS | 107 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
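Two things to notice in the header change: lpfc_nvmet_rcv_ctx grows a list_head so whole receive contexts can sit on driver lists (this pairs with the lpfc_abts_nvmet_ctx_list rename further down), and the flag space is renumbered, with LPFC_NVMET_CTX_RLS moving from 0x4 to 0x8 so the new XBUSY bit can take 0x4 and ABTS_RCV can be added at 0x10. Since an exchange can carry several of these at once (ABORT_OP plus XBUSY while an ABTS completion is outstanding, for instance), the bits must stay distinct powers of two. A throwaway check of that invariant, with the values copied from the hunk and a main() harness that is mine:

#include <assert.h>

#define LPFC_NVMET_IO_INP    0x1  /* IO is in progress on exchange */
#define LPFC_NVMET_ABORT_OP  0x2  /* Abort WQE issued on exchange */
#define LPFC_NVMET_XBUSY     0x4  /* XB bit set on IO cmpl */
#define LPFC_NVMET_CTX_RLS   0x8  /* ctx free requested */
#define LPFC_NVMET_ABTS_RCV  0x10 /* ABTS received on exchange */

int main(void)
{
	unsigned int bits[] = { LPFC_NVMET_IO_INP, LPFC_NVMET_ABORT_OP,
				LPFC_NVMET_XBUSY, LPFC_NVMET_CTX_RLS,
				LPFC_NVMET_ABTS_RCV };
	unsigned int seen = 0;

	for (int i = 0; i < 5; i++) {
		assert(bits[i] && !(bits[i] & (bits[i] - 1))); /* power of two */
		assert(!(seen & bits[i]));                     /* no overlap */
		seen |= bits[i];
	}
	return 0;
}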
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 1c9fa45df7eb..cf19f4976f5f 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c | |||
@@ -6338,7 +6338,7 @@ lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type, | |||
6338 | } | 6338 | } |
6339 | 6339 | ||
6340 | /** | 6340 | /** |
6341 | * lpfc_sli4_repost_sgl_list - Repsot the buffers sgl pages as block | 6341 | * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block |
6342 | * @phba: pointer to lpfc hba data structure. | 6342 | * @phba: pointer to lpfc hba data structure. |
6343 | * @pring: Pointer to driver SLI ring object. | 6343 | * @pring: Pointer to driver SLI ring object. |
6344 | * @sgl_list: linked link of sgl buffers to post | 6344 | * @sgl_list: linked link of sgl buffers to post |
@@ -13758,7 +13758,10 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue) | |||
13758 | lpfc_free_rq_buffer(queue->phba, queue); | 13758 | lpfc_free_rq_buffer(queue->phba, queue); |
13759 | kfree(queue->rqbp); | 13759 | kfree(queue->rqbp); |
13760 | } | 13760 | } |
13761 | kfree(queue->pring); | 13761 | |
13762 | if (!list_empty(&queue->wq_list)) | ||
13763 | list_del(&queue->wq_list); | ||
13764 | |||
13762 | kfree(queue); | 13765 | kfree(queue); |
13763 | return; | 13766 | return; |
13764 | } | 13767 | } |
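The lpfc_sli4_queue_free() hunk does two related things: it stops freeing queue->pring here (the matching kfree() moves into lpfc_wq_destroy() in a hunk below, where the pointer is also NULLed), and it unlinks the queue from wq_list before the final kfree() so nothing can reach the freed queue through a stale list entry. The list_empty() test makes the unlink safe for queues that were never added to a list, provided the node was initialized. A standalone model of that guard follows; struct queue, queue_free() and the harness are hypothetical, and the list helpers are a simplified imitation of the kernel's, which poison deleted nodes rather than self-linking them.

#include <stdio.h>
#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }
static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next; n->prev = h;
	h->next->prev = n; h->next = n;
}
static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->next = n->prev = n; /* self-link so a second unlink is harmless */
}

struct queue {
	struct list_head wq_list;
	/* ... other queue state ... */
};

static void queue_free(struct queue *q)
{
	/* Unlink only if linked; freeing while still on a list would
	 * leave the list pointing into freed memory. */
	if (!list_empty(&q->wq_list))
		list_del(&q->wq_list);
	free(q);
}

int main(void)
{
	struct list_head wqs;
	struct queue *q = malloc(sizeof(*q));

	if (!q)
		return 1;
	INIT_LIST_HEAD(&wqs);
	INIT_LIST_HEAD(&q->wq_list);
	list_add(&q->wq_list, &wqs);
	queue_free(q);
	printf("list empty after free: %d (expect 1)\n", list_empty(&wqs));
	return 0;
}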
@@ -14738,6 +14741,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, | |||
14738 | case LPFC_Q_CREATE_VERSION_1: | 14741 | case LPFC_Q_CREATE_VERSION_1: |
14739 | bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1, | 14742 | bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1, |
14740 | wq->entry_count); | 14743 | wq->entry_count); |
14744 | bf_set(lpfc_mbox_hdr_version, &shdr->request, | ||
14745 | LPFC_Q_CREATE_VERSION_1); | ||
14746 | |||
14741 | switch (wq->entry_size) { | 14747 | switch (wq->entry_size) { |
14742 | default: | 14748 | default: |
14743 | case 64: | 14749 | case 64: |
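The lpfc_wq_create() fix is about the mailbox header rather than the payload: the LPFC_Q_CREATE_VERSION_1 branch fills the request_1 layout (wqe_count, entry_size) but previously left the header's version field at its default, so the firmware would parse the request as version 0 and ignore those fields. The added bf_set() stamps LPFC_Q_CREATE_VERSION_1 into the header to match the payload. A compilable caricature of the bug; every name here is invented, and the real driver manipulates hardware-defined bitfields through bf_set():

#include <stdio.h>

enum { Q_CREATE_V0 = 0, Q_CREATE_V1 = 1 };

struct mbox_req {
	unsigned int hdr_version;  /* what the firmware trusts */
	unsigned int wqe_count_v1; /* only parsed when hdr_version == 1 */
};

/* Model of firmware that ignores v1 fields unless the header says v1. */
static unsigned int fw_effective_wqe_count(const struct mbox_req *req)
{
	return req->hdr_version == Q_CREATE_V1 ? req->wqe_count_v1
					       : 256; /* v0 default */
}

int main(void)
{
	struct mbox_req req = { .hdr_version = Q_CREATE_V0,
				.wqe_count_v1 = 1024 };

	printf("before fix: %u\n", fw_effective_wqe_count(&req)); /* 256 */
	req.hdr_version = Q_CREATE_V1; /* the added bf_set() in the hunk */
	printf("after fix:  %u\n", fw_effective_wqe_count(&req)); /* 1024 */
	return 0;
}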
@@ -15561,6 +15567,8 @@ lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) | |||
15561 | } | 15567 | } |
15562 | /* Remove wq from any list */ | 15568 | /* Remove wq from any list */ |
15563 | list_del_init(&wq->list); | 15569 | list_del_init(&wq->list); |
15570 | kfree(wq->pring); | ||
15571 | wq->pring = NULL; | ||
15564 | mempool_free(mbox, wq->phba->mbox_mem_pool); | 15572 | mempool_free(mbox, wq->phba->mbox_mem_pool); |
15565 | return status; | 15573 | return status; |
15566 | } | 15574 | } |
@@ -16513,7 +16521,7 @@ lpfc_sli4_xri_inrange(struct lpfc_hba *phba, | |||
16513 | * This function sends a basic response to a previous unsol sequence abort | 16521 | * This function sends a basic response to a previous unsol sequence abort |
16514 | * event after aborting the sequence handling. | 16522 | * event after aborting the sequence handling. |
16515 | **/ | 16523 | **/ |
16516 | static void | 16524 | void |
16517 | lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport, | 16525 | lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport, |
16518 | struct fc_frame_header *fc_hdr, bool aborted) | 16526 | struct fc_frame_header *fc_hdr, bool aborted) |
16519 | { | 16527 | { |
@@ -16534,14 +16542,13 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport, | |||
16534 | 16542 | ||
16535 | ndlp = lpfc_findnode_did(vport, sid); | 16543 | ndlp = lpfc_findnode_did(vport, sid); |
16536 | if (!ndlp) { | 16544 | if (!ndlp) { |
16537 | ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); | 16545 | ndlp = lpfc_nlp_init(vport, sid); |
16538 | if (!ndlp) { | 16546 | if (!ndlp) { |
16539 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, | 16547 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, |
16540 | "1268 Failed to allocate ndlp for " | 16548 | "1268 Failed to allocate ndlp for " |
16541 | "oxid:x%x SID:x%x\n", oxid, sid); | 16549 | "oxid:x%x SID:x%x\n", oxid, sid); |
16542 | return; | 16550 | return; |
16543 | } | 16551 | } |
16544 | lpfc_nlp_init(vport, ndlp, sid); | ||
16545 | /* Put ndlp onto pport node list */ | 16552 | /* Put ndlp onto pport node list */ |
16546 | lpfc_enqueue_node(vport, ndlp); | 16553 | lpfc_enqueue_node(vport, ndlp); |
16547 | } else if (!NLP_CHK_NODE_ACT(ndlp)) { | 16554 | } else if (!NLP_CHK_NODE_ACT(ndlp)) { |
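The lpfc_nlp_init() call pattern changes here (and again in lpfc_vport.c at the end of this diff): the function now allocates the node itself and takes (vport, did), replacing the old two-step of mempool_alloc() followed by lpfc_nlp_init(vport, ndlp, sid). Collapsing allocation and initialization into one constructor removes the window where a node exists but is uninitialized, and leaves a single place to fail. A generic before/after sketch of the refactor; node_init(), struct node and the field names are illustrative, not the driver's:

#include <stdio.h>
#include <stdlib.h>

struct node {
	unsigned int did;
	/* ... list heads, state, refcount ... */
};

/* After: constructor allocates and initializes, NULL on failure. */
static struct node *node_init(unsigned int did)
{
	struct node *n = calloc(1, sizeof(*n));

	if (!n)
		return NULL;
	n->did = did;
	return n;
}

int main(void)
{
	/* Before (two-step, error-prone):
	 *     n = pool_alloc(...); if (!n) ...; init_fields(n, did);
	 * After (one call, as in the hunk): */
	struct node *n = node_init(0xfffffe);

	if (!n)
		return 1;
	printf("node did: x%x\n", n->did);
	free(n);
	return 0;
}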
@@ -16690,6 +16697,11 @@ lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport, | |||
16690 | } | 16697 | } |
16691 | lpfc_in_buf_free(phba, &dmabuf->dbuf); | 16698 | lpfc_in_buf_free(phba, &dmabuf->dbuf); |
16692 | 16699 | ||
16700 | if (phba->nvmet_support) { | ||
16701 | lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr); | ||
16702 | return; | ||
16703 | } | ||
16704 | |||
16693 | /* Respond with BA_ACC or BA_RJT accordingly */ | 16705 | /* Respond with BA_ACC or BA_RJT accordingly */ |
16694 | lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted); | 16706 | lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted); |
16695 | } | 16707 | } |
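The last lpfc_sli.c hunk reroutes unsolicited abort handling: when the port runs as an NVME target (phba->nvmet_support), an incoming ABTS is handed to lpfc_nvmet_rcv_unsol_abort() and the generic BA_ACC/BA_RJT response path is skipped, since the target code owns the exchange state and decides how to answer. That is also why lpfc_sli4_seq_abort_rsp() loses its static qualifier above: the nvmet side can now call it once its own cleanup is done. A sketch of the dispatch shape, with all names other than the two quoted above invented:

#include <stdio.h>
#include <stdbool.h>

struct frame { unsigned int oxid; };

static void nvmet_rcv_unsol_abort(const struct frame *f)
{
	printf("nvmet path: oxid x%x\n", f->oxid); /* target-owned handling */
}

static void seq_abort_rsp(const struct frame *f, bool aborted)
{
	printf("generic %s: oxid x%x\n", aborted ? "BA_ACC" : "BA_RJT",
	       f->oxid);
}

static void handle_unsol_abort(const struct frame *f, bool nvmet_support,
			       bool aborted)
{
	if (nvmet_support) {
		/* Target mode: hand the whole event to the nvmet layer
		 * and return before the generic response is sent. */
		nvmet_rcv_unsol_abort(f);
		return;
	}
	seq_abort_rsp(f, aborted);
}

int main(void)
{
	struct frame f = { .oxid = 0x1234 };

	handle_unsol_abort(&f, true, false);
	handle_unsol_abort(&f, false, true);
	return 0;
}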
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 710458cf11d6..da46471337c8 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h | |||
@@ -620,7 +620,7 @@ struct lpfc_sli4_hba { | |||
620 | struct list_head lpfc_els_sgl_list; | 620 | struct list_head lpfc_els_sgl_list; |
621 | struct list_head lpfc_abts_els_sgl_list; | 621 | struct list_head lpfc_abts_els_sgl_list; |
622 | struct list_head lpfc_nvmet_sgl_list; | 622 | struct list_head lpfc_nvmet_sgl_list; |
623 | struct list_head lpfc_abts_nvmet_sgl_list; | 623 | struct list_head lpfc_abts_nvmet_ctx_list; |
624 | struct list_head lpfc_abts_scsi_buf_list; | 624 | struct list_head lpfc_abts_scsi_buf_list; |
625 | struct list_head lpfc_abts_nvme_buf_list; | 625 | struct list_head lpfc_abts_nvme_buf_list; |
626 | struct lpfc_sglq **lpfc_sglq_active_list; | 626 | struct lpfc_sglq **lpfc_sglq_active_list; |
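The lpfc_abts_nvmet_sgl_list to lpfc_abts_nvmet_ctx_list rename matches the list_head added to lpfc_nvmet_rcv_ctx earlier in this diff: the list evidently now tracks whole receive contexts rather than just their SGLs, held until the new XBUSY/CTX_RLS flag state says the exchange is safe to recycle.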
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index d4e95e28f4e3..1c26dc67151b 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h | |||
@@ -20,7 +20,7 @@ | |||
20 | * included with this package. * | 20 | * included with this package. * |
21 | *******************************************************************/ | 21 | *******************************************************************/ |
22 | 22 | ||
23 | #define LPFC_DRIVER_VERSION "11.2.0.10" | 23 | #define LPFC_DRIVER_VERSION "11.2.0.12" |
24 | #define LPFC_DRIVER_NAME "lpfc" | 24 | #define LPFC_DRIVER_NAME "lpfc" |
25 | 25 | ||
26 | /* Used for SLI 2/3 */ | 26 | /* Used for SLI 2/3 */ |
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 9a0339dbc024..c714482bf4c5 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c | |||
@@ -738,10 +738,9 @@ lpfc_vport_delete(struct fc_vport *fc_vport) | |||
738 | ndlp = lpfc_findnode_did(vport, Fabric_DID); | 738 | ndlp = lpfc_findnode_did(vport, Fabric_DID); |
739 | if (!ndlp) { | 739 | if (!ndlp) { |
740 | /* Cannot find existing Fabric ndlp, allocate one */ | 740 | /* Cannot find existing Fabric ndlp, allocate one */ |
741 | ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); | 741 | ndlp = lpfc_nlp_init(vport, Fabric_DID); |
742 | if (!ndlp) | 742 | if (!ndlp) |
743 | goto skip_logo; | 743 | goto skip_logo; |
744 | lpfc_nlp_init(vport, ndlp, Fabric_DID); | ||
745 | /* Indicate free memory when release */ | 744 | /* Indicate free memory when release */ |
746 | NLP_SET_FREE_REQ(ndlp); | 745 | NLP_SET_FREE_REQ(ndlp); |
747 | } else { | 746 | } else { |
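This is the same lpfc_nlp_init() conversion shown after the lpfc_sli4_seq_abort_rsp() hunk above (see the constructor sketch there): the vport-delete path likewise drops the separate mempool_alloc() and gets the node back fully initialized, with NLP_SET_FREE_REQ() still applied afterwards to mark it for freeing on release.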