path: root/drivers/target
author		Linus Torvalds <torvalds@linux-foundation.org>	2018-12-28 17:48:06 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-12-28 17:48:06 -0500
commit		938edb8a31b976c9a92eb0cd4ff481e93f76c1f1 (patch)
tree		0854d5f6859d51032f1d853eaa8ab0e8647fb0cb /drivers/target
parent		af7ddd8a627c62a835524b3f5b471edbbbcce025 (diff)
parent		da7903092b880b25971ca9103cb0b934a44ace2b (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "This is mostly update of the usual drivers: smarpqi, lpfc, qedi,
  megaraid_sas, libsas, zfcp, mpt3sas, hisi_sas. Additionally, we have
  a pile of annotation, unused variable and minor updates. The big API
  change is the updates for Christoph's DMA rework which include
  removing the DISABLE_CLUSTERING flag. And finally there are a couple
  of target tree updates"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (259 commits)
  scsi: isci: request: mark expected switch fall-through
  scsi: isci: remote_node_context: mark expected switch fall-throughs
  scsi: isci: remote_device: Mark expected switch fall-throughs
  scsi: isci: phy: Mark expected switch fall-through
  scsi: iscsi: Capture iscsi debug messages using tracepoints
  scsi: myrb: Mark expected switch fall-throughs
  scsi: megaraid: fix out-of-bound array accesses
  scsi: mpt3sas: mpt3sas_scsih: Mark expected switch fall-through
  scsi: fcoe: remove set but not used variable 'port'
  scsi: smartpqi: call pqi_free_interrupts() in pqi_shutdown()
  scsi: smartpqi: fix build warnings
  scsi: smartpqi: update driver version
  scsi: smartpqi: add ofa support
  scsi: smartpqi: increase fw status register read timeout
  scsi: smartpqi: bump driver version
  scsi: smartpqi: add smp_utils support
  scsi: smartpqi: correct lun reset issues
  scsi: smartpqi: correct volume status
  scsi: smartpqi: do not offline disks for transient did no connect conditions
  scsi: smartpqi: allow for larger raid maps
  ...
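For context, the DISABLE_CLUSTERING removal called out above is a mechanical
scsi_host_template conversion, and the tcm_loop hunk further down shows the
exact form used in this tree. A minimal sketch of the before/after shape (the
"example" names and the pairing of two templates are illustrative only, not
taken from this series):

	#include <scsi/scsi_host.h>

	/* Before the DMA rework: segment merging disabled via a dedicated flag. */
	static struct scsi_host_template example_sht_before = {
		.name           = "example",
		.use_clustering = DISABLE_CLUSTERING,
	};

	/* After: the constraint is expressed as a DMA boundary of one page,
	 * which is the conversion this series applies to tcm_loop below.
	 */
	static struct scsi_host_template example_sht_after = {
		.name         = "example",
		.dma_boundary = PAGE_SIZE - 1,
	};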
Diffstat (limited to 'drivers/target')
-rw-r--r--   drivers/target/iscsi/iscsi_target.c           |  11
-rw-r--r--   drivers/target/iscsi/iscsi_target_configfs.c  |  11
-rw-r--r--   drivers/target/iscsi/iscsi_target_erl1.c      |  28
-rw-r--r--   drivers/target/loopback/tcm_loop.c            |  10
-rw-r--r--   drivers/target/sbp/sbp_target.c               |   8
-rw-r--r--   drivers/target/target_core_alua.c             |   6
-rw-r--r--   drivers/target/target_core_configfs.c         | 157
-rw-r--r--   drivers/target/target_core_device.c           | 111
-rw-r--r--   drivers/target/target_core_fabric_configfs.c  |   2
-rw-r--r--   drivers/target/target_core_internal.h         |   2
-rw-r--r--   drivers/target/target_core_pr.c               |  90
-rw-r--r--   drivers/target/target_core_pscsi.c            |  50
-rw-r--r--   drivers/target/target_core_spc.c              |  28
-rw-r--r--   drivers/target/target_core_stat.c             |  34
-rw-r--r--   drivers/target/target_core_tmr.c              |  56
-rw-r--r--   drivers/target/target_core_tpg.c              |  23
-rw-r--r--   drivers/target/target_core_transport.c        | 416
-rw-r--r--   drivers/target/target_core_ua.c               |   4
-rw-r--r--   drivers/target/target_core_user.c             |   2
-rw-r--r--   drivers/target/target_core_xcopy.c            |  13
-rw-r--r--   drivers/target/tcm_fc/tfc_conf.c              |   8
21 files changed, 500 insertions, 570 deletions
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index c1d5a173553d..984941e036c8 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1493,8 +1493,6 @@ __iscsit_check_dataout_hdr(struct iscsi_conn *conn, void *buf,
 		if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
 			iscsit_stop_dataout_timer(cmd);
 
-		transport_check_aborted_status(se_cmd,
-				(hdr->flags & ISCSI_FLAG_CMD_FINAL));
 		return iscsit_dump_data_payload(conn, payload_length, 1);
 	}
 } else {
@@ -1509,12 +1507,9 @@ __iscsit_check_dataout_hdr(struct iscsi_conn *conn, void *buf,
 	 * TASK_ABORTED status.
 	 */
 	if (se_cmd->transport_state & CMD_T_ABORTED) {
-		if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
-			if (--cmd->outstanding_r2ts < 1) {
-				iscsit_stop_dataout_timer(cmd);
-				transport_check_aborted_status(
-						se_cmd, 1);
-			}
+		if (hdr->flags & ISCSI_FLAG_CMD_FINAL &&
+		    --cmd->outstanding_r2ts < 1)
+			iscsit_stop_dataout_timer(cmd);
 
 		return iscsit_dump_data_payload(conn, payload_length, 1);
 	}
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 95d0a22b2ad6..a5481dfeae8d 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1343,11 +1343,6 @@ static struct configfs_attribute *lio_target_discovery_auth_attrs[] = {
 
 /* Start functions for target_core_fabric_ops */
 
-static char *iscsi_get_fabric_name(void)
-{
-	return "iSCSI";
-}
-
 static int iscsi_get_cmd_state(struct se_cmd *se_cmd)
 {
 	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
@@ -1549,9 +1544,9 @@ static void lio_release_cmd(struct se_cmd *se_cmd)
 
 const struct target_core_fabric_ops iscsi_ops = {
 	.module = THIS_MODULE,
-	.name = "iscsi",
+	.fabric_alias = "iscsi",
+	.fabric_name = "iSCSI",
 	.node_acl_size = sizeof(struct iscsi_node_acl),
-	.get_fabric_name = iscsi_get_fabric_name,
 	.tpg_get_wwn = lio_tpg_get_endpoint_wwn,
 	.tpg_get_tag = lio_tpg_get_tag,
 	.tpg_get_default_depth = lio_tpg_get_default_depth,
@@ -1596,4 +1591,6 @@ const struct target_core_fabric_ops iscsi_ops = {
 	.tfc_tpg_nacl_attrib_attrs = lio_target_nacl_attrib_attrs,
 	.tfc_tpg_nacl_auth_attrs = lio_target_nacl_auth_attrs,
 	.tfc_tpg_nacl_param_attrs = lio_target_nacl_param_attrs,
+
+	.write_pending_must_be_called = true,
 };
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index a211e8154f4c..1b54a9c70851 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -943,20 +943,8 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
 		return 0;
 	}
 	spin_unlock_bh(&cmd->istate_lock);
-	/*
-	 * Determine if delayed TASK_ABORTED status for WRITEs
-	 * should be sent now if no unsolicited data out
-	 * payloads are expected, or if the delayed status
-	 * should be sent after unsolicited data out with
-	 * ISCSI_FLAG_CMD_FINAL set in iscsi_handle_data_out()
-	 */
-	if (transport_check_aborted_status(se_cmd,
-			(cmd->unsolicited_data == 0)) != 0)
+	if (cmd->se_cmd.transport_state & CMD_T_ABORTED)
 		return 0;
-	/*
-	 * Otherwise send CHECK_CONDITION and sense for
-	 * exception
-	 */
 	return transport_send_check_condition_and_sense(se_cmd,
 			cmd->sense_reason, 0);
 }
@@ -974,13 +962,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
 
 		if (!(cmd->cmd_flags &
 		      ICF_NON_IMMEDIATE_UNSOLICITED_DATA)) {
-			/*
-			 * Send the delayed TASK_ABORTED status for
-			 * WRITEs if no more unsolicitied data is
-			 * expected.
-			 */
-			if (transport_check_aborted_status(se_cmd, 1)
-					!= 0)
+			if (cmd->se_cmd.transport_state & CMD_T_ABORTED)
 				return 0;
 
 			iscsit_set_dataout_sequence_values(cmd);
@@ -995,11 +977,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
 
 	if ((cmd->data_direction == DMA_TO_DEVICE) &&
 	    !(cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA)) {
-		/*
-		 * Send the delayed TASK_ABORTED status for WRITEs if
-		 * no more nsolicitied data is expected.
-		 */
-		if (transport_check_aborted_status(se_cmd, 1) != 0)
+		if (cmd->se_cmd.transport_state & CMD_T_ABORTED)
 			return 0;
 
 		iscsit_set_unsoliticed_dataout(cmd);
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index bc8918f382e4..7bd7c0c0db6f 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -324,7 +324,7 @@ static struct scsi_host_template tcm_loop_driver_template = {
 	.sg_tablesize = 256,
 	.cmd_per_lun = 1024,
 	.max_sectors = 0xFFFF,
-	.use_clustering = DISABLE_CLUSTERING,
+	.dma_boundary = PAGE_SIZE - 1,
 	.slave_alloc = tcm_loop_slave_alloc,
 	.module = THIS_MODULE,
 	.track_queue_depth = 1,
@@ -460,11 +460,6 @@ static void tcm_loop_release_core_bus(void)
 	pr_debug("Releasing TCM Loop Core BUS\n");
 }
 
-static char *tcm_loop_get_fabric_name(void)
-{
-	return "loopback";
-}
-
 static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg)
 {
 	return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
@@ -1149,8 +1144,7 @@ static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
 
 static const struct target_core_fabric_ops loop_ops = {
 	.module = THIS_MODULE,
-	.name = "loopback",
-	.get_fabric_name = tcm_loop_get_fabric_name,
+	.fabric_name = "loopback",
 	.tpg_get_wwn = tcm_loop_get_endpoint_wwn,
 	.tpg_get_tag = tcm_loop_get_tag,
 	.tpg_check_demo_mode = tcm_loop_check_demo_mode,
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 3d10189ecedc..08cee13dfb9a 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -1694,11 +1694,6 @@ static int sbp_check_false(struct se_portal_group *se_tpg)
 	return 0;
 }
 
-static char *sbp_get_fabric_name(void)
-{
-	return "sbp";
-}
-
 static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg)
 {
 	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
@@ -2323,8 +2318,7 @@ static struct configfs_attribute *sbp_tpg_attrib_attrs[] = {
 
 static const struct target_core_fabric_ops sbp_ops = {
 	.module = THIS_MODULE,
-	.name = "sbp",
-	.get_fabric_name = sbp_get_fabric_name,
+	.fabric_name = "sbp",
 	.tpg_get_wwn = sbp_get_fabric_wwn,
 	.tpg_get_tag = sbp_get_tag,
 	.tpg_check_demo_mode = sbp_check_true,
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 4f134b0c3e29..6b0d9beacf90 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -451,7 +451,7 @@ static inline void set_ascq(struct se_cmd *cmd, u8 alua_ascq)
451 pr_debug("[%s]: ALUA TG Port not available, " 451 pr_debug("[%s]: ALUA TG Port not available, "
452 "SenseKey: NOT_READY, ASC/ASCQ: " 452 "SenseKey: NOT_READY, ASC/ASCQ: "
453 "0x04/0x%02x\n", 453 "0x04/0x%02x\n",
454 cmd->se_tfo->get_fabric_name(), alua_ascq); 454 cmd->se_tfo->fabric_name, alua_ascq);
455 455
456 cmd->scsi_asc = 0x04; 456 cmd->scsi_asc = 0x04;
457 cmd->scsi_ascq = alua_ascq; 457 cmd->scsi_ascq = alua_ascq;
@@ -1229,13 +1229,13 @@ static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
1229 1229
1230 if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL) { 1230 if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL) {
1231 path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s+%hu/lun_%llu", 1231 path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s+%hu/lun_%llu",
1232 db_root, se_tpg->se_tpg_tfo->get_fabric_name(), 1232 db_root, se_tpg->se_tpg_tfo->fabric_name,
1233 se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg), 1233 se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
1234 se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg), 1234 se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg),
1235 lun->unpacked_lun); 1235 lun->unpacked_lun);
1236 } else { 1236 } else {
1237 path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s/lun_%llu", 1237 path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s/lun_%llu",
1238 db_root, se_tpg->se_tpg_tfo->get_fabric_name(), 1238 db_root, se_tpg->se_tpg_tfo->fabric_name,
1239 se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg), 1239 se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
1240 lun->unpacked_lun); 1240 lun->unpacked_lun);
1241 } 1241 }
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index f6b1549f4142..72016d0dfca5 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -172,7 +172,10 @@ static struct target_fabric_configfs *target_core_get_fabric(
 
 	mutex_lock(&g_tf_lock);
 	list_for_each_entry(tf, &g_tf_list, tf_list) {
-		if (!strcmp(tf->tf_ops->name, name)) {
+		const char *cmp_name = tf->tf_ops->fabric_alias;
+		if (!cmp_name)
+			cmp_name = tf->tf_ops->fabric_name;
+		if (!strcmp(cmp_name, name)) {
 			atomic_inc(&tf->tf_access_cnt);
 			mutex_unlock(&g_tf_lock);
 			return tf;
@@ -249,7 +252,7 @@ static struct config_group *target_core_register_fabric(
 		return ERR_PTR(-EINVAL);
 	}
 	pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
-			" %s\n", tf->tf_ops->name);
+			" %s\n", tf->tf_ops->fabric_name);
 	/*
 	 * On a successful target_core_get_fabric() look, the returned
 	 * struct target_fabric_configfs *tf will contain a usage reference.
@@ -282,7 +285,7 @@ static void target_core_deregister_fabric(
 		" tf list\n", config_item_name(item));
 
 	pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:"
-			" %s\n", tf->tf_ops->name);
+			" %s\n", tf->tf_ops->fabric_name);
 	atomic_dec(&tf->tf_access_cnt);
 
 	pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
@@ -342,17 +345,20 @@ EXPORT_SYMBOL(target_undepend_item);
 
 static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
 {
-	if (!tfo->name) {
-		pr_err("Missing tfo->name\n");
-		return -EINVAL;
+	if (tfo->fabric_alias) {
+		if (strlen(tfo->fabric_alias) >= TARGET_FABRIC_NAME_SIZE) {
+			pr_err("Passed alias: %s exceeds "
+				"TARGET_FABRIC_NAME_SIZE\n", tfo->fabric_alias);
+			return -EINVAL;
+		}
 	}
-	if (strlen(tfo->name) >= TARGET_FABRIC_NAME_SIZE) {
-		pr_err("Passed name: %s exceeds TARGET_FABRIC"
-			"_NAME_SIZE\n", tfo->name);
+	if (!tfo->fabric_name) {
+		pr_err("Missing tfo->fabric_name\n");
 		return -EINVAL;
 	}
-	if (!tfo->get_fabric_name) {
-		pr_err("Missing tfo->get_fabric_name()\n");
+	if (strlen(tfo->fabric_name) >= TARGET_FABRIC_NAME_SIZE) {
+		pr_err("Passed name: %s exceeds "
+			"TARGET_FABRIC_NAME_SIZE\n", tfo->fabric_name);
 		return -EINVAL;
 	}
 	if (!tfo->tpg_get_wwn) {
@@ -486,7 +492,7 @@ void target_unregister_template(const struct target_core_fabric_ops *fo)
 
 	mutex_lock(&g_tf_lock);
 	list_for_each_entry(t, &g_tf_list, tf_list) {
-		if (!strcmp(t->tf_ops->name, fo->name)) {
+		if (!strcmp(t->tf_ops->fabric_name, fo->fabric_name)) {
 			BUG_ON(atomic_read(&t->tf_access_cnt));
 			list_del(&t->tf_list);
 			mutex_unlock(&g_tf_lock);
@@ -532,9 +538,9 @@ DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpu);
 DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpws);
 DEF_CONFIGFS_ATTRIB_SHOW(emulate_caw);
 DEF_CONFIGFS_ATTRIB_SHOW(emulate_3pc);
+DEF_CONFIGFS_ATTRIB_SHOW(emulate_pr);
 DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_type);
 DEF_CONFIGFS_ATTRIB_SHOW(hw_pi_prot_type);
-DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_format);
 DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_verify);
 DEF_CONFIGFS_ATTRIB_SHOW(enforce_pr_isids);
 DEF_CONFIGFS_ATTRIB_SHOW(is_nonrot);
@@ -592,6 +598,7 @@ static ssize_t _name##_store(struct config_item *item, const char *page, \
 DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_fua_write);
 DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_caw);
 DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_3pc);
+DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_pr);
 DEF_CONFIGFS_ATTRIB_STORE_BOOL(enforce_pr_isids);
 DEF_CONFIGFS_ATTRIB_STORE_BOOL(is_nonrot);
 
@@ -613,12 +620,17 @@ static void dev_set_t10_wwn_model_alias(struct se_device *dev)
 	const char *configname;
 
 	configname = config_item_name(&dev->dev_group.cg_item);
-	if (strlen(configname) >= 16) {
+	if (strlen(configname) >= INQUIRY_MODEL_LEN) {
 		pr_warn("dev[%p]: Backstore name '%s' is too long for "
-			"INQUIRY_MODEL, truncating to 16 bytes\n", dev,
+			"INQUIRY_MODEL, truncating to 15 characters\n", dev,
 			configname);
 	}
-	snprintf(&dev->t10_wwn.model[0], 16, "%s", configname);
+	/*
+	 * XXX We can't use sizeof(dev->t10_wwn.model) (INQUIRY_MODEL_LEN + 1)
+	 * here without potentially breaking existing setups, so continue to
+	 * truncate one byte shorter than what can be carried in INQUIRY.
+	 */
+	strlcpy(dev->t10_wwn.model, configname, INQUIRY_MODEL_LEN);
 }
 
 static ssize_t emulate_model_alias_store(struct config_item *item,
@@ -640,11 +652,12 @@ static ssize_t emulate_model_alias_store(struct config_item *item,
 	if (ret < 0)
 		return ret;
 
+	BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1);
 	if (flag) {
 		dev_set_t10_wwn_model_alias(dev);
 	} else {
-		strncpy(&dev->t10_wwn.model[0],
-			dev->transport->inquiry_prod, 16);
+		strlcpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
+			sizeof(dev->t10_wwn.model));
 	}
 	da->emulate_model_alias = flag;
 	return count;
@@ -1116,9 +1129,10 @@ CONFIGFS_ATTR(, emulate_tpu);
 CONFIGFS_ATTR(, emulate_tpws);
 CONFIGFS_ATTR(, emulate_caw);
 CONFIGFS_ATTR(, emulate_3pc);
+CONFIGFS_ATTR(, emulate_pr);
 CONFIGFS_ATTR(, pi_prot_type);
 CONFIGFS_ATTR_RO(, hw_pi_prot_type);
-CONFIGFS_ATTR(, pi_prot_format);
+CONFIGFS_ATTR_WO(, pi_prot_format);
 CONFIGFS_ATTR(, pi_prot_verify);
 CONFIGFS_ATTR(, enforce_pr_isids);
 CONFIGFS_ATTR(, is_nonrot);
@@ -1156,6 +1170,7 @@ struct configfs_attribute *sbc_attrib_attrs[] = {
 	&attr_emulate_tpws,
 	&attr_emulate_caw,
 	&attr_emulate_3pc,
+	&attr_emulate_pr,
 	&attr_pi_prot_type,
 	&attr_hw_pi_prot_type,
 	&attr_pi_prot_format,
@@ -1211,6 +1226,74 @@ static struct t10_wwn *to_t10_wwn(struct config_item *item)
 }
 
 /*
+ * STANDARD and VPD page 0x83 T10 Vendor Identification
+ */
+static ssize_t target_wwn_vendor_id_show(struct config_item *item,
+		char *page)
+{
+	return sprintf(page, "%s\n", &to_t10_wwn(item)->vendor[0]);
+}
+
+static ssize_t target_wwn_vendor_id_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct t10_wwn *t10_wwn = to_t10_wwn(item);
+	struct se_device *dev = t10_wwn->t10_dev;
+	/* +2 to allow for a trailing (stripped) '\n' and null-terminator */
+	unsigned char buf[INQUIRY_VENDOR_LEN + 2];
+	char *stripped = NULL;
+	size_t len;
+	int i;
+
+	len = strlcpy(buf, page, sizeof(buf));
+	if (len < sizeof(buf)) {
+		/* Strip any newline added from userspace. */
+		stripped = strstrip(buf);
+		len = strlen(stripped);
+	}
+	if (len > INQUIRY_VENDOR_LEN) {
+		pr_err("Emulated T10 Vendor Identification exceeds"
+			" INQUIRY_VENDOR_LEN: " __stringify(INQUIRY_VENDOR_LEN)
+			"\n");
+		return -EOVERFLOW;
+	}
+
+	/*
+	 * SPC 4.3.1:
+	 * ASCII data fields shall contain only ASCII printable characters (i.e.,
+	 * code values 20h to 7Eh) and may be terminated with one or more ASCII
+	 * null (00h) characters.
+	 */
+	for (i = 0; i < len; i++) {
+		if ((stripped[i] < 0x20) || (stripped[i] > 0x7E)) {
+			pr_err("Emulated T10 Vendor Identification contains"
+				" non-ASCII-printable characters\n");
+			return -EINVAL;
+		}
+	}
+
+	/*
+	 * Check to see if any active exports exist. If they do exist, fail
+	 * here as changing this information on the fly (underneath the
+	 * initiator side OS dependent multipath code) could cause negative
+	 * effects.
+	 */
+	if (dev->export_count) {
+		pr_err("Unable to set T10 Vendor Identification while"
+			" active %d exports exist\n", dev->export_count);
+		return -EINVAL;
+	}
+
+	BUILD_BUG_ON(sizeof(dev->t10_wwn.vendor) != INQUIRY_VENDOR_LEN + 1);
+	strlcpy(dev->t10_wwn.vendor, stripped, sizeof(dev->t10_wwn.vendor));
+
+	pr_debug("Target_Core_ConfigFS: Set emulated T10 Vendor Identification:"
+		" %s\n", dev->t10_wwn.vendor);
+
+	return count;
+}
+
+/*
  * VPD page 0x80 Unit serial
  */
 static ssize_t target_wwn_vpd_unit_serial_show(struct config_item *item,
@@ -1356,6 +1439,7 @@ DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10);
 /* VPD page 0x83 Association: SCSI Target Device */
 DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20);
 
+CONFIGFS_ATTR(target_wwn_, vendor_id);
 CONFIGFS_ATTR(target_wwn_, vpd_unit_serial);
 CONFIGFS_ATTR_RO(target_wwn_, vpd_protocol_identifier);
 CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_logical_unit);
@@ -1363,6 +1447,7 @@ CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_target_port);
 CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_scsi_target_device);
 
 static struct configfs_attribute *target_core_dev_wwn_attrs[] = {
+	&target_wwn_attr_vendor_id,
 	&target_wwn_attr_vpd_unit_serial,
 	&target_wwn_attr_vpd_protocol_identifier,
 	&target_wwn_attr_vpd_assoc_logical_unit,
@@ -1400,7 +1485,7 @@ static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev,
 	core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
 
 	return sprintf(page, "SPC-3 Reservation: %s Initiator: %s%s\n",
-		se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
+		se_nacl->se_tpg->se_tpg_tfo->fabric_name,
 		se_nacl->initiatorname, i_buf);
 }
 
@@ -1414,7 +1499,7 @@ static ssize_t target_core_dev_pr_show_spc2_res(struct se_device *dev,
 	if (se_nacl) {
 		len = sprintf(page,
 			"SPC-2 Reservation: %s Initiator: %s\n",
-			se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
+			se_nacl->se_tpg->se_tpg_tfo->fabric_name,
 			se_nacl->initiatorname);
 	} else {
 		len = sprintf(page, "No SPC-2 Reservation holder\n");
@@ -1427,6 +1512,9 @@ static ssize_t target_pr_res_holder_show(struct config_item *item, char *page)
 	struct se_device *dev = pr_to_dev(item);
 	int ret;
 
+	if (!dev->dev_attrib.emulate_pr)
+		return sprintf(page, "SPC_RESERVATIONS_DISABLED\n");
+
 	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
 		return sprintf(page, "Passthrough\n");
 
@@ -1489,13 +1577,13 @@ static ssize_t target_pr_res_pr_holder_tg_port_show(struct config_item *item,
 	tfo = se_tpg->se_tpg_tfo;
 
 	len += sprintf(page+len, "SPC-3 Reservation: %s"
-		" Target Node Endpoint: %s\n", tfo->get_fabric_name(),
+		" Target Node Endpoint: %s\n", tfo->fabric_name,
 		tfo->tpg_get_wwn(se_tpg));
 	len += sprintf(page+len, "SPC-3 Reservation: Relative Port"
 		" Identifier Tag: %hu %s Portal Group Tag: %hu"
 		" %s Logical Unit: %llu\n", pr_reg->tg_pt_sep_rtpi,
-		tfo->get_fabric_name(), tfo->tpg_get_tag(se_tpg),
-		tfo->get_fabric_name(), pr_reg->pr_aptpl_target_lun);
+		tfo->fabric_name, tfo->tpg_get_tag(se_tpg),
+		tfo->fabric_name, pr_reg->pr_aptpl_target_lun);
 
 out_unlock:
 	spin_unlock(&dev->dev_reservation_lock);
@@ -1526,7 +1614,7 @@ static ssize_t target_pr_res_pr_registered_i_pts_show(struct config_item *item,
 		core_pr_dump_initiator_port(pr_reg, i_buf,
 				PR_REG_ISID_ID_LEN);
 		sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n",
-			tfo->get_fabric_name(),
+			tfo->fabric_name,
 			pr_reg->pr_reg_nacl->initiatorname, i_buf, pr_reg->pr_res_key,
 			pr_reg->pr_res_generation);
 
@@ -1567,12 +1655,14 @@ static ssize_t target_pr_res_type_show(struct config_item *item, char *page)
 {
 	struct se_device *dev = pr_to_dev(item);
 
+	if (!dev->dev_attrib.emulate_pr)
+		return sprintf(page, "SPC_RESERVATIONS_DISABLED\n");
 	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
 		return sprintf(page, "SPC_PASSTHROUGH\n");
-	else if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
+	if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
 		return sprintf(page, "SPC2_RESERVATIONS\n");
-	else
-		return sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
+
+	return sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
 }
 
 static ssize_t target_pr_res_aptpl_active_show(struct config_item *item,
@@ -1580,7 +1670,8 @@ static ssize_t target_pr_res_aptpl_active_show(struct config_item *item,
 {
 	struct se_device *dev = pr_to_dev(item);
 
-	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
+	if (!dev->dev_attrib.emulate_pr ||
+	    (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
 		return 0;
 
 	return sprintf(page, "APTPL Bit Status: %s\n",
@@ -1592,7 +1683,8 @@ static ssize_t target_pr_res_aptpl_metadata_show(struct config_item *item,
 {
 	struct se_device *dev = pr_to_dev(item);
 
-	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
+	if (!dev->dev_attrib.emulate_pr ||
+	    (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
 		return 0;
 
 	return sprintf(page, "Ready to process PR APTPL metadata..\n");
@@ -1638,7 +1730,8 @@ static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
 	u16 tpgt = 0;
 	u8 type = 0;
 
-	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
+	if (!dev->dev_attrib.emulate_pr ||
+	    (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
 		return count;
 	if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
 		return count;
@@ -2746,7 +2839,7 @@ static ssize_t target_tg_pt_gp_members_show(struct config_item *item,
 		struct se_portal_group *tpg = lun->lun_tpg;
 
 		cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu"
-			"/%s\n", tpg->se_tpg_tfo->get_fabric_name(),
+			"/%s\n", tpg->se_tpg_tfo->fabric_name,
 			tpg->se_tpg_tfo->tpg_get_wwn(tpg),
 			tpg->se_tpg_tfo->tpg_get_tag(tpg),
 			config_item_name(&lun->lun_group.cg_item));
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 47b5ef153135..93c56f4a9911 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -95,7 +95,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
 		    deve->lun_access_ro) {
 			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
 				" Access for 0x%08llx\n",
-				se_cmd->se_tfo->get_fabric_name(),
+				se_cmd->se_tfo->fabric_name,
 				unpacked_lun);
 			rcu_read_unlock();
 			ret = TCM_WRITE_PROTECTED;
@@ -114,7 +114,7 @@ out_unlock:
 		if (unpacked_lun != 0) {
 			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
 				" Access for 0x%08llx\n",
-				se_cmd->se_tfo->get_fabric_name(),
+				se_cmd->se_tfo->fabric_name,
 				unpacked_lun);
 			return TCM_NON_EXISTENT_LUN;
 		}
@@ -188,7 +188,7 @@ out_unlock:
 	if (!se_lun) {
 		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
 			" Access for 0x%08llx\n",
-			se_cmd->se_tfo->get_fabric_name(),
+			se_cmd->se_tfo->fabric_name,
 			unpacked_lun);
 		return -ENODEV;
 	}
@@ -237,7 +237,7 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(
 		if (!lun) {
 			pr_err("%s device entries device pointer is"
 				" NULL, but Initiator has access.\n",
-				tpg->se_tpg_tfo->get_fabric_name());
+				tpg->se_tpg_tfo->fabric_name);
 			continue;
 		}
 		if (lun->lun_rtpi != rtpi)
@@ -571,9 +571,9 @@ int core_dev_add_lun(
 		return rc;
 
 	pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
-		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
+		" CORE HBA: %u\n", tpg->se_tpg_tfo->fabric_name,
 		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
-		tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
+		tpg->se_tpg_tfo->fabric_name, dev->se_hba->hba_id);
 	/*
 	 * Update LUN maps for dynamically added initiators when
 	 * generate_node_acl is enabled.
@@ -604,9 +604,9 @@ void core_dev_del_lun(
 	struct se_lun *lun)
 {
 	pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
-		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
+		" device object\n", tpg->se_tpg_tfo->fabric_name,
 		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
-		tpg->se_tpg_tfo->get_fabric_name());
+		tpg->se_tpg_tfo->fabric_name);
 
 	core_tpg_remove_lun(tpg, lun);
 }
@@ -621,7 +621,7 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
 
 	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
 		pr_err("%s InitiatorName exceeds maximum size.\n",
-			tpg->se_tpg_tfo->get_fabric_name());
+			tpg->se_tpg_tfo->fabric_name);
 		*ret = -EOVERFLOW;
 		return NULL;
 	}
@@ -664,7 +664,7 @@ int core_dev_add_initiator_node_lun_acl(
 		return -EINVAL;
 
 	pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
-		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+		" InitiatorNode: %s\n", tpg->se_tpg_tfo->fabric_name,
 		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
 		lun_access_ro ? "RO" : "RW",
 		nacl->initiatorname);
@@ -697,7 +697,7 @@ int core_dev_del_initiator_node_lun_acl(
 
 	pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
 		" InitiatorNode: %s Mapped LUN: %llu\n",
-		tpg->se_tpg_tfo->get_fabric_name(),
+		tpg->se_tpg_tfo->fabric_name,
 		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
 		nacl->initiatorname, lacl->mapped_lun);
 
@@ -709,9 +709,9 @@ void core_dev_free_initiator_node_lun_acl(
 	struct se_lun_acl *lacl)
 {
 	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
-		" Mapped LUN: %llu\n", tpg->se_tpg_tfo->get_fabric_name(),
+		" Mapped LUN: %llu\n", tpg->se_tpg_tfo->fabric_name,
 		tpg->se_tpg_tfo->tpg_get_tag(tpg),
-		tpg->se_tpg_tfo->get_fabric_name(),
+		tpg->se_tpg_tfo->fabric_name,
 		lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);
 
 	kfree(lacl);
@@ -720,36 +720,17 @@ void core_dev_free_initiator_node_lun_acl(
 static void scsi_dump_inquiry(struct se_device *dev)
 {
 	struct t10_wwn *wwn = &dev->t10_wwn;
-	char buf[17];
-	int i, device_type;
+	int device_type = dev->transport->get_device_type(dev);
+
 	/*
 	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
 	 */
-	for (i = 0; i < 8; i++)
-		if (wwn->vendor[i] >= 0x20)
-			buf[i] = wwn->vendor[i];
-		else
-			buf[i] = ' ';
-	buf[i] = '\0';
-	pr_debug(" Vendor: %s\n", buf);
-
-	for (i = 0; i < 16; i++)
-		if (wwn->model[i] >= 0x20)
-			buf[i] = wwn->model[i];
-		else
-			buf[i] = ' ';
-	buf[i] = '\0';
-	pr_debug(" Model: %s\n", buf);
-
-	for (i = 0; i < 4; i++)
-		if (wwn->revision[i] >= 0x20)
-			buf[i] = wwn->revision[i];
-		else
-			buf[i] = ' ';
-	buf[i] = '\0';
-	pr_debug(" Revision: %s\n", buf);
-
-	device_type = dev->transport->get_device_type(dev);
+	pr_debug(" Vendor: %-" __stringify(INQUIRY_VENDOR_LEN) "s\n",
+		wwn->vendor);
+	pr_debug(" Model: %-" __stringify(INQUIRY_MODEL_LEN) "s\n",
+		wwn->model);
+	pr_debug(" Revision: %-" __stringify(INQUIRY_REVISION_LEN) "s\n",
+		wwn->revision);
 	pr_debug(" Type: %s ", scsi_device_type(device_type));
 }
 
@@ -805,6 +786,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
 	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
 	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
 	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
+	dev->dev_attrib.emulate_pr = DA_EMULATE_PR;
 	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
 	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
 	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
@@ -822,13 +804,19 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
 
 	xcopy_lun = &dev->xcopy_lun;
 	rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
-	init_completion(&xcopy_lun->lun_ref_comp);
 	init_completion(&xcopy_lun->lun_shutdown_comp);
 	INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
 	INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
 	mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
 	xcopy_lun->lun_tpg = &xcopy_pt_tpg;
 
+	/* Preload the default INQUIRY const values */
+	strlcpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor));
+	strlcpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
+		sizeof(dev->t10_wwn.model));
+	strlcpy(dev->t10_wwn.revision, dev->transport->inquiry_rev,
+		sizeof(dev->t10_wwn.revision));
+
 	return dev;
 }
 
@@ -987,35 +975,10 @@ int target_configure_device(struct se_device *dev)
 		goto out_destroy_device;
 
 	/*
-	 * Startup the struct se_device processing thread
-	 */
-	dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
-			dev->transport->name);
-	if (!dev->tmr_wq) {
-		pr_err("Unable to create tmr workqueue for %s\n",
-			dev->transport->name);
-		ret = -ENOMEM;
-		goto out_free_alua;
-	}
-
-	/*
 	 * Setup work_queue for QUEUE_FULL
 	 */
 	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
 
-	/*
-	 * Preload the initial INQUIRY const values if we are doing
-	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
-	 * passthrough because this is being provided by the backend LLD.
-	 */
-	if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)) {
-		strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
-		strncpy(&dev->t10_wwn.model[0],
-			dev->transport->inquiry_prod, 16);
-		strncpy(&dev->t10_wwn.revision[0],
-			dev->transport->inquiry_rev, 4);
-	}
-
 	scsi_dump_inquiry(dev);
 
 	spin_lock(&hba->device_lock);
@@ -1026,8 +989,6 @@ int target_configure_device(struct se_device *dev)
 
 	return 0;
 
-out_free_alua:
-	core_alua_free_lu_gp_mem(dev);
 out_destroy_device:
 	dev->transport->destroy_device(dev);
 out_free_index:
@@ -1046,8 +1007,6 @@ void target_free_device(struct se_device *dev)
 	WARN_ON(!list_empty(&dev->dev_sep_list));
 
 	if (target_dev_configured(dev)) {
-		destroy_workqueue(dev->tmr_wq);
-
 		dev->transport->destroy_device(dev);
 
 		mutex_lock(&device_mutex);
@@ -1159,6 +1118,18 @@ passthrough_parse_cdb(struct se_cmd *cmd,
 	}
 
 	/*
+	 * With emulate_pr disabled, all reservation requests should fail,
+	 * regardless of whether or not TRANSPORT_FLAG_PASSTHROUGH_PGR is set.
+	 */
+	if (!dev->dev_attrib.emulate_pr &&
+	    ((cdb[0] == PERSISTENT_RESERVE_IN) ||
+	     (cdb[0] == PERSISTENT_RESERVE_OUT) ||
+	     (cdb[0] == RELEASE || cdb[0] == RELEASE_10) ||
+	     (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) {
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
+	}
+
+	/*
 	 * For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to
 	 * emulate the response, since tcmu does not have the information
 	 * required to process these commands.
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index aa2f4f632ebe..9a6e20a2af7d 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -203,7 +203,7 @@ static ssize_t target_fabric_mappedlun_write_protect_store(
 
 	pr_debug("%s_ConfigFS: Changed Initiator ACL: %s"
 		" Mapped LUN: %llu Write Protect bit to %s\n",
-		se_tpg->se_tpg_tfo->get_fabric_name(),
+		se_tpg->se_tpg_tfo->fabric_name,
 		se_nacl->initiatorname, lacl->mapped_lun, (wp) ? "ON" : "OFF");
 
 	return count;
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 0c6635587930..853344415963 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -138,7 +138,6 @@ int init_se_kmem_caches(void);
 void release_se_kmem_caches(void);
 u32 scsi_get_new_index(scsi_index_t);
 void transport_subsystem_check_init(void);
-int transport_cmd_finish_abort(struct se_cmd *);
 unsigned char *transport_dump_cmd_direction(struct se_cmd *);
 void transport_dump_dev_state(struct se_device *, char *, int *);
 void transport_dump_dev_info(struct se_device *, struct se_lun *,
@@ -148,7 +147,6 @@ int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int);
 int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int);
 int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
 void transport_clear_lun_ref(struct se_lun *);
-void transport_send_task_abort(struct se_cmd *);
 sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
 void target_qf_do_work(struct work_struct *work);
 bool target_check_wce(struct se_device *dev);
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 10db5656fd5d..397f38cb7f4e 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -235,7 +235,7 @@ target_scsi2_reservation_release(struct se_cmd *cmd)
 	tpg = sess->se_tpg;
 	pr_debug("SCSI-2 Released reservation for %s LUN: %llu ->"
 		" MAPPED LUN: %llu for %s\n",
-		tpg->se_tpg_tfo->get_fabric_name(),
+		tpg->se_tpg_tfo->fabric_name,
 		cmd->se_lun->unpacked_lun, cmd->orig_fe_lun,
 		sess->se_node_acl->initiatorname);
 
@@ -278,7 +278,7 @@ target_scsi2_reservation_reserve(struct se_cmd *cmd)
 	if (dev->dev_reserved_node_acl &&
 	    (dev->dev_reserved_node_acl != sess->se_node_acl)) {
 		pr_err("SCSI-2 RESERVATION CONFLIFT for %s fabric\n",
-			tpg->se_tpg_tfo->get_fabric_name());
+			tpg->se_tpg_tfo->fabric_name);
 		pr_err("Original reserver LUN: %llu %s\n",
 			cmd->se_lun->unpacked_lun,
 			dev->dev_reserved_node_acl->initiatorname);
@@ -297,7 +297,7 @@ target_scsi2_reservation_reserve(struct se_cmd *cmd)
 		dev->dev_reservation_flags |= DRF_SPC2_RESERVATIONS_WITH_ISID;
 	}
 	pr_debug("SCSI-2 Reserved %s LUN: %llu -> MAPPED LUN: %llu"
-		" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+		" for %s\n", tpg->se_tpg_tfo->fabric_name,
 		cmd->se_lun->unpacked_lun, cmd->orig_fe_lun,
 		sess->se_node_acl->initiatorname);
 
@@ -914,11 +914,11 @@ static void core_scsi3_aptpl_reserve(
 
 	pr_debug("SPC-3 PR [%s] Service Action: APTPL RESERVE created"
 		" new reservation holder TYPE: %s ALL_TG_PT: %d\n",
-		tpg->se_tpg_tfo->get_fabric_name(),
+		tpg->se_tpg_tfo->fabric_name,
 		core_scsi3_pr_dump_type(pr_reg->pr_res_type),
 		(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
 	pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n",
-		tpg->se_tpg_tfo->get_fabric_name(), node_acl->initiatorname,
+		tpg->se_tpg_tfo->fabric_name, node_acl->initiatorname,
 		i_buf);
 }
 
@@ -1036,19 +1036,19 @@ static void __core_scsi3_dump_registration(
 	core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
 
 	pr_debug("SPC-3 PR [%s] Service Action: REGISTER%s Initiator"
-		" Node: %s%s\n", tfo->get_fabric_name(), (register_type == REGISTER_AND_MOVE) ?
+		" Node: %s%s\n", tfo->fabric_name, (register_type == REGISTER_AND_MOVE) ?
 		"_AND_MOVE" : (register_type == REGISTER_AND_IGNORE_EXISTING_KEY) ?
 		"_AND_IGNORE_EXISTING_KEY" : "", nacl->initiatorname,
 		i_buf);
 	pr_debug("SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n",
-		tfo->get_fabric_name(), tfo->tpg_get_wwn(se_tpg),
+		tfo->fabric_name, tfo->tpg_get_wwn(se_tpg),
 		tfo->tpg_get_tag(se_tpg));
 	pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
-		" Port(s)\n", tfo->get_fabric_name(),
+		" Port(s)\n", tfo->fabric_name,
 		(pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
 		dev->transport->name);
 	pr_debug("SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
-		" 0x%08x APTPL: %d\n", tfo->get_fabric_name(),
+		" 0x%08x APTPL: %d\n", tfo->fabric_name,
 		pr_reg->pr_res_key, pr_reg->pr_res_generation,
 		pr_reg->pr_reg_aptpl);
 }
@@ -1329,7 +1329,7 @@ static void __core_scsi3_free_registration(
 	 */
 	while (atomic_read(&pr_reg->pr_res_holders) != 0) {
 		pr_debug("SPC-3 PR [%s] waiting for pr_res_holders\n",
-			tfo->get_fabric_name());
+			tfo->fabric_name);
 		cpu_relax();
 	}
 
@@ -1341,15 +1341,15 @@ static void __core_scsi3_free_registration(
 
 	spin_lock(&pr_tmpl->registration_lock);
 	pr_debug("SPC-3 PR [%s] Service Action: UNREGISTER Initiator"
-		" Node: %s%s\n", tfo->get_fabric_name(),
+		" Node: %s%s\n", tfo->fabric_name,
 		pr_reg->pr_reg_nacl->initiatorname,
 		i_buf);
 	pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
-		" Port(s)\n", tfo->get_fabric_name(),
+		" Port(s)\n", tfo->fabric_name,
 		(pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
 		dev->transport->name);
 	pr_debug("SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
-		" 0x%08x\n", tfo->get_fabric_name(), pr_reg->pr_res_key,
+		" 0x%08x\n", tfo->fabric_name, pr_reg->pr_res_key,
 		pr_reg->pr_res_generation);
 
 	if (!preempt_and_abort_list) {
@@ -1645,7 +1645,7 @@ core_scsi3_decode_spec_i_port(
 		dest_tpg = tmp_tpg;
 		pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node:"
 			" %s Port RTPI: %hu\n",
-			dest_tpg->se_tpg_tfo->get_fabric_name(),
+			dest_tpg->se_tpg_tfo->fabric_name,
 			dest_node_acl->initiatorname, dest_rtpi);
 
 		spin_lock(&dev->se_port_lock);
@@ -1662,7 +1662,7 @@ core_scsi3_decode_spec_i_port(
 
 		pr_debug("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u"
 			" tid_len: %d for %s + %s\n",
-			dest_tpg->se_tpg_tfo->get_fabric_name(), cmd->data_length,
+			dest_tpg->se_tpg_tfo->fabric_name, cmd->data_length,
 			tpdl, tid_len, i_str, iport_ptr);
 
 		if (tid_len > tpdl) {
@@ -1683,7 +1683,7 @@ core_scsi3_decode_spec_i_port(
 		if (!dest_se_deve) {
 			pr_err("Unable to locate %s dest_se_deve"
 				" from destination RTPI: %hu\n",
-				dest_tpg->se_tpg_tfo->get_fabric_name(),
+				dest_tpg->se_tpg_tfo->fabric_name,
 				dest_rtpi);
 
 			core_scsi3_nodeacl_undepend_item(dest_node_acl);
@@ -1704,7 +1704,7 @@ core_scsi3_decode_spec_i_port(
 
 		pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node: %s"
 			" dest_se_deve mapped_lun: %llu\n",
-			dest_tpg->se_tpg_tfo->get_fabric_name(),
+			dest_tpg->se_tpg_tfo->fabric_name,
 			dest_node_acl->initiatorname, dest_se_deve->mapped_lun);
 
 		/*
@@ -1815,7 +1815,7 @@ core_scsi3_decode_spec_i_port(
 
 	pr_debug("SPC-3 PR [%s] SPEC_I_PT: Successfully"
 		" registered Transport ID for Node: %s%s Mapped LUN:"
-		" %llu\n", dest_tpg->se_tpg_tfo->get_fabric_name(),
+		" %llu\n", dest_tpg->se_tpg_tfo->fabric_name,
 		dest_node_acl->initiatorname, i_buf, (dest_se_deve) ?
 		dest_se_deve->mapped_lun : 0);
 
@@ -1913,7 +1913,7 @@ static int core_scsi3_update_aptpl_buf(
1913 "res_holder=1\nres_type=%02x\n" 1913 "res_holder=1\nres_type=%02x\n"
1914 "res_scope=%02x\nres_all_tg_pt=%d\n" 1914 "res_scope=%02x\nres_all_tg_pt=%d\n"
1915 "mapped_lun=%llu\n", reg_count, 1915 "mapped_lun=%llu\n", reg_count,
1916 tpg->se_tpg_tfo->get_fabric_name(), 1916 tpg->se_tpg_tfo->fabric_name,
1917 pr_reg->pr_reg_nacl->initiatorname, isid_buf, 1917 pr_reg->pr_reg_nacl->initiatorname, isid_buf,
1918 pr_reg->pr_res_key, pr_reg->pr_res_type, 1918 pr_reg->pr_res_key, pr_reg->pr_res_type,
1919 pr_reg->pr_res_scope, pr_reg->pr_reg_all_tg_pt, 1919 pr_reg->pr_res_scope, pr_reg->pr_reg_all_tg_pt,
@@ -1923,7 +1923,7 @@ static int core_scsi3_update_aptpl_buf(
1923 "initiator_fabric=%s\ninitiator_node=%s\n%s" 1923 "initiator_fabric=%s\ninitiator_node=%s\n%s"
1924 "sa_res_key=%llu\nres_holder=0\n" 1924 "sa_res_key=%llu\nres_holder=0\n"
1925 "res_all_tg_pt=%d\nmapped_lun=%llu\n", 1925 "res_all_tg_pt=%d\nmapped_lun=%llu\n",
1926 reg_count, tpg->se_tpg_tfo->get_fabric_name(), 1926 reg_count, tpg->se_tpg_tfo->fabric_name,
1927 pr_reg->pr_reg_nacl->initiatorname, isid_buf, 1927 pr_reg->pr_reg_nacl->initiatorname, isid_buf,
1928 pr_reg->pr_res_key, pr_reg->pr_reg_all_tg_pt, 1928 pr_reg->pr_res_key, pr_reg->pr_reg_all_tg_pt,
1929 pr_reg->pr_res_mapped_lun); 1929 pr_reg->pr_res_mapped_lun);
@@ -1942,7 +1942,7 @@ static int core_scsi3_update_aptpl_buf(
1942 */ 1942 */
1943 snprintf(tmp, 512, "target_fabric=%s\ntarget_node=%s\n" 1943 snprintf(tmp, 512, "target_fabric=%s\ntarget_node=%s\n"
1944 "tpgt=%hu\nport_rtpi=%hu\ntarget_lun=%llu\nPR_REG_END:" 1944 "tpgt=%hu\nport_rtpi=%hu\ntarget_lun=%llu\nPR_REG_END:"
1945 " %d\n", tpg->se_tpg_tfo->get_fabric_name(), 1945 " %d\n", tpg->se_tpg_tfo->fabric_name,
1946 tpg->se_tpg_tfo->tpg_get_wwn(tpg), 1946 tpg->se_tpg_tfo->tpg_get_wwn(tpg),
1947 tpg->se_tpg_tfo->tpg_get_tag(tpg), 1947 tpg->se_tpg_tfo->tpg_get_tag(tpg),
1948 pr_reg->tg_pt_sep_rtpi, pr_reg->pr_aptpl_target_lun, 1948 pr_reg->tg_pt_sep_rtpi, pr_reg->pr_aptpl_target_lun,
@@ -2168,7 +2168,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
2168 pr_reg->pr_res_key = sa_res_key; 2168 pr_reg->pr_res_key = sa_res_key;
2169 pr_debug("SPC-3 PR [%s] REGISTER%s: Changed Reservation" 2169 pr_debug("SPC-3 PR [%s] REGISTER%s: Changed Reservation"
2170 " Key for %s to: 0x%016Lx PRgeneration:" 2170 " Key for %s to: 0x%016Lx PRgeneration:"
2171 " 0x%08x\n", cmd->se_tfo->get_fabric_name(), 2171 " 0x%08x\n", cmd->se_tfo->fabric_name,
2172 (register_type == REGISTER_AND_IGNORE_EXISTING_KEY) ? "_AND_IGNORE_EXISTING_KEY" : "", 2172 (register_type == REGISTER_AND_IGNORE_EXISTING_KEY) ? "_AND_IGNORE_EXISTING_KEY" : "",
2173 pr_reg->pr_reg_nacl->initiatorname, 2173 pr_reg->pr_reg_nacl->initiatorname,
2174 pr_reg->pr_res_key, pr_reg->pr_res_generation); 2174 pr_reg->pr_res_key, pr_reg->pr_res_generation);
@@ -2356,9 +2356,9 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
2356 pr_err("SPC-3 PR: Attempted RESERVE from" 2356 pr_err("SPC-3 PR: Attempted RESERVE from"
2357 " [%s]: %s while reservation already held by" 2357 " [%s]: %s while reservation already held by"
2358 " [%s]: %s, returning RESERVATION_CONFLICT\n", 2358 " [%s]: %s, returning RESERVATION_CONFLICT\n",
2359 cmd->se_tfo->get_fabric_name(), 2359 cmd->se_tfo->fabric_name,
2360 se_sess->se_node_acl->initiatorname, 2360 se_sess->se_node_acl->initiatorname,
2361 pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(), 2361 pr_res_nacl->se_tpg->se_tpg_tfo->fabric_name,
2362 pr_res_holder->pr_reg_nacl->initiatorname); 2362 pr_res_holder->pr_reg_nacl->initiatorname);
2363 2363
2364 spin_unlock(&dev->dev_reservation_lock); 2364 spin_unlock(&dev->dev_reservation_lock);
@@ -2379,9 +2379,9 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
2379 " [%s]: %s trying to change TYPE and/or SCOPE," 2379 " [%s]: %s trying to change TYPE and/or SCOPE,"
2380 " while reservation already held by [%s]: %s," 2380 " while reservation already held by [%s]: %s,"
2381 " returning RESERVATION_CONFLICT\n", 2381 " returning RESERVATION_CONFLICT\n",
2382 cmd->se_tfo->get_fabric_name(), 2382 cmd->se_tfo->fabric_name,
2383 se_sess->se_node_acl->initiatorname, 2383 se_sess->se_node_acl->initiatorname,
2384 pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(), 2384 pr_res_nacl->se_tpg->se_tpg_tfo->fabric_name,
2385 pr_res_holder->pr_reg_nacl->initiatorname); 2385 pr_res_holder->pr_reg_nacl->initiatorname);
2386 2386
2387 spin_unlock(&dev->dev_reservation_lock); 2387 spin_unlock(&dev->dev_reservation_lock);
@@ -2414,10 +2414,10 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
2414 2414
2415 pr_debug("SPC-3 PR [%s] Service Action: RESERVE created new" 2415 pr_debug("SPC-3 PR [%s] Service Action: RESERVE created new"
2416 " reservation holder TYPE: %s ALL_TG_PT: %d\n", 2416 " reservation holder TYPE: %s ALL_TG_PT: %d\n",
2417 cmd->se_tfo->get_fabric_name(), core_scsi3_pr_dump_type(type), 2417 cmd->se_tfo->fabric_name, core_scsi3_pr_dump_type(type),
2418 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); 2418 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
2419 pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n", 2419 pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n",
2420 cmd->se_tfo->get_fabric_name(), 2420 cmd->se_tfo->fabric_name,
2421 se_sess->se_node_acl->initiatorname, 2421 se_sess->se_node_acl->initiatorname,
2422 i_buf); 2422 i_buf);
2423 spin_unlock(&dev->dev_reservation_lock); 2423 spin_unlock(&dev->dev_reservation_lock);
@@ -2506,12 +2506,12 @@ out:
2506 if (!dev->dev_pr_res_holder) { 2506 if (!dev->dev_pr_res_holder) {
2507 pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared" 2507 pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared"
2508 " reservation holder TYPE: %s ALL_TG_PT: %d\n", 2508 " reservation holder TYPE: %s ALL_TG_PT: %d\n",
2509 tfo->get_fabric_name(), (explicit) ? "explicit" : 2509 tfo->fabric_name, (explicit) ? "explicit" :
2510 "implicit", core_scsi3_pr_dump_type(pr_res_type), 2510 "implicit", core_scsi3_pr_dump_type(pr_res_type),
2511 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); 2511 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
2512 } 2512 }
2513 pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n", 2513 pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n",
2514 tfo->get_fabric_name(), se_nacl->initiatorname, 2514 tfo->fabric_name, se_nacl->initiatorname,
2515 i_buf); 2515 i_buf);
2516 /* 2516 /*
2517 * Clear TYPE and SCOPE for the next PROUT Service Action: RESERVE 2517 * Clear TYPE and SCOPE for the next PROUT Service Action: RESERVE
@@ -2609,9 +2609,9 @@ core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope,
2609 " reservation from [%s]: %s with different TYPE " 2609 " reservation from [%s]: %s with different TYPE "
2610 "and/or SCOPE while reservation already held by" 2610 "and/or SCOPE while reservation already held by"
2611 " [%s]: %s, returning RESERVATION_CONFLICT\n", 2611 " [%s]: %s, returning RESERVATION_CONFLICT\n",
2612 cmd->se_tfo->get_fabric_name(), 2612 cmd->se_tfo->fabric_name,
2613 se_sess->se_node_acl->initiatorname, 2613 se_sess->se_node_acl->initiatorname,
2614 pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(), 2614 pr_res_nacl->se_tpg->se_tpg_tfo->fabric_name,
2615 pr_res_holder->pr_reg_nacl->initiatorname); 2615 pr_res_holder->pr_reg_nacl->initiatorname);
2616 2616
2617 spin_unlock(&dev->dev_reservation_lock); 2617 spin_unlock(&dev->dev_reservation_lock);
@@ -2752,7 +2752,7 @@ core_scsi3_emulate_pro_clear(struct se_cmd *cmd, u64 res_key)
2752 spin_unlock(&pr_tmpl->registration_lock); 2752 spin_unlock(&pr_tmpl->registration_lock);
2753 2753
2754 pr_debug("SPC-3 PR [%s] Service Action: CLEAR complete\n", 2754 pr_debug("SPC-3 PR [%s] Service Action: CLEAR complete\n",
2755 cmd->se_tfo->get_fabric_name()); 2755 cmd->se_tfo->fabric_name);
2756 2756
2757 core_scsi3_update_and_write_aptpl(cmd->se_dev, false); 2757 core_scsi3_update_and_write_aptpl(cmd->se_dev, false);
2758 2758
@@ -2791,11 +2791,11 @@ static void __core_scsi3_complete_pro_preempt(
2791 2791
2792 pr_debug("SPC-3 PR [%s] Service Action: PREEMPT%s created new" 2792 pr_debug("SPC-3 PR [%s] Service Action: PREEMPT%s created new"
2793 " reservation holder TYPE: %s ALL_TG_PT: %d\n", 2793 " reservation holder TYPE: %s ALL_TG_PT: %d\n",
2794 tfo->get_fabric_name(), (preempt_type == PREEMPT_AND_ABORT) ? "_AND_ABORT" : "", 2794 tfo->fabric_name, (preempt_type == PREEMPT_AND_ABORT) ? "_AND_ABORT" : "",
2795 core_scsi3_pr_dump_type(type), 2795 core_scsi3_pr_dump_type(type),
2796 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); 2796 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
2797 pr_debug("SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n", 2797 pr_debug("SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n",
2798 tfo->get_fabric_name(), (preempt_type == PREEMPT_AND_ABORT) ? "_AND_ABORT" : "", 2798 tfo->fabric_name, (preempt_type == PREEMPT_AND_ABORT) ? "_AND_ABORT" : "",
2799 nacl->initiatorname, i_buf); 2799 nacl->initiatorname, i_buf);
2800 /* 2800 /*
2801 * For PREEMPT_AND_ABORT, add the preempting reservation's 2801 * For PREEMPT_AND_ABORT, add the preempting reservation's
@@ -3282,7 +3282,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
3282 " proto_ident: 0x%02x does not match ident: 0x%02x" 3282 " proto_ident: 0x%02x does not match ident: 0x%02x"
3283 " from fabric: %s\n", proto_ident, 3283 " from fabric: %s\n", proto_ident,
3284 dest_se_tpg->proto_id, 3284 dest_se_tpg->proto_id,
3285 dest_tf_ops->get_fabric_name()); 3285 dest_tf_ops->fabric_name);
3286 ret = TCM_INVALID_PARAMETER_LIST; 3286 ret = TCM_INVALID_PARAMETER_LIST;
3287 goto out; 3287 goto out;
3288 } 3288 }
@@ -3299,7 +3299,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
3299 buf = NULL; 3299 buf = NULL;
3300 3300
3301 pr_debug("SPC-3 PR [%s] Extracted initiator %s identifier: %s" 3301 pr_debug("SPC-3 PR [%s] Extracted initiator %s identifier: %s"
3302 " %s\n", dest_tf_ops->get_fabric_name(), (iport_ptr != NULL) ? 3302 " %s\n", dest_tf_ops->fabric_name, (iport_ptr != NULL) ?
3303 "port" : "device", initiator_str, (iport_ptr != NULL) ? 3303 "port" : "device", initiator_str, (iport_ptr != NULL) ?
3304 iport_ptr : ""); 3304 iport_ptr : "");
3305 /* 3305 /*
@@ -3344,7 +3344,7 @@ after_iport_check:
3344 3344
3345 if (!dest_node_acl) { 3345 if (!dest_node_acl) {
3346 pr_err("Unable to locate %s dest_node_acl for" 3346 pr_err("Unable to locate %s dest_node_acl for"
3347 " TransportID%s\n", dest_tf_ops->get_fabric_name(), 3347 " TransportID%s\n", dest_tf_ops->fabric_name,
3348 initiator_str); 3348 initiator_str);
3349 ret = TCM_INVALID_PARAMETER_LIST; 3349 ret = TCM_INVALID_PARAMETER_LIST;
3350 goto out; 3350 goto out;
@@ -3360,7 +3360,7 @@ after_iport_check:
3360 } 3360 }
3361 3361
3362 pr_debug("SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:" 3362 pr_debug("SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:"
3363 " %s from TransportID\n", dest_tf_ops->get_fabric_name(), 3363 " %s from TransportID\n", dest_tf_ops->fabric_name,
3364 dest_node_acl->initiatorname); 3364 dest_node_acl->initiatorname);
3365 3365
3366 /* 3366 /*
@@ -3370,7 +3370,7 @@ after_iport_check:
3370 dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, rtpi); 3370 dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, rtpi);
3371 if (!dest_se_deve) { 3371 if (!dest_se_deve) {
3372 pr_err("Unable to locate %s dest_se_deve from RTPI:" 3372 pr_err("Unable to locate %s dest_se_deve from RTPI:"
3373 " %hu\n", dest_tf_ops->get_fabric_name(), rtpi); 3373 " %hu\n", dest_tf_ops->fabric_name, rtpi);
3374 ret = TCM_INVALID_PARAMETER_LIST; 3374 ret = TCM_INVALID_PARAMETER_LIST;
3375 goto out; 3375 goto out;
3376 } 3376 }
@@ -3385,7 +3385,7 @@ after_iport_check:
3385 3385
3386 pr_debug("SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN" 3386 pr_debug("SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN"
3387 " ACL for dest_se_deve->mapped_lun: %llu\n", 3387 " ACL for dest_se_deve->mapped_lun: %llu\n",
3388 dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname, 3388 dest_tf_ops->fabric_name, dest_node_acl->initiatorname,
3389 dest_se_deve->mapped_lun); 3389 dest_se_deve->mapped_lun);
3390 3390
3391 /* 3391 /*
@@ -3501,13 +3501,13 @@ after_iport_check:
3501 3501
3502 pr_debug("SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE" 3502 pr_debug("SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE"
3503 " created new reservation holder TYPE: %s on object RTPI:" 3503 " created new reservation holder TYPE: %s on object RTPI:"
3504 " %hu PRGeneration: 0x%08x\n", dest_tf_ops->get_fabric_name(), 3504 " %hu PRGeneration: 0x%08x\n", dest_tf_ops->fabric_name,
3505 core_scsi3_pr_dump_type(type), rtpi, 3505 core_scsi3_pr_dump_type(type), rtpi,
3506 dest_pr_reg->pr_res_generation); 3506 dest_pr_reg->pr_res_generation);
3507 pr_debug("SPC-3 PR Successfully moved reservation from" 3507 pr_debug("SPC-3 PR Successfully moved reservation from"
3508 " %s Fabric Node: %s%s -> %s Fabric Node: %s %s\n", 3508 " %s Fabric Node: %s%s -> %s Fabric Node: %s %s\n",
3509 tf_ops->get_fabric_name(), pr_reg_nacl->initiatorname, 3509 tf_ops->fabric_name, pr_reg_nacl->initiatorname,
3510 i_buf, dest_tf_ops->get_fabric_name(), 3510 i_buf, dest_tf_ops->fabric_name,
3511 dest_node_acl->initiatorname, (iport_ptr != NULL) ? 3511 dest_node_acl->initiatorname, (iport_ptr != NULL) ?
3512 iport_ptr : ""); 3512 iport_ptr : "");
3513 /* 3513 /*
@@ -4095,6 +4095,8 @@ target_check_reservation(struct se_cmd *cmd)
4095 return 0; 4095 return 0;
4096 if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) 4096 if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
4097 return 0; 4097 return 0;
4098 if (!dev->dev_attrib.emulate_pr)
4099 return 0;
4098 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR) 4100 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
4099 return 0; 4101 return 0;
4100 4102
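The bulk of the hunks above are the mechanical part of this series: reads of tfo->get_fabric_name() become reads of the tfo->fabric_name string. As a hedged sketch of what that conversion looks like on the fabric-driver side (the "demo" fabric and both ops tables below are invented for illustration, not taken from the tree):

/* Before this series: each fabric supplied a callback returning its name. */
static char *demo_get_fabric_name(void)
{
        return "demo";
}

static const struct target_core_fabric_ops demo_ops_old = {
        .get_fabric_name        = demo_get_fabric_name,
        /* ... remaining fabric callbacks ... */
};

/* After this series: the name is a plain string member, so core code such
 * as the pr_debug() calls above can print tfo->fabric_name directly.
 */
static const struct target_core_fabric_ops demo_ops_new = {
        .fabric_name            = "demo",
        /* ... remaining fabric callbacks ... */
};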
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index c062d363dce3..b5388a106567 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -179,20 +179,20 @@ out_free:
179static void 179static void
180pscsi_set_inquiry_info(struct scsi_device *sdev, struct t10_wwn *wwn) 180pscsi_set_inquiry_info(struct scsi_device *sdev, struct t10_wwn *wwn)
181{ 181{
182 unsigned char *buf;
183
184 if (sdev->inquiry_len < INQUIRY_LEN) 182 if (sdev->inquiry_len < INQUIRY_LEN)
185 return; 183 return;
186
187 buf = sdev->inquiry;
188 if (!buf)
189 return;
190 /* 184 /*
191 * Use sdev->inquiry from drivers/scsi/scsi_scan.c:scsi_alloc_sdev() 185 * Use sdev->inquiry data from drivers/scsi/scsi_scan.c:scsi_add_lun()
192 */ 186 */
193 memcpy(&wwn->vendor[0], &buf[8], sizeof(wwn->vendor)); 187 BUILD_BUG_ON(sizeof(wwn->vendor) != INQUIRY_VENDOR_LEN + 1);
194 memcpy(&wwn->model[0], &buf[16], sizeof(wwn->model)); 188 snprintf(wwn->vendor, sizeof(wwn->vendor),
195 memcpy(&wwn->revision[0], &buf[32], sizeof(wwn->revision)); 189 "%." __stringify(INQUIRY_VENDOR_LEN) "s", sdev->vendor);
190 BUILD_BUG_ON(sizeof(wwn->model) != INQUIRY_MODEL_LEN + 1);
191 snprintf(wwn->model, sizeof(wwn->model),
192 "%." __stringify(INQUIRY_MODEL_LEN) "s", sdev->model);
193 BUILD_BUG_ON(sizeof(wwn->revision) != INQUIRY_REVISION_LEN + 1);
194 snprintf(wwn->revision, sizeof(wwn->revision),
195 "%." __stringify(INQUIRY_REVISION_LEN) "s", sdev->rev);
196} 196}
197 197
198static int 198static int
@@ -811,7 +811,6 @@ static ssize_t pscsi_show_configfs_dev_params(struct se_device *dev, char *b)
811 struct scsi_device *sd = pdv->pdv_sd; 811 struct scsi_device *sd = pdv->pdv_sd;
812 unsigned char host_id[16]; 812 unsigned char host_id[16];
813 ssize_t bl; 813 ssize_t bl;
814 int i;
815 814
816 if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) 815 if (phv->phv_mode == PHV_VIRTUAL_HOST_ID)
817 snprintf(host_id, 16, "%d", pdv->pdv_host_id); 816 snprintf(host_id, 16, "%d", pdv->pdv_host_id);
@@ -824,29 +823,12 @@ static ssize_t pscsi_show_configfs_dev_params(struct se_device *dev, char *b)
824 host_id); 823 host_id);
825 824
826 if (sd) { 825 if (sd) {
827 bl += sprintf(b + bl, " "); 826 bl += sprintf(b + bl, " Vendor: %."
828 bl += sprintf(b + bl, "Vendor: "); 827 __stringify(INQUIRY_VENDOR_LEN) "s", sd->vendor);
829 for (i = 0; i < 8; i++) { 828 bl += sprintf(b + bl, " Model: %."
830 if (ISPRINT(sd->vendor[i])) /* printable character? */ 829 __stringify(INQUIRY_MODEL_LEN) "s", sd->model);
831 bl += sprintf(b + bl, "%c", sd->vendor[i]); 830 bl += sprintf(b + bl, " Rev: %."
832 else 831 __stringify(INQUIRY_REVISION_LEN) "s\n", sd->rev);
833 bl += sprintf(b + bl, " ");
834 }
835 bl += sprintf(b + bl, " Model: ");
836 for (i = 0; i < 16; i++) {
837 if (ISPRINT(sd->model[i])) /* printable character ? */
838 bl += sprintf(b + bl, "%c", sd->model[i]);
839 else
840 bl += sprintf(b + bl, " ");
841 }
842 bl += sprintf(b + bl, " Rev: ");
843 for (i = 0; i < 4; i++) {
844 if (ISPRINT(sd->rev[i])) /* printable character ? */
845 bl += sprintf(b + bl, "%c", sd->rev[i]);
846 else
847 bl += sprintf(b + bl, " ");
848 }
849 bl += sprintf(b + bl, "\n");
850 } 832 }
851 return bl; 833 return bl;
852} 834}
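pscsi now copies the fixed-width, not necessarily NUL-terminated vendor/model/revision strings with a printf precision assembled via __stringify(), which bounds the read and NUL-terminates the destination in one step. A standalone sketch of the same idiom; the macro definitions and the sample vendor field are illustrative stand-ins for the kernel headers:

#include <stdio.h>

#define __stringify_1(x)        #x
#define __stringify(x)          __stringify_1(x)
#define INQUIRY_VENDOR_LEN      8       /* matches the kernel constant */

int main(void)
{
        /* Fixed-width source field with no NUL terminator guaranteed. */
        const char vendor[INQUIRY_VENDOR_LEN] =
                { 'L', 'I', 'O', '-', 'O', 'R', 'G', ' ' };
        char out[INQUIRY_VENDOR_LEN + 1];

        /* Expands to "%.8s": read at most 8 bytes, NUL-terminate out[]. */
        snprintf(out, sizeof(out),
                 "%." __stringify(INQUIRY_VENDOR_LEN) "s", vendor);
        printf("'%s'\n", out);          /* prints 'LIO-ORG ' */
        return 0;
}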
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index f459118bc11b..47094ae01c04 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -108,12 +108,19 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
108 108
109 buf[7] = 0x2; /* CmdQue=1 */ 109 buf[7] = 0x2; /* CmdQue=1 */
110 110
111 memcpy(&buf[8], "LIO-ORG ", 8); 111 /*
112 memset(&buf[16], 0x20, 16); 112 * ASCII data fields described as being left-aligned shall have any
113 * unused bytes at the end of the field (i.e., highest offset) and the
114 * unused bytes shall be filled with ASCII space characters (20h).
115 */
116 memset(&buf[8], 0x20,
117 INQUIRY_VENDOR_LEN + INQUIRY_MODEL_LEN + INQUIRY_REVISION_LEN);
118 memcpy(&buf[8], dev->t10_wwn.vendor,
119 strnlen(dev->t10_wwn.vendor, INQUIRY_VENDOR_LEN));
113 memcpy(&buf[16], dev->t10_wwn.model, 120 memcpy(&buf[16], dev->t10_wwn.model,
114 min_t(size_t, strlen(dev->t10_wwn.model), 16)); 121 strnlen(dev->t10_wwn.model, INQUIRY_MODEL_LEN));
115 memcpy(&buf[32], dev->t10_wwn.revision, 122 memcpy(&buf[32], dev->t10_wwn.revision,
116 min_t(size_t, strlen(dev->t10_wwn.revision), 4)); 123 strnlen(dev->t10_wwn.revision, INQUIRY_REVISION_LEN));
117 buf[4] = 31; /* Set additional length to 31 */ 124 buf[4] = 31; /* Set additional length to 31 */
118 125
119 return 0; 126 return 0;
@@ -251,7 +258,10 @@ check_t10_vend_desc:
251 buf[off] = 0x2; /* ASCII */ 258 buf[off] = 0x2; /* ASCII */
252 buf[off+1] = 0x1; /* T10 Vendor ID */ 259 buf[off+1] = 0x1; /* T10 Vendor ID */
253 buf[off+2] = 0x0; 260 buf[off+2] = 0x0;
254 memcpy(&buf[off+4], "LIO-ORG", 8); 261 /* left align Vendor ID and pad with spaces */
262 memset(&buf[off+4], 0x20, INQUIRY_VENDOR_LEN);
263 memcpy(&buf[off+4], dev->t10_wwn.vendor,
264 strnlen(dev->t10_wwn.vendor, INQUIRY_VENDOR_LEN));
255 /* Extra Byte for NULL Terminator */ 265 /* Extra Byte for NULL Terminator */
256 id_len++; 266 id_len++;
257 /* Identifier Length */ 267 /* Identifier Length */
@@ -1281,6 +1291,14 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
1281 struct se_device *dev = cmd->se_dev; 1291 struct se_device *dev = cmd->se_dev;
1282 unsigned char *cdb = cmd->t_task_cdb; 1292 unsigned char *cdb = cmd->t_task_cdb;
1283 1293
1294 if (!dev->dev_attrib.emulate_pr &&
1295 ((cdb[0] == PERSISTENT_RESERVE_IN) ||
1296 (cdb[0] == PERSISTENT_RESERVE_OUT) ||
1297 (cdb[0] == RELEASE || cdb[0] == RELEASE_10) ||
1298 (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) {
1299 return TCM_UNSUPPORTED_SCSI_OPCODE;
1300 }
1301
1284 switch (cdb[0]) { 1302 switch (cdb[0]) {
1285 case MODE_SELECT: 1303 case MODE_SELECT:
1286 *size = cdb[4]; 1304 *size = cdb[4];
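The standard-INQUIRY change follows the SPC rule quoted in the new comment: left-align the ASCII fields and fill the unused tail with 20h. A minimal standalone sketch of that memset()/memcpy()/strnlen() pattern (field lengths mirror the kernel constants; the vendor/model strings are made up):

#define _POSIX_C_SOURCE 200809L         /* for strnlen() */
#include <stdio.h>
#include <string.h>

#define INQUIRY_VENDOR_LEN      8
#define INQUIRY_MODEL_LEN       16

int main(void)
{
        unsigned char buf[36] = { 0 };
        const char *vendor = "LIO-ORG";         /* shorter than 8 bytes  */
        const char *model  = "demo-model";      /* shorter than 16 bytes */

        /* Left-aligned ASCII, unused trailing bytes filled with 0x20. */
        memset(&buf[8], 0x20, INQUIRY_VENDOR_LEN + INQUIRY_MODEL_LEN);
        memcpy(&buf[8],  vendor, strnlen(vendor, INQUIRY_VENDOR_LEN));
        memcpy(&buf[16], model,  strnlen(model,  INQUIRY_MODEL_LEN));

        /* Prints "LIO-ORG demo-model      |" -- both fields space-padded. */
        printf("%.24s|\n", (const char *)&buf[8]);
        return 0;
}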
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index f0db91ebd735..8d9ceedfd455 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -246,43 +246,25 @@ static ssize_t target_stat_lu_lu_name_show(struct config_item *item, char *page)
246static ssize_t target_stat_lu_vend_show(struct config_item *item, char *page) 246static ssize_t target_stat_lu_vend_show(struct config_item *item, char *page)
247{ 247{
248 struct se_device *dev = to_stat_lu_dev(item); 248 struct se_device *dev = to_stat_lu_dev(item);
249 int i;
250 char str[sizeof(dev->t10_wwn.vendor)+1];
251 249
252 /* scsiLuVendorId */ 250 return snprintf(page, PAGE_SIZE, "%-" __stringify(INQUIRY_VENDOR_LEN)
253 for (i = 0; i < sizeof(dev->t10_wwn.vendor); i++) 251 "s\n", dev->t10_wwn.vendor);
254 str[i] = ISPRINT(dev->t10_wwn.vendor[i]) ?
255 dev->t10_wwn.vendor[i] : ' ';
256 str[i] = '\0';
257 return snprintf(page, PAGE_SIZE, "%s\n", str);
258} 252}
259 253
260static ssize_t target_stat_lu_prod_show(struct config_item *item, char *page) 254static ssize_t target_stat_lu_prod_show(struct config_item *item, char *page)
261{ 255{
262 struct se_device *dev = to_stat_lu_dev(item); 256 struct se_device *dev = to_stat_lu_dev(item);
263 int i;
264 char str[sizeof(dev->t10_wwn.model)+1];
265 257
266 /* scsiLuProductId */ 258 return snprintf(page, PAGE_SIZE, "%-" __stringify(INQUIRY_MODEL_LEN)
267 for (i = 0; i < sizeof(dev->t10_wwn.model); i++) 259 "s\n", dev->t10_wwn.model);
268 str[i] = ISPRINT(dev->t10_wwn.model[i]) ?
269 dev->t10_wwn.model[i] : ' ';
270 str[i] = '\0';
271 return snprintf(page, PAGE_SIZE, "%s\n", str);
272} 260}
273 261
274static ssize_t target_stat_lu_rev_show(struct config_item *item, char *page) 262static ssize_t target_stat_lu_rev_show(struct config_item *item, char *page)
275{ 263{
276 struct se_device *dev = to_stat_lu_dev(item); 264 struct se_device *dev = to_stat_lu_dev(item);
277 int i;
278 char str[sizeof(dev->t10_wwn.revision)+1];
279 265
280 /* scsiLuRevisionId */ 266 return snprintf(page, PAGE_SIZE, "%-" __stringify(INQUIRY_REVISION_LEN)
281 for (i = 0; i < sizeof(dev->t10_wwn.revision); i++) 267 "s\n", dev->t10_wwn.revision);
282 str[i] = ISPRINT(dev->t10_wwn.revision[i]) ?
283 dev->t10_wwn.revision[i] : ' ';
284 str[i] = '\0';
285 return snprintf(page, PAGE_SIZE, "%s\n", str);
286} 268}
287 269
288static ssize_t target_stat_lu_dev_type_show(struct config_item *item, char *page) 270static ssize_t target_stat_lu_dev_type_show(struct config_item *item, char *page)
@@ -612,7 +594,7 @@ static ssize_t target_stat_tgt_port_name_show(struct config_item *item,
612 dev = rcu_dereference(lun->lun_se_dev); 594 dev = rcu_dereference(lun->lun_se_dev);
613 if (dev) 595 if (dev)
614 ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n", 596 ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n",
615 tpg->se_tpg_tfo->get_fabric_name(), 597 tpg->se_tpg_tfo->fabric_name,
616 lun->lun_rtpi); 598 lun->lun_rtpi);
617 rcu_read_unlock(); 599 rcu_read_unlock();
618 return ret; 600 return ret;
@@ -767,7 +749,7 @@ static ssize_t target_stat_transport_device_show(struct config_item *item,
767 if (dev) { 749 if (dev) {
768 /* scsiTransportType */ 750 /* scsiTransportType */
769 ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n", 751 ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n",
770 tpg->se_tpg_tfo->get_fabric_name()); 752 tpg->se_tpg_tfo->fabric_name);
771 } 753 }
772 rcu_read_unlock(); 754 rcu_read_unlock();
773 return ret; 755 return ret;
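Because the t10_wwn strings are now kept NUL-terminated at their source, the statistics handlers can print them directly, left-justified to the field width. A tiny standalone illustration of the "%-<len>s" minimum-width form used here versus the "%.<len>s" precision form used elsewhere in the series (the vendor string is made up):

#include <stdio.h>

#define __stringify_1(x)        #x
#define __stringify(x)          __stringify_1(x)
#define INQUIRY_VENDOR_LEN      8

int main(void)
{
        const char *vendor = "LIO-ORG";

        /* "%-8s": pad with trailing spaces up to width 8, never truncate. */
        printf("[%-" __stringify(INQUIRY_VENDOR_LEN) "s]\n", vendor);
        /* "%.8s": copy at most 8 bytes, no padding.                       */
        printf("[%." __stringify(INQUIRY_VENDOR_LEN) "s]\n", vendor);
        return 0;
}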
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 6d1179a7f043..ad0061e09d4c 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -163,18 +163,23 @@ void core_tmr_abort_task(
163 continue; 163 continue;
164 164
165 printk("ABORT_TASK: Found referenced %s task_tag: %llu\n", 165 printk("ABORT_TASK: Found referenced %s task_tag: %llu\n",
166 se_cmd->se_tfo->get_fabric_name(), ref_tag); 166 se_cmd->se_tfo->fabric_name, ref_tag);
167 167
168 if (!__target_check_io_state(se_cmd, se_sess, 0)) 168 if (!__target_check_io_state(se_cmd, se_sess,
169 dev->dev_attrib.emulate_tas))
169 continue; 170 continue;
170 171
171 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 172 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
172 173
173 cancel_work_sync(&se_cmd->work); 174 /*
174 transport_wait_for_tasks(se_cmd); 175 * Ensure that this ABORT request is visible to the LU RESET
176 * code.
177 */
178 if (!tmr->tmr_dev)
179 WARN_ON_ONCE(transport_lookup_tmr_lun(tmr->task_cmd,
180 se_cmd->orig_fe_lun) < 0);
175 181
176 if (!transport_cmd_finish_abort(se_cmd)) 182 target_put_cmd_and_wait(se_cmd);
177 target_put_sess_cmd(se_cmd);
178 183
179 printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for" 184 printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
180 " ref_tag: %llu\n", ref_tag); 185 " ref_tag: %llu\n", ref_tag);
@@ -268,14 +273,28 @@ static void core_tmr_drain_tmr_list(
268 (preempt_and_abort_list) ? "Preempt" : "", tmr_p, 273 (preempt_and_abort_list) ? "Preempt" : "", tmr_p,
269 tmr_p->function, tmr_p->response, cmd->t_state); 274 tmr_p->function, tmr_p->response, cmd->t_state);
270 275
271 cancel_work_sync(&cmd->work); 276 target_put_cmd_and_wait(cmd);
272 transport_wait_for_tasks(cmd);
273
274 if (!transport_cmd_finish_abort(cmd))
275 target_put_sess_cmd(cmd);
276 } 277 }
277} 278}
278 279
280/**
281 * core_tmr_drain_state_list() - abort SCSI commands associated with a device
282 *
283 * @dev: Device for which to abort outstanding SCSI commands.
284 * @prout_cmd: Pointer to the SCSI PREEMPT AND ABORT if this function is called
285 * to realize the PREEMPT AND ABORT functionality.
286 * @tmr_sess: Session through which the LUN RESET has been received.
287 * @tas: Task Aborted Status (TAS) bit from the SCSI control mode page.
288 * A quote from SPC-4, paragraph "7.5.10 Control mode page":
289 * "A task aborted status (TAS) bit set to zero specifies that
290 * aborted commands shall be terminated by the device server
291 * without any response to the application client. A TAS bit set
292 * to one specifies that commands aborted by the actions of an I_T
293 * nexus other than the I_T nexus on which the command was
294 * received shall be completed with TASK ABORTED status."
295 * @preempt_and_abort_list: For the PREEMPT AND ABORT functionality, a list
296 * with registrations that will be preempted.
297 */
279static void core_tmr_drain_state_list( 298static void core_tmr_drain_state_list(
280 struct se_device *dev, 299 struct se_device *dev,
281 struct se_cmd *prout_cmd, 300 struct se_cmd *prout_cmd,
@@ -350,18 +369,7 @@ static void core_tmr_drain_state_list(
350 cmd->tag, (preempt_and_abort_list) ? "preempt" : "", 369 cmd->tag, (preempt_and_abort_list) ? "preempt" : "",
351 cmd->pr_res_key); 370 cmd->pr_res_key);
352 371
353 /* 372 target_put_cmd_and_wait(cmd);
354 * If the command may be queued onto a workqueue cancel it now.
355 *
356 * This is equivalent to removal from the execute queue in the
357 * loop above, but we do it down here given that
358 * cancel_work_sync may block.
359 */
360 cancel_work_sync(&cmd->work);
361 transport_wait_for_tasks(cmd);
362
363 if (!transport_cmd_finish_abort(cmd))
364 target_put_sess_cmd(cmd);
365 } 373 }
366} 374}
367 375
@@ -398,7 +406,7 @@ int core_tmr_lun_reset(
398 if (tmr_nacl && tmr_tpg) { 406 if (tmr_nacl && tmr_tpg) {
399 pr_debug("LUN_RESET: TMR caller fabric: %s" 407 pr_debug("LUN_RESET: TMR caller fabric: %s"
400 " initiator port %s\n", 408 " initiator port %s\n",
401 tmr_tpg->se_tpg_tfo->get_fabric_name(), 409 tmr_tpg->se_tpg_tfo->fabric_name,
402 tmr_nacl->initiatorname); 410 tmr_nacl->initiatorname);
403 } 411 }
404 } 412 }
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 02e8a5d86658..e2ace1059437 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -151,7 +151,7 @@ void core_tpg_add_node_to_devs(
151 151
152 pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s" 152 pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
153 " access for LUN in Demo Mode\n", 153 " access for LUN in Demo Mode\n",
154 tpg->se_tpg_tfo->get_fabric_name(), 154 tpg->se_tpg_tfo->fabric_name,
155 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, 155 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
156 lun_access_ro ? "READ-ONLY" : "READ-WRITE"); 156 lun_access_ro ? "READ-ONLY" : "READ-WRITE");
157 157
@@ -176,7 +176,7 @@ target_set_nacl_queue_depth(struct se_portal_group *tpg,
176 176
177 if (!acl->queue_depth) { 177 if (!acl->queue_depth) {
178 pr_warn("Queue depth for %s Initiator Node: %s is 0," 178 pr_warn("Queue depth for %s Initiator Node: %s is 0,"
179 "defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(), 179 "defaulting to 1.\n", tpg->se_tpg_tfo->fabric_name,
180 acl->initiatorname); 180 acl->initiatorname);
181 acl->queue_depth = 1; 181 acl->queue_depth = 1;
182 } 182 }
@@ -227,11 +227,11 @@ static void target_add_node_acl(struct se_node_acl *acl)
227 227
228 pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s" 228 pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
229 " Initiator Node: %s\n", 229 " Initiator Node: %s\n",
230 tpg->se_tpg_tfo->get_fabric_name(), 230 tpg->se_tpg_tfo->fabric_name,
231 tpg->se_tpg_tfo->tpg_get_tag(tpg), 231 tpg->se_tpg_tfo->tpg_get_tag(tpg),
232 acl->dynamic_node_acl ? "DYNAMIC" : "", 232 acl->dynamic_node_acl ? "DYNAMIC" : "",
233 acl->queue_depth, 233 acl->queue_depth,
234 tpg->se_tpg_tfo->get_fabric_name(), 234 tpg->se_tpg_tfo->fabric_name,
235 acl->initiatorname); 235 acl->initiatorname);
236} 236}
237 237
@@ -313,7 +313,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
313 if (acl->dynamic_node_acl) { 313 if (acl->dynamic_node_acl) {
314 acl->dynamic_node_acl = 0; 314 acl->dynamic_node_acl = 0;
315 pr_debug("%s_TPG[%u] - Replacing dynamic ACL" 315 pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
316 " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), 316 " for %s\n", tpg->se_tpg_tfo->fabric_name,
317 tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname); 317 tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
318 mutex_unlock(&tpg->acl_node_mutex); 318 mutex_unlock(&tpg->acl_node_mutex);
319 return acl; 319 return acl;
@@ -321,7 +321,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
321 321
322 pr_err("ACL entry for %s Initiator" 322 pr_err("ACL entry for %s Initiator"
323 " Node %s already exists for TPG %u, ignoring" 323 " Node %s already exists for TPG %u, ignoring"
324 " request.\n", tpg->se_tpg_tfo->get_fabric_name(), 324 " request.\n", tpg->se_tpg_tfo->fabric_name,
325 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); 325 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
326 mutex_unlock(&tpg->acl_node_mutex); 326 mutex_unlock(&tpg->acl_node_mutex);
327 return ERR_PTR(-EEXIST); 327 return ERR_PTR(-EEXIST);
@@ -380,9 +380,9 @@ void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
380 core_free_device_list_for_node(acl, tpg); 380 core_free_device_list_for_node(acl, tpg);
381 381
382 pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s" 382 pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
383 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), 383 " Initiator Node: %s\n", tpg->se_tpg_tfo->fabric_name,
384 tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth, 384 tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
385 tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname); 385 tpg->se_tpg_tfo->fabric_name, acl->initiatorname);
386 386
387 kfree(acl); 387 kfree(acl);
388} 388}
@@ -418,7 +418,7 @@ int core_tpg_set_initiator_node_queue_depth(
418 418
419 pr_debug("Successfully changed queue depth to: %d for Initiator" 419 pr_debug("Successfully changed queue depth to: %d for Initiator"
420 " Node: %s on %s Target Portal Group: %u\n", acl->queue_depth, 420 " Node: %s on %s Target Portal Group: %u\n", acl->queue_depth,
421 acl->initiatorname, tpg->se_tpg_tfo->get_fabric_name(), 421 acl->initiatorname, tpg->se_tpg_tfo->fabric_name,
422 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 422 tpg->se_tpg_tfo->tpg_get_tag(tpg));
423 423
424 return 0; 424 return 0;
@@ -512,7 +512,7 @@ int core_tpg_register(
512 spin_unlock_bh(&tpg_lock); 512 spin_unlock_bh(&tpg_lock);
513 513
514 pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, " 514 pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
515 "Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->get_fabric_name(), 515 "Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->fabric_name,
516 se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ? 516 se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ?
517 se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL, 517 se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL,
518 se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg)); 518 se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
@@ -528,7 +528,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
528 LIST_HEAD(node_list); 528 LIST_HEAD(node_list);
529 529
530 pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, " 530 pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, "
531 "Proto: %d, Portal Tag: %u\n", tfo->get_fabric_name(), 531 "Proto: %d, Portal Tag: %u\n", tfo->fabric_name,
532 tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL, 532 tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
533 se_tpg->proto_id, tfo->tpg_get_tag(se_tpg)); 533 se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));
534 534
@@ -577,7 +577,6 @@ struct se_lun *core_tpg_alloc_lun(
577 } 577 }
578 lun->unpacked_lun = unpacked_lun; 578 lun->unpacked_lun = unpacked_lun;
579 atomic_set(&lun->lun_acl_count, 0); 579 atomic_set(&lun->lun_acl_count, 0);
580 init_completion(&lun->lun_ref_comp);
581 init_completion(&lun->lun_shutdown_comp); 580 init_completion(&lun->lun_shutdown_comp);
582 INIT_LIST_HEAD(&lun->lun_deve_list); 581 INIT_LIST_HEAD(&lun->lun_deve_list);
583 INIT_LIST_HEAD(&lun->lun_dev_link); 582 INIT_LIST_HEAD(&lun->lun_dev_link);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 2cfd61d62e97..ef9e75b359d4 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -224,19 +224,28 @@ void transport_subsystem_check_init(void)
224 sub_api_initialized = 1; 224 sub_api_initialized = 1;
225} 225}
226 226
227static void target_release_sess_cmd_refcnt(struct percpu_ref *ref)
228{
229 struct se_session *sess = container_of(ref, typeof(*sess), cmd_count);
230
231 wake_up(&sess->cmd_list_wq);
232}
233
227/** 234/**
228 * transport_init_session - initialize a session object 235 * transport_init_session - initialize a session object
229 * @se_sess: Session object pointer. 236 * @se_sess: Session object pointer.
230 * 237 *
231 * The caller must have zero-initialized @se_sess before calling this function. 238 * The caller must have zero-initialized @se_sess before calling this function.
232 */ 239 */
233void transport_init_session(struct se_session *se_sess) 240int transport_init_session(struct se_session *se_sess)
234{ 241{
235 INIT_LIST_HEAD(&se_sess->sess_list); 242 INIT_LIST_HEAD(&se_sess->sess_list);
236 INIT_LIST_HEAD(&se_sess->sess_acl_list); 243 INIT_LIST_HEAD(&se_sess->sess_acl_list);
237 INIT_LIST_HEAD(&se_sess->sess_cmd_list); 244 INIT_LIST_HEAD(&se_sess->sess_cmd_list);
238 spin_lock_init(&se_sess->sess_cmd_lock); 245 spin_lock_init(&se_sess->sess_cmd_lock);
239 init_waitqueue_head(&se_sess->cmd_list_wq); 246 init_waitqueue_head(&se_sess->cmd_list_wq);
247 return percpu_ref_init(&se_sess->cmd_count,
248 target_release_sess_cmd_refcnt, 0, GFP_KERNEL);
240} 249}
241EXPORT_SYMBOL(transport_init_session); 250EXPORT_SYMBOL(transport_init_session);
242 251
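The new se_sess->cmd_count percpu_ref is what later hunks in this file kill on session teardown and wait on in target_wait_for_sess_cmds(): every tracked command takes a reference, and the release callback wakes cmd_list_wq once the count drains to zero. A compressed kernel-context sketch of that lifecycle, using hypothetical demo_* names rather than the real se_session wiring:

/* Sketch only -- mirrors the cmd_count pattern, not a drop-in. */
struct demo_session {
        struct percpu_ref       cmd_count;
        wait_queue_head_t       cmd_wq;
};

static void demo_cmd_count_release(struct percpu_ref *ref)
{
        struct demo_session *s = container_of(ref, struct demo_session,
                                              cmd_count);

        wake_up(&s->cmd_wq);    /* a waiter may sit in demo_session_drain() */
}

static int demo_session_init(struct demo_session *s)
{
        init_waitqueue_head(&s->cmd_wq);
        return percpu_ref_init(&s->cmd_count, demo_cmd_count_release,
                               0, GFP_KERNEL);
}

/* Per command: percpu_ref_get(&s->cmd_count) when it is tracked,
 * percpu_ref_put(&s->cmd_count) when its final kref is released.
 */

static void demo_session_drain(struct demo_session *s)
{
        percpu_ref_kill(&s->cmd_count);         /* drop the initial reference */
        wait_event(s->cmd_wq, percpu_ref_is_zero(&s->cmd_count));
        percpu_ref_exit(&s->cmd_count);
}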
@@ -247,6 +256,7 @@ EXPORT_SYMBOL(transport_init_session);
247struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops) 256struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
248{ 257{
249 struct se_session *se_sess; 258 struct se_session *se_sess;
259 int ret;
250 260
251 se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); 261 se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
252 if (!se_sess) { 262 if (!se_sess) {
@@ -254,7 +264,11 @@ struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
254 " se_sess_cache\n"); 264 " se_sess_cache\n");
255 return ERR_PTR(-ENOMEM); 265 return ERR_PTR(-ENOMEM);
256 } 266 }
257 transport_init_session(se_sess); 267 ret = transport_init_session(se_sess);
268 if (ret < 0) {
269 kmem_cache_free(se_sess_cache, se_sess);
270 return ERR_PTR(ret);
271 }
258 se_sess->sup_prot_ops = sup_prot_ops; 272 se_sess->sup_prot_ops = sup_prot_ops;
259 273
260 return se_sess; 274 return se_sess;
@@ -273,14 +287,11 @@ int transport_alloc_session_tags(struct se_session *se_sess,
273{ 287{
274 int rc; 288 int rc;
275 289
276 se_sess->sess_cmd_map = kcalloc(tag_size, tag_num, 290 se_sess->sess_cmd_map = kvcalloc(tag_size, tag_num,
277 GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL); 291 GFP_KERNEL | __GFP_RETRY_MAYFAIL);
278 if (!se_sess->sess_cmd_map) { 292 if (!se_sess->sess_cmd_map) {
279 se_sess->sess_cmd_map = vzalloc(array_size(tag_size, tag_num)); 293 pr_err("Unable to allocate se_sess->sess_cmd_map\n");
280 if (!se_sess->sess_cmd_map) { 294 return -ENOMEM;
281 pr_err("Unable to allocate se_sess->sess_cmd_map\n");
282 return -ENOMEM;
283 }
284 } 295 }
285 296
286 rc = sbitmap_queue_init_node(&se_sess->sess_tag_pool, tag_num, -1, 297 rc = sbitmap_queue_init_node(&se_sess->sess_tag_pool, tag_num, -1,
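kvcalloc() (from <linux/mm.h>) folds the old kcalloc()-then-vzalloc() fallback into a single call, and kvfree() on the existing free path handles either allocation. A short kernel-context sketch with a hypothetical per-tag structure:

/* Illustrative fragment; "demo_tag" is invented. */
struct demo_tag {
        u64     cookie;
};

static void *demo_alloc_tag_map(unsigned int tag_num)
{
        /* Tries kmalloc first, transparently falls back to vmalloc. */
        return kvcalloc(tag_num, sizeof(struct demo_tag),
                        GFP_KERNEL | __GFP_RETRY_MAYFAIL);
}

static void demo_free_tag_map(void *map)
{
        kvfree(map);            /* correct for either backing store */
}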
@@ -397,7 +408,7 @@ void __transport_register_session(
397 list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); 408 list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
398 409
399 pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", 410 pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
400 se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr); 411 se_tpg->se_tpg_tfo->fabric_name, se_sess->fabric_sess_ptr);
401} 412}
402EXPORT_SYMBOL(__transport_register_session); 413EXPORT_SYMBOL(__transport_register_session);
403 414
@@ -581,6 +592,7 @@ void transport_free_session(struct se_session *se_sess)
581 sbitmap_queue_free(&se_sess->sess_tag_pool); 592 sbitmap_queue_free(&se_sess->sess_tag_pool);
582 kvfree(se_sess->sess_cmd_map); 593 kvfree(se_sess->sess_cmd_map);
583 } 594 }
595 percpu_ref_exit(&se_sess->cmd_count);
584 kmem_cache_free(se_sess_cache, se_sess); 596 kmem_cache_free(se_sess_cache, se_sess);
585} 597}
586EXPORT_SYMBOL(transport_free_session); 598EXPORT_SYMBOL(transport_free_session);
@@ -602,7 +614,7 @@ void transport_deregister_session(struct se_session *se_sess)
602 spin_unlock_irqrestore(&se_tpg->session_lock, flags); 614 spin_unlock_irqrestore(&se_tpg->session_lock, flags);
603 615
604 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", 616 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
605 se_tpg->se_tpg_tfo->get_fabric_name()); 617 se_tpg->se_tpg_tfo->fabric_name);
606 /* 618 /*
607 * If last kref is dropping now for an explicit NodeACL, awake sleeping 619 * If last kref is dropping now for an explicit NodeACL, awake sleeping
608 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group 620 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
@@ -695,32 +707,6 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
695 percpu_ref_put(&lun->lun_ref); 707 percpu_ref_put(&lun->lun_ref);
696} 708}
697 709
698int transport_cmd_finish_abort(struct se_cmd *cmd)
699{
700 bool send_tas = cmd->transport_state & CMD_T_TAS;
701 bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
702 int ret = 0;
703
704 if (send_tas)
705 transport_send_task_abort(cmd);
706
707 if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
708 transport_lun_remove_cmd(cmd);
709 /*
710 * Allow the fabric driver to unmap any resources before
711 * releasing the descriptor via TFO->release_cmd()
712 */
713 if (!send_tas)
714 cmd->se_tfo->aborted_task(cmd);
715
716 if (transport_cmd_check_stop_to_fabric(cmd))
717 return 1;
718 if (!send_tas && ack_kref)
719 ret = target_put_sess_cmd(cmd);
720
721 return ret;
722}
723
724static void target_complete_failure_work(struct work_struct *work) 710static void target_complete_failure_work(struct work_struct *work)
725{ 711{
726 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 712 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
@@ -770,12 +756,88 @@ void transport_copy_sense_to_cmd(struct se_cmd *cmd, unsigned char *sense)
770} 756}
771EXPORT_SYMBOL(transport_copy_sense_to_cmd); 757EXPORT_SYMBOL(transport_copy_sense_to_cmd);
772 758
759static void target_handle_abort(struct se_cmd *cmd)
760{
761 bool tas = cmd->transport_state & CMD_T_TAS;
762 bool ack_kref = cmd->se_cmd_flags & SCF_ACK_KREF;
763 int ret;
764
765 pr_debug("tag %#llx: send_abort_response = %d\n", cmd->tag, tas);
766
767 if (tas) {
768 if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
769 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
770 pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
771 cmd->t_task_cdb[0], cmd->tag);
772 trace_target_cmd_complete(cmd);
773 ret = cmd->se_tfo->queue_status(cmd);
774 if (ret) {
775 transport_handle_queue_full(cmd, cmd->se_dev,
776 ret, false);
777 return;
778 }
779 } else {
780 cmd->se_tmr_req->response = TMR_FUNCTION_REJECTED;
781 cmd->se_tfo->queue_tm_rsp(cmd);
782 }
783 } else {
784 /*
785 * Allow the fabric driver to unmap any resources before
786 * releasing the descriptor via TFO->release_cmd().
787 */
788 cmd->se_tfo->aborted_task(cmd);
789 if (ack_kref)
790 WARN_ON_ONCE(target_put_sess_cmd(cmd) != 0);
791 /*
792 * To do: establish a unit attention condition on the I_T
793 * nexus associated with cmd. See also the paragraph "Aborting
794 * commands" in SAM.
795 */
796 }
797
798 WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0);
799
800 transport_lun_remove_cmd(cmd);
801
802 transport_cmd_check_stop_to_fabric(cmd);
803}
804
805static void target_abort_work(struct work_struct *work)
806{
807 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
808
809 target_handle_abort(cmd);
810}
811
812static bool target_cmd_interrupted(struct se_cmd *cmd)
813{
814 int post_ret;
815
816 if (cmd->transport_state & CMD_T_ABORTED) {
817 if (cmd->transport_complete_callback)
818 cmd->transport_complete_callback(cmd, false, &post_ret);
819 INIT_WORK(&cmd->work, target_abort_work);
820 queue_work(target_completion_wq, &cmd->work);
821 return true;
822 } else if (cmd->transport_state & CMD_T_STOP) {
823 if (cmd->transport_complete_callback)
824 cmd->transport_complete_callback(cmd, false, &post_ret);
825 complete_all(&cmd->t_transport_stop_comp);
826 return true;
827 }
828
829 return false;
830}
831
832/* May be called from interrupt context so must not sleep. */
773void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) 833void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
774{ 834{
775 struct se_device *dev = cmd->se_dev;
776 int success; 835 int success;
777 unsigned long flags; 836 unsigned long flags;
778 837
838 if (target_cmd_interrupted(cmd))
839 return;
840
779 cmd->scsi_status = scsi_status; 841 cmd->scsi_status = scsi_status;
780 842
781 spin_lock_irqsave(&cmd->t_state_lock, flags); 843 spin_lock_irqsave(&cmd->t_state_lock, flags);
@@ -791,34 +853,12 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
791 break; 853 break;
792 } 854 }
793 855
794 /*
795 * Check for case where an explicit ABORT_TASK has been received
796 * and transport_wait_for_tasks() will be waiting for completion..
797 */
798 if (cmd->transport_state & CMD_T_ABORTED ||
799 cmd->transport_state & CMD_T_STOP) {
800 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
801 /*
802 * If COMPARE_AND_WRITE was stopped by __transport_wait_for_tasks(),
803 * release se_device->caw_sem obtained by sbc_compare_and_write()
804 * since target_complete_ok_work() or target_complete_failure_work()
805 * won't be called to invoke the normal CAW completion callbacks.
806 */
807 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
808 up(&dev->caw_sem);
809 }
810 complete_all(&cmd->t_transport_stop_comp);
811 return;
812 } else if (!success) {
813 INIT_WORK(&cmd->work, target_complete_failure_work);
814 } else {
815 INIT_WORK(&cmd->work, target_complete_ok_work);
816 }
817
818 cmd->t_state = TRANSPORT_COMPLETE; 856 cmd->t_state = TRANSPORT_COMPLETE;
819 cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE); 857 cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
820 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 858 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
821 859
860 INIT_WORK(&cmd->work, success ? target_complete_ok_work :
861 target_complete_failure_work);
822 if (cmd->se_cmd_flags & SCF_USE_CPUID) 862 if (cmd->se_cmd_flags & SCF_USE_CPUID)
823 queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work); 863 queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
824 else 864 else
@@ -880,7 +920,7 @@ void target_qf_do_work(struct work_struct *work)
880 atomic_dec_mb(&dev->dev_qf_count); 920 atomic_dec_mb(&dev->dev_qf_count);
881 921
882 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" 922 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
883 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, 923 " context: %s\n", cmd->se_tfo->fabric_name, cmd,
884 (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" : 924 (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
885 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING" 925 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
886 : "UNKNOWN"); 926 : "UNKNOWN");
@@ -1244,7 +1284,7 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
1244 } else if (size != cmd->data_length) { 1284 } else if (size != cmd->data_length) {
1245 pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:" 1285 pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:"
1246 " %u does not match SCSI CDB Length: %u for SAM Opcode:" 1286 " %u does not match SCSI CDB Length: %u for SAM Opcode:"
1247 " 0x%02x\n", cmd->se_tfo->get_fabric_name(), 1287 " 0x%02x\n", cmd->se_tfo->fabric_name,
1248 cmd->data_length, size, cmd->t_task_cdb[0]); 1288 cmd->data_length, size, cmd->t_task_cdb[0]);
1249 1289
1250 if (cmd->data_direction == DMA_TO_DEVICE) { 1290 if (cmd->data_direction == DMA_TO_DEVICE) {
@@ -1316,7 +1356,8 @@ void transport_init_se_cmd(
1316 INIT_LIST_HEAD(&cmd->se_cmd_list); 1356 INIT_LIST_HEAD(&cmd->se_cmd_list);
1317 INIT_LIST_HEAD(&cmd->state_list); 1357 INIT_LIST_HEAD(&cmd->state_list);
1318 init_completion(&cmd->t_transport_stop_comp); 1358 init_completion(&cmd->t_transport_stop_comp);
1319 cmd->compl = NULL; 1359 cmd->free_compl = NULL;
1360 cmd->abrt_compl = NULL;
1320 spin_lock_init(&cmd->t_state_lock); 1361 spin_lock_init(&cmd->t_state_lock);
1321 INIT_WORK(&cmd->work, NULL); 1362 INIT_WORK(&cmd->work, NULL);
1322 kref_init(&cmd->cmd_kref); 1363 kref_init(&cmd->cmd_kref);
@@ -1396,7 +1437,7 @@ target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
1396 ret = dev->transport->parse_cdb(cmd); 1437 ret = dev->transport->parse_cdb(cmd);
1397 if (ret == TCM_UNSUPPORTED_SCSI_OPCODE) 1438 if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
1398 pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n", 1439 pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
1399 cmd->se_tfo->get_fabric_name(), 1440 cmd->se_tfo->fabric_name,
1400 cmd->se_sess->se_node_acl->initiatorname, 1441 cmd->se_sess->se_node_acl->initiatorname,
1401 cmd->t_task_cdb[0]); 1442 cmd->t_task_cdb[0]);
1402 if (ret) 1443 if (ret)
@@ -1792,8 +1833,11 @@ void transport_generic_request_failure(struct se_cmd *cmd,
1792 if (cmd->transport_complete_callback) 1833 if (cmd->transport_complete_callback)
1793 cmd->transport_complete_callback(cmd, false, &post_ret); 1834 cmd->transport_complete_callback(cmd, false, &post_ret);
1794 1835
1795 if (transport_check_aborted_status(cmd, 1)) 1836 if (cmd->transport_state & CMD_T_ABORTED) {
1837 INIT_WORK(&cmd->work, target_abort_work);
1838 queue_work(target_completion_wq, &cmd->work);
1796 return; 1839 return;
1840 }
1797 1841
1798 switch (sense_reason) { 1842 switch (sense_reason) {
1799 case TCM_NON_EXISTENT_LUN: 1843 case TCM_NON_EXISTENT_LUN:
@@ -1999,8 +2043,6 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
1999 return true; 2043 return true;
2000} 2044}
2001 2045
2002static int __transport_check_aborted_status(struct se_cmd *, int);
2003
2004void target_execute_cmd(struct se_cmd *cmd) 2046void target_execute_cmd(struct se_cmd *cmd)
2005{ 2047{
2006 /* 2048 /*
@@ -2009,20 +2051,10 @@ void target_execute_cmd(struct se_cmd *cmd)
2009 * 2051 *
2010 * If the received CDB has already been aborted stop processing it here. 2052 * If the received CDB has already been aborted stop processing it here.
2011 */ 2053 */
2012 spin_lock_irq(&cmd->t_state_lock); 2054 if (target_cmd_interrupted(cmd))
2013 if (__transport_check_aborted_status(cmd, 1)) {
2014 spin_unlock_irq(&cmd->t_state_lock);
2015 return;
2016 }
2017 if (cmd->transport_state & CMD_T_STOP) {
2018 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
2019 __func__, __LINE__, cmd->tag);
2020
2021 spin_unlock_irq(&cmd->t_state_lock);
2022 complete_all(&cmd->t_transport_stop_comp);
2023 return; 2055 return;
2024 }
2025 2056
2057 spin_lock_irq(&cmd->t_state_lock);
2026 cmd->t_state = TRANSPORT_PROCESSING; 2058 cmd->t_state = TRANSPORT_PROCESSING;
2027 cmd->transport_state &= ~CMD_T_PRE_EXECUTE; 2059 cmd->transport_state &= ~CMD_T_PRE_EXECUTE;
2028 cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT; 2060 cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
@@ -2571,7 +2603,8 @@ transport_generic_new_cmd(struct se_cmd *cmd)
2571 * Determine if frontend context caller is requesting the stopping of 2603 * Determine if frontend context caller is requesting the stopping of
2572 * this command for frontend exceptions. 2604 * this command for frontend exceptions.
2573 */ 2605 */
2574 if (cmd->transport_state & CMD_T_STOP) { 2606 if (cmd->transport_state & CMD_T_STOP &&
2607 !cmd->se_tfo->write_pending_must_be_called) {
2575 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", 2608 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
2576 __func__, __LINE__, cmd->tag); 2609 __func__, __LINE__, cmd->tag);
2577 2610
@@ -2635,13 +2668,29 @@ static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
2635} 2668}
2636 2669
2637/* 2670/*
2671 * Call target_put_sess_cmd() and wait until target_release_cmd_kref(@cmd) has
2672 * finished.
2673 */
2674void target_put_cmd_and_wait(struct se_cmd *cmd)
2675{
2676 DECLARE_COMPLETION_ONSTACK(compl);
2677
2678 WARN_ON_ONCE(cmd->abrt_compl);
2679 cmd->abrt_compl = &compl;
2680 target_put_sess_cmd(cmd);
2681 wait_for_completion(&compl);
2682}
2683
2684/*
2638 * This function is called by frontend drivers after processing of a command 2685 * This function is called by frontend drivers after processing of a command
2639 * has finished. 2686 * has finished.
2640 * 2687 *
2641 * The protocol for ensuring that either the regular flow or the TMF 2688 * The protocol for ensuring that either the regular frontend command
2642 * code drops one reference is as follows: 2689 * processing flow or target_handle_abort() code drops one reference is as
2690 * follows:
2643 * - Calling .queue_data_in(), .queue_status() or queue_tm_rsp() will cause 2691 * - Calling .queue_data_in(), .queue_status() or queue_tm_rsp() will cause
2644 * the frontend driver to drop one reference, synchronously or asynchronously. 2692 * the frontend driver to call this function synchronously or asynchronously.
2693 * That will cause one reference to be dropped.
2645 * - During regular command processing the target core sets CMD_T_COMPLETE 2694 * - During regular command processing the target core sets CMD_T_COMPLETE
2646 * before invoking one of the .queue_*() functions. 2695 * before invoking one of the .queue_*() functions.
2647 * - The code that aborts commands skips commands and TMFs for which 2696 * - The code that aborts commands skips commands and TMFs for which
@@ -2653,7 +2702,7 @@ static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
2653 * - For aborted commands for which CMD_T_TAS has been set .queue_status() will 2702 * - For aborted commands for which CMD_T_TAS has been set .queue_status() will
2654 * be called and will drop a reference. 2703 * be called and will drop a reference.
2655 * - For aborted commands for which CMD_T_TAS has not been set .aborted_task() 2704 * - For aborted commands for which CMD_T_TAS has not been set .aborted_task()
2656 * will be called. transport_cmd_finish_abort() will drop the final reference. 2705 * will be called. target_handle_abort() will drop the final reference.
2657 */ 2706 */
2658int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) 2707int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
2659{ 2708{
@@ -2677,9 +2726,8 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
2677 transport_lun_remove_cmd(cmd); 2726 transport_lun_remove_cmd(cmd);
2678 } 2727 }
2679 if (aborted) 2728 if (aborted)
2680 cmd->compl = &compl; 2729 cmd->free_compl = &compl;
2681 if (!aborted || tas) 2730 ret = target_put_sess_cmd(cmd);
2682 ret = target_put_sess_cmd(cmd);
2683 if (aborted) { 2731 if (aborted) {
2684 pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag); 2732 pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
2685 wait_for_completion(&compl); 2733 wait_for_completion(&compl);
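target_put_cmd_and_wait() is the helper the reworked TMR paths call instead of cancel_work_sync() plus transport_wait_for_tasks(): drop one reference, then sleep until the final kref release fires an on-stack completion. A generic kernel-context sketch of that put-and-wait pattern with hypothetical types (a real release callback would also free the object):

struct demo_obj {
        struct kref             kref;
        struct completion       *done;  /* set by the waiter, may be NULL */
};

static void demo_release(struct kref *kref)
{
        struct demo_obj *obj = container_of(kref, struct demo_obj, kref);

        if (obj->done)
                complete(obj->done);    /* wake the put-and-wait caller */
}

static void demo_put_and_wait(struct demo_obj *obj)
{
        DECLARE_COMPLETION_ONSTACK(done);

        obj->done = &done;
        kref_put(&obj->kref, demo_release);     /* may or may not be the last put */
        wait_for_completion(&done);             /* returns once demo_release() ran */
}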
@@ -2719,6 +2767,7 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
2719 } 2767 }
2720 se_cmd->transport_state |= CMD_T_PRE_EXECUTE; 2768 se_cmd->transport_state |= CMD_T_PRE_EXECUTE;
2721 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); 2769 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
2770 percpu_ref_get(&se_sess->cmd_count);
2722out: 2771out:
2723 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2772 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2724 2773
@@ -2743,21 +2792,24 @@ static void target_release_cmd_kref(struct kref *kref)
2743{ 2792{
2744 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); 2793 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
2745 struct se_session *se_sess = se_cmd->se_sess; 2794 struct se_session *se_sess = se_cmd->se_sess;
2746 struct completion *compl = se_cmd->compl; 2795 struct completion *free_compl = se_cmd->free_compl;
2796 struct completion *abrt_compl = se_cmd->abrt_compl;
2747 unsigned long flags; 2797 unsigned long flags;
2748 2798
2749 if (se_sess) { 2799 if (se_sess) {
2750 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2800 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2751 list_del_init(&se_cmd->se_cmd_list); 2801 list_del_init(&se_cmd->se_cmd_list);
2752 if (se_sess->sess_tearing_down && list_empty(&se_sess->sess_cmd_list))
2753 wake_up(&se_sess->cmd_list_wq);
2754 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2802 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2755 } 2803 }
2756 2804
2757 target_free_cmd_mem(se_cmd); 2805 target_free_cmd_mem(se_cmd);
2758 se_cmd->se_tfo->release_cmd(se_cmd); 2806 se_cmd->se_tfo->release_cmd(se_cmd);
2759 if (compl) 2807 if (free_compl)
2760 complete(compl); 2808 complete(free_compl);
2809 if (abrt_compl)
2810 complete(abrt_compl);
2811
2812 percpu_ref_put(&se_sess->cmd_count);
2761} 2813}
2762 2814
2763/** 2815/**
@@ -2886,6 +2938,8 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
2886 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2938 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2887 se_sess->sess_tearing_down = 1; 2939 se_sess->sess_tearing_down = 1;
2888 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2940 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2941
2942 percpu_ref_kill(&se_sess->cmd_count);
2889} 2943}
2890EXPORT_SYMBOL(target_sess_cmd_list_set_waiting); 2944EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
2891 2945
@@ -2900,52 +2954,24 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
2900 2954
2901 WARN_ON_ONCE(!se_sess->sess_tearing_down); 2955 WARN_ON_ONCE(!se_sess->sess_tearing_down);
2902 2956
2903 spin_lock_irq(&se_sess->sess_cmd_lock);
2904 do { 2957 do {
2905 ret = wait_event_lock_irq_timeout( 2958 ret = wait_event_timeout(se_sess->cmd_list_wq,
2906 se_sess->cmd_list_wq, 2959 percpu_ref_is_zero(&se_sess->cmd_count),
2907 list_empty(&se_sess->sess_cmd_list), 2960 180 * HZ);
2908 se_sess->sess_cmd_lock, 180 * HZ);
2909 list_for_each_entry(cmd, &se_sess->sess_cmd_list, se_cmd_list) 2961 list_for_each_entry(cmd, &se_sess->sess_cmd_list, se_cmd_list)
2910 target_show_cmd("session shutdown: still waiting for ", 2962 target_show_cmd("session shutdown: still waiting for ",
2911 cmd); 2963 cmd);
2912 } while (ret <= 0); 2964 } while (ret <= 0);
2913 spin_unlock_irq(&se_sess->sess_cmd_lock);
2914} 2965}
2915EXPORT_SYMBOL(target_wait_for_sess_cmds); 2966EXPORT_SYMBOL(target_wait_for_sess_cmds);
2916 2967
2917static void target_lun_confirm(struct percpu_ref *ref) 2968/*
2918{ 2969 * Prevent new percpu_ref_tryget_live() calls from succeeding and wait until
2919 struct se_lun *lun = container_of(ref, struct se_lun, lun_ref); 2970 * all references to the LUN have been released. Called during LUN shutdown.
2920 2971 */
2921 complete(&lun->lun_ref_comp);
2922}
2923
2924void transport_clear_lun_ref(struct se_lun *lun) 2972void transport_clear_lun_ref(struct se_lun *lun)
2925{ 2973{
2926 /* 2974 percpu_ref_kill(&lun->lun_ref);
2927 * Mark the percpu-ref as DEAD, switch to atomic_t mode, drop
2928 * the initial reference and schedule confirm kill to be
2929 * executed after one full RCU grace period has completed.
2930 */
2931 percpu_ref_kill_and_confirm(&lun->lun_ref, target_lun_confirm);
2932 /*
2933 * The first completion waits for percpu_ref_switch_to_atomic_rcu()
2934 * to call target_lun_confirm after lun->lun_ref has been marked
2935 * as __PERCPU_REF_DEAD on all CPUs, and switches to atomic_t
2936 * mode so that percpu_ref_tryget_live() lookup of lun->lun_ref
2937 * fails for all new incoming I/O.
2938 */
2939 wait_for_completion(&lun->lun_ref_comp);
2940 /*
2941 * The second completion waits for percpu_ref_put_many() to
2942 * invoke ->release() after lun->lun_ref has switched to
2943 * atomic_t mode, and lun->lun_ref.count has reached zero.
2944 *
2945 * At this point all target-core lun->lun_ref references have
2946 * been dropped via transport_lun_remove_cmd(), and it's safe
2947 * to proceed with the remaining LUN shutdown.
2948 */
2949 wait_for_completion(&lun->lun_shutdown_comp); 2975 wait_for_completion(&lun->lun_shutdown_comp);
2950} 2976}
2951 2977
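
The hunk above applies the same simplification to LUN shutdown: rather than percpu_ref_kill_and_confirm() plus two back-to-back completions, transport_clear_lun_ref() now just kills lun_ref and waits for lun_shutdown_comp, which the ref's release callback presumably completes once the count drops to zero. A small sketch of that shape, with a hypothetical my_lun in place of se_lun and the release callback wired up at init time:

#include <linux/percpu-refcount.h>
#include <linux/completion.h>
#include <linux/gfp.h>

struct my_lun {
        struct percpu_ref       ref;
        struct completion       shutdown_comp;
};

/* Called by the percpu_ref core once the last reference is dropped. */
static void my_lun_ref_release(struct percpu_ref *ref)
{
        struct my_lun *lun = container_of(ref, struct my_lun, ref);

        complete(&lun->shutdown_comp);
}

static int my_lun_init(struct my_lun *lun)
{
        init_completion(&lun->shutdown_comp);
        return percpu_ref_init(&lun->ref, my_lun_ref_release, 0, GFP_KERNEL);
}

/* Fast path: percpu_ref_tryget_live() fails once the ref has been killed. */
static bool my_lun_get(struct my_lun *lun)
{
        return percpu_ref_tryget_live(&lun->ref);
}

static void my_lun_put(struct my_lun *lun)
{
        percpu_ref_put(&lun->ref);
}

/* Shutdown: block new lookups, then wait for outstanding I/O to drain. */
static void my_lun_shutdown(struct my_lun *lun)
{
        percpu_ref_kill(&lun->ref);
        wait_for_completion(&lun->shutdown_comp);
        percpu_ref_exit(&lun->ref);
}
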
@@ -3229,6 +3255,8 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
3229{ 3255{
3230 unsigned long flags; 3256 unsigned long flags;
3231 3257
3258 WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB);
3259
3232 spin_lock_irqsave(&cmd->t_state_lock, flags); 3260 spin_lock_irqsave(&cmd->t_state_lock, flags);
3233 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { 3261 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
3234 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3262 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@@ -3245,114 +3273,15 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
3245} 3273}
3246EXPORT_SYMBOL(transport_send_check_condition_and_sense); 3274EXPORT_SYMBOL(transport_send_check_condition_and_sense);
3247 3275
3248static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
3249 __releases(&cmd->t_state_lock)
3250 __acquires(&cmd->t_state_lock)
3251{
3252 int ret;
3253
3254 assert_spin_locked(&cmd->t_state_lock);
3255 WARN_ON_ONCE(!irqs_disabled());
3256
3257 if (!(cmd->transport_state & CMD_T_ABORTED))
3258 return 0;
3259 /*
3260 * If cmd has been aborted but either no status is to be sent or it has
3261 * already been sent, just return
3262 */
3263 if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) {
3264 if (send_status)
3265 cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
3266 return 1;
3267 }
3268
3269 pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:"
3270 " 0x%02x ITT: 0x%08llx\n", cmd->t_task_cdb[0], cmd->tag);
3271
3272 cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
3273 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
3274 trace_target_cmd_complete(cmd);
3275
3276 spin_unlock_irq(&cmd->t_state_lock);
3277 ret = cmd->se_tfo->queue_status(cmd);
3278 if (ret)
3279 transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
3280 spin_lock_irq(&cmd->t_state_lock);
3281
3282 return 1;
3283}
3284
3285int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
3286{
3287 int ret;
3288
3289 spin_lock_irq(&cmd->t_state_lock);
3290 ret = __transport_check_aborted_status(cmd, send_status);
3291 spin_unlock_irq(&cmd->t_state_lock);
3292
3293 return ret;
3294}
3295EXPORT_SYMBOL(transport_check_aborted_status);
3296
3297void transport_send_task_abort(struct se_cmd *cmd)
3298{
3299 unsigned long flags;
3300 int ret;
3301
3302 spin_lock_irqsave(&cmd->t_state_lock, flags);
3303 if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
3304 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3305 return;
3306 }
3307 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3308
3309 /*
3310 * If there are still expected incoming fabric WRITEs, we wait
3311 * until they have completed before sending a TASK_ABORTED
3312 * response. This response with TASK_ABORTED status will be
3313 * queued back to fabric module by transport_check_aborted_status().
3314 */
3315 if (cmd->data_direction == DMA_TO_DEVICE) {
3316 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
3317 spin_lock_irqsave(&cmd->t_state_lock, flags);
3318 if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) {
3319 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3320 goto send_abort;
3321 }
3322 cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
3323 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3324 return;
3325 }
3326 }
3327send_abort:
3328 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
3329
3330 transport_lun_remove_cmd(cmd);
3331
3332 pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
3333 cmd->t_task_cdb[0], cmd->tag);
3334
3335 trace_target_cmd_complete(cmd);
3336 ret = cmd->se_tfo->queue_status(cmd);
3337 if (ret)
3338 transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
3339}
3340
3341static void target_tmr_work(struct work_struct *work) 3276static void target_tmr_work(struct work_struct *work)
3342{ 3277{
3343 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 3278 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
3344 struct se_device *dev = cmd->se_dev; 3279 struct se_device *dev = cmd->se_dev;
3345 struct se_tmr_req *tmr = cmd->se_tmr_req; 3280 struct se_tmr_req *tmr = cmd->se_tmr_req;
3346 unsigned long flags;
3347 int ret; 3281 int ret;
3348 3282
3349 spin_lock_irqsave(&cmd->t_state_lock, flags); 3283 if (cmd->transport_state & CMD_T_ABORTED)
3350 if (cmd->transport_state & CMD_T_ABORTED) { 3284 goto aborted;
3351 tmr->response = TMR_FUNCTION_REJECTED;
3352 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3353 goto check_stop;
3354 }
3355 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3356 3285
3357 switch (tmr->function) { 3286 switch (tmr->function) {
3358 case TMR_ABORT_TASK: 3287 case TMR_ABORT_TASK:
@@ -3386,18 +3315,16 @@ static void target_tmr_work(struct work_struct *work)
3386 break; 3315 break;
3387 } 3316 }
3388 3317
3389 spin_lock_irqsave(&cmd->t_state_lock, flags); 3318 if (cmd->transport_state & CMD_T_ABORTED)
3390 if (cmd->transport_state & CMD_T_ABORTED) { 3319 goto aborted;
3391 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3392 goto check_stop;
3393 }
3394 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3395 3320
3396 cmd->se_tfo->queue_tm_rsp(cmd); 3321 cmd->se_tfo->queue_tm_rsp(cmd);
3397 3322
3398check_stop:
3399 transport_lun_remove_cmd(cmd);
3400 transport_cmd_check_stop_to_fabric(cmd); 3323 transport_cmd_check_stop_to_fabric(cmd);
3324 return;
3325
3326aborted:
3327 target_handle_abort(cmd);
3401} 3328}
3402 3329
3403int transport_generic_handle_tmr( 3330int transport_generic_handle_tmr(
@@ -3416,16 +3343,15 @@ int transport_generic_handle_tmr(
3416 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3343 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3417 3344
3418 if (aborted) { 3345 if (aborted) {
3419 pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d" 3346 pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d ref_tag: %llu tag: %llu\n",
3420 "ref_tag: %llu tag: %llu\n", cmd->se_tmr_req->function, 3347 cmd->se_tmr_req->function,
3421 cmd->se_tmr_req->ref_task_tag, cmd->tag); 3348 cmd->se_tmr_req->ref_task_tag, cmd->tag);
3422 transport_lun_remove_cmd(cmd); 3349 target_handle_abort(cmd);
3423 transport_cmd_check_stop_to_fabric(cmd);
3424 return 0; 3350 return 0;
3425 } 3351 }
3426 3352
3427 INIT_WORK(&cmd->work, target_tmr_work); 3353 INIT_WORK(&cmd->work, target_tmr_work);
3428 queue_work(cmd->se_dev->tmr_wq, &cmd->work); 3354 schedule_work(&cmd->work);
3429 return 0; 3355 return 0;
3430} 3356}
3431EXPORT_SYMBOL(transport_generic_handle_tmr); 3357EXPORT_SYMBOL(transport_generic_handle_tmr);
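
In the hunks above, transport_generic_handle_tmr() stops using the per-device tmr_wq and queues target_tmr_work() on the system workqueue with schedule_work(), and both paths divert to target_handle_abort() whenever CMD_T_ABORTED is observed. A bare-bones sketch of that "defer to the system workqueue, re-check the abort flag before and after doing the work" pattern; the my_tmr type and the atomic_t flag are illustrative stand-ins, since the real code tracks the abort in cmd->transport_state:

#include <linux/workqueue.h>
#include <linux/atomic.h>
#include <linux/printk.h>

struct my_tmr {
        struct work_struct      work;
        atomic_t                aborted;        /* set by the abort path */
};

static void my_tmr_abort(struct my_tmr *tmr)
{
        pr_debug("TMR aborted\n");
        /* drop references, notify the initiator, etc. */
}

static void my_tmr_work(struct work_struct *work)
{
        struct my_tmr *tmr = container_of(work, struct my_tmr, work);

        if (atomic_read(&tmr->aborted)) {       /* aborted while queued */
                my_tmr_abort(tmr);
                return;
        }

        /* ... perform the task management function ... */

        if (atomic_read(&tmr->aborted)) {       /* aborted while running */
                my_tmr_abort(tmr);
                return;
        }

        /* queue the TMR response back to the fabric here */
}

static int my_handle_tmr(struct my_tmr *tmr)
{
        if (atomic_read(&tmr->aborted)) {       /* aborted before it was queued */
                my_tmr_abort(tmr);
                return 0;
        }

        INIT_WORK(&tmr->work, my_tmr_work);
        schedule_work(&tmr->work);      /* system workqueue, no dedicated tmr_wq */
        return 0;
}
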
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index c8ac242ce888..ced1c10364eb 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -266,7 +266,7 @@ bool core_scsi3_ua_for_check_condition(struct se_cmd *cmd, u8 *key, u8 *asc,
266 pr_debug("[%s]: %s UNIT ATTENTION condition with" 266 pr_debug("[%s]: %s UNIT ATTENTION condition with"
267 " INTLCK_CTRL: %d, mapped LUN: %llu, got CDB: 0x%02x" 267 " INTLCK_CTRL: %d, mapped LUN: %llu, got CDB: 0x%02x"
268 " reported ASC: 0x%02x, ASCQ: 0x%02x\n", 268 " reported ASC: 0x%02x, ASCQ: 0x%02x\n",
269 nacl->se_tpg->se_tpg_tfo->get_fabric_name(), 269 nacl->se_tpg->se_tpg_tfo->fabric_name,
270 (dev->dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" : 270 (dev->dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
271 "Releasing", dev->dev_attrib.emulate_ua_intlck_ctrl, 271 "Releasing", dev->dev_attrib.emulate_ua_intlck_ctrl,
272 cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq); 272 cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq);
@@ -327,7 +327,7 @@ int core_scsi3_ua_clear_for_request_sense(
327 327
328 pr_debug("[%s]: Released UNIT ATTENTION condition, mapped" 328 pr_debug("[%s]: Released UNIT ATTENTION condition, mapped"
329 " LUN: %llu, got REQUEST_SENSE reported ASC: 0x%02x," 329 " LUN: %llu, got REQUEST_SENSE reported ASC: 0x%02x,"
330 " ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->get_fabric_name(), 330 " ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->fabric_name,
331 cmd->orig_fe_lun, *asc, *ascq); 331 cmd->orig_fe_lun, *asc, *ascq);
332 332
333 return (head) ? -EPERM : 0; 333 return (head) ? -EPERM : 0;
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 9cd404acdb82..1e6d24943565 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -958,7 +958,7 @@ static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd)
958 * 0 success 958 * 0 success
959 * 1 internally queued to wait for ring memory to free. 959 * 1 internally queued to wait for ring memory to free.
960 */ 960 */
961static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err) 961static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
962{ 962{
963 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; 963 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
964 struct se_cmd *se_cmd = tcmu_cmd->se_cmd; 964 struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
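
The tcmu hunk above flips queue_cmd_ring()'s convention: it now returns the int queueing status described in the comment above it (0 on success, 1 when queued internally to wait for ring memory, presumably a negative value on error) and hands any sense code back through the sense_reason_t out-parameter. A hedged sketch of a caller built around that convention; the my_* names and the stand-in sense type are hypothetical:

#include <linux/errno.h>

/* Hypothetical stand-in for sense_reason_t. */
typedef int my_sense_reason_t;
#define MY_SENSE_NO_SENSE               0
#define MY_SENSE_LUN_COMM_FAILURE       1

/*
 * Return convention mirrored from the hunk above:
 *   < 0  hard failure, *scsi_err says what to report (assumed)
 *     0  command placed on the ring
 *     1  queued internally until ring space frees up
 */
static int my_queue_cmd(void *cmd, my_sense_reason_t *scsi_err)
{
        *scsi_err = MY_SENSE_NO_SENSE;

        /* ... try to reserve ring space, fall back to an internal queue ... */

        return 0;
}

static my_sense_reason_t my_submit(void *cmd)
{
        my_sense_reason_t scsi_err;
        int ret;

        ret = my_queue_cmd(cmd, &scsi_err);
        if (ret < 0)            /* hard failure: propagate the sense reason */
                return scsi_err;

        /* ret == 0 or 1: command accepted, nothing to report yet */
        return MY_SENSE_NO_SENSE;
}
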
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 70adcfdca8d1..c2e1fc927fdf 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -399,11 +399,6 @@ struct se_portal_group xcopy_pt_tpg;
399static struct se_session xcopy_pt_sess; 399static struct se_session xcopy_pt_sess;
400static struct se_node_acl xcopy_pt_nacl; 400static struct se_node_acl xcopy_pt_nacl;
401 401
402static char *xcopy_pt_get_fabric_name(void)
403{
404 return "xcopy-pt";
405}
406
407static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd) 402static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
408{ 403{
409 return 0; 404 return 0;
@@ -463,7 +458,7 @@ static int xcopy_pt_queue_status(struct se_cmd *se_cmd)
463} 458}
464 459
465static const struct target_core_fabric_ops xcopy_pt_tfo = { 460static const struct target_core_fabric_ops xcopy_pt_tfo = {
466 .get_fabric_name = xcopy_pt_get_fabric_name, 461 .fabric_name = "xcopy-pt",
467 .get_cmd_state = xcopy_pt_get_cmd_state, 462 .get_cmd_state = xcopy_pt_get_cmd_state,
468 .release_cmd = xcopy_pt_release_cmd, 463 .release_cmd = xcopy_pt_release_cmd,
469 .check_stop_free = xcopy_pt_check_stop_free, 464 .check_stop_free = xcopy_pt_check_stop_free,
@@ -479,6 +474,8 @@ static const struct target_core_fabric_ops xcopy_pt_tfo = {
479 474
480int target_xcopy_setup_pt(void) 475int target_xcopy_setup_pt(void)
481{ 476{
477 int ret;
478
482 xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0); 479 xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0);
483 if (!xcopy_wq) { 480 if (!xcopy_wq) {
484 pr_err("Unable to allocate xcopy_wq\n"); 481 pr_err("Unable to allocate xcopy_wq\n");
@@ -496,7 +493,9 @@ int target_xcopy_setup_pt(void)
496 INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list); 493 INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list);
497 INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list); 494 INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list);
498 memset(&xcopy_pt_sess, 0, sizeof(struct se_session)); 495 memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
499 transport_init_session(&xcopy_pt_sess); 496 ret = transport_init_session(&xcopy_pt_sess);
497 if (ret < 0)
498 return ret;
500 499
501 xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg; 500 xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
502 xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess; 501 xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;
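
The xcopy hunks, like the target_core_ua.c and tcm_fc ones, are part of a tree-wide conversion in this series: the get_fabric_name() callback disappears from target_core_fabric_ops in favor of a const fabric_name string, so callers read tfo->fabric_name instead of calling a trivial function (the xcopy hunk also starts checking transport_init_session()'s return value now that it can fail). A generic sketch of that kind of ops-table conversion, using a hypothetical my_fabric_ops rather than the real target API:

#include <linux/printk.h>

/* Before: every fabric had to supply a trivial callback. */
struct my_fabric_ops_old {
        char *(*get_fabric_name)(void);
};

/* After: the name is plain data in the ops table. */
struct my_fabric_ops {
        const char *fabric_name;
};

static const struct my_fabric_ops my_fc_ops = {
        .fabric_name = "fc",
};

static void my_log_fabric(const struct my_fabric_ops *tfo)
{
        /* callers switch from tfo->get_fabric_name() to tfo->fabric_name */
        pr_debug("fabric: %s\n", tfo->fabric_name);
}
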
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index e55c4d537592..1ce49518d440 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -392,11 +392,6 @@ static inline struct ft_tpg *ft_tpg(struct se_portal_group *se_tpg)
392 return container_of(se_tpg, struct ft_tpg, se_tpg); 392 return container_of(se_tpg, struct ft_tpg, se_tpg);
393} 393}
394 394
395static char *ft_get_fabric_name(void)
396{
397 return "fc";
398}
399
400static char *ft_get_fabric_wwn(struct se_portal_group *se_tpg) 395static char *ft_get_fabric_wwn(struct se_portal_group *se_tpg)
401{ 396{
402 return ft_tpg(se_tpg)->lport_wwn->name; 397 return ft_tpg(se_tpg)->lport_wwn->name;
@@ -427,9 +422,8 @@ static u32 ft_tpg_get_inst_index(struct se_portal_group *se_tpg)
427 422
428static const struct target_core_fabric_ops ft_fabric_ops = { 423static const struct target_core_fabric_ops ft_fabric_ops = {
429 .module = THIS_MODULE, 424 .module = THIS_MODULE,
430 .name = "fc", 425 .fabric_name = "fc",
431 .node_acl_size = sizeof(struct ft_node_acl), 426 .node_acl_size = sizeof(struct ft_node_acl),
432 .get_fabric_name = ft_get_fabric_name,
433 .tpg_get_wwn = ft_get_fabric_wwn, 427 .tpg_get_wwn = ft_get_fabric_wwn,
434 .tpg_get_tag = ft_get_tag, 428 .tpg_get_tag = ft_get_tag,
435 .tpg_check_demo_mode = ft_check_false, 429 .tpg_check_demo_mode = ft_check_false,