author		Linus Torvalds <torvalds@linux-foundation.org>	2016-01-20 20:20:53 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-01-20 20:20:53 -0500
commit		71e4634e00119b2fb8dd0da99b3f5ebbb49cc872
tree		c42a37a98e99aa9ce8a2af79710e295704782a16 /drivers/target
parent		19a3dd7621af01b7e44a70d16beab59326c38824
parent		fab683eb12e71ac6057dc42dc7d1e5e71e5cba5e
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
Pull SCSI target updates from Nicholas Bellinger:
 "The highlights this round include:

   - Introduce configfs support for unlocked configfs_depend_item()
     (krzysztof + andrzej)

   - Conversion of usb-gadget target driver to new function registration
     interface (andrzej + sebastian)

   - Enable qla2xxx FC target mode support for Extended Logins
     (himanshu + giridhar)

   - Enable qla2xxx FC target mode support for Exchange Offload
     (himanshu + giridhar)

   - Add qla2xxx FC target mode irq affinity notification + selective
     command queuing (quinn + himanshu)

   - Fix iscsi-target deadlock in se_node_acl configfs deletion
     (sagi + nab)

   - Convert se_node_acl configfs deletion + se_node_acl->queue_depth to
     proper se_session->sess_kref + target_get_session() usage
     (hch + sagi + nab)

   - Fix long-standing race between se_node_acl->acl_kref get and
     get_initiator_node_acl() lookup (hch + nab)

   - Fix target/user block-size handling, and make sure netlink reaches
     all network namespaces (sheng + andy)

  Note that an outstanding bug-fix series for remote I_T nexus port TMR
  LUN_RESET has been posted and is still being tested; it will likely
  become post -rc1 material at this point"

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (56 commits)
  scsi: qla2xxxx: avoid type mismatch in comparison
  target/user: Make sure netlink would reach all network namespaces
  target: Obtain se_node_acl->acl_kref during get_initiator_node_acl
  target: Convert ACL change queue_depth se_session reference usage
  iscsi-target: Fix potential dead-lock during node acl delete
  ib_srpt: Convert acl lookup to modern get_initiator_node_acl usage
  tcm_fc: Convert acl lookup to modern get_initiator_node_acl usage
  tcm_fc: Wait for command completion before freeing a session
  target: Fix a memory leak in target_dev_lba_map_store()
  target: Support aborting tasks with a 64-bit tag
  usb/gadget: Remove set-but-not-used variables
  target: Remove an unused variable
  target: Fix indentation in target_core_configfs.c
  target/user: Allow user to set block size before enabling device
  iser-target: Fix non negative ERR_PTR isert_device_get usage
  target/fcoe: Add tag support to tcm_fc
  qla2xxx: Check for online flag instead of active reset when transmitting responses
  qla2xxx: Set all queues to 4k
  qla2xxx: Disable ZIO at start time.
  qla2xxx: Move atioq to a different lock to reduce lock contention
  ...
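[Editor's note] The acl_kref fix called out above ("Obtain se_node_acl->acl_kref during get_initiator_node_acl") closes the window between looking up a node ACL and taking a reference on it. A condensed sketch of the resulting lookup, mirroring the target_core_tpg.c hunk further down in this diff (same field and helper names as shown there), looks roughly like this:

	struct se_node_acl *core_tpg_get_initiator_node_acl(
		struct se_portal_group *tpg,
		unsigned char *initiatorname)
	{
		struct se_node_acl *acl;

		/*
		 * The list walk and the kref get happen under the same mutex,
		 * so a concurrent ACL delete cannot free the entry in between.
		 */
		mutex_lock(&tpg->acl_node_mutex);
		acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
		if (acl && !kref_get_unless_zero(&acl->acl_kref))
			acl = NULL;	/* ACL already being torn down: treat as not found */
		mutex_unlock(&tpg->acl_node_mutex);

		return acl;
	}

The reference taken here is dropped again at session shutdown via target_put_nacl() from within transport_free_session(), as the comment blocks in the target_core_tpg.c and target_core_transport.c hunks below note.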
Diffstat (limited to 'drivers/target')
-rw-r--r--drivers/target/iscsi/iscsi_target.c7
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c17
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c7
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_tmr.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c10
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.h2
-rw-r--r--drivers/target/loopback/tcm_loop.c14
-rw-r--r--drivers/target/sbp/sbp_target.c2
-rw-r--r--drivers/target/target_core_configfs.c47
-rw-r--r--drivers/target/target_core_device.c2
-rw-r--r--drivers/target/target_core_iblock.c2
-rw-r--r--drivers/target/target_core_pr.c11
-rw-r--r--drivers/target/target_core_sbc.c10
-rw-r--r--drivers/target/target_core_spc.c12
-rw-r--r--drivers/target/target_core_tmr.c2
-rw-r--r--drivers/target/target_core_tpg.c197
-rw-r--r--drivers/target/target_core_transport.c33
-rw-r--r--drivers/target/target_core_user.c9
-rw-r--r--drivers/target/tcm_fc/tcm_fc.h1
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c47
-rw-r--r--drivers/target/tcm_fc/tfc_io.c8
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c42
23 files changed, 265 insertions, 221 deletions
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 72204fbf2bb1..576a7a43470c 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1333,7 +1333,7 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
1333 /* 1333 /*
1334 * Check if a delayed TASK_ABORTED status needs to 1334 * Check if a delayed TASK_ABORTED status needs to
1335 * be sent now if the ISCSI_FLAG_CMD_FINAL has been 1335 * be sent now if the ISCSI_FLAG_CMD_FINAL has been
1336 * received with the unsolicitied data out. 1336 * received with the unsolicited data out.
1337 */ 1337 */
1338 if (hdr->flags & ISCSI_FLAG_CMD_FINAL) 1338 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
1339 iscsit_stop_dataout_timer(cmd); 1339 iscsit_stop_dataout_timer(cmd);
@@ -3435,7 +3435,7 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
3435 3435
3436 if ((tpg->tpg_attrib.generate_node_acls == 0) && 3436 if ((tpg->tpg_attrib.generate_node_acls == 0) &&
3437 (tpg->tpg_attrib.demo_mode_discovery == 0) && 3437 (tpg->tpg_attrib.demo_mode_discovery == 0) &&
3438 (!core_tpg_get_initiator_node_acl(&tpg->tpg_se_tpg, 3438 (!target_tpg_has_node_acl(&tpg->tpg_se_tpg,
3439 cmd->conn->sess->sess_ops->InitiatorName))) { 3439 cmd->conn->sess->sess_ops->InitiatorName))) {
3440 continue; 3440 continue;
3441 } 3441 }
@@ -4459,9 +4459,6 @@ int iscsit_close_connection(
4459 4459
4460 return 0; 4460 return 0;
4461 } 4461 }
4462 spin_unlock_bh(&sess->conn_lock);
4463
4464 return 0;
4465} 4462}
4466 4463
4467int iscsit_close_session(struct iscsi_session *sess) 4464int iscsit_close_session(struct iscsi_session *sess)
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 255204cc43e6..2f821de63049 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -725,11 +725,8 @@ static ssize_t lio_target_nacl_cmdsn_depth_store(struct config_item *item,
725 725
726 if (iscsit_get_tpg(tpg) < 0) 726 if (iscsit_get_tpg(tpg) < 0)
727 return -EINVAL; 727 return -EINVAL;
728 /* 728
729 * iscsit_tpg_set_initiator_node_queue_depth() assumes force=1 729 ret = core_tpg_set_initiator_node_queue_depth(se_nacl, cmdsn_depth);
730 */
731 ret = iscsit_tpg_set_initiator_node_queue_depth(tpg,
732 config_item_name(acl_ci), cmdsn_depth, 1);
733 730
734 pr_debug("LIO_Target_ConfigFS: %s/%s Set CmdSN Window: %u for" 731 pr_debug("LIO_Target_ConfigFS: %s/%s Set CmdSN Window: %u for"
735 "InitiatorName: %s\n", config_item_name(wwn_ci), 732 "InitiatorName: %s\n", config_item_name(wwn_ci),
@@ -1593,28 +1590,30 @@ static int lio_tpg_check_prot_fabric_only(
1593} 1590}
1594 1591
1595/* 1592/*
1596 * Called with spin_lock_bh(struct se_portal_group->session_lock) held.. 1593 * This function calls iscsit_inc_session_usage_count() on the
1597 *
1598 * Also, this function calls iscsit_inc_session_usage_count() on the
1599 * struct iscsi_session in question. 1594 * struct iscsi_session in question.
1600 */ 1595 */
1601static int lio_tpg_shutdown_session(struct se_session *se_sess) 1596static int lio_tpg_shutdown_session(struct se_session *se_sess)
1602{ 1597{
1603 struct iscsi_session *sess = se_sess->fabric_sess_ptr; 1598 struct iscsi_session *sess = se_sess->fabric_sess_ptr;
1599 struct se_portal_group *se_tpg = &sess->tpg->tpg_se_tpg;
1604 1600
1601 spin_lock_bh(&se_tpg->session_lock);
1605 spin_lock(&sess->conn_lock); 1602 spin_lock(&sess->conn_lock);
1606 if (atomic_read(&sess->session_fall_back_to_erl0) || 1603 if (atomic_read(&sess->session_fall_back_to_erl0) ||
1607 atomic_read(&sess->session_logout) || 1604 atomic_read(&sess->session_logout) ||
1608 (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) { 1605 (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
1609 spin_unlock(&sess->conn_lock); 1606 spin_unlock(&sess->conn_lock);
1607 spin_unlock_bh(&se_tpg->session_lock);
1610 return 0; 1608 return 0;
1611 } 1609 }
1612 atomic_set(&sess->session_reinstatement, 1); 1610 atomic_set(&sess->session_reinstatement, 1);
1613 spin_unlock(&sess->conn_lock); 1611 spin_unlock(&sess->conn_lock);
1614 1612
1615 iscsit_stop_time2retain_timer(sess); 1613 iscsit_stop_time2retain_timer(sess);
1616 iscsit_stop_session(sess, 1, 1); 1614 spin_unlock_bh(&se_tpg->session_lock);
1617 1615
1616 iscsit_stop_session(sess, 1, 1);
1618 return 1; 1617 return 1;
1619} 1618}
1620 1619
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 2e561deb30a2..9214c9dafa2b 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -160,8 +160,7 @@ static int iscsit_handle_r2t_snack(
160 " protocol error.\n", cmd->init_task_tag, begrun, 160 " protocol error.\n", cmd->init_task_tag, begrun,
161 (begrun + runlength), cmd->acked_data_sn); 161 (begrun + runlength), cmd->acked_data_sn);
162 162
163 return iscsit_reject_cmd(cmd, 163 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
164 ISCSI_REASON_PROTOCOL_ERROR, buf);
165 } 164 }
166 165
167 if (runlength) { 166 if (runlength) {
@@ -628,8 +627,8 @@ int iscsit_dataout_datapduinorder_no_fbit(
628 if (cmd->pdu_list[i].seq_no == pdu->seq_no) { 627 if (cmd->pdu_list[i].seq_no == pdu->seq_no) {
629 if (!first_pdu) 628 if (!first_pdu)
630 first_pdu = &cmd->pdu_list[i]; 629 first_pdu = &cmd->pdu_list[i];
631 xfer_len += cmd->pdu_list[i].length; 630 xfer_len += cmd->pdu_list[i].length;
632 pdu_count++; 631 pdu_count++;
633 } else if (pdu_count) 632 } else if (pdu_count)
634 break; 633 break;
635 } 634 }
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 2cbea2af7cd0..3a1f9a7e6bb6 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -1668,7 +1668,7 @@ void iscsi_set_session_parameters(
1668 param->value); 1668 param->value);
1669 } else if (!strcmp(param->name, INITIALR2T)) { 1669 } else if (!strcmp(param->name, INITIALR2T)) {
1670 ops->InitialR2T = !strcmp(param->value, YES); 1670 ops->InitialR2T = !strcmp(param->value, YES);
1671 pr_debug("InitialR2T: %s\n", 1671 pr_debug("InitialR2T: %s\n",
1672 param->value); 1672 param->value);
1673 } else if (!strcmp(param->name, IMMEDIATEDATA)) { 1673 } else if (!strcmp(param->name, IMMEDIATEDATA)) {
1674 ops->ImmediateData = !strcmp(param->value, YES); 1674 ops->ImmediateData = !strcmp(param->value, YES);
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
index 11320df939f7..3d637055c36f 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.c
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -82,7 +82,7 @@ int iscsit_tmr_task_warm_reset(
82 pr_err("TMR Opcode TARGET_WARM_RESET authorization" 82 pr_err("TMR Opcode TARGET_WARM_RESET authorization"
83 " failed for Initiator Node: %s\n", 83 " failed for Initiator Node: %s\n",
84 sess->se_sess->se_node_acl->initiatorname); 84 sess->se_sess->se_node_acl->initiatorname);
85 return -1; 85 return -1;
86 } 86 }
87 /* 87 /*
88 * Do the real work in transport_generic_do_tmr(). 88 * Do the real work in transport_generic_do_tmr().
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 23c95cd14167..0814e5894a96 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -590,16 +590,6 @@ int iscsit_tpg_del_network_portal(
590 return iscsit_tpg_release_np(tpg_np, tpg, np); 590 return iscsit_tpg_release_np(tpg_np, tpg, np);
591} 591}
592 592
593int iscsit_tpg_set_initiator_node_queue_depth(
594 struct iscsi_portal_group *tpg,
595 unsigned char *initiatorname,
596 u32 queue_depth,
597 int force)
598{
599 return core_tpg_set_initiator_node_queue_depth(&tpg->tpg_se_tpg,
600 initiatorname, queue_depth, force);
601}
602
603int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication) 593int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication)
604{ 594{
605 unsigned char buf1[256], buf2[256], *none = NULL; 595 unsigned char buf1[256], buf2[256], *none = NULL;
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
index 9db32bd24cd4..2da211920c18 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -26,8 +26,6 @@ extern struct iscsi_tpg_np *iscsit_tpg_add_network_portal(struct iscsi_portal_gr
26 int); 26 int);
27extern int iscsit_tpg_del_network_portal(struct iscsi_portal_group *, 27extern int iscsit_tpg_del_network_portal(struct iscsi_portal_group *,
28 struct iscsi_tpg_np *); 28 struct iscsi_tpg_np *);
29extern int iscsit_tpg_set_initiator_node_queue_depth(struct iscsi_portal_group *,
30 unsigned char *, u32, int);
31extern int iscsit_ta_authentication(struct iscsi_portal_group *, u32); 29extern int iscsit_ta_authentication(struct iscsi_portal_group *, u32);
32extern int iscsit_ta_login_timeout(struct iscsi_portal_group *, u32); 30extern int iscsit_ta_login_timeout(struct iscsi_portal_group *, u32);
33extern int iscsit_ta_netif_timeout(struct iscsi_portal_group *, u32); 31extern int iscsit_ta_netif_timeout(struct iscsi_portal_group *, u32);
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 4fb0eca86857..d41a5c300e31 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -1036,12 +1036,26 @@ static ssize_t tcm_loop_tpg_transport_status_store(struct config_item *item,
1036 return -EINVAL; 1036 return -EINVAL;
1037} 1037}
1038 1038
1039static ssize_t tcm_loop_tpg_address_show(struct config_item *item,
1040 char *page)
1041{
1042 struct se_portal_group *se_tpg = to_tpg(item);
1043 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1044 struct tcm_loop_tpg, tl_se_tpg);
1045 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
1046
1047 return snprintf(page, PAGE_SIZE, "%d:0:%d\n",
1048 tl_hba->sh->host_no, tl_tpg->tl_tpgt);
1049}
1050
1039CONFIGFS_ATTR(tcm_loop_tpg_, nexus); 1051CONFIGFS_ATTR(tcm_loop_tpg_, nexus);
1040CONFIGFS_ATTR(tcm_loop_tpg_, transport_status); 1052CONFIGFS_ATTR(tcm_loop_tpg_, transport_status);
1053CONFIGFS_ATTR_RO(tcm_loop_tpg_, address);
1041 1054
1042static struct configfs_attribute *tcm_loop_tpg_attrs[] = { 1055static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
1043 &tcm_loop_tpg_attr_nexus, 1056 &tcm_loop_tpg_attr_nexus,
1044 &tcm_loop_tpg_attr_transport_status, 1057 &tcm_loop_tpg_attr_transport_status,
1058 &tcm_loop_tpg_attr_address,
1045 NULL, 1059 NULL,
1046}; 1060};
1047 1061
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 35f7d31b29d2..3072f1aca8ec 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -39,8 +39,6 @@
39 39
40#include "sbp_target.h" 40#include "sbp_target.h"
41 41
42static const struct target_core_fabric_ops sbp_ops;
43
44/* FireWire address region for management and command block address handlers */ 42/* FireWire address region for management and command block address handlers */
45static const struct fw_address_region sbp_register_region = { 43static const struct fw_address_region sbp_register_region = {
46 .start = CSR_REGISTER_BASE + 0x10000, 44 .start = CSR_REGISTER_BASE + 0x10000,
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index b9b9ffde4c7a..3327c49674d3 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -278,7 +278,7 @@ EXPORT_SYMBOL(target_depend_item);
278 278
279void target_undepend_item(struct config_item *item) 279void target_undepend_item(struct config_item *item)
280{ 280{
281 return configfs_undepend_item(&target_core_fabrics, item); 281 return configfs_undepend_item(item);
282} 282}
283EXPORT_SYMBOL(target_undepend_item); 283EXPORT_SYMBOL(target_undepend_item);
284 284
@@ -499,6 +499,7 @@ DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_lba_count);
499DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_block_desc_count); 499DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_block_desc_count);
500DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity); 500DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity);
501DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity_alignment); 501DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity_alignment);
502DEF_CONFIGFS_ATTRIB_SHOW(unmap_zeroes_data);
502DEF_CONFIGFS_ATTRIB_SHOW(max_write_same_len); 503DEF_CONFIGFS_ATTRIB_SHOW(max_write_same_len);
503 504
504#define DEF_CONFIGFS_ATTRIB_STORE_U32(_name) \ 505#define DEF_CONFIGFS_ATTRIB_STORE_U32(_name) \
@@ -548,7 +549,8 @@ static ssize_t _name##_store(struct config_item *item, const char *page,\
548 size_t count) \ 549 size_t count) \
549{ \ 550{ \
550 printk_once(KERN_WARNING \ 551 printk_once(KERN_WARNING \
551 "ignoring deprecated ##_name## attribute\n"); \ 552 "ignoring deprecated %s attribute\n", \
553 __stringify(_name)); \
552 return count; \ 554 return count; \
553} 555}
554 556
@@ -866,6 +868,39 @@ static ssize_t emulate_rest_reord_store(struct config_item *item,
866 return count; 868 return count;
867} 869}
868 870
871static ssize_t unmap_zeroes_data_store(struct config_item *item,
872 const char *page, size_t count)
873{
874 struct se_dev_attrib *da = to_attrib(item);
875 bool flag;
876 int ret;
877
878 ret = strtobool(page, &flag);
879 if (ret < 0)
880 return ret;
881
882 if (da->da_dev->export_count) {
883 pr_err("dev[%p]: Unable to change SE Device"
884 " unmap_zeroes_data while export_count is %d\n",
885 da->da_dev, da->da_dev->export_count);
886 return -EINVAL;
887 }
888 /*
889 * We expect this value to be non-zero when generic Block Layer
890 * Discard supported is detected iblock_configure_device().
891 */
892 if (flag && !da->max_unmap_block_desc_count) {
893 pr_err("dev[%p]: Thin Provisioning LBPRZ will not be set"
894 " because max_unmap_block_desc_count is zero\n",
895 da->da_dev);
896 return -ENOSYS;
897 }
898 da->unmap_zeroes_data = flag;
899 pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n",
900 da->da_dev, flag);
901 return 0;
902}
903
869/* 904/*
870 * Note, this can only be called on unexported SE Device Object. 905 * Note, this can only be called on unexported SE Device Object.
871 */ 906 */
@@ -998,6 +1033,7 @@ CONFIGFS_ATTR(, max_unmap_lba_count);
998CONFIGFS_ATTR(, max_unmap_block_desc_count); 1033CONFIGFS_ATTR(, max_unmap_block_desc_count);
999CONFIGFS_ATTR(, unmap_granularity); 1034CONFIGFS_ATTR(, unmap_granularity);
1000CONFIGFS_ATTR(, unmap_granularity_alignment); 1035CONFIGFS_ATTR(, unmap_granularity_alignment);
1036CONFIGFS_ATTR(, unmap_zeroes_data);
1001CONFIGFS_ATTR(, max_write_same_len); 1037CONFIGFS_ATTR(, max_write_same_len);
1002 1038
1003/* 1039/*
@@ -1034,6 +1070,7 @@ struct configfs_attribute *sbc_attrib_attrs[] = {
1034 &attr_max_unmap_block_desc_count, 1070 &attr_max_unmap_block_desc_count,
1035 &attr_unmap_granularity, 1071 &attr_unmap_granularity,
1036 &attr_unmap_granularity_alignment, 1072 &attr_unmap_granularity_alignment,
1073 &attr_unmap_zeroes_data,
1037 &attr_max_write_same_len, 1074 &attr_max_write_same_len,
1038 NULL, 1075 NULL,
1039}; 1076};
@@ -1980,14 +2017,14 @@ static ssize_t target_dev_lba_map_store(struct config_item *item,
1980 struct se_device *dev = to_device(item); 2017 struct se_device *dev = to_device(item);
1981 struct t10_alua_lba_map *lba_map = NULL; 2018 struct t10_alua_lba_map *lba_map = NULL;
1982 struct list_head lba_list; 2019 struct list_head lba_list;
1983 char *map_entries, *ptr; 2020 char *map_entries, *orig, *ptr;
1984 char state; 2021 char state;
1985 int pg_num = -1, pg; 2022 int pg_num = -1, pg;
1986 int ret = 0, num = 0, pg_id, alua_state; 2023 int ret = 0, num = 0, pg_id, alua_state;
1987 unsigned long start_lba = -1, end_lba = -1; 2024 unsigned long start_lba = -1, end_lba = -1;
1988 unsigned long segment_size = -1, segment_mult = -1; 2025 unsigned long segment_size = -1, segment_mult = -1;
1989 2026
1990 map_entries = kstrdup(page, GFP_KERNEL); 2027 orig = map_entries = kstrdup(page, GFP_KERNEL);
1991 if (!map_entries) 2028 if (!map_entries)
1992 return -ENOMEM; 2029 return -ENOMEM;
1993 2030
@@ -2085,7 +2122,7 @@ out:
2085 } else 2122 } else
2086 core_alua_set_lba_map(dev, &lba_list, 2123 core_alua_set_lba_map(dev, &lba_list,
2087 segment_size, segment_mult); 2124 segment_size, segment_mult);
2088 kfree(map_entries); 2125 kfree(orig);
2089 return count; 2126 return count;
2090} 2127}
2091 2128
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 88ea4e4f124b..cacd97a8cbd0 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -813,6 +813,8 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
813 dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT; 813 dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
814 dev->dev_attrib.unmap_granularity_alignment = 814 dev->dev_attrib.unmap_granularity_alignment =
815 DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT; 815 DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
816 dev->dev_attrib.unmap_zeroes_data =
817 DA_UNMAP_ZEROES_DATA_DEFAULT;
816 dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN; 818 dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
817 819
818 xcopy_lun = &dev->xcopy_lun; 820 xcopy_lun = &dev->xcopy_lun;
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index f29c69120054..e77d15000caa 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -138,6 +138,8 @@ static int iblock_configure_device(struct se_device *dev)
138 q->limits.discard_granularity >> 9; 138 q->limits.discard_granularity >> 9;
139 dev->dev_attrib.unmap_granularity_alignment = 139 dev->dev_attrib.unmap_granularity_alignment =
140 q->limits.discard_alignment; 140 q->limits.discard_alignment;
141 dev->dev_attrib.unmap_zeroes_data =
142 q->limits.discard_zeroes_data;
141 143
142 pr_debug("IBLOCK: BLOCK Discard support available," 144 pr_debug("IBLOCK: BLOCK Discard support available,"
143 " disabled by default\n"); 145 " disabled by default\n");
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index e7933115087a..b1795735eafc 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -1457,8 +1457,7 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
1457static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve) 1457static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
1458{ 1458{
1459 struct se_lun_acl *lun_acl; 1459 struct se_lun_acl *lun_acl;
1460 struct se_node_acl *nacl; 1460
1461 struct se_portal_group *tpg;
1462 /* 1461 /*
1463 * For nacl->dynamic_node_acl=1 1462 * For nacl->dynamic_node_acl=1
1464 */ 1463 */
@@ -1467,17 +1466,13 @@ static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
1467 if (!lun_acl) 1466 if (!lun_acl)
1468 return 0; 1467 return 0;
1469 1468
1470 nacl = lun_acl->se_lun_nacl;
1471 tpg = nacl->se_tpg;
1472
1473 return target_depend_item(&lun_acl->se_lun_group.cg_item); 1469 return target_depend_item(&lun_acl->se_lun_group.cg_item);
1474} 1470}
1475 1471
1476static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve) 1472static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
1477{ 1473{
1478 struct se_lun_acl *lun_acl; 1474 struct se_lun_acl *lun_acl;
1479 struct se_node_acl *nacl; 1475
1480 struct se_portal_group *tpg;
1481 /* 1476 /*
1482 * For nacl->dynamic_node_acl=1 1477 * For nacl->dynamic_node_acl=1
1483 */ 1478 */
@@ -1487,8 +1482,6 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
1487 kref_put(&se_deve->pr_kref, target_pr_kref_release); 1482 kref_put(&se_deve->pr_kref, target_pr_kref_release);
1488 return; 1483 return;
1489 } 1484 }
1490 nacl = lun_acl->se_lun_nacl;
1491 tpg = nacl->se_tpg;
1492 1485
1493 target_undepend_item(&lun_acl->se_lun_group.cg_item); 1486 target_undepend_item(&lun_acl->se_lun_group.cg_item);
1494 kref_put(&se_deve->pr_kref, target_pr_kref_release); 1487 kref_put(&se_deve->pr_kref, target_pr_kref_release);
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 98698d875742..a9057aa07176 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -141,9 +141,17 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
141 * Set Thin Provisioning Enable bit following sbc3r22 in section 141 * Set Thin Provisioning Enable bit following sbc3r22 in section
142 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled. 142 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
143 */ 143 */
144 if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws) 144 if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws) {
145 buf[14] |= 0x80; 145 buf[14] |= 0x80;
146 146
147 /*
148 * LBPRZ signifies that zeroes will be read back from an LBA after
149 * an UNMAP or WRITE SAME w/ unmap bit (sbc3r36 5.16.2)
150 */
151 if (dev->dev_attrib.unmap_zeroes_data)
152 buf[14] |= 0x40;
153 }
154
147 rbuf = transport_kmap_data_sg(cmd); 155 rbuf = transport_kmap_data_sg(cmd);
148 if (rbuf) { 156 if (rbuf) {
149 memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); 157 memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 9413e1a949e5..0aa47babd16c 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -635,6 +635,18 @@ spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
635 if (dev->dev_attrib.emulate_tpws != 0) 635 if (dev->dev_attrib.emulate_tpws != 0)
636 buf[5] |= 0x40 | 0x20; 636 buf[5] |= 0x40 | 0x20;
637 637
638 /*
639 * The unmap_zeroes_data set means that the underlying device supports
640 * REQ_DISCARD and has the discard_zeroes_data bit set. This satisfies
641 * the SBC requirements for LBPRZ, meaning that a subsequent read
642 * will return zeroes after an UNMAP or WRITE SAME (16) to an LBA
643 * See sbc4r36 6.6.4.
644 */
645 if (((dev->dev_attrib.emulate_tpu != 0) ||
646 (dev->dev_attrib.emulate_tpws != 0)) &&
647 (dev->dev_attrib.unmap_zeroes_data != 0))
648 buf[5] |= 0x04;
649
638 return 0; 650 return 0;
639} 651}
640 652
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 28fb3016370f..fcdcb117c60d 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -201,7 +201,7 @@ static void core_tmr_drain_tmr_list(
201 /* 201 /*
202 * If this function was called with a valid pr_res_key 202 * If this function was called with a valid pr_res_key
203 * parameter (eg: for PROUT PREEMPT_AND_ABORT service action 203 * parameter (eg: for PROUT PREEMPT_AND_ABORT service action
204 * skip non regisration key matching TMRs. 204 * skip non registration key matching TMRs.
205 */ 205 */
206 if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd)) 206 if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
207 continue; 207 continue;
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 5fb9dd7f08bb..3608b1b5ecf7 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -75,9 +75,21 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
75 unsigned char *initiatorname) 75 unsigned char *initiatorname)
76{ 76{
77 struct se_node_acl *acl; 77 struct se_node_acl *acl;
78 78 /*
79 * Obtain se_node_acl->acl_kref using fabric driver provided
80 * initiatorname[] during node acl endpoint lookup driven by
81 * new se_session login.
82 *
83 * The reference is held until se_session shutdown -> release
84 * occurs via fabric driver invoked transport_deregister_session()
85 * or transport_free_session() code.
86 */
79 mutex_lock(&tpg->acl_node_mutex); 87 mutex_lock(&tpg->acl_node_mutex);
80 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); 88 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
89 if (acl) {
90 if (!kref_get_unless_zero(&acl->acl_kref))
91 acl = NULL;
92 }
81 mutex_unlock(&tpg->acl_node_mutex); 93 mutex_unlock(&tpg->acl_node_mutex);
82 94
83 return acl; 95 return acl;
@@ -157,28 +169,25 @@ void core_tpg_add_node_to_devs(
157 mutex_unlock(&tpg->tpg_lun_mutex); 169 mutex_unlock(&tpg->tpg_lun_mutex);
158} 170}
159 171
160/* core_set_queue_depth_for_node(): 172static void
161 * 173target_set_nacl_queue_depth(struct se_portal_group *tpg,
162 * 174 struct se_node_acl *acl, u32 queue_depth)
163 */
164static int core_set_queue_depth_for_node(
165 struct se_portal_group *tpg,
166 struct se_node_acl *acl)
167{ 175{
176 acl->queue_depth = queue_depth;
177
168 if (!acl->queue_depth) { 178 if (!acl->queue_depth) {
169 pr_err("Queue depth for %s Initiator Node: %s is 0," 179 pr_warn("Queue depth for %s Initiator Node: %s is 0,"
170 "defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(), 180 "defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
171 acl->initiatorname); 181 acl->initiatorname);
172 acl->queue_depth = 1; 182 acl->queue_depth = 1;
173 } 183 }
174
175 return 0;
176} 184}
177 185
178static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg, 186static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
179 const unsigned char *initiatorname) 187 const unsigned char *initiatorname)
180{ 188{
181 struct se_node_acl *acl; 189 struct se_node_acl *acl;
190 u32 queue_depth;
182 191
183 acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size), 192 acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
184 GFP_KERNEL); 193 GFP_KERNEL);
@@ -193,24 +202,20 @@ static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
193 spin_lock_init(&acl->nacl_sess_lock); 202 spin_lock_init(&acl->nacl_sess_lock);
194 mutex_init(&acl->lun_entry_mutex); 203 mutex_init(&acl->lun_entry_mutex);
195 atomic_set(&acl->acl_pr_ref_count, 0); 204 atomic_set(&acl->acl_pr_ref_count, 0);
205
196 if (tpg->se_tpg_tfo->tpg_get_default_depth) 206 if (tpg->se_tpg_tfo->tpg_get_default_depth)
197 acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg); 207 queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
198 else 208 else
199 acl->queue_depth = 1; 209 queue_depth = 1;
210 target_set_nacl_queue_depth(tpg, acl, queue_depth);
211
200 snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); 212 snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
201 acl->se_tpg = tpg; 213 acl->se_tpg = tpg;
202 acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); 214 acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
203 215
204 tpg->se_tpg_tfo->set_default_node_attributes(acl); 216 tpg->se_tpg_tfo->set_default_node_attributes(acl);
205 217
206 if (core_set_queue_depth_for_node(tpg, acl) < 0)
207 goto out_free_acl;
208
209 return acl; 218 return acl;
210
211out_free_acl:
212 kfree(acl);
213 return NULL;
214} 219}
215 220
216static void target_add_node_acl(struct se_node_acl *acl) 221static void target_add_node_acl(struct se_node_acl *acl)
@@ -219,7 +224,6 @@ static void target_add_node_acl(struct se_node_acl *acl)
219 224
220 mutex_lock(&tpg->acl_node_mutex); 225 mutex_lock(&tpg->acl_node_mutex);
221 list_add_tail(&acl->acl_list, &tpg->acl_node_list); 226 list_add_tail(&acl->acl_list, &tpg->acl_node_list);
222 tpg->num_node_acls++;
223 mutex_unlock(&tpg->acl_node_mutex); 227 mutex_unlock(&tpg->acl_node_mutex);
224 228
225 pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s" 229 pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
@@ -232,6 +236,25 @@ static void target_add_node_acl(struct se_node_acl *acl)
232 acl->initiatorname); 236 acl->initiatorname);
233} 237}
234 238
239bool target_tpg_has_node_acl(struct se_portal_group *tpg,
240 const char *initiatorname)
241{
242 struct se_node_acl *acl;
243 bool found = false;
244
245 mutex_lock(&tpg->acl_node_mutex);
246 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
247 if (!strcmp(acl->initiatorname, initiatorname)) {
248 found = true;
249 break;
250 }
251 }
252 mutex_unlock(&tpg->acl_node_mutex);
253
254 return found;
255}
256EXPORT_SYMBOL(target_tpg_has_node_acl);
257
235struct se_node_acl *core_tpg_check_initiator_node_acl( 258struct se_node_acl *core_tpg_check_initiator_node_acl(
236 struct se_portal_group *tpg, 259 struct se_portal_group *tpg,
237 unsigned char *initiatorname) 260 unsigned char *initiatorname)
@@ -248,6 +271,15 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
248 acl = target_alloc_node_acl(tpg, initiatorname); 271 acl = target_alloc_node_acl(tpg, initiatorname);
249 if (!acl) 272 if (!acl)
250 return NULL; 273 return NULL;
274 /*
275 * When allocating a dynamically generated node_acl, go ahead
276 * and take the extra kref now before returning to the fabric
277 * driver caller.
278 *
279 * Note this reference will be released at session shutdown
280 * time within transport_free_session() code.
281 */
282 kref_get(&acl->acl_kref);
251 acl->dynamic_node_acl = 1; 283 acl->dynamic_node_acl = 1;
252 284
253 /* 285 /*
@@ -318,7 +350,6 @@ void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
318 acl->dynamic_node_acl = 0; 350 acl->dynamic_node_acl = 0;
319 } 351 }
320 list_del(&acl->acl_list); 352 list_del(&acl->acl_list);
321 tpg->num_node_acls--;
322 mutex_unlock(&tpg->acl_node_mutex); 353 mutex_unlock(&tpg->acl_node_mutex);
323 354
324 spin_lock_irqsave(&acl->nacl_sess_lock, flags); 355 spin_lock_irqsave(&acl->nacl_sess_lock, flags);
@@ -329,7 +360,8 @@ void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
329 if (sess->sess_tearing_down != 0) 360 if (sess->sess_tearing_down != 0)
330 continue; 361 continue;
331 362
332 target_get_session(sess); 363 if (!target_get_session(sess))
364 continue;
333 list_move(&sess->sess_acl_list, &sess_list); 365 list_move(&sess->sess_acl_list, &sess_list);
334 } 366 }
335 spin_unlock_irqrestore(&acl->nacl_sess_lock, flags); 367 spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
@@ -366,108 +398,52 @@ void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
366 * 398 *
367 */ 399 */
368int core_tpg_set_initiator_node_queue_depth( 400int core_tpg_set_initiator_node_queue_depth(
369 struct se_portal_group *tpg, 401 struct se_node_acl *acl,
370 unsigned char *initiatorname, 402 u32 queue_depth)
371 u32 queue_depth,
372 int force)
373{ 403{
374 struct se_session *sess, *init_sess = NULL; 404 LIST_HEAD(sess_list);
375 struct se_node_acl *acl; 405 struct se_portal_group *tpg = acl->se_tpg;
406 struct se_session *sess, *sess_tmp;
376 unsigned long flags; 407 unsigned long flags;
377 int dynamic_acl = 0; 408 int rc;
378
379 mutex_lock(&tpg->acl_node_mutex);
380 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
381 if (!acl) {
382 pr_err("Access Control List entry for %s Initiator"
383 " Node %s does not exists for TPG %hu, ignoring"
384 " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
385 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
386 mutex_unlock(&tpg->acl_node_mutex);
387 return -ENODEV;
388 }
389 if (acl->dynamic_node_acl) {
390 acl->dynamic_node_acl = 0;
391 dynamic_acl = 1;
392 }
393 mutex_unlock(&tpg->acl_node_mutex);
394
395 spin_lock_irqsave(&tpg->session_lock, flags);
396 list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
397 if (sess->se_node_acl != acl)
398 continue;
399
400 if (!force) {
401 pr_err("Unable to change queue depth for %s"
402 " Initiator Node: %s while session is"
403 " operational. To forcefully change the queue"
404 " depth and force session reinstatement"
405 " use the \"force=1\" parameter.\n",
406 tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
407 spin_unlock_irqrestore(&tpg->session_lock, flags);
408
409 mutex_lock(&tpg->acl_node_mutex);
410 if (dynamic_acl)
411 acl->dynamic_node_acl = 1;
412 mutex_unlock(&tpg->acl_node_mutex);
413 return -EEXIST;
414 }
415 /*
416 * Determine if the session needs to be closed by our context.
417 */
418 if (!tpg->se_tpg_tfo->shutdown_session(sess))
419 continue;
420
421 init_sess = sess;
422 break;
423 }
424 409
425 /* 410 /*
426 * User has requested to change the queue depth for a Initiator Node. 411 * User has requested to change the queue depth for a Initiator Node.
427 * Change the value in the Node's struct se_node_acl, and call 412 * Change the value in the Node's struct se_node_acl, and call
428 * core_set_queue_depth_for_node() to add the requested queue depth. 413 * target_set_nacl_queue_depth() to set the new queue depth.
429 *
430 * Finally call tpg->se_tpg_tfo->close_session() to force session
431 * reinstatement to occur if there is an active session for the
432 * $FABRIC_MOD Initiator Node in question.
433 */ 414 */
434 acl->queue_depth = queue_depth; 415 target_set_nacl_queue_depth(tpg, acl, queue_depth);
416
417 spin_lock_irqsave(&acl->nacl_sess_lock, flags);
418 list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
419 sess_acl_list) {
420 if (sess->sess_tearing_down != 0)
421 continue;
422 if (!target_get_session(sess))
423 continue;
424 spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
435 425
436 if (core_set_queue_depth_for_node(tpg, acl) < 0) {
437 spin_unlock_irqrestore(&tpg->session_lock, flags);
438 /* 426 /*
439 * Force session reinstatement if 427 * Finally call tpg->se_tpg_tfo->close_session() to force session
440 * core_set_queue_depth_for_node() failed, because we assume 428 * reinstatement to occur if there is an active session for the
441 * the $FABRIC_MOD has already the set session reinstatement 429 * $FABRIC_MOD Initiator Node in question.
442 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
443 */ 430 */
444 if (init_sess) 431 rc = tpg->se_tpg_tfo->shutdown_session(sess);
445 tpg->se_tpg_tfo->close_session(init_sess); 432 target_put_session(sess);
446 433 if (!rc) {
447 mutex_lock(&tpg->acl_node_mutex); 434 spin_lock_irqsave(&acl->nacl_sess_lock, flags);
448 if (dynamic_acl) 435 continue;
449 acl->dynamic_node_acl = 1; 436 }
450 mutex_unlock(&tpg->acl_node_mutex); 437 target_put_session(sess);
451 return -EINVAL; 438 spin_lock_irqsave(&acl->nacl_sess_lock, flags);
452 } 439 }
453 spin_unlock_irqrestore(&tpg->session_lock, flags); 440 spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
454 /*
455 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
456 * forcefully shutdown the $FABRIC_MOD session/nexus.
457 */
458 if (init_sess)
459 tpg->se_tpg_tfo->close_session(init_sess);
460 441
461 pr_debug("Successfully changed queue depth to: %d for Initiator" 442 pr_debug("Successfully changed queue depth to: %d for Initiator"
462 " Node: %s on %s Target Portal Group: %u\n", queue_depth, 443 " Node: %s on %s Target Portal Group: %u\n", acl->queue_depth,
463 initiatorname, tpg->se_tpg_tfo->get_fabric_name(), 444 acl->initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
464 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 445 tpg->se_tpg_tfo->tpg_get_tag(tpg));
465 446
466 mutex_lock(&tpg->acl_node_mutex);
467 if (dynamic_acl)
468 acl->dynamic_node_acl = 1;
469 mutex_unlock(&tpg->acl_node_mutex);
470
471 return 0; 447 return 0;
472} 448}
473EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth); 449EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
@@ -595,7 +571,6 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
595 */ 571 */
596 list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) { 572 list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
597 list_del(&nacl->acl_list); 573 list_del(&nacl->acl_list);
598 se_tpg->num_node_acls--;
599 574
600 core_tpg_wait_for_nacl_pr_ref(nacl); 575 core_tpg_wait_for_nacl_pr_ref(nacl);
601 core_free_device_list_for_node(nacl, se_tpg); 576 core_free_device_list_for_node(nacl, se_tpg);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 4fdcee2006d1..9f3608e10f25 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -341,7 +341,6 @@ void __transport_register_session(
341 &buf[0], PR_REG_ISID_LEN); 341 &buf[0], PR_REG_ISID_LEN);
342 se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); 342 se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
343 } 343 }
344 kref_get(&se_nacl->acl_kref);
345 344
346 spin_lock_irq(&se_nacl->nacl_sess_lock); 345 spin_lock_irq(&se_nacl->nacl_sess_lock);
347 /* 346 /*
@@ -384,9 +383,9 @@ static void target_release_session(struct kref *kref)
384 se_tpg->se_tpg_tfo->close_session(se_sess); 383 se_tpg->se_tpg_tfo->close_session(se_sess);
385} 384}
386 385
387void target_get_session(struct se_session *se_sess) 386int target_get_session(struct se_session *se_sess)
388{ 387{
389 kref_get(&se_sess->sess_kref); 388 return kref_get_unless_zero(&se_sess->sess_kref);
390} 389}
391EXPORT_SYMBOL(target_get_session); 390EXPORT_SYMBOL(target_get_session);
392 391
@@ -432,6 +431,7 @@ void target_put_nacl(struct se_node_acl *nacl)
432{ 431{
433 kref_put(&nacl->acl_kref, target_complete_nacl); 432 kref_put(&nacl->acl_kref, target_complete_nacl);
434} 433}
434EXPORT_SYMBOL(target_put_nacl);
435 435
436void transport_deregister_session_configfs(struct se_session *se_sess) 436void transport_deregister_session_configfs(struct se_session *se_sess)
437{ 437{
@@ -464,6 +464,15 @@ EXPORT_SYMBOL(transport_deregister_session_configfs);
464 464
465void transport_free_session(struct se_session *se_sess) 465void transport_free_session(struct se_session *se_sess)
466{ 466{
467 struct se_node_acl *se_nacl = se_sess->se_node_acl;
468 /*
469 * Drop the se_node_acl->nacl_kref obtained from within
470 * core_tpg_get_initiator_node_acl().
471 */
472 if (se_nacl) {
473 se_sess->se_node_acl = NULL;
474 target_put_nacl(se_nacl);
475 }
467 if (se_sess->sess_cmd_map) { 476 if (se_sess->sess_cmd_map) {
468 percpu_ida_destroy(&se_sess->sess_tag_pool); 477 percpu_ida_destroy(&se_sess->sess_tag_pool);
469 kvfree(se_sess->sess_cmd_map); 478 kvfree(se_sess->sess_cmd_map);
@@ -478,7 +487,7 @@ void transport_deregister_session(struct se_session *se_sess)
478 const struct target_core_fabric_ops *se_tfo; 487 const struct target_core_fabric_ops *se_tfo;
479 struct se_node_acl *se_nacl; 488 struct se_node_acl *se_nacl;
480 unsigned long flags; 489 unsigned long flags;
481 bool comp_nacl = true, drop_nacl = false; 490 bool drop_nacl = false;
482 491
483 if (!se_tpg) { 492 if (!se_tpg) {
484 transport_free_session(se_sess); 493 transport_free_session(se_sess);
@@ -502,7 +511,6 @@ void transport_deregister_session(struct se_session *se_sess)
502 if (se_nacl && se_nacl->dynamic_node_acl) { 511 if (se_nacl && se_nacl->dynamic_node_acl) {
503 if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) { 512 if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
504 list_del(&se_nacl->acl_list); 513 list_del(&se_nacl->acl_list);
505 se_tpg->num_node_acls--;
506 drop_nacl = true; 514 drop_nacl = true;
507 } 515 }
508 } 516 }
@@ -511,18 +519,16 @@ void transport_deregister_session(struct se_session *se_sess)
511 if (drop_nacl) { 519 if (drop_nacl) {
512 core_tpg_wait_for_nacl_pr_ref(se_nacl); 520 core_tpg_wait_for_nacl_pr_ref(se_nacl);
513 core_free_device_list_for_node(se_nacl, se_tpg); 521 core_free_device_list_for_node(se_nacl, se_tpg);
522 se_sess->se_node_acl = NULL;
514 kfree(se_nacl); 523 kfree(se_nacl);
515 comp_nacl = false;
516 } 524 }
517 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", 525 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
518 se_tpg->se_tpg_tfo->get_fabric_name()); 526 se_tpg->se_tpg_tfo->get_fabric_name());
519 /* 527 /*
520 * If last kref is dropping now for an explicit NodeACL, awake sleeping 528 * If last kref is dropping now for an explicit NodeACL, awake sleeping
521 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group 529 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
522 * removal context. 530 * removal context from within transport_free_session() code.
523 */ 531 */
524 if (se_nacl && comp_nacl)
525 target_put_nacl(se_nacl);
526 532
527 transport_free_session(se_sess); 533 transport_free_session(se_sess);
528} 534}
@@ -715,7 +721,10 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
715 cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE); 721 cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
716 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 722 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
717 723
718 queue_work(target_completion_wq, &cmd->work); 724 if (cmd->cpuid == -1)
725 queue_work(target_completion_wq, &cmd->work);
726 else
727 queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
719} 728}
720EXPORT_SYMBOL(target_complete_cmd); 729EXPORT_SYMBOL(target_complete_cmd);
721 730
@@ -1309,7 +1318,7 @@ EXPORT_SYMBOL(target_setup_cmd_from_cdb);
1309 1318
1310/* 1319/*
1311 * Used by fabric module frontends to queue tasks directly. 1320 * Used by fabric module frontends to queue tasks directly.
1312 * Many only be used from process context only 1321 * May only be used from process context.
1313 */ 1322 */
1314int transport_handle_cdb_direct( 1323int transport_handle_cdb_direct(
1315 struct se_cmd *cmd) 1324 struct se_cmd *cmd)
@@ -1582,7 +1591,7 @@ static void target_complete_tmr_failure(struct work_struct *work)
1582int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, 1591int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
1583 unsigned char *sense, u64 unpacked_lun, 1592 unsigned char *sense, u64 unpacked_lun,
1584 void *fabric_tmr_ptr, unsigned char tm_type, 1593 void *fabric_tmr_ptr, unsigned char tm_type,
1585 gfp_t gfp, unsigned int tag, int flags) 1594 gfp_t gfp, u64 tag, int flags)
1586{ 1595{
1587 struct se_portal_group *se_tpg; 1596 struct se_portal_group *se_tpg;
1588 int ret; 1597 int ret;
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 5e6d6cb348fc..dd600e5ead71 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -152,6 +152,7 @@ static struct genl_family tcmu_genl_family = {
152 .maxattr = TCMU_ATTR_MAX, 152 .maxattr = TCMU_ATTR_MAX,
153 .mcgrps = tcmu_mcgrps, 153 .mcgrps = tcmu_mcgrps,
154 .n_mcgrps = ARRAY_SIZE(tcmu_mcgrps), 154 .n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
155 .netnsok = true,
155}; 156};
156 157
157static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) 158static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
@@ -194,7 +195,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
194 195
195static inline void tcmu_flush_dcache_range(void *vaddr, size_t size) 196static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
196{ 197{
197 unsigned long offset = (unsigned long) vaddr & ~PAGE_MASK; 198 unsigned long offset = offset_in_page(vaddr);
198 199
199 size = round_up(size+offset, PAGE_SIZE); 200 size = round_up(size+offset, PAGE_SIZE);
200 vaddr -= offset; 201 vaddr -= offset;
@@ -840,7 +841,7 @@ static int tcmu_netlink_event(enum tcmu_genl_cmd cmd, const char *name, int mino
840 841
841 genlmsg_end(skb, msg_header); 842 genlmsg_end(skb, msg_header);
842 843
843 ret = genlmsg_multicast(&tcmu_genl_family, skb, 0, 844 ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
844 TCMU_MCGRP_CONFIG, GFP_KERNEL); 845 TCMU_MCGRP_CONFIG, GFP_KERNEL);
845 846
846 /* We don't care if no one is listening */ 847 /* We don't care if no one is listening */
@@ -917,8 +918,10 @@ static int tcmu_configure_device(struct se_device *dev)
917 if (ret) 918 if (ret)
918 goto err_register; 919 goto err_register;
919 920
921 /* User can set hw_block_size before enable the device */
922 if (dev->dev_attrib.hw_block_size == 0)
923 dev->dev_attrib.hw_block_size = 512;
920 /* Other attributes can be configured in userspace */ 924 /* Other attributes can be configured in userspace */
921 dev->dev_attrib.hw_block_size = 512;
922 dev->dev_attrib.hw_max_sectors = 128; 925 dev->dev_attrib.hw_max_sectors = 128;
923 dev->dev_attrib.hw_queue_depth = 128; 926 dev->dev_attrib.hw_queue_depth = 128;
924 927
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index 39909dadef3e..c30003bd4ff0 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -166,7 +166,6 @@ void ft_aborted_task(struct se_cmd *);
166 */ 166 */
167void ft_recv_req(struct ft_sess *, struct fc_frame *); 167void ft_recv_req(struct ft_sess *, struct fc_frame *);
168struct ft_tpg *ft_lport_find_tpg(struct fc_lport *); 168struct ft_tpg *ft_lport_find_tpg(struct fc_lport *);
169struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *);
170 169
171void ft_recv_write_data(struct ft_cmd *, struct fc_frame *); 170void ft_recv_write_data(struct ft_cmd *, struct fc_frame *);
172void ft_dump_cmd(struct ft_cmd *, const char *caller); 171void ft_dump_cmd(struct ft_cmd *, const char *caller);
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 85aeaa0ad303..4d375e95841b 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -171,9 +171,31 @@ static ssize_t ft_nacl_node_name_store(struct config_item *item,
171CONFIGFS_ATTR(ft_nacl_, node_name); 171CONFIGFS_ATTR(ft_nacl_, node_name);
172CONFIGFS_ATTR(ft_nacl_, port_name); 172CONFIGFS_ATTR(ft_nacl_, port_name);
173 173
174static ssize_t ft_nacl_tag_show(struct config_item *item,
175 char *page)
176{
177 return snprintf(page, PAGE_SIZE, "%s", acl_to_nacl(item)->acl_tag);
178}
179
180static ssize_t ft_nacl_tag_store(struct config_item *item,
181 const char *page, size_t count)
182{
183 struct se_node_acl *se_nacl = acl_to_nacl(item);
184 int ret;
185
186 ret = core_tpg_set_initiator_node_tag(se_nacl->se_tpg, se_nacl, page);
187
188 if (ret < 0)
189 return ret;
190 return count;
191}
192
193CONFIGFS_ATTR(ft_nacl_, tag);
194
174static struct configfs_attribute *ft_nacl_base_attrs[] = { 195static struct configfs_attribute *ft_nacl_base_attrs[] = {
175 &ft_nacl_attr_port_name, 196 &ft_nacl_attr_port_name,
176 &ft_nacl_attr_node_name, 197 &ft_nacl_attr_node_name,
198 &ft_nacl_attr_tag,
177 NULL, 199 NULL,
178}; 200};
179 201
@@ -198,31 +220,6 @@ static int ft_init_nodeacl(struct se_node_acl *nacl, const char *name)
198 return 0; 220 return 0;
199} 221}
200 222
201struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
202{
203 struct ft_node_acl *found = NULL;
204 struct ft_node_acl *acl;
205 struct se_portal_group *se_tpg = &tpg->se_tpg;
206 struct se_node_acl *se_acl;
207
208 mutex_lock(&se_tpg->acl_node_mutex);
209 list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) {
210 acl = container_of(se_acl, struct ft_node_acl, se_node_acl);
211 pr_debug("acl %p port_name %llx\n",
212 acl, (unsigned long long)acl->node_auth.port_name);
213 if (acl->node_auth.port_name == rdata->ids.port_name ||
214 acl->node_auth.node_name == rdata->ids.node_name) {
215 pr_debug("acl %p port_name %llx matched\n", acl,
216 (unsigned long long)rdata->ids.port_name);
217 found = acl;
218 /* XXX need to hold onto ACL */
219 break;
220 }
221 }
222 mutex_unlock(&se_tpg->acl_node_mutex);
223 return found;
224}
225
226/* 223/*
227 * local_port port_group (tpg) ops. 224 * local_port port_group (tpg) ops.
228 */ 225 */
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 847c1aa6fbf4..6f7c65abfe2a 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -154,9 +154,9 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
154 BUG_ON(!page); 154 BUG_ON(!page);
155 from = kmap_atomic(page + (mem_off >> PAGE_SHIFT)); 155 from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
156 page_addr = from; 156 page_addr = from;
157 from += mem_off & ~PAGE_MASK; 157 from += offset_in_page(mem_off);
158 tlen = min(tlen, (size_t)(PAGE_SIZE - 158 tlen = min(tlen, (size_t)(PAGE_SIZE -
159 (mem_off & ~PAGE_MASK))); 159 offset_in_page(mem_off)));
160 memcpy(to, from, tlen); 160 memcpy(to, from, tlen);
161 kunmap_atomic(page_addr); 161 kunmap_atomic(page_addr);
162 to += tlen; 162 to += tlen;
@@ -314,9 +314,9 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
314 314
315 to = kmap_atomic(page + (mem_off >> PAGE_SHIFT)); 315 to = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
316 page_addr = to; 316 page_addr = to;
317 to += mem_off & ~PAGE_MASK; 317 to += offset_in_page(mem_off);
318 tlen = min(tlen, (size_t)(PAGE_SIZE - 318 tlen = min(tlen, (size_t)(PAGE_SIZE -
319 (mem_off & ~PAGE_MASK))); 319 offset_in_page(mem_off)));
320 memcpy(to, from, tlen); 320 memcpy(to, from, tlen);
321 kunmap_atomic(page_addr); 321 kunmap_atomic(page_addr);
322 322
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 7b934eac995d..e19f4c58c6fa 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -191,10 +191,15 @@ out:
191 * Caller holds ft_lport_lock. 191 * Caller holds ft_lport_lock.
192 */ 192 */
193static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id, 193static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
194 struct ft_node_acl *acl) 194 struct fc_rport_priv *rdata)
195{ 195{
196 struct se_portal_group *se_tpg = &tport->tpg->se_tpg;
197 struct se_node_acl *se_acl;
196 struct ft_sess *sess; 198 struct ft_sess *sess;
197 struct hlist_head *head; 199 struct hlist_head *head;
200 unsigned char initiatorname[TRANSPORT_IQN_LEN];
201
202 ft_format_wwn(&initiatorname[0], TRANSPORT_IQN_LEN, rdata->ids.port_name);
198 203
199 head = &tport->hash[ft_sess_hash(port_id)]; 204 head = &tport->hash[ft_sess_hash(port_id)];
200 hlist_for_each_entry_rcu(sess, head, hash) 205 hlist_for_each_entry_rcu(sess, head, hash)
@@ -212,7 +217,14 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
212 kfree(sess); 217 kfree(sess);
213 return NULL; 218 return NULL;
214 } 219 }
215 sess->se_sess->se_node_acl = &acl->se_node_acl; 220
221 se_acl = core_tpg_get_initiator_node_acl(se_tpg, &initiatorname[0]);
222 if (!se_acl) {
223 transport_free_session(sess->se_sess);
224 kfree(sess);
225 return NULL;
226 }
227 sess->se_sess->se_node_acl = se_acl;
216 sess->tport = tport; 228 sess->tport = tport;
217 sess->port_id = port_id; 229 sess->port_id = port_id;
218 kref_init(&sess->kref); /* ref for table entry */ 230 kref_init(&sess->kref); /* ref for table entry */
@@ -221,7 +233,7 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
221 233
222 pr_debug("port_id %x sess %p\n", port_id, sess); 234 pr_debug("port_id %x sess %p\n", port_id, sess);
223 235
224 transport_register_session(&tport->tpg->se_tpg, &acl->se_node_acl, 236 transport_register_session(&tport->tpg->se_tpg, se_acl,
225 sess->se_sess, sess); 237 sess->se_sess, sess);
226 return sess; 238 return sess;
227} 239}
@@ -260,6 +272,14 @@ static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id)
260 return NULL; 272 return NULL;
261} 273}
262 274
275static void ft_close_sess(struct ft_sess *sess)
276{
277 transport_deregister_session_configfs(sess->se_sess);
278 target_sess_cmd_list_set_waiting(sess->se_sess);
279 target_wait_for_sess_cmds(sess->se_sess);
280 ft_sess_put(sess);
281}
282
263/* 283/*
264 * Delete all sessions from tport. 284 * Delete all sessions from tport.
265 * Caller holds ft_lport_lock. 285 * Caller holds ft_lport_lock.
@@ -273,8 +293,7 @@ static void ft_sess_delete_all(struct ft_tport *tport)
273 head < &tport->hash[FT_SESS_HASH_SIZE]; head++) { 293 head < &tport->hash[FT_SESS_HASH_SIZE]; head++) {
274 hlist_for_each_entry_rcu(sess, head, hash) { 294 hlist_for_each_entry_rcu(sess, head, hash) {
275 ft_sess_unhash(sess); 295 ft_sess_unhash(sess);
276 transport_deregister_session_configfs(sess->se_sess); 296 ft_close_sess(sess); /* release from table */
277 ft_sess_put(sess); /* release from table */
278 } 297 }
279 } 298 }
280} 299}
@@ -313,8 +332,7 @@ void ft_sess_close(struct se_session *se_sess)
313 pr_debug("port_id %x\n", port_id); 332 pr_debug("port_id %x\n", port_id);
314 ft_sess_unhash(sess); 333 ft_sess_unhash(sess);
315 mutex_unlock(&ft_lport_lock); 334 mutex_unlock(&ft_lport_lock);
316 transport_deregister_session_configfs(se_sess); 335 ft_close_sess(sess);
317 ft_sess_put(sess);
318 /* XXX Send LOGO or PRLO */ 336 /* XXX Send LOGO or PRLO */
319 synchronize_rcu(); /* let transport deregister happen */ 337 synchronize_rcu(); /* let transport deregister happen */
320} 338}
@@ -343,17 +361,12 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
343{ 361{
344 struct ft_tport *tport; 362 struct ft_tport *tport;
345 struct ft_sess *sess; 363 struct ft_sess *sess;
346 struct ft_node_acl *acl;
347 u32 fcp_parm; 364 u32 fcp_parm;
348 365
349 tport = ft_tport_get(rdata->local_port); 366 tport = ft_tport_get(rdata->local_port);
350 if (!tport) 367 if (!tport)
351 goto not_target; /* not a target for this local port */ 368 goto not_target; /* not a target for this local port */
352 369
353 acl = ft_acl_get(tport->tpg, rdata);
354 if (!acl)
355 goto not_target; /* no target for this remote */
356
357 if (!rspp) 370 if (!rspp)
358 goto fill; 371 goto fill;
359 372
@@ -375,7 +388,7 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
375 spp->spp_flags |= FC_SPP_EST_IMG_PAIR; 388 spp->spp_flags |= FC_SPP_EST_IMG_PAIR;
376 if (!(fcp_parm & FCP_SPPF_INIT_FCN)) 389 if (!(fcp_parm & FCP_SPPF_INIT_FCN))
377 return FC_SPP_RESP_CONF; 390 return FC_SPP_RESP_CONF;
378 sess = ft_sess_create(tport, rdata->ids.port_id, acl); 391 sess = ft_sess_create(tport, rdata->ids.port_id, rdata);
379 if (!sess) 392 if (!sess)
380 return FC_SPP_RESP_RES; 393 return FC_SPP_RESP_RES;
381 if (!sess->params) 394 if (!sess->params)
@@ -460,8 +473,7 @@ static void ft_prlo(struct fc_rport_priv *rdata)
460 return; 473 return;
461 } 474 }
462 mutex_unlock(&ft_lport_lock); 475 mutex_unlock(&ft_lport_lock);
463 transport_deregister_session_configfs(sess->se_sess); 476 ft_close_sess(sess); /* release from table */
464 ft_sess_put(sess); /* release from table */
465 rdata->prli_count--; 477 rdata->prli_count--;
466 /* XXX TBD - clearing actions. unit attn, see 4.10 */ 478 /* XXX TBD - clearing actions. unit attn, see 4.10 */
467} 479}