-rwxr-xr-x  Documentation/target/tcm_mod_builder.py |  18
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c |  99
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.h |   6
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c |  10
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c |  56
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.h |   2
-rw-r--r--  drivers/target/iscsi/iscsi_target.c |  90
-rw-r--r--  drivers/target/iscsi/iscsi_target_auth.c |   7
-rw-r--r--  drivers/target/iscsi/iscsi_target_configfs.c |  86
-rw-r--r--  drivers/target/iscsi/iscsi_target_core.h |  34
-rw-r--r--  drivers/target/iscsi/iscsi_target_device.c |   6
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl0.c |  12
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.c |  17
-rw-r--r--  drivers/target/iscsi/iscsi_target_nego.c |  10
-rw-r--r--  drivers/target/iscsi/iscsi_target_nodeattrib.c |   5
-rw-r--r--  drivers/target/iscsi/iscsi_target_nodeattrib.h |   3
-rw-r--r--  drivers/target/iscsi/iscsi_target_stat.c |  22
-rw-r--r--  drivers/target/iscsi/iscsi_target_tpg.c |  42
-rw-r--r--  drivers/target/iscsi/iscsi_target_tpg.h |   2
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.c |  20
-rw-r--r--  drivers/target/loopback/tcm_loop.c | 242
-rw-r--r--  drivers/target/loopback/tcm_loop.h |   6
-rw-r--r--  drivers/target/sbp/sbp_target.c |  18
-rw-r--r--  drivers/target/target_core_alua.c | 150
-rw-r--r--  drivers/target/target_core_alua.h |  33
-rw-r--r--  drivers/target/target_core_configfs.c | 123
-rw-r--r--  drivers/target/target_core_device.c |  35
-rw-r--r--  drivers/target/target_core_fabric_configfs.c |  38
-rw-r--r--  drivers/target/target_core_file.c |   2
-rw-r--r--  drivers/target/target_core_iblock.c |  43
-rw-r--r--  drivers/target/target_core_internal.h |   4
-rw-r--r--  drivers/target/target_core_pr.c |  24
-rw-r--r--  drivers/target/target_core_rd.c |   1
-rw-r--r--  drivers/target/target_core_sbc.c |  12
-rw-r--r--  drivers/target/target_core_spc.c |  17
-rw-r--r--  drivers/target/target_core_stat.c |  16
-rw-r--r--  drivers/target/target_core_tmr.c |   4
-rw-r--r--  drivers/target/target_core_tpg.c |  41
-rw-r--r--  drivers/target/target_core_transport.c | 244
-rw-r--r--  drivers/target/target_core_ua.h |   2
-rw-r--r--  drivers/target/target_core_xcopy.c |  19
-rw-r--r--  drivers/target/tcm_fc/tcm_fc.h |   1
-rw-r--r--  drivers/target/tcm_fc/tfc_cmd.c |  18
-rw-r--r--  drivers/target/tcm_fc/tfc_conf.c |  18
-rw-r--r--  drivers/target/tcm_fc/tfc_sess.c |   3
-rw-r--r--  drivers/usb/gadget/tcm_usb_gadget.c |  18
-rw-r--r--  drivers/vhost/scsi.c |  18
-rw-r--r--  include/target/target_core_backend.h |   5
-rw-r--r--  include/target/target_core_base.h |  84
-rw-r--r--  include/target/target_core_configfs.h |   1
-rw-r--r--  include/target/target_core_fabric.h |   2
-rw-r--r--  lib/Makefile |   4
-rw-r--r--  lib/percpu_ida.c |   5
53 files changed, 1009 insertions(+), 789 deletions(-)
diff --git a/Documentation/target/tcm_mod_builder.py b/Documentation/target/tcm_mod_builder.py
index 54d29c1320ed..230ce71f4d75 100755
--- a/Documentation/target/tcm_mod_builder.py
+++ b/Documentation/target/tcm_mod_builder.py
@@ -440,15 +440,15 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
440 buf += " /*\n" 440 buf += " /*\n"
441 buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n" 441 buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
442 buf += " */\n" 442 buf += " */\n"
443 buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n" 443 buf += " fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
444 buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n" 444 buf += " fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL;\n"
445 buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n" 445 buf += " fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
446 buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n" 446 buf += " fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;\n"
447 buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n" 447 buf += " fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
448 buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n" 448 buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
449 buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n" 449 buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
450 buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n" 450 buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
451 buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n" 451 buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
452 buf += " /*\n" 452 buf += " /*\n"
453 buf += " * Register the fabric for use within TCM\n" 453 buf += " * Register the fabric for use within TCM\n"
454 buf += " */\n" 454 buf += " */\n"
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 6df23502059a..6be57c38638d 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -22,6 +22,7 @@
 #include <linux/socket.h>
 #include <linux/in.h>
 #include <linux/in6.h>
+#include <linux/llist.h>
 #include <rdma/ib_verbs.h>
 #include <rdma/rdma_cm.h>
 #include <target/target_core_base.h>
@@ -489,6 +490,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	kref_init(&isert_conn->conn_kref);
 	kref_get(&isert_conn->conn_kref);
 	mutex_init(&isert_conn->conn_mutex);
+	mutex_init(&isert_conn->conn_comp_mutex);
 	spin_lock_init(&isert_conn->conn_lock);
 
 	cma_id->context = isert_conn;
@@ -843,14 +845,32 @@ isert_init_tx_hdrs(struct isert_conn *isert_conn,
 }
 
 static void
-isert_init_send_wr(struct isert_cmd *isert_cmd, struct ib_send_wr *send_wr)
+isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
+		   struct ib_send_wr *send_wr, bool coalesce)
 {
+	struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;
+
 	isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
 	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
 	send_wr->opcode = IB_WR_SEND;
-	send_wr->send_flags = IB_SEND_SIGNALED;
-	send_wr->sg_list = &isert_cmd->tx_desc.tx_sg[0];
+	send_wr->sg_list = &tx_desc->tx_sg[0];
 	send_wr->num_sge = isert_cmd->tx_desc.num_sge;
+	/*
+	 * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED
+	 * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls.
+	 */
+	mutex_lock(&isert_conn->conn_comp_mutex);
+	if (coalesce &&
+	    ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
+		llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
+		mutex_unlock(&isert_conn->conn_comp_mutex);
+		return;
+	}
+	isert_conn->conn_comp_batch = 0;
+	tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist);
+	mutex_unlock(&isert_conn->conn_comp_mutex);
+
+	send_wr->send_flags = IB_SEND_SIGNALED;
 }
 
 static int
@@ -1582,8 +1602,8 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
 }
 
 static void
-isert_send_completion(struct iser_tx_desc *tx_desc,
-		      struct isert_conn *isert_conn)
+__isert_send_completion(struct iser_tx_desc *tx_desc,
+			struct isert_conn *isert_conn)
 {
 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
 	struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
@@ -1624,6 +1644,24 @@ isert_send_completion(struct iser_tx_desc *tx_desc,
 }
 
 static void
+isert_send_completion(struct iser_tx_desc *tx_desc,
+		      struct isert_conn *isert_conn)
+{
+	struct llist_node *llnode = tx_desc->comp_llnode_batch;
+	struct iser_tx_desc *t;
+	/*
+	 * Drain coalesced completion llist starting from comp_llnode_batch
+	 * setup in isert_init_send_wr(), and then complete trailing tx_desc.
+	 */
+	while (llnode) {
+		t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
+		llnode = llist_next(llnode);
+		__isert_send_completion(t, isert_conn);
+	}
+	__isert_send_completion(tx_desc, isert_conn);
+}
+
+static void
 isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
 {
 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
@@ -1793,7 +1831,7 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 		isert_cmd->tx_desc.num_sge = 2;
 	}
 
-	isert_init_send_wr(isert_cmd, send_wr);
+	isert_init_send_wr(isert_conn, isert_cmd, send_wr, true);
 
 	pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
 
@@ -1813,7 +1851,7 @@ isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
 				 &isert_cmd->tx_desc.iscsi_header,
 				 nopout_response);
 	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
-	isert_init_send_wr(isert_cmd, send_wr);
+	isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
 
 	pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
 
@@ -1831,7 +1869,7 @@ isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 	iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
 				&isert_cmd->tx_desc.iscsi_header);
 	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
-	isert_init_send_wr(isert_cmd, send_wr);
+	isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
 
 	pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
 
@@ -1849,7 +1887,7 @@ isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 	iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
 				  &isert_cmd->tx_desc.iscsi_header);
 	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
-	isert_init_send_wr(isert_cmd, send_wr);
+	isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
 
 	pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
 
@@ -1881,7 +1919,7 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 	tx_dsg->lkey = isert_conn->conn_mr->lkey;
 	isert_cmd->tx_desc.num_sge = 2;
 
-	isert_init_send_wr(isert_cmd, send_wr);
+	isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
 
 	pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
 
@@ -1921,7 +1959,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 		tx_dsg->lkey = isert_conn->conn_mr->lkey;
 		isert_cmd->tx_desc.num_sge = 2;
 	}
-	isert_init_send_wr(isert_cmd, send_wr);
+	isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
 
 	pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
 
@@ -1991,8 +2029,6 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 
 	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
 		data_left = se_cmd->data_length;
-		iscsit_increment_maxcmdsn(cmd, conn->sess);
-		cmd->stat_sn = conn->stat_sn++;
 	} else {
 		sg_off = cmd->write_data_done / PAGE_SIZE;
 		data_left = se_cmd->data_length - cmd->write_data_done;
@@ -2204,8 +2240,6 @@ isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 
 	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
 		data_left = se_cmd->data_length;
-		iscsit_increment_maxcmdsn(cmd, conn->sess);
-		cmd->stat_sn = conn->stat_sn++;
 	} else {
 		sg_off = cmd->write_data_done / PAGE_SIZE;
 		data_left = se_cmd->data_length - cmd->write_data_done;
@@ -2259,18 +2293,26 @@ isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	data_len = min(data_left, rdma_write_max);
 	wr->cur_rdma_length = data_len;
 
-	spin_lock_irqsave(&isert_conn->conn_lock, flags);
-	fr_desc = list_first_entry(&isert_conn->conn_frwr_pool,
-				   struct fast_reg_descriptor, list);
-	list_del(&fr_desc->list);
-	spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
-	wr->fr_desc = fr_desc;
+	/* if there is a single dma entry, dma mr is sufficient */
+	if (count == 1) {
+		ib_sge->addr = ib_sg_dma_address(ib_dev, &sg_start[0]);
+		ib_sge->length = ib_sg_dma_len(ib_dev, &sg_start[0]);
+		ib_sge->lkey = isert_conn->conn_mr->lkey;
+		wr->fr_desc = NULL;
+	} else {
+		spin_lock_irqsave(&isert_conn->conn_lock, flags);
+		fr_desc = list_first_entry(&isert_conn->conn_frwr_pool,
+					   struct fast_reg_descriptor, list);
+		list_del(&fr_desc->list);
+		spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
+		wr->fr_desc = fr_desc;
 
-	ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn,
-				ib_sge, offset, data_len);
-	if (ret) {
-		list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
-		goto unmap_sg;
+		ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn,
+					ib_sge, offset, data_len);
+		if (ret) {
+			list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
+			goto unmap_sg;
+		}
 	}
 
 	return 0;
@@ -2306,10 +2348,11 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 	 * Build isert_conn->tx_desc for iSCSI response PDU and attach
 	 */
 	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
-	iscsit_build_rsp_pdu(cmd, conn, false, (struct iscsi_scsi_rsp *)
+	iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
 			     &isert_cmd->tx_desc.iscsi_header);
 	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
-	isert_init_send_wr(isert_cmd, &isert_cmd->tx_desc.send_wr);
+	isert_init_send_wr(isert_conn, isert_cmd,
+			   &isert_cmd->tx_desc.send_wr, true);
 
 	atomic_inc(&isert_conn->post_send_buf_count);
 
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 631f2090f0b8..691f90ff2d83 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -43,6 +43,8 @@ struct iser_tx_desc {
 	struct ib_sge	tx_sg[2];
 	int		num_sge;
 	struct isert_cmd *isert_cmd;
+	struct llist_node *comp_llnode_batch;
+	struct llist_node comp_llnode;
 	struct ib_send_wr send_wr;
 } __packed;
 
@@ -121,6 +123,10 @@ struct isert_conn {
 	int			conn_frwr_pool_size;
 	/* lock to protect frwr_pool */
 	spinlock_t		conn_lock;
+#define ISERT_COMP_BATCH_COUNT	8
+	int			conn_comp_batch;
+	struct llist_head	conn_comp_llist;
+	struct mutex		conn_comp_mutex;
 };
 
 #define ISERT_MAX_CQ 64
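The ib_isert.c and ib_isert.h hunks above coalesce send completions: unsignalled sends are parked on a lock-free llist, and only every ISERT_COMP_BATCH_COUNT-th post requests IB_SEND_SIGNALED; the completion handler for that signalled send then drains the parked batch. A minimal single-threaded user-space sketch of the same batching idea follows, with a plain pointer list standing in for the kernel llist API and illustrative names (tx_desc, conn, BATCH_COUNT, submit, complete_signalled) that are not the driver's:

    #include <stdio.h>
    #include <stdbool.h>

    #define BATCH_COUNT 8			/* plays the role of ISERT_COMP_BATCH_COUNT */

    struct tx_desc {
    	int id;
    	struct tx_desc *next;		/* stands in for comp_llnode */
    	struct tx_desc *batch;		/* stands in for comp_llnode_batch */
    };

    struct conn {
    	int comp_batch;			/* stands in for conn_comp_batch */
    	struct tx_desc *comp_list;	/* stands in for conn_comp_llist */
    };

    /* Returns true when this post should carry the signalled-completion flag. */
    static bool submit(struct conn *c, struct tx_desc *d, bool coalesce)
    {
    	if (coalesce && ++c->comp_batch < BATCH_COUNT) {
    		d->next = c->comp_list;		/* like llist_add() */
    		c->comp_list = d;
    		return false;			/* posted unsignalled */
    	}
    	c->comp_batch = 0;
    	d->batch = c->comp_list;		/* like llist_del_all() */
    	c->comp_list = NULL;
    	return true;				/* posted signalled */
    }

    /* On the signalled completion, finish the batch that rode along with it. */
    static void complete_signalled(struct tx_desc *d)
    {
    	for (struct tx_desc *t = d->batch; t; t = t->next)
    		printf("completed batched desc %d\n", t->id);
    	printf("completed signalled desc %d\n", d->id);
    }

    int main(void)
    {
    	struct conn c = { 0, NULL };
    	struct tx_desc d[20];

    	for (int i = 0; i < 20; i++) {
    		d[i] = (struct tx_desc){ .id = i };
    		if (submit(&c, &d[i], true))
    			complete_signalled(&d[i]);
    	}
    	/* Descriptors 16..19 stay parked until a later signalled send,
    	 * just as batched tx_descs do in the driver. */
    	return 0;
    }
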
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 6c923c7039a1..520a7e5a490b 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1352,11 +1352,8 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
 
 		/* XXX(hch): this is a horrible layering violation.. */
 		spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
-		ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
 		ioctx->cmd.transport_state &= ~CMD_T_ACTIVE;
 		spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
-
-		complete(&ioctx->cmd.transport_lun_stop_comp);
 		break;
 	case SRPT_STATE_CMD_RSP_SENT:
 		/*
@@ -1364,9 +1361,6 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
 		 * not been received in time.
 		 */
 		srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
-		spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
-		ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
-		spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
 		target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
 		break;
 	case SRPT_STATE_MGMT_RSP_SENT:
@@ -1476,7 +1470,6 @@ static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
 {
 	struct se_cmd *cmd;
 	enum srpt_command_state state;
-	unsigned long flags;
 
 	cmd = &ioctx->cmd;
 	state = srpt_get_cmd_state(ioctx);
@@ -1496,9 +1489,6 @@ static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
 			__func__, __LINE__, state);
 		break;
 	case SRPT_RDMA_WRITE_LAST:
-		spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
-		ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
-		spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
 		break;
 	default:
 		printk(KERN_ERR "%s[%d]: opcode = %u\n", __func__,
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index f85b9e5c1f05..7eb19be35d46 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -330,7 +330,7 @@ static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg)
 	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
 				struct tcm_qla2xxx_tpg, se_tpg);
 
-	return QLA_TPG_ATTRIB(tpg)->generate_node_acls;
+	return tpg->tpg_attrib.generate_node_acls;
 }
 
 static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg)
@@ -338,7 +338,7 @@ static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg)
 	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
 				struct tcm_qla2xxx_tpg, se_tpg);
 
-	return QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls;
+	return tpg->tpg_attrib.cache_dynamic_acls;
 }
 
 static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg)
@@ -346,7 +346,7 @@ static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg)
 	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
 				struct tcm_qla2xxx_tpg, se_tpg);
 
-	return QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect;
+	return tpg->tpg_attrib.demo_mode_write_protect;
 }
 
 static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg)
@@ -354,7 +354,7 @@ static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg)
 	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
 				struct tcm_qla2xxx_tpg, se_tpg);
 
-	return QLA_TPG_ATTRIB(tpg)->prod_mode_write_protect;
+	return tpg->tpg_attrib.prod_mode_write_protect;
 }
 
 static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg)
@@ -362,7 +362,7 @@ static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg
 	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
 				struct tcm_qla2xxx_tpg, se_tpg);
 
-	return QLA_TPG_ATTRIB(tpg)->demo_mode_login_only;
+	return tpg->tpg_attrib.demo_mode_login_only;
 }
 
 static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl(
@@ -847,7 +847,7 @@ static ssize_t tcm_qla2xxx_tpg_attrib_show_##name( \
 	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,		\
 			struct tcm_qla2xxx_tpg, se_tpg);		\
 									\
-	return sprintf(page, "%u\n", QLA_TPG_ATTRIB(tpg)->name);	\
+	return sprintf(page, "%u\n", tpg->tpg_attrib.name);		\
 }									\
 									\
 static ssize_t tcm_qla2xxx_tpg_attrib_store_##name(			\
@@ -1027,10 +1027,10 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg(
 	 * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic
 	 * NodeACLs
 	 */
-	QLA_TPG_ATTRIB(tpg)->generate_node_acls = 1;
-	QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect = 1;
-	QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls = 1;
-	QLA_TPG_ATTRIB(tpg)->demo_mode_login_only = 1;
+	tpg->tpg_attrib.generate_node_acls = 1;
+	tpg->tpg_attrib.demo_mode_write_protect = 1;
+	tpg->tpg_attrib.cache_dynamic_acls = 1;
+	tpg->tpg_attrib.demo_mode_login_only = 1;
 
 	ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn,
 				&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
@@ -1830,16 +1830,16 @@ static int tcm_qla2xxx_register_configfs(void)
 	/*
 	 * Setup default attribute lists for various fabric->tf_cit_tmpl
 	 */
-	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
-	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs;
-	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs =
+	fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
+	fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs;
+	fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs =
 						tcm_qla2xxx_tpg_attrib_attrs;
-	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
-	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
-	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
-	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
-	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
-	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+	fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
+	fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
+	fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+	fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+	fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+	fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
 	/*
 	 * Register the fabric for use within TCM
 	 */
@@ -1870,15 +1870,15 @@ static int tcm_qla2xxx_register_configfs(void)
 	/*
 	 * Setup default attribute lists for various npiv_fabric->tf_cit_tmpl
 	 */
-	TF_CIT_TMPL(npiv_fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
-	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_base_cit.ct_attrs = NULL;
-	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
-	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
-	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
-	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
-	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
-	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
-	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+	npiv_fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
+	npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL;
+	npiv_fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
+	npiv_fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
+	npiv_fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
+	npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+	npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+	npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+	npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
 	/*
 	 * Register the npiv_fabric for use within TCM
 	 */
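The tcm_qla2xxx hunks above replace the QLA_TPG_ATTRIB() helper (and the TF_CIT_TMPL() helper below) with direct member access: tpg_attrib is a struct embedded in tcm_qla2xxx_tpg, so a macro returning its address only obscures an ordinary field reference. A tiny stand-alone illustration of the before/after spelling, with the struct layout reduced to one field for brevity:

    #include <stdio.h>

    struct tpg_attrib { unsigned int generate_node_acls; };
    struct tcm_tpg   { struct tpg_attrib tpg_attrib; };

    /* The pointer-returning accessor style being removed by the patch. */
    #define QLA_TPG_ATTRIB(tpg)	(&(tpg)->tpg_attrib)

    int main(void)
    {
    	struct tcm_tpg tpg = { .tpg_attrib = { .generate_node_acls = 1 } };

    	/* before: go through the macro */
    	printf("%u\n", QLA_TPG_ATTRIB(&tpg)->generate_node_acls);
    	/* after: plain member access, as in the new code */
    	printf("%u\n", tpg.tpg_attrib.generate_node_acls);
    	return 0;
    }
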
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index 329327528a55..771f7b816443 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -45,8 +45,6 @@ struct tcm_qla2xxx_tpg {
 	struct se_portal_group se_tpg;
 };
 
-#define QLA_TPG_ATTRIB(tpg)	(&(tpg)->tpg_attrib)
-
 struct tcm_qla2xxx_fc_loopid {
 	struct se_node_acl *se_nacl;
 };
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 38e44b9abf0f..d70e9119e906 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -805,14 +805,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	int iscsi_task_attr;
 	int sam_task_attr;
 
-	spin_lock_bh(&conn->sess->session_stats_lock);
-	conn->sess->cmd_pdus++;
-	if (conn->sess->se_sess->se_node_acl) {
-		spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
-		conn->sess->se_sess->se_node_acl->num_cmds++;
-		spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
-	}
-	spin_unlock_bh(&conn->sess->session_stats_lock);
+	atomic_long_inc(&conn->sess->cmd_pdus);
 
 	hdr			= (struct iscsi_scsi_req *) buf;
 	payload_length		= ntoh24(hdr->dlength);
@@ -1254,20 +1247,12 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
 	int rc;
 
 	if (!payload_length) {
-		pr_err("DataOUT payload is ZERO, protocol error.\n");
-		return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
-					 buf);
+		pr_warn("DataOUT payload is ZERO, ignoring.\n");
+		return 0;
 	}
 
 	/* iSCSI write */
-	spin_lock_bh(&conn->sess->session_stats_lock);
-	conn->sess->rx_data_octets += payload_length;
-	if (conn->sess->se_sess->se_node_acl) {
-		spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
-		conn->sess->se_sess->se_node_acl->write_bytes += payload_length;
-		spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
-	}
-	spin_unlock_bh(&conn->sess->session_stats_lock);
+	atomic_long_add(payload_length, &conn->sess->rx_data_octets);
 
 	if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
 		pr_err("DataSegmentLength: %u is greater than"
@@ -1486,7 +1471,7 @@ EXPORT_SYMBOL(iscsit_check_dataout_payload);
 
 static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
 {
-	struct iscsi_cmd *cmd;
+	struct iscsi_cmd *cmd = NULL;
 	struct iscsi_data *hdr = (struct iscsi_data *)buf;
 	int rc;
 	bool data_crc_failed = false;
@@ -1954,6 +1939,13 @@ iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 				    (unsigned char *)hdr);
 	}
 
+	if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL) ||
+	     (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)) {
+		pr_err("Multi sequence text commands currently not supported\n");
+		return iscsit_reject_cmd(cmd, ISCSI_REASON_CMD_NOT_SUPPORTED,
+					(unsigned char *)hdr);
+	}
+
 	pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x,"
 		" ExpStatSN: 0x%08x, Length: %u\n", hdr->itt, hdr->cmdsn,
 		hdr->exp_statsn, payload_length);
@@ -2630,14 +2622,7 @@ static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 		return -1;
 	}
 
-	spin_lock_bh(&conn->sess->session_stats_lock);
-	conn->sess->tx_data_octets += datain.length;
-	if (conn->sess->se_sess->se_node_acl) {
-		spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
-		conn->sess->se_sess->se_node_acl->read_bytes += datain.length;
-		spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
-	}
-	spin_unlock_bh(&conn->sess->session_stats_lock);
+	atomic_long_add(datain.length, &conn->sess->tx_data_octets);
 	/*
 	 * Special case for successfully execution w/ both DATAIN
 	 * and Sense Data.
@@ -3162,9 +3147,7 @@ void iscsit_build_rsp_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
 	if (inc_stat_sn)
 		cmd->stat_sn = conn->stat_sn++;
 
-	spin_lock_bh(&conn->sess->session_stats_lock);
-	conn->sess->rsp_pdus++;
-	spin_unlock_bh(&conn->sess->session_stats_lock);
+	atomic_long_inc(&conn->sess->rsp_pdus);
 
 	memset(hdr, 0, ISCSI_HDR_LEN);
 	hdr->opcode		= ISCSI_OP_SCSI_CMD_RSP;
@@ -3374,6 +3357,7 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
 	struct iscsi_tiqn *tiqn;
 	struct iscsi_tpg_np *tpg_np;
 	int buffer_len, end_of_buf = 0, len = 0, payload_len = 0;
+	int target_name_printed;
 	unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */
 	unsigned char *text_in = cmd->text_in_ptr, *text_ptr = NULL;
 
@@ -3411,19 +3395,23 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
 			continue;
 		}
 
-		len = sprintf(buf, "TargetName=%s", tiqn->tiqn);
-		len += 1;
-
-		if ((len + payload_len) > buffer_len) {
-			end_of_buf = 1;
-			goto eob;
-		}
-		memcpy(payload + payload_len, buf, len);
-		payload_len += len;
+		target_name_printed = 0;
 
 		spin_lock(&tiqn->tiqn_tpg_lock);
 		list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
 
+			/* If demo_mode_discovery=0 and generate_node_acls=0
+			 * (demo mode dislabed) do not return
+			 * TargetName+TargetAddress unless a NodeACL exists.
+			 */
+
+			if ((tpg->tpg_attrib.generate_node_acls == 0) &&
+			    (tpg->tpg_attrib.demo_mode_discovery == 0) &&
+			    (!core_tpg_get_initiator_node_acl(&tpg->tpg_se_tpg,
+				cmd->conn->sess->sess_ops->InitiatorName))) {
+				continue;
+			}
+
 			spin_lock(&tpg->tpg_state_lock);
 			if ((tpg->tpg_state == TPG_STATE_FREE) ||
 			    (tpg->tpg_state == TPG_STATE_INACTIVE)) {
@@ -3438,6 +3426,22 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
 				struct iscsi_np *np = tpg_np->tpg_np;
 				bool inaddr_any = iscsit_check_inaddr_any(np);
 
+				if (!target_name_printed) {
+					len = sprintf(buf, "TargetName=%s",
+						      tiqn->tiqn);
+					len += 1;
+
+					if ((len + payload_len) > buffer_len) {
+						spin_unlock(&tpg->tpg_np_lock);
+						spin_unlock(&tiqn->tiqn_tpg_lock);
+						end_of_buf = 1;
+						goto eob;
+					}
+					memcpy(payload + payload_len, buf, len);
+					payload_len += len;
+					target_name_printed = 1;
+				}
+
 				len = sprintf(buf, "TargetAddress="
 					"%s:%hu,%hu",
 					(inaddr_any == false) ?
@@ -4092,9 +4096,7 @@ restart:
 		 * hit default in the switch below.
 		 */
 		memset(buffer, 0xff, ISCSI_HDR_LEN);
-		spin_lock_bh(&conn->sess->session_stats_lock);
-		conn->sess->conn_digest_errors++;
-		spin_unlock_bh(&conn->sess->session_stats_lock);
+		atomic_long_inc(&conn->sess->conn_digest_errors);
 	} else {
 		pr_debug("Got HeaderDigest CRC32C"
 				" 0x%08x\n", checksum);
@@ -4381,7 +4383,7 @@ int iscsit_close_connection(
 
 int iscsit_close_session(struct iscsi_session *sess)
 {
-	struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+	struct iscsi_portal_group *tpg = sess->tpg;
 	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
 
 	if (atomic_read(&sess->nconn)) {
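The iscsi_target.c hunks above retire session_stats_lock (and the nested per-node-ACL stats_lock updates) and keep the session counters as atomic_long_t, bumped with single atomic_long_inc()/atomic_long_add() calls. A rough user-space analogue of that pattern using C11 atomics; the struct and field names here are illustrative, not the kernel's:

    #include <stdatomic.h>
    #include <stdio.h>

    struct sess_stats {
    	atomic_long cmd_pdus;		/* analogue of atomic_long_t cmd_pdus */
    	atomic_long rx_data_octets;	/* analogue of atomic_long_t rx_data_octets */
    };

    static void account_cmd(struct sess_stats *s, long payload_len)
    {
    	/* No lock needed: each counter is a single atomic read-modify-write,
    	 * mirroring atomic_long_inc()/atomic_long_add() in the patch. */
    	atomic_fetch_add(&s->cmd_pdus, 1);
    	atomic_fetch_add(&s->rx_data_octets, payload_len);
    }

    int main(void)
    {
    	struct sess_stats s;

    	atomic_init(&s.cmd_pdus, 0);
    	atomic_init(&s.rx_data_octets, 0);
    	account_cmd(&s, 4096);
    	printf("cmd_pdus=%ld rx_data_octets=%ld\n",
    	       atomic_load(&s.cmd_pdus), atomic_load(&s.rx_data_octets));
    	return 0;
    }
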
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 7505fddca15f..de77d9aa22c6 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -111,7 +111,7 @@ static struct iscsi_chap *chap_server_open(
 	/*
 	 * Set Identifier.
 	 */
-	chap->id = ISCSI_TPG_C(conn)->tpg_chap_id++;
+	chap->id = conn->tpg->tpg_chap_id++;
 	*aic_len += sprintf(aic_str + *aic_len, "CHAP_I=%d", chap->id);
 	*aic_len += 1;
 	pr_debug("[server] Sending CHAP_I=%d\n", chap->id);
@@ -146,6 +146,7 @@ static int chap_server_compute_md5(
 	unsigned char client_digest[MD5_SIGNATURE_SIZE];
 	unsigned char server_digest[MD5_SIGNATURE_SIZE];
 	unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
+	size_t compare_len;
 	struct iscsi_chap *chap = conn->auth_protocol;
 	struct crypto_hash *tfm;
 	struct hash_desc desc;
@@ -184,7 +185,9 @@ static int chap_server_compute_md5(
 		goto out;
 	}
 
-	if (memcmp(chap_n, auth->userid, strlen(auth->userid)) != 0) {
+	/* Include the terminating NULL in the compare */
+	compare_len = strlen(auth->userid) + 1;
+	if (strncmp(chap_n, auth->userid, compare_len) != 0) {
 		pr_err("CHAP_N values do not match!\n");
 		goto out;
 	}
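The CHAP_N check above switches from memcmp() over strlen(auth->userid) bytes to strncmp() over strlen()+1 bytes, so the comparison covers the terminating NUL and a longer name that merely starts with the configured userid no longer matches. A small stand-alone demonstration; the userid values are illustrative only:

    #include <stdio.h>
    #include <string.h>

    /* Compare CHAP_N against the configured userid, including the trailing NUL. */
    static int chap_n_matches(const char *chap_n, const char *userid)
    {
    	size_t compare_len = strlen(userid) + 1;

    	return strncmp(chap_n, userid, compare_len) == 0;
    }

    int main(void)
    {
    	const char *userid = "chapuser";

    	/* The old memcmp(chap_n, userid, strlen(userid)) check compares only
    	 * 8 bytes here and would also have accepted "chapuser2". */
    	printf("%d\n", chap_n_matches("chapuser", userid));	/* prints 1 */
    	printf("%d\n", chap_n_matches("chapuser2", userid));	/* prints 0 */
    	return 0;
    }
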
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index fd145259361d..e3318edb233d 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -372,7 +372,7 @@ static ssize_t iscsi_nacl_attrib_show_##name( \
 	struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \
 					se_node_acl);			\
 									\
-	return sprintf(page, "%u\n", ISCSI_NODE_ATTRIB(nacl)->name);	\
+	return sprintf(page, "%u\n", nacl->node_attrib.name);		\
 }									\
 									\
 static ssize_t iscsi_nacl_attrib_store_##name(				\
@@ -897,7 +897,7 @@ static struct se_node_acl *lio_target_make_nodeacl(
 	if (!se_nacl_new)
 		return ERR_PTR(-ENOMEM);
 
-	cmdsn_depth = ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth;
+	cmdsn_depth = tpg->tpg_attrib.default_cmdsn_depth;
 	/*
 	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
 	 * when converting a NdoeACL from demo mode -> explict
@@ -920,9 +920,9 @@ static struct se_node_acl *lio_target_make_nodeacl(
 		return ERR_PTR(-ENOMEM);
 	}
 
-	stats_cg->default_groups[0] = &NODE_STAT_GRPS(acl)->iscsi_sess_stats_group;
+	stats_cg->default_groups[0] = &acl->node_stat_grps.iscsi_sess_stats_group;
 	stats_cg->default_groups[1] = NULL;
-	config_group_init_type_name(&NODE_STAT_GRPS(acl)->iscsi_sess_stats_group,
+	config_group_init_type_name(&acl->node_stat_grps.iscsi_sess_stats_group,
 			"iscsi_sess_stats", &iscsi_stat_sess_cit);
 
 	return se_nacl;
@@ -967,7 +967,7 @@ static ssize_t iscsi_tpg_attrib_show_##name( \
 	if (iscsit_get_tpg(tpg) < 0)					\
 		return -EINVAL;						\
 									\
-	rb = sprintf(page, "%u\n", ISCSI_TPG_ATTRIB(tpg)->name);	\
+	rb = sprintf(page, "%u\n", tpg->tpg_attrib.name);		\
 	iscsit_put_tpg(tpg);						\
 	return rb;							\
 }									\
@@ -1041,6 +1041,16 @@ TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR);
  */
 DEF_TPG_ATTRIB(prod_mode_write_protect);
 TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_demo_mode_discovery,
+ */
+DEF_TPG_ATTRIB(demo_mode_discovery);
+TPG_ATTR(demo_mode_discovery, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_default_erl
+ */
+DEF_TPG_ATTRIB(default_erl);
+TPG_ATTR(default_erl, S_IRUGO | S_IWUSR);
 
 static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
 	&iscsi_tpg_attrib_authentication.attr,
@@ -1051,6 +1061,8 @@ static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
 	&iscsi_tpg_attrib_cache_dynamic_acls.attr,
 	&iscsi_tpg_attrib_demo_mode_write_protect.attr,
 	&iscsi_tpg_attrib_prod_mode_write_protect.attr,
+	&iscsi_tpg_attrib_demo_mode_discovery.attr,
+	&iscsi_tpg_attrib_default_erl.attr,
 	NULL,
 };
 
@@ -1514,21 +1526,21 @@ static struct se_wwn *lio_target_call_coreaddtiqn(
 		return ERR_PTR(-ENOMEM);
 	}
 
-	stats_cg->default_groups[0] = &WWN_STAT_GRPS(tiqn)->iscsi_instance_group;
-	stats_cg->default_groups[1] = &WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group;
-	stats_cg->default_groups[2] = &WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group;
-	stats_cg->default_groups[3] = &WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group;
-	stats_cg->default_groups[4] = &WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group;
+	stats_cg->default_groups[0] = &tiqn->tiqn_stat_grps.iscsi_instance_group;
+	stats_cg->default_groups[1] = &tiqn->tiqn_stat_grps.iscsi_sess_err_group;
+	stats_cg->default_groups[2] = &tiqn->tiqn_stat_grps.iscsi_tgt_attr_group;
+	stats_cg->default_groups[3] = &tiqn->tiqn_stat_grps.iscsi_login_stats_group;
+	stats_cg->default_groups[4] = &tiqn->tiqn_stat_grps.iscsi_logout_stats_group;
 	stats_cg->default_groups[5] = NULL;
-	config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_instance_group,
+	config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_instance_group,
 			"iscsi_instance", &iscsi_stat_instance_cit);
-	config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group,
+	config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_sess_err_group,
 			"iscsi_sess_err", &iscsi_stat_sess_err_cit);
-	config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group,
+	config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_tgt_attr_group,
 			"iscsi_tgt_attr", &iscsi_stat_tgt_attr_cit);
-	config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group,
+	config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_login_stats_group,
 			"iscsi_login_stats", &iscsi_stat_login_cit);
-	config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group,
+	config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_logout_stats_group,
 			"iscsi_logout_stats", &iscsi_stat_logout_cit);
 
 	pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
@@ -1784,6 +1796,11 @@ static int lio_queue_status(struct se_cmd *se_cmd)
 	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
 
 	cmd->i_state = ISTATE_SEND_STATUS;
+
+	if (cmd->se_cmd.scsi_status || cmd->sense_reason) {
+		iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
+		return 0;
+	}
 	cmd->conn->conn_transport->iscsit_queue_status(cmd->conn, cmd);
 
 	return 0;
@@ -1815,21 +1832,21 @@ static u32 lio_tpg_get_default_depth(struct se_portal_group *se_tpg)
 {
 	struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
 
-	return ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth;
+	return tpg->tpg_attrib.default_cmdsn_depth;
 }
 
 static int lio_tpg_check_demo_mode(struct se_portal_group *se_tpg)
 {
 	struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
 
-	return ISCSI_TPG_ATTRIB(tpg)->generate_node_acls;
+	return tpg->tpg_attrib.generate_node_acls;
 }
 
 static int lio_tpg_check_demo_mode_cache(struct se_portal_group *se_tpg)
 {
 	struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
 
-	return ISCSI_TPG_ATTRIB(tpg)->cache_dynamic_acls;
+	return tpg->tpg_attrib.cache_dynamic_acls;
 }
 
 static int lio_tpg_check_demo_mode_write_protect(
@@ -1837,7 +1854,7 @@ static int lio_tpg_check_demo_mode_write_protect(
 {
 	struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
 
-	return ISCSI_TPG_ATTRIB(tpg)->demo_mode_write_protect;
+	return tpg->tpg_attrib.demo_mode_write_protect;
 }
 
 static int lio_tpg_check_prod_mode_write_protect(
@@ -1845,7 +1862,7 @@ static int lio_tpg_check_prod_mode_write_protect(
 {
 	struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
 
-	return ISCSI_TPG_ATTRIB(tpg)->prod_mode_write_protect;
+	return tpg->tpg_attrib.prod_mode_write_protect;
 }
 
 static void lio_tpg_release_fabric_acl(
@@ -1908,9 +1925,12 @@ static void lio_set_default_node_attributes(struct se_node_acl *se_acl)
 {
 	struct iscsi_node_acl *acl = container_of(se_acl, struct iscsi_node_acl,
 				se_node_acl);
+	struct se_portal_group *se_tpg = se_acl->se_tpg;
+	struct iscsi_portal_group *tpg = container_of(se_tpg,
+				struct iscsi_portal_group, tpg_se_tpg);
 
-	ISCSI_NODE_ATTRIB(acl)->nacl = acl;
-	iscsit_set_default_node_attribues(acl);
+	acl->node_attrib.nacl = acl;
+	iscsit_set_default_node_attribues(acl, tpg);
 }
 
 static int lio_check_stop_free(struct se_cmd *se_cmd)
@@ -1995,17 +2015,17 @@ int iscsi_target_register_configfs(void)
 	 * Setup default attribute lists for various fabric->tf_cit_tmpl
 	 * sturct config_item_type's
 	 */
-	TF_CIT_TMPL(fabric)->tfc_discovery_cit.ct_attrs = lio_target_discovery_auth_attrs;
-	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = lio_target_wwn_attrs;
-	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = lio_target_tpg_attrs;
-	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = lio_target_tpg_attrib_attrs;
-	TF_CIT_TMPL(fabric)->tfc_tpg_auth_cit.ct_attrs = lio_target_tpg_auth_attrs;
-	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = lio_target_tpg_param_attrs;
-	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = lio_target_portal_attrs;
-	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = lio_target_initiator_attrs;
-	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = lio_target_nacl_attrib_attrs;
-	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = lio_target_nacl_auth_attrs;
-	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = lio_target_nacl_param_attrs;
+	fabric->tf_cit_tmpl.tfc_discovery_cit.ct_attrs = lio_target_discovery_auth_attrs;
+	fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = lio_target_wwn_attrs;
+	fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = lio_target_tpg_attrs;
+	fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = lio_target_tpg_attrib_attrs;
+	fabric->tf_cit_tmpl.tfc_tpg_auth_cit.ct_attrs = lio_target_tpg_auth_attrs;
+	fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = lio_target_tpg_param_attrs;
+	fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = lio_target_portal_attrs;
+	fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = lio_target_initiator_attrs;
+	fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = lio_target_nacl_attrib_attrs;
+	fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = lio_target_nacl_auth_attrs;
+	fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = lio_target_nacl_param_attrs;
 
 	ret = target_fabric_configfs_register(fabric);
 	if (ret < 0) {
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 9a5721b8ff96..48f7b3bf4e8c 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -37,9 +37,6 @@
 #define NA_RANDOM_DATAIN_PDU_OFFSETS	0
 #define NA_RANDOM_DATAIN_SEQ_OFFSETS	0
 #define NA_RANDOM_R2T_OFFSETS		0
-#define NA_DEFAULT_ERL			0
-#define NA_DEFAULT_ERL_MAX		2
-#define NA_DEFAULT_ERL_MIN		0
 
 /* struct iscsi_tpg_attrib sanity values */
 #define TA_AUTHENTICATION		1
@@ -58,6 +55,8 @@
 #define TA_DEMO_MODE_WRITE_PROTECT	1
 /* Disabled by default in production mode w/ explict ACLs */
 #define TA_PROD_MODE_WRITE_PROTECT	0
+#define TA_DEMO_MODE_DISCOVERY		1
+#define TA_DEFAULT_ERL			0
 #define TA_CACHE_CORE_NPS		0
 
 
@@ -192,6 +191,7 @@ enum recover_cmdsn_ret_table {
 	CMDSN_NORMAL_OPERATION		= 0,
 	CMDSN_LOWER_THAN_EXP		= 1,
 	CMDSN_HIGHER_THAN_EXP		= 2,
+	CMDSN_MAXCMDSN_OVERRUN		= 3,
 };
 
 /* Used for iscsi_handle_immediate_data() return values */
@@ -650,14 +650,13 @@ struct iscsi_session {
 	/* Used for session reference counting */
 	int			session_usage_count;
 	int			session_waiting_on_uc;
-	u32			cmd_pdus;
-	u32			rsp_pdus;
-	u64			tx_data_octets;
-	u64			rx_data_octets;
-	u32			conn_digest_errors;
-	u32			conn_timeout_errors;
+	atomic_long_t		cmd_pdus;
+	atomic_long_t		rsp_pdus;
+	atomic_long_t		tx_data_octets;
+	atomic_long_t		rx_data_octets;
+	atomic_long_t		conn_digest_errors;
+	atomic_long_t		conn_timeout_errors;
 	u64			creation_time;
-	spinlock_t		session_stats_lock;
 	/* Number of active connections */
 	atomic_t		nconn;
 	atomic_t		session_continuation;
@@ -755,11 +754,6 @@ struct iscsi_node_acl {
 	struct se_node_acl	se_node_acl;
 };
 
-#define NODE_STAT_GRPS(nacl)	(&(nacl)->node_stat_grps)
-
-#define ISCSI_NODE_ATTRIB(t)	(&(t)->node_attrib)
-#define ISCSI_NODE_AUTH(t)	(&(t)->node_auth)
-
 struct iscsi_tpg_attrib {
 	u32			authentication;
 	u32			login_timeout;
@@ -769,6 +763,8 @@ struct iscsi_tpg_attrib {
 	u32			default_cmdsn_depth;
 	u32			demo_mode_write_protect;
 	u32			prod_mode_write_protect;
+	u32			demo_mode_discovery;
+	u32			default_erl;
 	struct iscsi_portal_group *tpg;
 };
 
@@ -835,12 +831,6 @@ struct iscsi_portal_group {
 	struct list_head	tpg_list;
 } ____cacheline_aligned;
 
-#define ISCSI_TPG_C(c)		((struct iscsi_portal_group *)(c)->tpg)
-#define ISCSI_TPG_LUN(c, l)	((iscsi_tpg_list_t *)(c)->tpg->tpg_lun_list_t[l])
-#define ISCSI_TPG_S(s)		((struct iscsi_portal_group *)(s)->tpg)
-#define ISCSI_TPG_ATTRIB(t)	(&(t)->tpg_attrib)
-#define SE_TPG(tpg)		(&(tpg)->tpg_se_tpg)
-
 struct iscsi_wwn_stat_grps {
 	struct config_group	iscsi_stat_group;
 	struct config_group	iscsi_instance_group;
@@ -871,8 +861,6 @@ struct iscsi_tiqn {
 	struct iscsi_logout_stats    logout_stats;
 } ____cacheline_aligned;
 
-#define WWN_STAT_GRPS(tiqn)	(&(tiqn)->tiqn_stat_grps)
-
 struct iscsit_global {
 	/* In core shutdown */
 	u32			in_shutdown;
diff --git a/drivers/target/iscsi/iscsi_target_device.c b/drivers/target/iscsi/iscsi_target_device.c
index 6c7a5104a4cd..7087c736daa5 100644
--- a/drivers/target/iscsi/iscsi_target_device.c
+++ b/drivers/target/iscsi/iscsi_target_device.c
@@ -58,11 +58,7 @@ void iscsit_increment_maxcmdsn(struct iscsi_cmd *cmd, struct iscsi_session *sess
 
 	cmd->maxcmdsn_inc = 1;
 
-	if (!mutex_trylock(&sess->cmdsn_mutex)) {
-		sess->max_cmd_sn += 1;
-		pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn);
-		return;
-	}
+	mutex_lock(&sess->cmdsn_mutex);
 	sess->max_cmd_sn += 1;
 	pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn);
 	mutex_unlock(&sess->cmdsn_mutex);
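The hunk above replaces the mutex_trylock() fast path, which bumped max_cmd_sn without holding cmdsn_mutex whenever the lock was contended, with an unconditional mutex_lock(), so the counter is only ever modified under the mutex. A user-space sketch of the resulting locking pattern, with a pthread mutex standing in for the kernel mutex and illustrative names:

    #include <pthread.h>
    #include <stdio.h>

    struct sess {
    	pthread_mutex_t cmdsn_mutex;
    	unsigned int max_cmd_sn;
    };

    static void increment_maxcmdsn(struct sess *sess)
    {
    	/* Always take the mutex; the removed trylock path modified
    	 * max_cmd_sn without holding it whenever the lock was busy. */
    	pthread_mutex_lock(&sess->cmdsn_mutex);
    	sess->max_cmd_sn += 1;
    	printf("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn);
    	pthread_mutex_unlock(&sess->cmdsn_mutex);
    }

    int main(void)
    {
    	struct sess s = { PTHREAD_MUTEX_INITIALIZER, 0 };

    	increment_maxcmdsn(&s);
    	return 0;
    }
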
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 41052e512d92..0d1e6ee3e992 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -757,7 +757,7 @@ int iscsit_check_post_dataout(
 static void iscsit_handle_time2retain_timeout(unsigned long data)
 {
 	struct iscsi_session *sess = (struct iscsi_session *) data;
-	struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+	struct iscsi_portal_group *tpg = sess->tpg;
 	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
 
 	spin_lock_bh(&se_tpg->session_lock);
@@ -785,7 +785,7 @@ static void iscsit_handle_time2retain_timeout(unsigned long data)
785 tiqn->sess_err_stats.last_sess_failure_type = 785 tiqn->sess_err_stats.last_sess_failure_type =
786 ISCSI_SESS_ERR_CXN_TIMEOUT; 786 ISCSI_SESS_ERR_CXN_TIMEOUT;
787 tiqn->sess_err_stats.cxn_timeout_errors++; 787 tiqn->sess_err_stats.cxn_timeout_errors++;
788 sess->conn_timeout_errors++; 788 atomic_long_inc(&sess->conn_timeout_errors);
789 spin_unlock(&tiqn->sess_err_stats.lock); 789 spin_unlock(&tiqn->sess_err_stats.lock);
790 } 790 }
791 } 791 }
@@ -801,9 +801,9 @@ void iscsit_start_time2retain_handler(struct iscsi_session *sess)
801 * Only start Time2Retain timer when the associated TPG is still in 801 * Only start Time2Retain timer when the associated TPG is still in
802 * an ACTIVE (eg: not disabled or shutdown) state. 802 * an ACTIVE (eg: not disabled or shutdown) state.
803 */ 803 */
804 spin_lock(&ISCSI_TPG_S(sess)->tpg_state_lock); 804 spin_lock(&sess->tpg->tpg_state_lock);
805 tpg_active = (ISCSI_TPG_S(sess)->tpg_state == TPG_STATE_ACTIVE); 805 tpg_active = (sess->tpg->tpg_state == TPG_STATE_ACTIVE);
806 spin_unlock(&ISCSI_TPG_S(sess)->tpg_state_lock); 806 spin_unlock(&sess->tpg->tpg_state_lock);
807 807
808 if (!tpg_active) 808 if (!tpg_active)
809 return; 809 return;
@@ -829,7 +829,7 @@ void iscsit_start_time2retain_handler(struct iscsi_session *sess)
829 */ 829 */
830int iscsit_stop_time2retain_timer(struct iscsi_session *sess) 830int iscsit_stop_time2retain_timer(struct iscsi_session *sess)
831{ 831{
832 struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess); 832 struct iscsi_portal_group *tpg = sess->tpg;
833 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; 833 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
834 834
835 if (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED) 835 if (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 1794c753954a..4eb93b2b6473 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -305,7 +305,6 @@ static int iscsi_login_zero_tsih_s1(
305 } 305 }
306 306
307 sess->creation_time = get_jiffies_64(); 307 sess->creation_time = get_jiffies_64();
308 spin_lock_init(&sess->session_stats_lock);
309 /* 308 /*
310 * The FFP CmdSN window values will be allocated from the TPG's 309 * The FFP CmdSN window values will be allocated from the TPG's
311 * Initiator Node's ACL once the login has been successfully completed. 310 * Initiator Node's ACL once the login has been successfully completed.
@@ -347,15 +346,15 @@ static int iscsi_login_zero_tsih_s2(
347 * Assign a new TPG Session Handle. Note this is protected with 346 * Assign a new TPG Session Handle. Note this is protected with
348 * struct iscsi_portal_group->np_login_sem from iscsit_access_np(). 347 * struct iscsi_portal_group->np_login_sem from iscsit_access_np().
349 */ 348 */
350 sess->tsih = ++ISCSI_TPG_S(sess)->ntsih; 349 sess->tsih = ++sess->tpg->ntsih;
351 if (!sess->tsih) 350 if (!sess->tsih)
352 sess->tsih = ++ISCSI_TPG_S(sess)->ntsih; 351 sess->tsih = ++sess->tpg->ntsih;
353 352
354 /* 353 /*
355 * Create the default params from user defined values.. 354 * Create the default params from user defined values..
356 */ 355 */
357 if (iscsi_copy_param_list(&conn->param_list, 356 if (iscsi_copy_param_list(&conn->param_list,
358 ISCSI_TPG_C(conn)->param_list, 1) < 0) { 357 conn->tpg->param_list, 1) < 0) {
359 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 358 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
360 ISCSI_LOGIN_STATUS_NO_RESOURCES); 359 ISCSI_LOGIN_STATUS_NO_RESOURCES);
361 return -1; 360 return -1;
@@ -380,7 +379,7 @@ static int iscsi_login_zero_tsih_s2(
380 * In our case, we have already located the struct iscsi_tiqn at this point. 379 * In our case, we have already located the struct iscsi_tiqn at this point.
381 */ 380 */
382 memset(buf, 0, 32); 381 memset(buf, 0, 32);
383 sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt); 382 sprintf(buf, "TargetPortalGroupTag=%hu", sess->tpg->tpgt);
384 if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { 383 if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
385 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 384 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
386 ISCSI_LOGIN_STATUS_NO_RESOURCES); 385 ISCSI_LOGIN_STATUS_NO_RESOURCES);
@@ -575,7 +574,7 @@ static int iscsi_login_non_zero_tsih_s2(
575 iscsi_login_set_conn_values(sess, conn, pdu->cid); 574 iscsi_login_set_conn_values(sess, conn, pdu->cid);
576 575
577 if (iscsi_copy_param_list(&conn->param_list, 576 if (iscsi_copy_param_list(&conn->param_list,
578 ISCSI_TPG_C(conn)->param_list, 0) < 0) { 577 conn->tpg->param_list, 0) < 0) {
579 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 578 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
580 ISCSI_LOGIN_STATUS_NO_RESOURCES); 579 ISCSI_LOGIN_STATUS_NO_RESOURCES);
581 return -1; 580 return -1;
@@ -593,7 +592,7 @@ static int iscsi_login_non_zero_tsih_s2(
593 * In our case, we have already located the struct iscsi_tiqn at this point. 592 * In our case, we have already located the struct iscsi_tiqn at this point.
594 */ 593 */
595 memset(buf, 0, 32); 594 memset(buf, 0, 32);
596 sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt); 595 sprintf(buf, "TargetPortalGroupTag=%hu", sess->tpg->tpgt);
597 if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { 596 if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
598 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 597 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
599 ISCSI_LOGIN_STATUS_NO_RESOURCES); 598 ISCSI_LOGIN_STATUS_NO_RESOURCES);
@@ -691,7 +690,7 @@ int iscsi_post_login_handler(
691 int stop_timer = 0; 690 int stop_timer = 0;
692 struct iscsi_session *sess = conn->sess; 691 struct iscsi_session *sess = conn->sess;
693 struct se_session *se_sess = sess->se_sess; 692 struct se_session *se_sess = sess->se_sess;
694 struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess); 693 struct iscsi_portal_group *tpg = sess->tpg;
695 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; 694 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
696 struct iscsi_thread_set *ts; 695 struct iscsi_thread_set *ts;
697 696
@@ -1154,7 +1153,7 @@ old_sess_out:
1154 spin_lock_bh(&conn->sess->conn_lock); 1153 spin_lock_bh(&conn->sess->conn_lock);
1155 if (conn->sess->session_state == TARG_SESS_STATE_FAILED) { 1154 if (conn->sess->session_state == TARG_SESS_STATE_FAILED) {
1156 struct se_portal_group *se_tpg = 1155 struct se_portal_group *se_tpg =
1157 &ISCSI_TPG_C(conn)->tpg_se_tpg; 1156 &conn->tpg->tpg_se_tpg;
1158 1157
1159 atomic_set(&conn->sess->session_continuation, 0); 1158 atomic_set(&conn->sess->session_continuation, 0);
1160 spin_unlock_bh(&conn->sess->conn_lock); 1159 spin_unlock_bh(&conn->sess->conn_lock);
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index ef6d836a4d09..83c965c65386 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -88,7 +88,7 @@ int extract_param(
88 if (len < 0) 88 if (len < 0)
89 return -1; 89 return -1;
90 90
91 if (len > max_length) { 91 if (len >= max_length) {
92 pr_err("Length of input: %d exceeds max_length:" 92 pr_err("Length of input: %d exceeds max_length:"
93 " %d\n", len, max_length); 93 " %d\n", len, max_length);
94 return -1; 94 return -1;
@@ -140,7 +140,7 @@ static u32 iscsi_handle_authentication(
140 iscsi_nacl = container_of(se_nacl, struct iscsi_node_acl, 140 iscsi_nacl = container_of(se_nacl, struct iscsi_node_acl,
141 se_node_acl); 141 se_node_acl);
142 142
143 auth = ISCSI_NODE_AUTH(iscsi_nacl); 143 auth = &iscsi_nacl->node_auth;
144 } 144 }
145 } else { 145 } else {
146 /* 146 /*
@@ -789,7 +789,7 @@ static int iscsi_target_handle_csg_zero(
789 return -1; 789 return -1;
790 790
791 if (!iscsi_check_negotiated_keys(conn->param_list)) { 791 if (!iscsi_check_negotiated_keys(conn->param_list)) {
792 if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication && 792 if (conn->tpg->tpg_attrib.authentication &&
793 !strncmp(param->value, NONE, 4)) { 793 !strncmp(param->value, NONE, 4)) {
794 pr_err("Initiator sent AuthMethod=None but" 794 pr_err("Initiator sent AuthMethod=None but"
795 " Target is enforcing iSCSI Authentication," 795 " Target is enforcing iSCSI Authentication,"
@@ -799,7 +799,7 @@ static int iscsi_target_handle_csg_zero(
799 return -1; 799 return -1;
800 } 800 }
801 801
802 if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication && 802 if (conn->tpg->tpg_attrib.authentication &&
803 !login->auth_complete) 803 !login->auth_complete)
804 return 0; 804 return 0;
805 805
@@ -862,7 +862,7 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log
862 } 862 }
863 863
864 if (!login->auth_complete && 864 if (!login->auth_complete &&
865 ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication) { 865 conn->tpg->tpg_attrib.authentication) {
866 pr_err("Initiator is requesting CSG: 1, has not been" 866 pr_err("Initiator is requesting CSG: 1, has not been"
867 " successfully authenticated, and the Target is" 867 " successfully authenticated, and the Target is"
868 " enforcing iSCSI Authentication, login failed.\n"); 868 " enforcing iSCSI Authentication, login failed.\n");
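Besides the macro-to-member conversions, the extract_param() hunk above tightens the bounds check from len > max_length to len >= max_length: the decoded value is copied into a caller-supplied buffer of max_length bytes and NUL-terminated, so a value of exactly max_length bytes would leave no room for the terminator. A reduced sketch of the kind of copy the check protects (buffer and pointer names are illustrative only):

	char out_buf[64];			/* max_length == sizeof(out_buf) */

	if (len < 0 || len >= sizeof(out_buf))	/* len == 64 is now rejected too */
		return -1;
	memcpy(out_buf, value_start, len);
	out_buf[len] = '\0';			/* terminating NUL always fits */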
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.c b/drivers/target/iscsi/iscsi_target_nodeattrib.c
index 93bdc475eb00..16454a922e2b 100644
--- a/drivers/target/iscsi/iscsi_target_nodeattrib.c
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.c
@@ -33,7 +33,8 @@ static inline char *iscsit_na_get_initiatorname(
33} 33}
34 34
35void iscsit_set_default_node_attribues( 35void iscsit_set_default_node_attribues(
36 struct iscsi_node_acl *acl) 36 struct iscsi_node_acl *acl,
37 struct iscsi_portal_group *tpg)
37{ 38{
38 struct iscsi_node_attrib *a = &acl->node_attrib; 39 struct iscsi_node_attrib *a = &acl->node_attrib;
39 40
@@ -44,7 +45,7 @@ void iscsit_set_default_node_attribues(
44 a->random_datain_pdu_offsets = NA_RANDOM_DATAIN_PDU_OFFSETS; 45 a->random_datain_pdu_offsets = NA_RANDOM_DATAIN_PDU_OFFSETS;
45 a->random_datain_seq_offsets = NA_RANDOM_DATAIN_SEQ_OFFSETS; 46 a->random_datain_seq_offsets = NA_RANDOM_DATAIN_SEQ_OFFSETS;
46 a->random_r2t_offsets = NA_RANDOM_R2T_OFFSETS; 47 a->random_r2t_offsets = NA_RANDOM_R2T_OFFSETS;
47 a->default_erl = NA_DEFAULT_ERL; 48 a->default_erl = tpg->tpg_attrib.default_erl;
48} 49}
49 50
50int iscsit_na_dataout_timeout( 51int iscsit_na_dataout_timeout(
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.h b/drivers/target/iscsi/iscsi_target_nodeattrib.h
index c970b326ef23..0c69a46a62ec 100644
--- a/drivers/target/iscsi/iscsi_target_nodeattrib.h
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.h
@@ -1,7 +1,8 @@
1#ifndef ISCSI_TARGET_NODEATTRIB_H 1#ifndef ISCSI_TARGET_NODEATTRIB_H
2#define ISCSI_TARGET_NODEATTRIB_H 2#define ISCSI_TARGET_NODEATTRIB_H
3 3
4extern void iscsit_set_default_node_attribues(struct iscsi_node_acl *); 4extern void iscsit_set_default_node_attribues(struct iscsi_node_acl *,
5 struct iscsi_portal_group *);
5extern int iscsit_na_dataout_timeout(struct iscsi_node_acl *, u32); 6extern int iscsit_na_dataout_timeout(struct iscsi_node_acl *, u32);
6extern int iscsit_na_dataout_timeout_retries(struct iscsi_node_acl *, u32); 7extern int iscsit_na_dataout_timeout_retries(struct iscsi_node_acl *, u32);
7extern int iscsit_na_nopin_timeout(struct iscsi_node_acl *, u32); 8extern int iscsit_na_nopin_timeout(struct iscsi_node_acl *, u32);
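iscsit_set_default_node_attribues() now takes the owning portal group, so a freshly created node ACL inherits its default ERL from the per-TPG default_erl attribute added in this series instead of the compile-time NA_DEFAULT_ERL. The callers are not part of the hunks shown; their expected shape is roughly:

	/* illustrative call-site shape; the real callers live elsewhere
	 * in the iscsi target code and are not shown in this patch view */
	iscsit_set_default_node_attribues(acl, tpg);
	/* => acl->node_attrib.default_erl == tpg->tpg_attrib.default_erl */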
diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c
index f788e8b5e855..103395510307 100644
--- a/drivers/target/iscsi/iscsi_target_stat.c
+++ b/drivers/target/iscsi/iscsi_target_stat.c
@@ -792,7 +792,8 @@ static ssize_t iscsi_stat_sess_show_attr_cmd_pdus(
792 if (se_sess) { 792 if (se_sess) {
793 sess = se_sess->fabric_sess_ptr; 793 sess = se_sess->fabric_sess_ptr;
794 if (sess) 794 if (sess)
795 ret = snprintf(page, PAGE_SIZE, "%u\n", sess->cmd_pdus); 795 ret = snprintf(page, PAGE_SIZE, "%lu\n",
796 atomic_long_read(&sess->cmd_pdus));
796 } 797 }
797 spin_unlock_bh(&se_nacl->nacl_sess_lock); 798 spin_unlock_bh(&se_nacl->nacl_sess_lock);
798 799
@@ -815,7 +816,8 @@ static ssize_t iscsi_stat_sess_show_attr_rsp_pdus(
815 if (se_sess) { 816 if (se_sess) {
816 sess = se_sess->fabric_sess_ptr; 817 sess = se_sess->fabric_sess_ptr;
817 if (sess) 818 if (sess)
818 ret = snprintf(page, PAGE_SIZE, "%u\n", sess->rsp_pdus); 819 ret = snprintf(page, PAGE_SIZE, "%lu\n",
820 atomic_long_read(&sess->rsp_pdus));
819 } 821 }
820 spin_unlock_bh(&se_nacl->nacl_sess_lock); 822 spin_unlock_bh(&se_nacl->nacl_sess_lock);
821 823
@@ -838,8 +840,8 @@ static ssize_t iscsi_stat_sess_show_attr_txdata_octs(
838 if (se_sess) { 840 if (se_sess) {
839 sess = se_sess->fabric_sess_ptr; 841 sess = se_sess->fabric_sess_ptr;
840 if (sess) 842 if (sess)
841 ret = snprintf(page, PAGE_SIZE, "%llu\n", 843 ret = snprintf(page, PAGE_SIZE, "%lu\n",
842 (unsigned long long)sess->tx_data_octets); 844 atomic_long_read(&sess->tx_data_octets));
843 } 845 }
844 spin_unlock_bh(&se_nacl->nacl_sess_lock); 846 spin_unlock_bh(&se_nacl->nacl_sess_lock);
845 847
@@ -862,8 +864,8 @@ static ssize_t iscsi_stat_sess_show_attr_rxdata_octs(
862 if (se_sess) { 864 if (se_sess) {
863 sess = se_sess->fabric_sess_ptr; 865 sess = se_sess->fabric_sess_ptr;
864 if (sess) 866 if (sess)
865 ret = snprintf(page, PAGE_SIZE, "%llu\n", 867 ret = snprintf(page, PAGE_SIZE, "%lu\n",
866 (unsigned long long)sess->rx_data_octets); 868 atomic_long_read(&sess->rx_data_octets));
867 } 869 }
868 spin_unlock_bh(&se_nacl->nacl_sess_lock); 870 spin_unlock_bh(&se_nacl->nacl_sess_lock);
869 871
@@ -886,8 +888,8 @@ static ssize_t iscsi_stat_sess_show_attr_conn_digest_errors(
886 if (se_sess) { 888 if (se_sess) {
887 sess = se_sess->fabric_sess_ptr; 889 sess = se_sess->fabric_sess_ptr;
888 if (sess) 890 if (sess)
889 ret = snprintf(page, PAGE_SIZE, "%u\n", 891 ret = snprintf(page, PAGE_SIZE, "%lu\n",
890 sess->conn_digest_errors); 892 atomic_long_read(&sess->conn_digest_errors));
891 } 893 }
892 spin_unlock_bh(&se_nacl->nacl_sess_lock); 894 spin_unlock_bh(&se_nacl->nacl_sess_lock);
893 895
@@ -910,8 +912,8 @@ static ssize_t iscsi_stat_sess_show_attr_conn_timeout_errors(
910 if (se_sess) { 912 if (se_sess) {
911 sess = se_sess->fabric_sess_ptr; 913 sess = se_sess->fabric_sess_ptr;
912 if (sess) 914 if (sess)
913 ret = snprintf(page, PAGE_SIZE, "%u\n", 915 ret = snprintf(page, PAGE_SIZE, "%lu\n",
914 sess->conn_timeout_errors); 916 atomic_long_read(&sess->conn_timeout_errors));
915 } 917 }
916 spin_unlock_bh(&se_nacl->nacl_sess_lock); 918 spin_unlock_bh(&se_nacl->nacl_sess_lock);
917 919
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 4faeb47fa5e1..39761837608d 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -223,6 +223,8 @@ static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg)
223 a->cache_dynamic_acls = TA_CACHE_DYNAMIC_ACLS; 223 a->cache_dynamic_acls = TA_CACHE_DYNAMIC_ACLS;
224 a->demo_mode_write_protect = TA_DEMO_MODE_WRITE_PROTECT; 224 a->demo_mode_write_protect = TA_DEMO_MODE_WRITE_PROTECT;
225 a->prod_mode_write_protect = TA_PROD_MODE_WRITE_PROTECT; 225 a->prod_mode_write_protect = TA_PROD_MODE_WRITE_PROTECT;
226 a->demo_mode_discovery = TA_DEMO_MODE_DISCOVERY;
227 a->default_erl = TA_DEFAULT_ERL;
226} 228}
227 229
228int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg) 230int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg)
@@ -237,7 +239,7 @@ int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_gro
237 if (iscsi_create_default_params(&tpg->param_list) < 0) 239 if (iscsi_create_default_params(&tpg->param_list) < 0)
238 goto err_out; 240 goto err_out;
239 241
240 ISCSI_TPG_ATTRIB(tpg)->tpg = tpg; 242 tpg->tpg_attrib.tpg = tpg;
241 243
242 spin_lock(&tpg->tpg_state_lock); 244 spin_lock(&tpg->tpg_state_lock);
243 tpg->tpg_state = TPG_STATE_INACTIVE; 245 tpg->tpg_state = TPG_STATE_INACTIVE;
@@ -330,7 +332,7 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
330 return -EINVAL; 332 return -EINVAL;
331 } 333 }
332 334
333 if (ISCSI_TPG_ATTRIB(tpg)->authentication) { 335 if (tpg->tpg_attrib.authentication) {
334 if (!strcmp(param->value, NONE)) { 336 if (!strcmp(param->value, NONE)) {
335 ret = iscsi_update_param_value(param, CHAP); 337 ret = iscsi_update_param_value(param, CHAP);
336 if (ret) 338 if (ret)
@@ -820,3 +822,39 @@ int iscsit_ta_prod_mode_write_protect(
820 822
821 return 0; 823 return 0;
822} 824}
825
826int iscsit_ta_demo_mode_discovery(
827 struct iscsi_portal_group *tpg,
828 u32 flag)
829{
830 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
831
832 if ((flag != 0) && (flag != 1)) {
833 pr_err("Illegal value %d\n", flag);
834 return -EINVAL;
835 }
836
837 a->demo_mode_discovery = flag;
838 pr_debug("iSCSI_TPG[%hu] - Demo Mode Discovery bit:"
839 " %s\n", tpg->tpgt, (a->demo_mode_discovery) ?
840 "ON" : "OFF");
841
842 return 0;
843}
844
845int iscsit_ta_default_erl(
846 struct iscsi_portal_group *tpg,
847 u32 default_erl)
848{
849 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
850
851 if ((default_erl != 0) && (default_erl != 1) && (default_erl != 2)) {
852 pr_err("Illegal value for default_erl: %u\n", default_erl);
853 return -EINVAL;
854 }
855
856 a->default_erl = default_erl;
857 pr_debug("iSCSI_TPG[%hu] - DefaultERL: %u\n", tpg->tpgt, a->default_erl);
858
859 return 0;
860}
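The two new TPG attributes follow the existing setter pattern: demo_mode_discovery accepts 0 or 1 and default_erl accepts 0, 1 or 2 (the three iSCSI ErrorRecoveryLevels), with defaults seeded from TA_DEMO_MODE_DISCOVERY and TA_DEFAULT_ERL in iscsit_set_default_tpg_attribs(). They are presumably exported through the usual per-TPG attrib boilerplate in iscsi_target_configfs.c (those hunks are not shown here), along the lines of:

	/* assumed configfs glue, mirroring the other TPG attributes;
	 * the actual change lives in iscsi_target_configfs.c */
	DEF_TPG_ATTRIB(demo_mode_discovery);
	TPG_ATTR(demo_mode_discovery, S_IRUGO | S_IWUSR);

	DEF_TPG_ATTRIB(default_erl);
	TPG_ATTR(default_erl, S_IRUGO | S_IWUSR);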
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
index b77693e2c209..213c0fc7fdc9 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -37,5 +37,7 @@ extern int iscsit_ta_default_cmdsn_depth(struct iscsi_portal_group *, u32);
37extern int iscsit_ta_cache_dynamic_acls(struct iscsi_portal_group *, u32); 37extern int iscsit_ta_cache_dynamic_acls(struct iscsi_portal_group *, u32);
38extern int iscsit_ta_demo_mode_write_protect(struct iscsi_portal_group *, u32); 38extern int iscsit_ta_demo_mode_write_protect(struct iscsi_portal_group *, u32);
39extern int iscsit_ta_prod_mode_write_protect(struct iscsi_portal_group *, u32); 39extern int iscsit_ta_prod_mode_write_protect(struct iscsi_portal_group *, u32);
40extern int iscsit_ta_demo_mode_discovery(struct iscsi_portal_group *, u32);
41extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32);
40 42
41#endif /* ISCSI_TARGET_TPG_H */ 43#endif /* ISCSI_TARGET_TPG_H */
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index b0cac0c342e1..0819e688a398 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -242,9 +242,9 @@ static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cm
242 */ 242 */
243 if (iscsi_sna_gt(cmdsn, sess->max_cmd_sn)) { 243 if (iscsi_sna_gt(cmdsn, sess->max_cmd_sn)) {
244 pr_err("Received CmdSN: 0x%08x is greater than" 244 pr_err("Received CmdSN: 0x%08x is greater than"
245 " MaxCmdSN: 0x%08x, protocol error.\n", cmdsn, 245 " MaxCmdSN: 0x%08x, ignoring.\n", cmdsn,
246 sess->max_cmd_sn); 246 sess->max_cmd_sn);
247 ret = CMDSN_ERROR_CANNOT_RECOVER; 247 ret = CMDSN_MAXCMDSN_OVERRUN;
248 248
249 } else if (cmdsn == sess->exp_cmd_sn) { 249 } else if (cmdsn == sess->exp_cmd_sn) {
250 sess->exp_cmd_sn++; 250 sess->exp_cmd_sn++;
@@ -303,14 +303,16 @@ int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
303 ret = CMDSN_HIGHER_THAN_EXP; 303 ret = CMDSN_HIGHER_THAN_EXP;
304 break; 304 break;
305 case CMDSN_LOWER_THAN_EXP: 305 case CMDSN_LOWER_THAN_EXP:
306 case CMDSN_MAXCMDSN_OVERRUN:
307 default:
306 cmd->i_state = ISTATE_REMOVE; 308 cmd->i_state = ISTATE_REMOVE;
307 iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state); 309 iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
308 ret = cmdsn_ret; 310 /*
309 break; 311 * Existing callers for iscsit_sequence_cmd() will silently
310 default: 312 * ignore commands with CMDSN_LOWER_THAN_EXP, so force this
311 reason = ISCSI_REASON_PROTOCOL_ERROR; 313 * return for CMDSN_MAXCMDSN_OVERRUN as well..
312 reject = true; 314 */
313 ret = cmdsn_ret; 315 ret = CMDSN_LOWER_THAN_EXP;
314 break; 316 break;
315 } 317 }
316 mutex_unlock(&conn->sess->cmdsn_mutex); 318 mutex_unlock(&conn->sess->cmdsn_mutex);
@@ -980,7 +982,7 @@ static void iscsit_handle_nopin_response_timeout(unsigned long data)
980 tiqn->sess_err_stats.last_sess_failure_type = 982 tiqn->sess_err_stats.last_sess_failure_type =
981 ISCSI_SESS_ERR_CXN_TIMEOUT; 983 ISCSI_SESS_ERR_CXN_TIMEOUT;
982 tiqn->sess_err_stats.cxn_timeout_errors++; 984 tiqn->sess_err_stats.cxn_timeout_errors++;
983 conn->sess->conn_timeout_errors++; 985 atomic_long_inc(&conn->sess->conn_timeout_errors);
984 spin_unlock_bh(&tiqn->sess_err_stats.lock); 986 spin_unlock_bh(&tiqn->sess_err_stats.lock);
985 } 987 }
986 } 988 }
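The CmdSN window check no longer treats a CmdSN above MaxCmdSN as a fatal protocol error: instead of CMDSN_ERROR_CANNOT_RECOVER (which tears down the connection) it returns a new CMDSN_MAXCMDSN_OVERRUN code, and iscsit_sequence_cmd() folds that case, together with the former default branch, into the same silent-drop path already used for CMDSN_LOWER_THAN_EXP. The new value presumably joins the existing CmdSN return codes in iscsi_target_core.h (that hunk is not shown); sketched with only the members that appear in this patch, ordering illustrative:

	/* assumed addition to the CmdSN return codes in iscsi_target_core.h */
	enum {
		CMDSN_ERROR_CANNOT_RECOVER,	/* no longer used for overrun */
		CMDSN_LOWER_THAN_EXP,		/* duplicate CmdSN: drop silently */
		CMDSN_HIGHER_THAN_EXP,		/* out of order: queue for later */
		CMDSN_MAXCMDSN_OVERRUN,		/* new: also dropped silently */
	};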
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 0f6d69dabca1..1b41e6776152 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -135,6 +135,21 @@ static int tcm_loop_change_queue_depth(
135 return sdev->queue_depth; 135 return sdev->queue_depth;
136} 136}
137 137
138static int tcm_loop_change_queue_type(struct scsi_device *sdev, int tag)
139{
140 if (sdev->tagged_supported) {
141 scsi_set_tag_type(sdev, tag);
142
143 if (tag)
144 scsi_activate_tcq(sdev, sdev->queue_depth);
145 else
146 scsi_deactivate_tcq(sdev, sdev->queue_depth);
147 } else
148 tag = 0;
149
150 return tag;
151}
152
138/* 153/*
139 * Locate the SAM Task Attr from struct scsi_cmnd * 154 * Locate the SAM Task Attr from struct scsi_cmnd *
140 */ 155 */
@@ -178,7 +193,10 @@ static void tcm_loop_submission_work(struct work_struct *work)
178 set_host_byte(sc, DID_NO_CONNECT); 193 set_host_byte(sc, DID_NO_CONNECT);
179 goto out_done; 194 goto out_done;
180 } 195 }
181 196 if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
197 set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
198 goto out_done;
199 }
182 tl_nexus = tl_hba->tl_nexus; 200 tl_nexus = tl_hba->tl_nexus;
183 if (!tl_nexus) { 201 if (!tl_nexus) {
184 scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus" 202 scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
@@ -233,6 +251,7 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
233 } 251 }
234 252
235 tl_cmd->sc = sc; 253 tl_cmd->sc = sc;
254 tl_cmd->sc_cmd_tag = sc->tag;
236 INIT_WORK(&tl_cmd->work, tcm_loop_submission_work); 255 INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
237 queue_work(tcm_loop_workqueue, &tl_cmd->work); 256 queue_work(tcm_loop_workqueue, &tl_cmd->work);
238 return 0; 257 return 0;
@@ -242,41 +261,21 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
242 * Called from SCSI EH process context to issue a LUN_RESET TMR 261 * Called from SCSI EH process context to issue a LUN_RESET TMR
243 * to struct scsi_device 262 * to struct scsi_device
244 */ 263 */
245static int tcm_loop_device_reset(struct scsi_cmnd *sc) 264static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
265 struct tcm_loop_nexus *tl_nexus,
266 int lun, int task, enum tcm_tmreq_table tmr)
246{ 267{
247 struct se_cmd *se_cmd = NULL; 268 struct se_cmd *se_cmd = NULL;
248 struct se_portal_group *se_tpg;
249 struct se_session *se_sess; 269 struct se_session *se_sess;
270 struct se_portal_group *se_tpg;
250 struct tcm_loop_cmd *tl_cmd = NULL; 271 struct tcm_loop_cmd *tl_cmd = NULL;
251 struct tcm_loop_hba *tl_hba;
252 struct tcm_loop_nexus *tl_nexus;
253 struct tcm_loop_tmr *tl_tmr = NULL; 272 struct tcm_loop_tmr *tl_tmr = NULL;
254 struct tcm_loop_tpg *tl_tpg; 273 int ret = TMR_FUNCTION_FAILED, rc;
255 int ret = FAILED, rc;
256 /*
257 * Locate the tcm_loop_hba_t pointer
258 */
259 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
260 /*
261 * Locate the tl_nexus and se_sess pointers
262 */
263 tl_nexus = tl_hba->tl_nexus;
264 if (!tl_nexus) {
265 pr_err("Unable to perform device reset without"
266 " active I_T Nexus\n");
267 return FAILED;
268 }
269 se_sess = tl_nexus->se_sess;
270 /*
271 * Locate the tl_tpg and se_tpg pointers from TargetID in sc->device->id
272 */
273 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
274 se_tpg = &tl_tpg->tl_se_tpg;
275 274
276 tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL); 275 tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
277 if (!tl_cmd) { 276 if (!tl_cmd) {
278 pr_err("Unable to allocate memory for tl_cmd\n"); 277 pr_err("Unable to allocate memory for tl_cmd\n");
279 return FAILED; 278 return ret;
280 } 279 }
281 280
282 tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL); 281 tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
@@ -287,6 +286,8 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
287 init_waitqueue_head(&tl_tmr->tl_tmr_wait); 286 init_waitqueue_head(&tl_tmr->tl_tmr_wait);
288 287
289 se_cmd = &tl_cmd->tl_se_cmd; 288 se_cmd = &tl_cmd->tl_se_cmd;
289 se_tpg = &tl_tpg->tl_se_tpg;
290 se_sess = tl_nexus->se_sess;
290 /* 291 /*
291 * Initialize struct se_cmd descriptor from target_core_mod infrastructure 292 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
292 */ 293 */
@@ -294,17 +295,23 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
294 DMA_NONE, MSG_SIMPLE_TAG, 295 DMA_NONE, MSG_SIMPLE_TAG,
295 &tl_cmd->tl_sense_buf[0]); 296 &tl_cmd->tl_sense_buf[0]);
296 297
297 rc = core_tmr_alloc_req(se_cmd, tl_tmr, TMR_LUN_RESET, GFP_KERNEL); 298 rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL);
298 if (rc < 0) 299 if (rc < 0)
299 goto release; 300 goto release;
301
302 if (tmr == TMR_ABORT_TASK)
303 se_cmd->se_tmr_req->ref_task_tag = task;
304
300 /* 305 /*
301 * Locate the underlying TCM struct se_lun from sc->device->lun 306 * Locate the underlying TCM struct se_lun
302 */ 307 */
303 if (transport_lookup_tmr_lun(se_cmd, sc->device->lun) < 0) 308 if (transport_lookup_tmr_lun(se_cmd, lun) < 0) {
309 ret = TMR_LUN_DOES_NOT_EXIST;
304 goto release; 310 goto release;
311 }
305 /* 312 /*
306 * Queue the TMR to TCM Core and sleep waiting for tcm_loop_queue_tm_rsp() 313 * Queue the TMR to TCM Core and sleep waiting for
307 * to wake us up. 314 * tcm_loop_queue_tm_rsp() to wake us up.
308 */ 315 */
309 transport_generic_handle_tmr(se_cmd); 316 transport_generic_handle_tmr(se_cmd);
310 wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete)); 317 wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
@@ -312,8 +319,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
312 * The TMR LUN_RESET has completed, check the response status and 319 * The TMR LUN_RESET has completed, check the response status and
313 * then release allocations. 320 * then release allocations.
314 */ 321 */
315 ret = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ? 322 ret = se_cmd->se_tmr_req->response;
316 SUCCESS : FAILED;
317release: 323release:
318 if (se_cmd) 324 if (se_cmd)
319 transport_generic_free_cmd(se_cmd, 1); 325 transport_generic_free_cmd(se_cmd, 1);
@@ -323,6 +329,94 @@ release:
323 return ret; 329 return ret;
324} 330}
325 331
332static int tcm_loop_abort_task(struct scsi_cmnd *sc)
333{
334 struct tcm_loop_hba *tl_hba;
335 struct tcm_loop_nexus *tl_nexus;
336 struct tcm_loop_tpg *tl_tpg;
337 int ret = FAILED;
338
339 /*
340 * Locate the tcm_loop_hba_t pointer
341 */
342 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
343 /*
344 * Locate the tl_nexus and se_sess pointers
345 */
346 tl_nexus = tl_hba->tl_nexus;
347 if (!tl_nexus) {
348 pr_err("Unable to perform device reset without"
349 " active I_T Nexus\n");
350 return FAILED;
351 }
352
353 /*
354 * Locate the tl_tpg pointer from TargetID in sc->device->id
355 */
356 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
357 ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
358 sc->tag, TMR_ABORT_TASK);
359 return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
360}
361
362/*
363 * Called from SCSI EH process context to issue a LUN_RESET TMR
364 * to struct scsi_device
365 */
366static int tcm_loop_device_reset(struct scsi_cmnd *sc)
367{
368 struct tcm_loop_hba *tl_hba;
369 struct tcm_loop_nexus *tl_nexus;
370 struct tcm_loop_tpg *tl_tpg;
371 int ret = FAILED;
372
373 /*
374 * Locate the tcm_loop_hba_t pointer
375 */
376 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
377 /*
378 * Locate the tl_nexus and se_sess pointers
379 */
380 tl_nexus = tl_hba->tl_nexus;
381 if (!tl_nexus) {
382 pr_err("Unable to perform device reset without"
383 " active I_T Nexus\n");
384 return FAILED;
385 }
386 /*
387 * Locate the tl_tpg pointer from TargetID in sc->device->id
388 */
389 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
390 ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
391 0, TMR_LUN_RESET);
392 return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
393}
394
395static int tcm_loop_target_reset(struct scsi_cmnd *sc)
396{
397 struct tcm_loop_hba *tl_hba;
398 struct tcm_loop_tpg *tl_tpg;
399
400 /*
401 * Locate the tcm_loop_hba_t pointer
402 */
403 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
404 if (!tl_hba) {
405 pr_err("Unable to perform device reset without"
406 " active I_T Nexus\n");
407 return FAILED;
408 }
409 /*
410 * Locate the tl_tpg pointer from TargetID in sc->device->id
411 */
412 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
413 if (tl_tpg) {
414 tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
415 return SUCCESS;
416 }
417 return FAILED;
418}
419
326static int tcm_loop_slave_alloc(struct scsi_device *sd) 420static int tcm_loop_slave_alloc(struct scsi_device *sd)
327{ 421{
328 set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags); 422 set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags);
@@ -331,6 +425,15 @@ static int tcm_loop_slave_alloc(struct scsi_device *sd)
331 425
332static int tcm_loop_slave_configure(struct scsi_device *sd) 426static int tcm_loop_slave_configure(struct scsi_device *sd)
333{ 427{
428 if (sd->tagged_supported) {
429 scsi_activate_tcq(sd, sd->queue_depth);
430 scsi_adjust_queue_depth(sd, MSG_SIMPLE_TAG,
431 sd->host->cmd_per_lun);
432 } else {
433 scsi_adjust_queue_depth(sd, 0,
434 sd->host->cmd_per_lun);
435 }
436
334 return 0; 437 return 0;
335} 438}
336 439
@@ -340,7 +443,10 @@ static struct scsi_host_template tcm_loop_driver_template = {
340 .name = "TCM_Loopback", 443 .name = "TCM_Loopback",
341 .queuecommand = tcm_loop_queuecommand, 444 .queuecommand = tcm_loop_queuecommand,
342 .change_queue_depth = tcm_loop_change_queue_depth, 445 .change_queue_depth = tcm_loop_change_queue_depth,
446 .change_queue_type = tcm_loop_change_queue_type,
447 .eh_abort_handler = tcm_loop_abort_task,
343 .eh_device_reset_handler = tcm_loop_device_reset, 448 .eh_device_reset_handler = tcm_loop_device_reset,
449 .eh_target_reset_handler = tcm_loop_target_reset,
344 .can_queue = 1024, 450 .can_queue = 1024,
345 .this_id = -1, 451 .this_id = -1,
346 .sg_tablesize = 256, 452 .sg_tablesize = 256,
@@ -699,7 +805,10 @@ static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
699 805
700static u32 tcm_loop_get_task_tag(struct se_cmd *se_cmd) 806static u32 tcm_loop_get_task_tag(struct se_cmd *se_cmd)
701{ 807{
702 return 1; 808 struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
809 struct tcm_loop_cmd, tl_se_cmd);
810
811 return tl_cmd->sc_cmd_tag;
703} 812}
704 813
705static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd) 814static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
@@ -932,7 +1041,10 @@ static int tcm_loop_drop_nexus(
932 struct tcm_loop_nexus *tl_nexus; 1041 struct tcm_loop_nexus *tl_nexus;
933 struct tcm_loop_hba *tl_hba = tpg->tl_hba; 1042 struct tcm_loop_hba *tl_hba = tpg->tl_hba;
934 1043
935 tl_nexus = tpg->tl_hba->tl_nexus; 1044 if (!tl_hba)
1045 return -ENODEV;
1046
1047 tl_nexus = tl_hba->tl_nexus;
936 if (!tl_nexus) 1048 if (!tl_nexus)
937 return -ENODEV; 1049 return -ENODEV;
938 1050
@@ -1061,8 +1173,56 @@ check_newline:
1061 1173
1062TF_TPG_BASE_ATTR(tcm_loop, nexus, S_IRUGO | S_IWUSR); 1174TF_TPG_BASE_ATTR(tcm_loop, nexus, S_IRUGO | S_IWUSR);
1063 1175
1176static ssize_t tcm_loop_tpg_show_transport_status(
1177 struct se_portal_group *se_tpg,
1178 char *page)
1179{
1180 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1181 struct tcm_loop_tpg, tl_se_tpg);
1182 const char *status = NULL;
1183 ssize_t ret = -EINVAL;
1184
1185 switch (tl_tpg->tl_transport_status) {
1186 case TCM_TRANSPORT_ONLINE:
1187 status = "online";
1188 break;
1189 case TCM_TRANSPORT_OFFLINE:
1190 status = "offline";
1191 break;
1192 default:
1193 break;
1194 }
1195
1196 if (status)
1197 ret = snprintf(page, PAGE_SIZE, "%s\n", status);
1198
1199 return ret;
1200}
1201
1202static ssize_t tcm_loop_tpg_store_transport_status(
1203 struct se_portal_group *se_tpg,
1204 const char *page,
1205 size_t count)
1206{
1207 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1208 struct tcm_loop_tpg, tl_se_tpg);
1209
1210 if (!strncmp(page, "online", 6)) {
1211 tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
1212 return count;
1213 }
1214 if (!strncmp(page, "offline", 7)) {
1215 tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
1216 return count;
1217 }
1218 return -EINVAL;
1219}
1220
1221TF_TPG_BASE_ATTR(tcm_loop, transport_status, S_IRUGO | S_IWUSR);
1222
1064static struct configfs_attribute *tcm_loop_tpg_attrs[] = { 1223static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
1065 &tcm_loop_tpg_nexus.attr, 1224 &tcm_loop_tpg_nexus.attr,
1225 &tcm_loop_tpg_transport_status.attr,
1066 NULL, 1226 NULL,
1067}; 1227};
1068 1228
@@ -1334,11 +1494,11 @@ static int tcm_loop_register_configfs(void)
1334 /* 1494 /*
1335 * Setup default attribute lists for various fabric->tf_cit_tmpl 1495 * Setup default attribute lists for various fabric->tf_cit_tmpl
1336 */ 1496 */
1337 TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs; 1497 fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs;
1338 TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs; 1498 fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs;
1339 TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; 1499 fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
1340 TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; 1500 fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
1341 TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; 1501 fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
1342 /* 1502 /*
1343 * Once fabric->tf_ops has been setup, now register the fabric for 1503 * Once fabric->tf_ops has been setup, now register the fabric for
1344 * use within TCM 1504 * use within TCM
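The tcm_loop changes hang together: queuecommand records the midlayer tag in tl_cmd->sc_cmd_tag and get_task_tag() reports it instead of a constant 1, the host template gains change_queue_type plus TCQ setup in slave_configure, the LUN_RESET-only error-handler path is generalised into tcm_loop_issue_tmr() so eh_abort_handler can issue TMR_ABORT_TASK against that same tag while eh_device_reset_handler keeps issuing TMR_LUN_RESET, and a per-TPG transport_status attribute lets userspace flip the port offline, which queuecommand reports as DID_TRANSPORT_DISRUPTED until eh_target_reset_handler (or a configfs write of "online") brings it back. The common shape of the EH entry points, reduced to its core:

	/* illustrative: both EH handlers reduce to the shared helper and
	 * translate the TMR response into the SCSI EH return convention */
	ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
				 sc->tag, TMR_ABORT_TASK);
	/* device reset passes 0, TMR_LUN_RESET instead of the tag */
	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;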
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
index dd7a84ee78e1..54c59d0b6608 100644
--- a/drivers/target/loopback/tcm_loop.h
+++ b/drivers/target/loopback/tcm_loop.h
@@ -10,6 +10,8 @@
10struct tcm_loop_cmd { 10struct tcm_loop_cmd {
11 /* State of Linux/SCSI CDB+Data descriptor */ 11 /* State of Linux/SCSI CDB+Data descriptor */
12 u32 sc_cmd_state; 12 u32 sc_cmd_state;
13 /* Tagged command queueing */
14 u32 sc_cmd_tag;
13 /* Pointer to the CDB+Data descriptor from Linux/SCSI subsystem */ 15 /* Pointer to the CDB+Data descriptor from Linux/SCSI subsystem */
14 struct scsi_cmnd *sc; 16 struct scsi_cmnd *sc;
15 /* The TCM I/O descriptor that is accessed via container_of() */ 17 /* The TCM I/O descriptor that is accessed via container_of() */
@@ -40,8 +42,12 @@ struct tcm_loop_nacl {
40 struct se_node_acl se_node_acl; 42 struct se_node_acl se_node_acl;
41}; 43};
42 44
45#define TCM_TRANSPORT_ONLINE 0
46#define TCM_TRANSPORT_OFFLINE 1
47
43struct tcm_loop_tpg { 48struct tcm_loop_tpg {
44 unsigned short tl_tpgt; 49 unsigned short tl_tpgt;
50 unsigned short tl_transport_status;
45 atomic_t tl_tpg_port_count; 51 atomic_t tl_tpg_port_count;
46 struct se_portal_group tl_se_tpg; 52 struct se_portal_group tl_se_tpg;
47 struct tcm_loop_hba *tl_hba; 53 struct tcm_loop_hba *tl_hba;
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index e51b09a04d52..24884cac19ce 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -2556,15 +2556,15 @@ static int sbp_register_configfs(void)
2556 /* 2556 /*
2557 * Setup default attribute lists for various fabric->tf_cit_tmpl 2557 * Setup default attribute lists for various fabric->tf_cit_tmpl
2558 */ 2558 */
2559 TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = sbp_wwn_attrs; 2559 fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = sbp_wwn_attrs;
2560 TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = sbp_tpg_base_attrs; 2560 fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = sbp_tpg_base_attrs;
2561 TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = sbp_tpg_attrib_attrs; 2561 fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = sbp_tpg_attrib_attrs;
2562 TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; 2562 fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
2563 TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; 2563 fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
2564 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL; 2564 fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
2565 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; 2565 fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
2566 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; 2566 fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
2567 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; 2567 fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
2568 2568
2569 ret = target_fabric_configfs_register(fabric); 2569 ret = target_fabric_configfs_register(fabric);
2570 if (ret < 0) { 2570 if (ret < 0) {
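The sbp change is the same mechanical conversion applied to tcm_mod_builder.py, tcm_loop and the other fabric modules in this series: the TF_CIT_TMPL() accessor from target_core_configfs.h (which the diffstat shows shrinking by one line) goes away and fabric drivers touch the embedded tf_cit_tmpl member directly. Old and new spellings side by side, with the macro body as it presumably read before removal:

	/* old, via the accessor presumably defined as
	 *   #define TF_CIT_TMPL(tf)	(&(tf)->tf_cit_tmpl)		*/
	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = sbp_wwn_attrs;

	/* new, direct member access */
	fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = sbp_wwn_attrs;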
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 47244102281e..fdcee326bfbc 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -44,7 +44,7 @@
44static sense_reason_t core_alua_check_transition(int state, int *primary); 44static sense_reason_t core_alua_check_transition(int state, int *primary);
45static int core_alua_set_tg_pt_secondary_state( 45static int core_alua_set_tg_pt_secondary_state(
46 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, 46 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
47 struct se_port *port, int explict, int offline); 47 struct se_port *port, int explicit, int offline);
48 48
49static u16 alua_lu_gps_counter; 49static u16 alua_lu_gps_counter;
50static u32 alua_lu_gps_count; 50static u32 alua_lu_gps_count;
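Two things run through this file's hunks: a spelling sweep (explict/implict become explicit/implicit, including the TPGS_*_ALUA, ALUA_STATUS_ALTERED_BY_* and ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED symbols and the ALUA_DEFAULT_IMPLICIT_TRANS_SECS default), and a new tg_pt_gp_alua_supported_states field that replaces the six hard-coded bits previously OR'd into the REPORT TARGET PORT GROUPS payload, initialised to all states supported in core_alua_allocate_tg_pt_gp(). The per-bit defines presumably live in target_core_alua.h (those hunks are not shown); their values can be read off the removed open-coded byte:

	/* assumed defines in target_core_alua.h, mirroring the bits the
	 * removed code set by hand in the RTPG response */
	#define ALUA_T_SUP	0x80	/* Transitioning supported */
	#define ALUA_O_SUP	0x40	/* Offline supported */
	#define ALUA_U_SUP	0x08	/* Unavailable supported */
	#define ALUA_S_SUP	0x04	/* Standby supported */
	#define ALUA_AN_SUP	0x02	/* Active/NonOptimized supported */
	#define ALUA_AO_SUP	0x01	/* Active/Optimized supported */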
@@ -117,12 +117,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
117 /* 117 /*
118 * Set supported ASYMMETRIC ACCESS State bits 118 * Set supported ASYMMETRIC ACCESS State bits
119 */ 119 */
120 buf[off] = 0x80; /* T_SUP */ 120 buf[off++] |= tg_pt_gp->tg_pt_gp_alua_supported_states;
121 buf[off] |= 0x40; /* O_SUP */
122 buf[off] |= 0x8; /* U_SUP */
123 buf[off] |= 0x4; /* S_SUP */
124 buf[off] |= 0x2; /* AN_SUP */
125 buf[off++] |= 0x1; /* AO_SUP */
126 /* 121 /*
127 * TARGET PORT GROUP 122 * TARGET PORT GROUP
128 */ 123 */
@@ -175,7 +170,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
175 if (ext_hdr != 0) { 170 if (ext_hdr != 0) {
176 buf[4] = 0x10; 171 buf[4] = 0x10;
177 /* 172 /*
178 * Set the implict transition time (in seconds) for the application 173 * Set the implicit transition time (in seconds) for the application
179 * client to use as a base for it's transition timeout value. 174 * client to use as a base for it's transition timeout value.
180 * 175 *
181 * Use the current tg_pt_gp_mem -> tg_pt_gp membership from the LUN 176 * Use the current tg_pt_gp_mem -> tg_pt_gp membership from the LUN
@@ -188,7 +183,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
188 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 183 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
189 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 184 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
190 if (tg_pt_gp) 185 if (tg_pt_gp)
191 buf[5] = tg_pt_gp->tg_pt_gp_implict_trans_secs; 186 buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs;
192 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 187 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
193 } 188 }
194 } 189 }
@@ -199,7 +194,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
199} 194}
200 195
201/* 196/*
202 * SET_TARGET_PORT_GROUPS for explict ALUA operation. 197 * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
203 * 198 *
204 * See spc4r17 section 6.35 199 * See spc4r17 section 6.35
205 */ 200 */
@@ -232,7 +227,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
232 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 227 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
233 228
234 /* 229 /*
235 * Determine if explict ALUA via SET_TARGET_PORT_GROUPS is allowed 230 * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
236 * for the local tg_pt_gp. 231 * for the local tg_pt_gp.
237 */ 232 */
238 l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem; 233 l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
@@ -251,9 +246,9 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
251 } 246 }
252 spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); 247 spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
253 248
254 if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)) { 249 if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
255 pr_debug("Unable to process SET_TARGET_PORT_GROUPS" 250 pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
256 " while TPGS_EXPLICT_ALUA is disabled\n"); 251 " while TPGS_EXPLICIT_ALUA is disabled\n");
257 rc = TCM_UNSUPPORTED_SCSI_OPCODE; 252 rc = TCM_UNSUPPORTED_SCSI_OPCODE;
258 goto out; 253 goto out;
259 } 254 }
@@ -330,7 +325,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
330 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 325 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
331 } else { 326 } else {
332 /* 327 /*
333 * Extact the RELATIVE TARGET PORT IDENTIFIER to identify 328 * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
334 * the Target Port in question for the the incoming 329 * the Target Port in question for the the incoming
335 * SET_TARGET_PORT_GROUPS op. 330 * SET_TARGET_PORT_GROUPS op.
336 */ 331 */
@@ -487,7 +482,7 @@ static inline int core_alua_state_transition(
487 u8 *alua_ascq) 482 u8 *alua_ascq)
488{ 483{
489 /* 484 /*
490 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITIO as defined by 485 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
491 * spc4r17 section 5.9.2.5 486 * spc4r17 section 5.9.2.5
492 */ 487 */
493 switch (cdb[0]) { 488 switch (cdb[0]) {
@@ -515,9 +510,9 @@ static inline int core_alua_state_transition(
515} 510}
516 511
517/* 512/*
518 * return 1: Is used to signal LUN not accecsable, and check condition/not ready 513 * return 1: Is used to signal LUN not accessible, and check condition/not ready
519 * return 0: Used to signal success 514 * return 0: Used to signal success
520 * reutrn -1: Used to signal failure, and invalid cdb field 515 * return -1: Used to signal failure, and invalid cdb field
521 */ 516 */
522sense_reason_t 517sense_reason_t
523target_alua_state_check(struct se_cmd *cmd) 518target_alua_state_check(struct se_cmd *cmd)
@@ -566,12 +561,12 @@ target_alua_state_check(struct se_cmd *cmd)
566 nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs; 561 nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
567 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 562 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
568 /* 563 /*
569 * Process ALUA_ACCESS_STATE_ACTIVE_OPTMIZED in a separate conditional 564 * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional
570 * statement so the compiler knows explicitly to check this case first. 565 * statement so the compiler knows explicitly to check this case first.
571 * For the Optimized ALUA access state case, we want to process the 566 * For the Optimized ALUA access state case, we want to process the
572 * incoming fabric cmd ASAP.. 567 * incoming fabric cmd ASAP..
573 */ 568 */
574 if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTMIZED) 569 if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED)
575 return 0; 570 return 0;
576 571
577 switch (out_alua_state) { 572 switch (out_alua_state) {
@@ -620,13 +615,13 @@ out:
620} 615}
621 616
622/* 617/*
623 * Check implict and explict ALUA state change request. 618 * Check implicit and explicit ALUA state change request.
624 */ 619 */
625static sense_reason_t 620static sense_reason_t
626core_alua_check_transition(int state, int *primary) 621core_alua_check_transition(int state, int *primary)
627{ 622{
628 switch (state) { 623 switch (state) {
629 case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED: 624 case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
630 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: 625 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
631 case ALUA_ACCESS_STATE_STANDBY: 626 case ALUA_ACCESS_STATE_STANDBY:
632 case ALUA_ACCESS_STATE_UNAVAILABLE: 627 case ALUA_ACCESS_STATE_UNAVAILABLE:
@@ -654,7 +649,7 @@ core_alua_check_transition(int state, int *primary)
654static char *core_alua_dump_state(int state) 649static char *core_alua_dump_state(int state)
655{ 650{
656 switch (state) { 651 switch (state) {
657 case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED: 652 case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
658 return "Active/Optimized"; 653 return "Active/Optimized";
659 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: 654 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
660 return "Active/NonOptimized"; 655 return "Active/NonOptimized";
@@ -676,10 +671,10 @@ char *core_alua_dump_status(int status)
676 switch (status) { 671 switch (status) {
677 case ALUA_STATUS_NONE: 672 case ALUA_STATUS_NONE:
678 return "None"; 673 return "None";
679 case ALUA_STATUS_ALTERED_BY_EXPLICT_STPG: 674 case ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG:
680 return "Altered by Explict STPG"; 675 return "Altered by Explicit STPG";
681 case ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA: 676 case ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA:
682 return "Altered by Implict ALUA"; 677 return "Altered by Implicit ALUA";
683 default: 678 default:
684 return "Unknown"; 679 return "Unknown";
685 } 680 }
@@ -770,7 +765,7 @@ static int core_alua_do_transition_tg_pt(
770 struct se_node_acl *nacl, 765 struct se_node_acl *nacl,
771 unsigned char *md_buf, 766 unsigned char *md_buf,
772 int new_state, 767 int new_state,
773 int explict) 768 int explicit)
774{ 769{
775 struct se_dev_entry *se_deve; 770 struct se_dev_entry *se_deve;
776 struct se_lun_acl *lacl; 771 struct se_lun_acl *lacl;
@@ -784,9 +779,9 @@ static int core_alua_do_transition_tg_pt(
784 old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); 779 old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
785 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, 780 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
786 ALUA_ACCESS_STATE_TRANSITION); 781 ALUA_ACCESS_STATE_TRANSITION);
787 tg_pt_gp->tg_pt_gp_alua_access_status = (explict) ? 782 tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
788 ALUA_STATUS_ALTERED_BY_EXPLICT_STPG : 783 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
789 ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA; 784 ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
790 /* 785 /*
791 * Check for the optional ALUA primary state transition delay 786 * Check for the optional ALUA primary state transition delay
792 */ 787 */
@@ -802,7 +797,7 @@ static int core_alua_do_transition_tg_pt(
802 * change, a device server shall establish a unit attention 797 * change, a device server shall establish a unit attention
803 * condition for the initiator port associated with every I_T 798 * condition for the initiator port associated with every I_T
804 * nexus with the additional sense code set to ASYMMETRIC 799 * nexus with the additional sense code set to ASYMMETRIC
805 * ACCESS STATE CHAGED. 800 * ACCESS STATE CHANGED.
806 * 801 *
807 * After an explicit target port asymmetric access state 802 * After an explicit target port asymmetric access state
808 * change, a device server shall establish a unit attention 803 * change, a device server shall establish a unit attention
@@ -821,12 +816,12 @@ static int core_alua_do_transition_tg_pt(
821 lacl = se_deve->se_lun_acl; 816 lacl = se_deve->se_lun_acl;
822 /* 817 /*
823 * se_deve->se_lun_acl pointer may be NULL for a 818 * se_deve->se_lun_acl pointer may be NULL for a
824 * entry created without explict Node+MappedLUN ACLs 819 * entry created without explicit Node+MappedLUN ACLs
825 */ 820 */
826 if (!lacl) 821 if (!lacl)
827 continue; 822 continue;
828 823
829 if (explict && 824 if (explicit &&
830 (nacl != NULL) && (nacl == lacl->se_lun_nacl) && 825 (nacl != NULL) && (nacl == lacl->se_lun_nacl) &&
831 (l_port != NULL) && (l_port == port)) 826 (l_port != NULL) && (l_port == port))
832 continue; 827 continue;
@@ -866,8 +861,8 @@ static int core_alua_do_transition_tg_pt(
866 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state); 861 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);
867 862
868 pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu" 863 pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
869 " from primary access state %s to %s\n", (explict) ? "explict" : 864 " from primary access state %s to %s\n", (explicit) ? "explicit" :
870 "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), 865 "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
871 tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state), 866 tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
872 core_alua_dump_state(new_state)); 867 core_alua_dump_state(new_state));
873 868
@@ -880,7 +875,7 @@ int core_alua_do_port_transition(
880 struct se_port *l_port, 875 struct se_port *l_port,
881 struct se_node_acl *l_nacl, 876 struct se_node_acl *l_nacl,
882 int new_state, 877 int new_state,
883 int explict) 878 int explicit)
884{ 879{
885 struct se_device *dev; 880 struct se_device *dev;
886 struct se_port *port; 881 struct se_port *port;
@@ -917,7 +912,7 @@ int core_alua_do_port_transition(
917 * success. 912 * success.
918 */ 913 */
919 core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl, 914 core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl,
920 md_buf, new_state, explict); 915 md_buf, new_state, explicit);
921 atomic_dec(&lu_gp->lu_gp_ref_cnt); 916 atomic_dec(&lu_gp->lu_gp_ref_cnt);
922 smp_mb__after_atomic_dec(); 917 smp_mb__after_atomic_dec();
923 kfree(md_buf); 918 kfree(md_buf);
@@ -946,7 +941,7 @@ int core_alua_do_port_transition(
946 continue; 941 continue;
947 /* 942 /*
948 * If the target behavior port asymmetric access state 943 * If the target behavior port asymmetric access state
949 * is changed for any target port group accessiable via 944 * is changed for any target port group accessible via
950 * a logical unit within a LU group, the target port 945 * a logical unit within a LU group, the target port
951 * behavior group asymmetric access states for the same 946 * behavior group asymmetric access states for the same
952 * target port group accessible via other logical units 947 * target port group accessible via other logical units
@@ -970,7 +965,7 @@ int core_alua_do_port_transition(
970 * success. 965 * success.
971 */ 966 */
972 core_alua_do_transition_tg_pt(tg_pt_gp, port, 967 core_alua_do_transition_tg_pt(tg_pt_gp, port,
973 nacl, md_buf, new_state, explict); 968 nacl, md_buf, new_state, explicit);
974 969
975 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 970 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
976 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); 971 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
@@ -987,7 +982,7 @@ int core_alua_do_port_transition(
987 pr_debug("Successfully processed LU Group: %s all ALUA TG PT" 982 pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
988 " Group IDs: %hu %s transition to primary state: %s\n", 983 " Group IDs: %hu %s transition to primary state: %s\n",
989 config_item_name(&lu_gp->lu_gp_group.cg_item), 984 config_item_name(&lu_gp->lu_gp_group.cg_item),
990 l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explict" : "implict", 985 l_tg_pt_gp->tg_pt_gp_id, (explicit) ? "explicit" : "implicit",
991 core_alua_dump_state(new_state)); 986 core_alua_dump_state(new_state));
992 987
993 atomic_dec(&lu_gp->lu_gp_ref_cnt); 988 atomic_dec(&lu_gp->lu_gp_ref_cnt);
@@ -1034,7 +1029,7 @@ static int core_alua_update_tpg_secondary_metadata(
1034static int core_alua_set_tg_pt_secondary_state( 1029static int core_alua_set_tg_pt_secondary_state(
1035 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, 1030 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
1036 struct se_port *port, 1031 struct se_port *port,
1037 int explict, 1032 int explicit,
1038 int offline) 1033 int offline)
1039{ 1034{
1040 struct t10_alua_tg_pt_gp *tg_pt_gp; 1035 struct t10_alua_tg_pt_gp *tg_pt_gp;
@@ -1061,13 +1056,13 @@ static int core_alua_set_tg_pt_secondary_state(
1061 atomic_set(&port->sep_tg_pt_secondary_offline, 0); 1056 atomic_set(&port->sep_tg_pt_secondary_offline, 0);
1062 1057
1063 md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len; 1058 md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len;
1064 port->sep_tg_pt_secondary_stat = (explict) ? 1059 port->sep_tg_pt_secondary_stat = (explicit) ?
1065 ALUA_STATUS_ALTERED_BY_EXPLICT_STPG : 1060 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
1066 ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA; 1061 ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
1067 1062
1068 pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu" 1063 pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
1069 " to secondary access state: %s\n", (explict) ? "explict" : 1064 " to secondary access state: %s\n", (explicit) ? "explicit" :
1070 "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), 1065 "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
1071 tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE"); 1066 tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
1072 1067
1073 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1068 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -1232,7 +1227,7 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
1232 * struct se_device is released via core_alua_free_lu_gp_mem(). 1227 * struct se_device is released via core_alua_free_lu_gp_mem().
1233 * 1228 *
1234 * If the passed lu_gp does NOT match the default_lu_gp, assume 1229 * If the passed lu_gp does NOT match the default_lu_gp, assume
1235 * we want to re-assocate a given lu_gp_mem with default_lu_gp. 1230 * we want to re-associate a given lu_gp_mem with default_lu_gp.
1236 */ 1231 */
1237 spin_lock(&lu_gp_mem->lu_gp_mem_lock); 1232 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1238 if (lu_gp != default_lu_gp) 1233 if (lu_gp != default_lu_gp)
@@ -1354,18 +1349,25 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
1354 tg_pt_gp->tg_pt_gp_dev = dev; 1349 tg_pt_gp->tg_pt_gp_dev = dev;
1355 tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN; 1350 tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN;
1356 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, 1351 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
1357 ALUA_ACCESS_STATE_ACTIVE_OPTMIZED); 1352 ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
1358 /* 1353 /*
1359 * Enable both explict and implict ALUA support by default 1354 * Enable both explicit and implicit ALUA support by default
1360 */ 1355 */
1361 tg_pt_gp->tg_pt_gp_alua_access_type = 1356 tg_pt_gp->tg_pt_gp_alua_access_type =
1362 TPGS_EXPLICT_ALUA | TPGS_IMPLICT_ALUA; 1357 TPGS_EXPLICIT_ALUA | TPGS_IMPLICIT_ALUA;
1363 /* 1358 /*
1364 * Set the default Active/NonOptimized Delay in milliseconds 1359 * Set the default Active/NonOptimized Delay in milliseconds
1365 */ 1360 */
1366 tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS; 1361 tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
1367 tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS; 1362 tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
1368 tg_pt_gp->tg_pt_gp_implict_trans_secs = ALUA_DEFAULT_IMPLICT_TRANS_SECS; 1363 tg_pt_gp->tg_pt_gp_implicit_trans_secs = ALUA_DEFAULT_IMPLICIT_TRANS_SECS;
1364
1365 /*
1366 * Enable all supported states
1367 */
1368 tg_pt_gp->tg_pt_gp_alua_supported_states =
1369 ALUA_T_SUP | ALUA_O_SUP |
1370 ALUA_U_SUP | ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP;
1369 1371
1370 if (def_group) { 1372 if (def_group) {
1371 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1373 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
@@ -1465,7 +1467,7 @@ void core_alua_free_tg_pt_gp(
1465 * been called from target_core_alua_drop_tg_pt_gp(). 1467 * been called from target_core_alua_drop_tg_pt_gp().
1466 * 1468 *
1467 * Here we remove *tg_pt_gp from the global list so that 1469 * Here we remove *tg_pt_gp from the global list so that
1468 * no assications *OR* explict ALUA via SET_TARGET_PORT_GROUPS 1470 * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
1469 * can be made while we are releasing struct t10_alua_tg_pt_gp. 1471 * can be made while we are releasing struct t10_alua_tg_pt_gp.
1470 */ 1472 */
1471 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1473 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
@@ -1501,7 +1503,7 @@ void core_alua_free_tg_pt_gp(
1501 * core_alua_free_tg_pt_gp_mem(). 1503 * core_alua_free_tg_pt_gp_mem().
1502 * 1504 *
1503 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp, 1505 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
1504 * assume we want to re-assocate a given tg_pt_gp_mem with 1506 * assume we want to re-associate a given tg_pt_gp_mem with
1505 * default_tg_pt_gp. 1507 * default_tg_pt_gp.
1506 */ 1508 */
1507 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1509 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -1740,13 +1742,13 @@ ssize_t core_alua_show_access_type(
1740 struct t10_alua_tg_pt_gp *tg_pt_gp, 1742 struct t10_alua_tg_pt_gp *tg_pt_gp,
1741 char *page) 1743 char *page)
1742{ 1744{
1743 if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) && 1745 if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) &&
1744 (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) 1746 (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA))
1745 return sprintf(page, "Implict and Explict\n"); 1747 return sprintf(page, "Implicit and Explicit\n");
1746 else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA) 1748 else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)
1747 return sprintf(page, "Implict\n"); 1749 return sprintf(page, "Implicit\n");
1748 else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) 1750 else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)
1749 return sprintf(page, "Explict\n"); 1751 return sprintf(page, "Explicit\n");
1750 else 1752 else
1751 return sprintf(page, "None\n"); 1753 return sprintf(page, "None\n");
1752} 1754}
@@ -1771,11 +1773,11 @@ ssize_t core_alua_store_access_type(
1771 } 1773 }
1772 if (tmp == 3) 1774 if (tmp == 3)
1773 tg_pt_gp->tg_pt_gp_alua_access_type = 1775 tg_pt_gp->tg_pt_gp_alua_access_type =
1774 TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA; 1776 TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA;
1775 else if (tmp == 2) 1777 else if (tmp == 2)
1776 tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICT_ALUA; 1778 tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICIT_ALUA;
1777 else if (tmp == 1) 1779 else if (tmp == 1)
1778 tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICT_ALUA; 1780 tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICIT_ALUA;
1779 else 1781 else
1780 tg_pt_gp->tg_pt_gp_alua_access_type = 0; 1782 tg_pt_gp->tg_pt_gp_alua_access_type = 0;
1781 1783
@@ -1844,14 +1846,14 @@ ssize_t core_alua_store_trans_delay_msecs(
1844 return count; 1846 return count;
1845} 1847}
1846 1848
1847ssize_t core_alua_show_implict_trans_secs( 1849ssize_t core_alua_show_implicit_trans_secs(
1848 struct t10_alua_tg_pt_gp *tg_pt_gp, 1850 struct t10_alua_tg_pt_gp *tg_pt_gp,
1849 char *page) 1851 char *page)
1850{ 1852{
1851 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implict_trans_secs); 1853 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implicit_trans_secs);
1852} 1854}
1853 1855
1854ssize_t core_alua_store_implict_trans_secs( 1856ssize_t core_alua_store_implicit_trans_secs(
1855 struct t10_alua_tg_pt_gp *tg_pt_gp, 1857 struct t10_alua_tg_pt_gp *tg_pt_gp,
1856 const char *page, 1858 const char *page,
1857 size_t count) 1859 size_t count)
@@ -1861,16 +1863,16 @@ ssize_t core_alua_store_implict_trans_secs(
1861 1863
1862 ret = kstrtoul(page, 0, &tmp); 1864 ret = kstrtoul(page, 0, &tmp);
1863 if (ret < 0) { 1865 if (ret < 0) {
1864 pr_err("Unable to extract implict_trans_secs\n"); 1866 pr_err("Unable to extract implicit_trans_secs\n");
1865 return ret; 1867 return ret;
1866 } 1868 }
1867 if (tmp > ALUA_MAX_IMPLICT_TRANS_SECS) { 1869 if (tmp > ALUA_MAX_IMPLICIT_TRANS_SECS) {
1868 pr_err("Passed implict_trans_secs: %lu, exceeds" 1870 pr_err("Passed implicit_trans_secs: %lu, exceeds"
1869 " ALUA_MAX_IMPLICT_TRANS_SECS: %d\n", tmp, 1871 " ALUA_MAX_IMPLICIT_TRANS_SECS: %d\n", tmp,
1870 ALUA_MAX_IMPLICT_TRANS_SECS); 1872 ALUA_MAX_IMPLICIT_TRANS_SECS);
1871 return -EINVAL; 1873 return -EINVAL;
1872 } 1874 }
1873 tg_pt_gp->tg_pt_gp_implict_trans_secs = (int)tmp; 1875 tg_pt_gp->tg_pt_gp_implicit_trans_secs = (int)tmp;
1874 1876
1875 return count; 1877 return count;
1876} 1878}
@@ -1970,8 +1972,8 @@ ssize_t core_alua_store_secondary_status(
1970 return ret; 1972 return ret;
1971 } 1973 }
1972 if ((tmp != ALUA_STATUS_NONE) && 1974 if ((tmp != ALUA_STATUS_NONE) &&
1973 (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) && 1975 (tmp != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
1974 (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) { 1976 (tmp != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
1975 pr_err("Illegal value for alua_tg_pt_status: %lu\n", 1977 pr_err("Illegal value for alua_tg_pt_status: %lu\n",
1976 tmp); 1978 tmp);
1977 return -EINVAL; 1979 return -EINVAL;
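
The alua_access_type show/store pair above maps a small integer written through configfs onto the TPGS_IMPLICIT_ALUA and TPGS_EXPLICIT_ALUA flag bits (0 = none, 1 = implicit, 2 = explicit, 3 = both). Below is a minimal userspace sketch of that mapping, reusing the constant values from the renamed header that follows; the helper names are illustrative only and input validation is ignored.

#include <stdio.h>

#define TPGS_IMPLICIT_ALUA 0x10
#define TPGS_EXPLICIT_ALUA 0x20

/* Mirrors core_alua_store_access_type(): 0 = none, 1 = implicit,
 * 2 = explicit, 3 = both.
 */
static int access_type_from_user(unsigned long tmp)
{
	switch (tmp) {
	case 3: return TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA;
	case 2: return TPGS_EXPLICIT_ALUA;
	case 1: return TPGS_IMPLICIT_ALUA;
	default: return 0;
	}
}

/* Mirrors core_alua_show_access_type() */
static const char *access_type_name(int type)
{
	if ((type & TPGS_EXPLICIT_ALUA) && (type & TPGS_IMPLICIT_ALUA))
		return "Implicit and Explicit";
	if (type & TPGS_IMPLICIT_ALUA)
		return "Implicit";
	if (type & TPGS_EXPLICIT_ALUA)
		return "Explicit";
	return "None";
}

int main(void)
{
	for (unsigned long v = 0; v <= 3; v++)
		printf("alua_access_type=%lu -> 0x%02x (%s)\n",
		       v, access_type_from_user(v), access_type_name(v));
	return 0;
}
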
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
index e539c3e7f4ad..88e2e835f14a 100644
--- a/drivers/target/target_core_alua.h
+++ b/drivers/target/target_core_alua.h
@@ -7,15 +7,15 @@
7 * from spc4r17 section 6.4.2 Table 135 7 * from spc4r17 section 6.4.2 Table 135
8 */ 8 */
9#define TPGS_NO_ALUA 0x00 9#define TPGS_NO_ALUA 0x00
10#define TPGS_IMPLICT_ALUA 0x10 10#define TPGS_IMPLICIT_ALUA 0x10
11#define TPGS_EXPLICT_ALUA 0x20 11#define TPGS_EXPLICIT_ALUA 0x20
12 12
13/* 13/*
14 * ASYMMETRIC ACCESS STATE field 14 * ASYMMETRIC ACCESS STATE field
15 * 15 *
16 * from spc4r17 section 6.27 Table 245 16 * from spc4r17 section 6.27 Table 245
17 */ 17 */
18#define ALUA_ACCESS_STATE_ACTIVE_OPTMIZED 0x0 18#define ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED 0x0
19#define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED 0x1 19#define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED 0x1
20#define ALUA_ACCESS_STATE_STANDBY 0x2 20#define ALUA_ACCESS_STATE_STANDBY 0x2
21#define ALUA_ACCESS_STATE_UNAVAILABLE 0x3 21#define ALUA_ACCESS_STATE_UNAVAILABLE 0x3
@@ -23,13 +23,24 @@
23#define ALUA_ACCESS_STATE_TRANSITION 0xf 23#define ALUA_ACCESS_STATE_TRANSITION 0xf
24 24
25/* 25/*
26 * from spc4r36j section 6.37 Table 306
27 */
28#define ALUA_T_SUP 0x80
29#define ALUA_O_SUP 0x40
30#define ALUA_LBD_SUP 0x10
31#define ALUA_U_SUP 0x08
32#define ALUA_S_SUP 0x04
33#define ALUA_AN_SUP 0x02
34#define ALUA_AO_SUP 0x01
35
36/*
26 * REPORT_TARGET_PORT_GROUP STATUS CODE 37 * REPORT_TARGET_PORT_GROUP STATUS CODE
27 * 38 *
28 * from spc4r17 section 6.27 Table 246 39 * from spc4r17 section 6.27 Table 246
29 */ 40 */
30#define ALUA_STATUS_NONE 0x00 41#define ALUA_STATUS_NONE 0x00
31#define ALUA_STATUS_ALTERED_BY_EXPLICT_STPG 0x01 42#define ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG 0x01
32#define ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA 0x02 43#define ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA 0x02
33 44
34/* 45/*
35 * From spc4r17, Table D.1: ASC and ASCQ Assignement 46 * From spc4r17, Table D.1: ASC and ASCQ Assignement
@@ -46,17 +57,17 @@
46#define ALUA_DEFAULT_NONOP_DELAY_MSECS 100 57#define ALUA_DEFAULT_NONOP_DELAY_MSECS 100
47#define ALUA_MAX_NONOP_DELAY_MSECS 10000 /* 10 seconds */ 58#define ALUA_MAX_NONOP_DELAY_MSECS 10000 /* 10 seconds */
48/* 59/*
49 * Used for implict and explict ALUA transitional delay, that is disabled 60 * Used for implicit and explicit ALUA transitional delay, that is disabled
50 * by default, and is intended to be used for debugging client side ALUA code. 61 * by default, and is intended to be used for debugging client side ALUA code.
51 */ 62 */
52#define ALUA_DEFAULT_TRANS_DELAY_MSECS 0 63#define ALUA_DEFAULT_TRANS_DELAY_MSECS 0
53#define ALUA_MAX_TRANS_DELAY_MSECS 30000 /* 30 seconds */ 64#define ALUA_MAX_TRANS_DELAY_MSECS 30000 /* 30 seconds */
54/* 65/*
55 * Used for the recommended application client implict transition timeout 66 * Used for the recommended application client implicit transition timeout
56 * in seconds, returned by the REPORT_TARGET_PORT_GROUPS w/ extended header. 67 * in seconds, returned by the REPORT_TARGET_PORT_GROUPS w/ extended header.
57 */ 68 */
58#define ALUA_DEFAULT_IMPLICT_TRANS_SECS 0 69#define ALUA_DEFAULT_IMPLICIT_TRANS_SECS 0
59#define ALUA_MAX_IMPLICT_TRANS_SECS 255 70#define ALUA_MAX_IMPLICIT_TRANS_SECS 255
60/* 71/*
61 * Used by core_alua_update_tpg_primary_metadata() and 72 * Used by core_alua_update_tpg_primary_metadata() and
62 * core_alua_update_tpg_secondary_metadata() 73 * core_alua_update_tpg_secondary_metadata()
@@ -113,9 +124,9 @@ extern ssize_t core_alua_show_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
113 char *); 124 char *);
114extern ssize_t core_alua_store_trans_delay_msecs(struct t10_alua_tg_pt_gp *, 125extern ssize_t core_alua_store_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
115 const char *, size_t); 126 const char *, size_t);
116extern ssize_t core_alua_show_implict_trans_secs(struct t10_alua_tg_pt_gp *, 127extern ssize_t core_alua_show_implicit_trans_secs(struct t10_alua_tg_pt_gp *,
117 char *); 128 char *);
118extern ssize_t core_alua_store_implict_trans_secs(struct t10_alua_tg_pt_gp *, 129extern ssize_t core_alua_store_implicit_trans_secs(struct t10_alua_tg_pt_gp *,
119 const char *, size_t); 130 const char *, size_t);
120extern ssize_t core_alua_show_preferred_bit(struct t10_alua_tg_pt_gp *, 131extern ssize_t core_alua_show_preferred_bit(struct t10_alua_tg_pt_gp *,
121 char *); 132 char *);
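
The new ALUA_*_SUP defines are attributed to spc4r36j Table 306; their values line up with the single "supported states" byte carried per target port group descriptor in REPORT TARGET PORT GROUPS, which is presumably where tg_pt_gp_alua_supported_states ends up (that mapping is an assumption here, it is not shown in this diff). A small standalone sketch of assembling that byte from the default mask set up in core_alua_allocate_tg_pt_gp() above:

#include <stdio.h>

#define ALUA_T_SUP   0x80
#define ALUA_O_SUP   0x40
#define ALUA_LBD_SUP 0x10
#define ALUA_U_SUP   0x08
#define ALUA_S_SUP   0x04
#define ALUA_AN_SUP  0x02
#define ALUA_AO_SUP  0x01

int main(void)
{
	/* Default mask from core_alua_allocate_tg_pt_gp():
	 * every state except LBA-dependent support.
	 */
	unsigned char supported = ALUA_T_SUP | ALUA_O_SUP | ALUA_U_SUP |
				  ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP;

	printf("supported-states byte: 0x%02x (LBD_SUP %s)\n",
	       supported, (supported & ALUA_LBD_SUP) ? "set" : "clear");
	return 0;
}
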
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 82e81c542e43..272755d03e5a 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -177,16 +177,16 @@ static struct config_group *target_core_register_fabric(
177 * struct target_fabric_configfs *tf will contain a usage reference. 177 * struct target_fabric_configfs *tf will contain a usage reference.
178 */ 178 */
179 pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n", 179 pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
180 &TF_CIT_TMPL(tf)->tfc_wwn_cit); 180 &tf->tf_cit_tmpl.tfc_wwn_cit);
181 181
182 tf->tf_group.default_groups = tf->tf_default_groups; 182 tf->tf_group.default_groups = tf->tf_default_groups;
183 tf->tf_group.default_groups[0] = &tf->tf_disc_group; 183 tf->tf_group.default_groups[0] = &tf->tf_disc_group;
184 tf->tf_group.default_groups[1] = NULL; 184 tf->tf_group.default_groups[1] = NULL;
185 185
186 config_group_init_type_name(&tf->tf_group, name, 186 config_group_init_type_name(&tf->tf_group, name,
187 &TF_CIT_TMPL(tf)->tfc_wwn_cit); 187 &tf->tf_cit_tmpl.tfc_wwn_cit);
188 config_group_init_type_name(&tf->tf_disc_group, "discovery_auth", 188 config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
189 &TF_CIT_TMPL(tf)->tfc_discovery_cit); 189 &tf->tf_cit_tmpl.tfc_discovery_cit);
190 190
191 pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:" 191 pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
192 " %s\n", tf->tf_group.cg_item.ci_name); 192 " %s\n", tf->tf_group.cg_item.ci_name);
@@ -2036,7 +2036,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
2036 int new_state, ret; 2036 int new_state, ret;
2037 2037
2038 if (!tg_pt_gp->tg_pt_gp_valid_id) { 2038 if (!tg_pt_gp->tg_pt_gp_valid_id) {
2039 pr_err("Unable to do implict ALUA on non valid" 2039 pr_err("Unable to do implicit ALUA on non valid"
2040 " tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id); 2040 " tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
2041 return -EINVAL; 2041 return -EINVAL;
2042 } 2042 }
@@ -2049,9 +2049,9 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
2049 } 2049 }
2050 new_state = (int)tmp; 2050 new_state = (int)tmp;
2051 2051
2052 if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) { 2052 if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)) {
2053 pr_err("Unable to process implict configfs ALUA" 2053 pr_err("Unable to process implicit configfs ALUA"
2054 " transition while TPGS_IMPLICT_ALUA is disabled\n"); 2054 " transition while TPGS_IMPLICIT_ALUA is disabled\n");
2055 return -EINVAL; 2055 return -EINVAL;
2056 } 2056 }
2057 2057
@@ -2097,8 +2097,8 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(
2097 new_status = (int)tmp; 2097 new_status = (int)tmp;
2098 2098
2099 if ((new_status != ALUA_STATUS_NONE) && 2099 if ((new_status != ALUA_STATUS_NONE) &&
2100 (new_status != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) && 2100 (new_status != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
2101 (new_status != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) { 2101 (new_status != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
2102 pr_err("Illegal ALUA access status: 0x%02x\n", 2102 pr_err("Illegal ALUA access status: 0x%02x\n",
2103 new_status); 2103 new_status);
2104 return -EINVAL; 2104 return -EINVAL;
@@ -2131,6 +2131,90 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_type(
2131SE_DEV_ALUA_TG_PT_ATTR(alua_access_type, S_IRUGO | S_IWUSR); 2131SE_DEV_ALUA_TG_PT_ATTR(alua_access_type, S_IRUGO | S_IWUSR);
2132 2132
2133/* 2133/*
2134 * alua_supported_states
2135 */
2136
2137#define SE_DEV_ALUA_SUPPORT_STATE_SHOW(_name, _var, _bit) \
2138static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_support_##_name( \
2139 struct t10_alua_tg_pt_gp *t, char *p) \
2140{ \
2141 return sprintf(p, "%d\n", !!(t->_var & _bit)); \
2142}
2143
2144#define SE_DEV_ALUA_SUPPORT_STATE_STORE(_name, _var, _bit) \
2145static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_support_##_name(\
2146 struct t10_alua_tg_pt_gp *t, const char *p, size_t c) \
2147{ \
2148 unsigned long tmp; \
2149 int ret; \
2150 \
2151 if (!t->tg_pt_gp_valid_id) { \
2152 pr_err("Unable to do set ##_name ALUA state on non" \
2153 " valid tg_pt_gp ID: %hu\n", \
2154 t->tg_pt_gp_valid_id); \
2155 return -EINVAL; \
2156 } \
2157 \
2158 ret = kstrtoul(p, 0, &tmp); \
2159 if (ret < 0) { \
2160 pr_err("Invalid value '%s', must be '0' or '1'\n", p); \
2161 return -EINVAL; \
2162 } \
2163 if (tmp > 1) { \
2164 pr_err("Invalid value '%ld', must be '0' or '1'\n", tmp); \
2165 return -EINVAL; \
2166 } \
2167 if (!tmp) \
2168 t->_var |= _bit; \
2169 else \
2170 t->_var &= ~_bit; \
2171 \
2172 return c; \
2173}
2174
2175SE_DEV_ALUA_SUPPORT_STATE_SHOW(transitioning,
2176 tg_pt_gp_alua_supported_states, ALUA_T_SUP);
2177SE_DEV_ALUA_SUPPORT_STATE_STORE(transitioning,
2178 tg_pt_gp_alua_supported_states, ALUA_T_SUP);
2179SE_DEV_ALUA_TG_PT_ATTR(alua_support_transitioning, S_IRUGO | S_IWUSR);
2180
2181SE_DEV_ALUA_SUPPORT_STATE_SHOW(offline,
2182 tg_pt_gp_alua_supported_states, ALUA_O_SUP);
2183SE_DEV_ALUA_SUPPORT_STATE_STORE(offline,
2184 tg_pt_gp_alua_supported_states, ALUA_O_SUP);
2185SE_DEV_ALUA_TG_PT_ATTR(alua_support_offline, S_IRUGO | S_IWUSR);
2186
2187SE_DEV_ALUA_SUPPORT_STATE_SHOW(lba_dependent,
2188 tg_pt_gp_alua_supported_states, ALUA_LBD_SUP);
2189SE_DEV_ALUA_SUPPORT_STATE_STORE(lba_dependent,
2190 tg_pt_gp_alua_supported_states, ALUA_LBD_SUP);
2191SE_DEV_ALUA_TG_PT_ATTR(alua_support_lba_dependent, S_IRUGO | S_IWUSR);
2192
2193SE_DEV_ALUA_SUPPORT_STATE_SHOW(unavailable,
2194 tg_pt_gp_alua_supported_states, ALUA_U_SUP);
2195SE_DEV_ALUA_SUPPORT_STATE_STORE(unavailable,
2196 tg_pt_gp_alua_supported_states, ALUA_U_SUP);
2197SE_DEV_ALUA_TG_PT_ATTR(alua_support_unavailable, S_IRUGO | S_IWUSR);
2198
2199SE_DEV_ALUA_SUPPORT_STATE_SHOW(standby,
2200 tg_pt_gp_alua_supported_states, ALUA_S_SUP);
2201SE_DEV_ALUA_SUPPORT_STATE_STORE(standby,
2202 tg_pt_gp_alua_supported_states, ALUA_S_SUP);
2203SE_DEV_ALUA_TG_PT_ATTR(alua_support_standby, S_IRUGO | S_IWUSR);
2204
2205SE_DEV_ALUA_SUPPORT_STATE_SHOW(active_optimized,
2206 tg_pt_gp_alua_supported_states, ALUA_AO_SUP);
2207SE_DEV_ALUA_SUPPORT_STATE_STORE(active_optimized,
2208 tg_pt_gp_alua_supported_states, ALUA_AO_SUP);
2209SE_DEV_ALUA_TG_PT_ATTR(alua_support_active_optimized, S_IRUGO | S_IWUSR);
2210
2211SE_DEV_ALUA_SUPPORT_STATE_SHOW(active_nonoptimized,
2212 tg_pt_gp_alua_supported_states, ALUA_AN_SUP);
2213SE_DEV_ALUA_SUPPORT_STATE_STORE(active_nonoptimized,
2214 tg_pt_gp_alua_supported_states, ALUA_AN_SUP);
2215SE_DEV_ALUA_TG_PT_ATTR(alua_support_active_nonoptimized, S_IRUGO | S_IWUSR);
2216
2217/*
2134 * alua_write_metadata 2218 * alua_write_metadata
2135 */ 2219 */
2136static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_write_metadata( 2220static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_write_metadata(
@@ -2210,24 +2294,24 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_trans_delay_msecs(
2210SE_DEV_ALUA_TG_PT_ATTR(trans_delay_msecs, S_IRUGO | S_IWUSR); 2294SE_DEV_ALUA_TG_PT_ATTR(trans_delay_msecs, S_IRUGO | S_IWUSR);
2211 2295
2212/* 2296/*
2213 * implict_trans_secs 2297 * implicit_trans_secs
2214 */ 2298 */
2215static ssize_t target_core_alua_tg_pt_gp_show_attr_implict_trans_secs( 2299static ssize_t target_core_alua_tg_pt_gp_show_attr_implicit_trans_secs(
2216 struct t10_alua_tg_pt_gp *tg_pt_gp, 2300 struct t10_alua_tg_pt_gp *tg_pt_gp,
2217 char *page) 2301 char *page)
2218{ 2302{
2219 return core_alua_show_implict_trans_secs(tg_pt_gp, page); 2303 return core_alua_show_implicit_trans_secs(tg_pt_gp, page);
2220} 2304}
2221 2305
2222static ssize_t target_core_alua_tg_pt_gp_store_attr_implict_trans_secs( 2306static ssize_t target_core_alua_tg_pt_gp_store_attr_implicit_trans_secs(
2223 struct t10_alua_tg_pt_gp *tg_pt_gp, 2307 struct t10_alua_tg_pt_gp *tg_pt_gp,
2224 const char *page, 2308 const char *page,
2225 size_t count) 2309 size_t count)
2226{ 2310{
2227 return core_alua_store_implict_trans_secs(tg_pt_gp, page, count); 2311 return core_alua_store_implicit_trans_secs(tg_pt_gp, page, count);
2228} 2312}
2229 2313
2230SE_DEV_ALUA_TG_PT_ATTR(implict_trans_secs, S_IRUGO | S_IWUSR); 2314SE_DEV_ALUA_TG_PT_ATTR(implicit_trans_secs, S_IRUGO | S_IWUSR);
2231 2315
2232/* 2316/*
2233 * preferred 2317 * preferred
@@ -2350,10 +2434,17 @@ static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
2350 &target_core_alua_tg_pt_gp_alua_access_state.attr, 2434 &target_core_alua_tg_pt_gp_alua_access_state.attr,
2351 &target_core_alua_tg_pt_gp_alua_access_status.attr, 2435 &target_core_alua_tg_pt_gp_alua_access_status.attr,
2352 &target_core_alua_tg_pt_gp_alua_access_type.attr, 2436 &target_core_alua_tg_pt_gp_alua_access_type.attr,
2437 &target_core_alua_tg_pt_gp_alua_support_transitioning.attr,
2438 &target_core_alua_tg_pt_gp_alua_support_offline.attr,
2439 &target_core_alua_tg_pt_gp_alua_support_lba_dependent.attr,
2440 &target_core_alua_tg_pt_gp_alua_support_unavailable.attr,
2441 &target_core_alua_tg_pt_gp_alua_support_standby.attr,
2442 &target_core_alua_tg_pt_gp_alua_support_active_nonoptimized.attr,
2443 &target_core_alua_tg_pt_gp_alua_support_active_optimized.attr,
2353 &target_core_alua_tg_pt_gp_alua_write_metadata.attr, 2444 &target_core_alua_tg_pt_gp_alua_write_metadata.attr,
2354 &target_core_alua_tg_pt_gp_nonop_delay_msecs.attr, 2445 &target_core_alua_tg_pt_gp_nonop_delay_msecs.attr,
2355 &target_core_alua_tg_pt_gp_trans_delay_msecs.attr, 2446 &target_core_alua_tg_pt_gp_trans_delay_msecs.attr,
2356 &target_core_alua_tg_pt_gp_implict_trans_secs.attr, 2447 &target_core_alua_tg_pt_gp_implicit_trans_secs.attr,
2357 &target_core_alua_tg_pt_gp_preferred.attr, 2448 &target_core_alua_tg_pt_gp_preferred.attr,
2358 &target_core_alua_tg_pt_gp_tg_pt_gp_id.attr, 2449 &target_core_alua_tg_pt_gp_tg_pt_gp_id.attr,
2359 &target_core_alua_tg_pt_gp_members.attr, 2450 &target_core_alua_tg_pt_gp_members.attr,
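
The SE_DEV_ALUA_SUPPORT_STATE_SHOW/STORE macros expose one boolean configfs attribute per ALUA_*_SUP bit; the show side prints !!(var & bit), and note that, as the store side is written here, writing '0' sets the bit while '1' clears it. Below is a hedged userspace sketch that reads one of the new attributes; the configfs path is an assumption and depends on the HBA, device and target port group names actually configured.

#include <stdio.h>

/* Hypothetical path: adjust the HBA, device and tg_pt_gp names to your setup. */
#define ATTR_PATH \
	"/sys/kernel/config/target/core/iblock_0/mydev/alua/default_tg_pt_gp/alua_support_standby"

int main(void)
{
	char buf[8] = "";
	FILE *f = fopen(ATTR_PATH, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("alua_support_standby = %s", buf);	/* "0\n" or "1\n" */
	fclose(f);
	return 0;
}
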
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index d90dbb0f1a69..207b340498a3 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -92,6 +92,9 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
92 se_cmd->pr_res_key = deve->pr_res_key; 92 se_cmd->pr_res_key = deve->pr_res_key;
93 se_cmd->orig_fe_lun = unpacked_lun; 93 se_cmd->orig_fe_lun = unpacked_lun;
94 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; 94 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
95
96 percpu_ref_get(&se_lun->lun_ref);
97 se_cmd->lun_ref_active = true;
95 } 98 }
96 spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags); 99 spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
97 100
@@ -119,24 +122,20 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
119 se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0; 122 se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
120 se_cmd->orig_fe_lun = 0; 123 se_cmd->orig_fe_lun = 0;
121 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; 124 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
125
126 percpu_ref_get(&se_lun->lun_ref);
127 se_cmd->lun_ref_active = true;
122 } 128 }
123 129
124 /* Directly associate cmd with se_dev */ 130 /* Directly associate cmd with se_dev */
125 se_cmd->se_dev = se_lun->lun_se_dev; 131 se_cmd->se_dev = se_lun->lun_se_dev;
126 132
127 /* TODO: get rid of this and use atomics for stats */
128 dev = se_lun->lun_se_dev; 133 dev = se_lun->lun_se_dev;
129 spin_lock_irqsave(&dev->stats_lock, flags); 134 atomic_long_inc(&dev->num_cmds);
130 dev->num_cmds++;
131 if (se_cmd->data_direction == DMA_TO_DEVICE) 135 if (se_cmd->data_direction == DMA_TO_DEVICE)
132 dev->write_bytes += se_cmd->data_length; 136 atomic_long_add(se_cmd->data_length, &dev->write_bytes);
133 else if (se_cmd->data_direction == DMA_FROM_DEVICE) 137 else if (se_cmd->data_direction == DMA_FROM_DEVICE)
134 dev->read_bytes += se_cmd->data_length; 138 atomic_long_add(se_cmd->data_length, &dev->read_bytes);
135 spin_unlock_irqrestore(&dev->stats_lock, flags);
136
137 spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
138 list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
139 spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);
140 139
141 return 0; 140 return 0;
142} 141}
@@ -314,14 +313,14 @@ int core_enable_device_list_for_node(
314 deve = nacl->device_list[mapped_lun]; 313 deve = nacl->device_list[mapped_lun];
315 314
316 /* 315 /*
317 * Check if the call is handling demo mode -> explict LUN ACL 316 * Check if the call is handling demo mode -> explicit LUN ACL
318 * transition. This transition must be for the same struct se_lun 317 * transition. This transition must be for the same struct se_lun
319 * + mapped_lun that was setup in demo mode.. 318 * + mapped_lun that was setup in demo mode..
320 */ 319 */
321 if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { 320 if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
322 if (deve->se_lun_acl != NULL) { 321 if (deve->se_lun_acl != NULL) {
323 pr_err("struct se_dev_entry->se_lun_acl" 322 pr_err("struct se_dev_entry->se_lun_acl"
324 " already set for demo mode -> explict" 323 " already set for demo mode -> explicit"
325 " LUN ACL transition\n"); 324 " LUN ACL transition\n");
326 spin_unlock_irq(&nacl->device_list_lock); 325 spin_unlock_irq(&nacl->device_list_lock);
327 return -EINVAL; 326 return -EINVAL;
@@ -329,7 +328,7 @@ int core_enable_device_list_for_node(
329 if (deve->se_lun != lun) { 328 if (deve->se_lun != lun) {
330 pr_err("struct se_dev_entry->se_lun does" 329 pr_err("struct se_dev_entry->se_lun does"
331 " match passed struct se_lun for demo mode" 330 " match passed struct se_lun for demo mode"
332 " -> explict LUN ACL transition\n"); 331 " -> explicit LUN ACL transition\n");
333 spin_unlock_irq(&nacl->device_list_lock); 332 spin_unlock_irq(&nacl->device_list_lock);
334 return -EINVAL; 333 return -EINVAL;
335 } 334 }
@@ -1407,6 +1406,7 @@ static void scsi_dump_inquiry(struct se_device *dev)
1407struct se_device *target_alloc_device(struct se_hba *hba, const char *name) 1406struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
1408{ 1407{
1409 struct se_device *dev; 1408 struct se_device *dev;
1409 struct se_lun *xcopy_lun;
1410 1410
1411 dev = hba->transport->alloc_device(hba, name); 1411 dev = hba->transport->alloc_device(hba, name);
1412 if (!dev) 1412 if (!dev)
@@ -1423,7 +1423,6 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
1423 INIT_LIST_HEAD(&dev->state_list); 1423 INIT_LIST_HEAD(&dev->state_list);
1424 INIT_LIST_HEAD(&dev->qf_cmd_list); 1424 INIT_LIST_HEAD(&dev->qf_cmd_list);
1425 INIT_LIST_HEAD(&dev->g_dev_node); 1425 INIT_LIST_HEAD(&dev->g_dev_node);
1426 spin_lock_init(&dev->stats_lock);
1427 spin_lock_init(&dev->execute_task_lock); 1426 spin_lock_init(&dev->execute_task_lock);
1428 spin_lock_init(&dev->delayed_cmd_lock); 1427 spin_lock_init(&dev->delayed_cmd_lock);
1429 spin_lock_init(&dev->dev_reservation_lock); 1428 spin_lock_init(&dev->dev_reservation_lock);
@@ -1469,6 +1468,14 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
1469 dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS; 1468 dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
1470 dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS; 1469 dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
1471 1470
1471 xcopy_lun = &dev->xcopy_lun;
1472 xcopy_lun->lun_se_dev = dev;
1473 init_completion(&xcopy_lun->lun_shutdown_comp);
1474 INIT_LIST_HEAD(&xcopy_lun->lun_acl_list);
1475 spin_lock_init(&xcopy_lun->lun_acl_lock);
1476 spin_lock_init(&xcopy_lun->lun_sep_lock);
1477 init_completion(&xcopy_lun->lun_ref_comp);
1478
1472 return dev; 1479 return dev;
1473} 1480}
1474 1481
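
Two independent changes land in transport_lookup_cmd_lun() here: each command now takes a percpu_ref on its se_lun (dropped again in transport_lun_remove_cmd(), visible at the end of this section), and the per-device statistics move from a spinlock-protected block to atomic_long counters. A userspace analogue of the statistics side, using C11 atomics in place of the kernel's atomic_long_t, shows why no lock is needed on this path; the struct and function names are illustrative only.

#include <stdatomic.h>
#include <stdio.h>

/* Each command path only needs atomic adds, so the old dev->stats_lock
 * spinlock around the counters can go away on the fast path.
 */
struct dev_stats {
	atomic_long num_cmds;
	atomic_long read_bytes;
	atomic_long write_bytes;
};

static void account_cmd(struct dev_stats *s, int is_write, long data_length)
{
	atomic_fetch_add(&s->num_cmds, 1);
	if (is_write)
		atomic_fetch_add(&s->write_bytes, data_length);
	else
		atomic_fetch_add(&s->read_bytes, data_length);
}

int main(void)
{
	struct dev_stats s = {0};

	account_cmd(&s, 1, 4096);
	account_cmd(&s, 0, 8192);
	printf("cmds=%ld read=%ld write=%ld\n",
	       atomic_load(&s.num_cmds),
	       atomic_load(&s.read_bytes),
	       atomic_load(&s.write_bytes));
	return 0;
}
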
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 3503996d7d10..dae2ad6a669e 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -385,9 +385,9 @@ static struct config_group *target_fabric_make_mappedlun(
385 } 385 }
386 386
387 config_group_init_type_name(&lacl->se_lun_group, name, 387 config_group_init_type_name(&lacl->se_lun_group, name,
388 &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_cit); 388 &tf->tf_cit_tmpl.tfc_tpg_mappedlun_cit);
389 config_group_init_type_name(&lacl->ml_stat_grps.stat_group, 389 config_group_init_type_name(&lacl->ml_stat_grps.stat_group,
390 "statistics", &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_stat_cit); 390 "statistics", &tf->tf_cit_tmpl.tfc_tpg_mappedlun_stat_cit);
391 lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group; 391 lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group;
392 lacl_cg->default_groups[1] = NULL; 392 lacl_cg->default_groups[1] = NULL;
393 393
@@ -504,16 +504,16 @@ static struct config_group *target_fabric_make_nodeacl(
504 nacl_cg->default_groups[4] = NULL; 504 nacl_cg->default_groups[4] = NULL;
505 505
506 config_group_init_type_name(&se_nacl->acl_group, name, 506 config_group_init_type_name(&se_nacl->acl_group, name,
507 &TF_CIT_TMPL(tf)->tfc_tpg_nacl_base_cit); 507 &tf->tf_cit_tmpl.tfc_tpg_nacl_base_cit);
508 config_group_init_type_name(&se_nacl->acl_attrib_group, "attrib", 508 config_group_init_type_name(&se_nacl->acl_attrib_group, "attrib",
509 &TF_CIT_TMPL(tf)->tfc_tpg_nacl_attrib_cit); 509 &tf->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit);
510 config_group_init_type_name(&se_nacl->acl_auth_group, "auth", 510 config_group_init_type_name(&se_nacl->acl_auth_group, "auth",
511 &TF_CIT_TMPL(tf)->tfc_tpg_nacl_auth_cit); 511 &tf->tf_cit_tmpl.tfc_tpg_nacl_auth_cit);
512 config_group_init_type_name(&se_nacl->acl_param_group, "param", 512 config_group_init_type_name(&se_nacl->acl_param_group, "param",
513 &TF_CIT_TMPL(tf)->tfc_tpg_nacl_param_cit); 513 &tf->tf_cit_tmpl.tfc_tpg_nacl_param_cit);
514 config_group_init_type_name(&se_nacl->acl_fabric_stat_group, 514 config_group_init_type_name(&se_nacl->acl_fabric_stat_group,
515 "fabric_statistics", 515 "fabric_statistics",
516 &TF_CIT_TMPL(tf)->tfc_tpg_nacl_stat_cit); 516 &tf->tf_cit_tmpl.tfc_tpg_nacl_stat_cit);
517 517
518 return &se_nacl->acl_group; 518 return &se_nacl->acl_group;
519} 519}
@@ -595,7 +595,7 @@ static struct config_group *target_fabric_make_np(
595 595
596 se_tpg_np->tpg_np_parent = se_tpg; 596 se_tpg_np->tpg_np_parent = se_tpg;
597 config_group_init_type_name(&se_tpg_np->tpg_np_group, name, 597 config_group_init_type_name(&se_tpg_np->tpg_np_group, name,
598 &TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit); 598 &tf->tf_cit_tmpl.tfc_tpg_np_base_cit);
599 599
600 return &se_tpg_np->tpg_np_group; 600 return &se_tpg_np->tpg_np_group;
601} 601}
@@ -899,9 +899,9 @@ static struct config_group *target_fabric_make_lun(
899 } 899 }
900 900
901 config_group_init_type_name(&lun->lun_group, name, 901 config_group_init_type_name(&lun->lun_group, name,
902 &TF_CIT_TMPL(tf)->tfc_tpg_port_cit); 902 &tf->tf_cit_tmpl.tfc_tpg_port_cit);
903 config_group_init_type_name(&lun->port_stat_grps.stat_group, 903 config_group_init_type_name(&lun->port_stat_grps.stat_group,
904 "statistics", &TF_CIT_TMPL(tf)->tfc_tpg_port_stat_cit); 904 "statistics", &tf->tf_cit_tmpl.tfc_tpg_port_stat_cit);
905 lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group; 905 lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group;
906 lun_cg->default_groups[1] = NULL; 906 lun_cg->default_groups[1] = NULL;
907 907
@@ -1056,19 +1056,19 @@ static struct config_group *target_fabric_make_tpg(
1056 se_tpg->tpg_group.default_groups[6] = NULL; 1056 se_tpg->tpg_group.default_groups[6] = NULL;
1057 1057
1058 config_group_init_type_name(&se_tpg->tpg_group, name, 1058 config_group_init_type_name(&se_tpg->tpg_group, name,
1059 &TF_CIT_TMPL(tf)->tfc_tpg_base_cit); 1059 &tf->tf_cit_tmpl.tfc_tpg_base_cit);
1060 config_group_init_type_name(&se_tpg->tpg_lun_group, "lun", 1060 config_group_init_type_name(&se_tpg->tpg_lun_group, "lun",
1061 &TF_CIT_TMPL(tf)->tfc_tpg_lun_cit); 1061 &tf->tf_cit_tmpl.tfc_tpg_lun_cit);
1062 config_group_init_type_name(&se_tpg->tpg_np_group, "np", 1062 config_group_init_type_name(&se_tpg->tpg_np_group, "np",
1063 &TF_CIT_TMPL(tf)->tfc_tpg_np_cit); 1063 &tf->tf_cit_tmpl.tfc_tpg_np_cit);
1064 config_group_init_type_name(&se_tpg->tpg_acl_group, "acls", 1064 config_group_init_type_name(&se_tpg->tpg_acl_group, "acls",
1065 &TF_CIT_TMPL(tf)->tfc_tpg_nacl_cit); 1065 &tf->tf_cit_tmpl.tfc_tpg_nacl_cit);
1066 config_group_init_type_name(&se_tpg->tpg_attrib_group, "attrib", 1066 config_group_init_type_name(&se_tpg->tpg_attrib_group, "attrib",
1067 &TF_CIT_TMPL(tf)->tfc_tpg_attrib_cit); 1067 &tf->tf_cit_tmpl.tfc_tpg_attrib_cit);
1068 config_group_init_type_name(&se_tpg->tpg_auth_group, "auth", 1068 config_group_init_type_name(&se_tpg->tpg_auth_group, "auth",
1069 &TF_CIT_TMPL(tf)->tfc_tpg_auth_cit); 1069 &tf->tf_cit_tmpl.tfc_tpg_auth_cit);
1070 config_group_init_type_name(&se_tpg->tpg_param_group, "param", 1070 config_group_init_type_name(&se_tpg->tpg_param_group, "param",
1071 &TF_CIT_TMPL(tf)->tfc_tpg_param_cit); 1071 &tf->tf_cit_tmpl.tfc_tpg_param_cit);
1072 1072
1073 return &se_tpg->tpg_group; 1073 return &se_tpg->tpg_group;
1074} 1074}
@@ -1155,9 +1155,9 @@ static struct config_group *target_fabric_make_wwn(
1155 wwn->wwn_group.default_groups[1] = NULL; 1155 wwn->wwn_group.default_groups[1] = NULL;
1156 1156
1157 config_group_init_type_name(&wwn->wwn_group, name, 1157 config_group_init_type_name(&wwn->wwn_group, name,
1158 &TF_CIT_TMPL(tf)->tfc_tpg_cit); 1158 &tf->tf_cit_tmpl.tfc_tpg_cit);
1159 config_group_init_type_name(&wwn->fabric_stat_group, "fabric_statistics", 1159 config_group_init_type_name(&wwn->fabric_stat_group, "fabric_statistics",
1160 &TF_CIT_TMPL(tf)->tfc_wwn_fabric_stats_cit); 1160 &tf->tf_cit_tmpl.tfc_wwn_fabric_stats_cit);
1161 1161
1162 return &wwn->wwn_group; 1162 return &wwn->wwn_group;
1163} 1163}
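
All of the TF_CIT_TMPL() conversions in this file (and in target_core_configfs.c above) are mechanical: as far as this diff shows, the macro was just an accessor for the config_item_type template embedded in struct target_fabric_configfs, so the old and new spellings name the same object. A tiny standalone sketch with mocked-up structures illustrates the equivalence; the real definitions live under include/target/, and the exact macro body is assumed here.

#include <stdio.h>

/* Minimal mocks, not the real kernel definitions. */
struct config_item_type { const char *ct_name; };
struct tf_cit_tmpl_mock { struct config_item_type tfc_wwn_cit; };
struct target_fabric_configfs { struct tf_cit_tmpl_mock tf_cit_tmpl; };

/* Assumed shape of the accessor macro this patch removes (illustrative). */
#define TF_CIT_TMPL(tf)	(&(tf)->tf_cit_tmpl)

int main(void)
{
	struct target_fabric_configfs tf = {
		.tf_cit_tmpl = { .tfc_wwn_cit = { .ct_name = "wwn" } },
	};

	/* Old and new spellings resolve to the same address: */
	printf("%d\n", &TF_CIT_TMPL(&tf)->tfc_wwn_cit ==
		       &tf.tf_cit_tmpl.tfc_wwn_cit);
	return 0;
}
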
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index b662f89dedac..0e34cda3271e 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -562,7 +562,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
562 } else { 562 } else {
563 ret = fd_do_rw(cmd, sgl, sgl_nents, 1); 563 ret = fd_do_rw(cmd, sgl, sgl_nents, 1);
564 /* 564 /*
565 * Perform implict vfs_fsync_range() for fd_do_writev() ops 565 * Perform implicit vfs_fsync_range() for fd_do_writev() ops
566 * for SCSI WRITEs with Forced Unit Access (FUA) set. 566 * for SCSI WRITEs with Forced Unit Access (FUA) set.
567 * Allow this to happen independent of WCE=0 setting. 567 * Allow this to happen independent of WCE=0 setting.
568 */ 568 */
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index b9a3394fe479..c87959f12760 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -710,6 +710,45 @@ static sector_t iblock_get_blocks(struct se_device *dev)
710 return iblock_emulate_read_cap_with_block_size(dev, bd, q); 710 return iblock_emulate_read_cap_with_block_size(dev, bd, q);
711} 711}
712 712
713static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
714{
715 struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
716 struct block_device *bd = ib_dev->ibd_bd;
717 int ret;
718
719 ret = bdev_alignment_offset(bd);
720 if (ret == -1)
721 return 0;
722
723 /* convert offset-bytes to offset-lbas */
724 return ret / bdev_logical_block_size(bd);
725}
726
727static unsigned int iblock_get_lbppbe(struct se_device *dev)
728{
729 struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
730 struct block_device *bd = ib_dev->ibd_bd;
731 int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd);
732
733 return ilog2(logs_per_phys);
734}
735
736static unsigned int iblock_get_io_min(struct se_device *dev)
737{
738 struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
739 struct block_device *bd = ib_dev->ibd_bd;
740
741 return bdev_io_min(bd);
742}
743
744static unsigned int iblock_get_io_opt(struct se_device *dev)
745{
746 struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
747 struct block_device *bd = ib_dev->ibd_bd;
748
749 return bdev_io_opt(bd);
750}
751
713static struct sbc_ops iblock_sbc_ops = { 752static struct sbc_ops iblock_sbc_ops = {
714 .execute_rw = iblock_execute_rw, 753 .execute_rw = iblock_execute_rw,
715 .execute_sync_cache = iblock_execute_sync_cache, 754 .execute_sync_cache = iblock_execute_sync_cache,
@@ -749,6 +788,10 @@ static struct se_subsystem_api iblock_template = {
749 .show_configfs_dev_params = iblock_show_configfs_dev_params, 788 .show_configfs_dev_params = iblock_show_configfs_dev_params,
750 .get_device_type = sbc_get_device_type, 789 .get_device_type = sbc_get_device_type,
751 .get_blocks = iblock_get_blocks, 790 .get_blocks = iblock_get_blocks,
791 .get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
792 .get_lbppbe = iblock_get_lbppbe,
793 .get_io_min = iblock_get_io_min,
794 .get_io_opt = iblock_get_io_opt,
752 .get_write_cache = iblock_get_write_cache, 795 .get_write_cache = iblock_get_write_cache,
753}; 796};
754 797
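
The four new iblock callbacks forward block-layer topology (alignment offset, logical/physical block size, io_min, io_opt) to the SCSI emulation in the sbc/spc hunks below. A standalone sketch of the arithmetic the lbppbe and alignment-offset helpers perform, with example geometry values chosen purely for illustration:

#include <stdio.h>

/* Open-coded ilog2() for small positive values (the kernel helper lives in
 * <linux/log2.h>).
 */
static unsigned int ilog2_u32(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	/* Example geometry: 512-byte logical sectors, 4KiB physical sectors,
	 * first logical block 3584 bytes into a physical block.
	 */
	unsigned int logical = 512, physical = 4096, alignment_bytes = 3584;

	/* iblock_get_lbppbe(): logical blocks per physical block, as an exponent */
	unsigned int lbppbe = ilog2_u32(physical / logical);
	/* iblock_get_alignment_offset_lbas(): offset-bytes converted to offset-LBAs */
	unsigned int offset_lbas = alignment_bytes / logical;

	printf("lbppbe = %u, alignment offset = %u LBAs\n", lbppbe, offset_lbas);
	return 0;
}
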
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 579128abe3f5..47b63b094cdc 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -75,8 +75,6 @@ extern struct se_device *g_lun0_dev;
75 75
76struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg, 76struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
77 const char *); 77 const char *);
78struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
79 unsigned char *);
80void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *); 78void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *);
81void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *); 79void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
82struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32); 80struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32);
@@ -102,7 +100,7 @@ int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int);
102int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int); 100int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int);
103int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int); 101int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
104bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags); 102bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags);
105int transport_clear_lun_from_sessions(struct se_lun *); 103int transport_clear_lun_ref(struct se_lun *);
106void transport_send_task_abort(struct se_cmd *); 104void transport_send_task_abort(struct se_cmd *);
107sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size); 105sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
108void target_qf_do_work(struct work_struct *work); 106void target_qf_do_work(struct work_struct *work);
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index d1ae4c5c3ffd..2f5d77932c80 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -474,7 +474,7 @@ static int core_scsi3_pr_seq_non_holder(
474 * statement. 474 * statement.
475 */ 475 */
476 if (!ret && !other_cdb) { 476 if (!ret && !other_cdb) {
477 pr_debug("Allowing explict CDB: 0x%02x for %s" 477 pr_debug("Allowing explicit CDB: 0x%02x for %s"
478 " reservation holder\n", cdb[0], 478 " reservation holder\n", cdb[0],
479 core_scsi3_pr_dump_type(pr_reg_type)); 479 core_scsi3_pr_dump_type(pr_reg_type));
480 480
@@ -507,7 +507,7 @@ static int core_scsi3_pr_seq_non_holder(
507 */ 507 */
508 508
509 if (!registered_nexus) { 509 if (!registered_nexus) {
510 pr_debug("Allowing implict CDB: 0x%02x" 510 pr_debug("Allowing implicit CDB: 0x%02x"
511 " for %s reservation on unregistered" 511 " for %s reservation on unregistered"
512 " nexus\n", cdb[0], 512 " nexus\n", cdb[0],
513 core_scsi3_pr_dump_type(pr_reg_type)); 513 core_scsi3_pr_dump_type(pr_reg_type));
@@ -522,7 +522,7 @@ static int core_scsi3_pr_seq_non_holder(
522 * allow commands from registered nexuses. 522 * allow commands from registered nexuses.
523 */ 523 */
524 524
525 pr_debug("Allowing implict CDB: 0x%02x for %s" 525 pr_debug("Allowing implicit CDB: 0x%02x for %s"
526 " reservation\n", cdb[0], 526 " reservation\n", cdb[0],
527 core_scsi3_pr_dump_type(pr_reg_type)); 527 core_scsi3_pr_dump_type(pr_reg_type));
528 528
@@ -683,7 +683,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
683 alua_port_list) { 683 alua_port_list) {
684 /* 684 /*
685 * This pointer will be NULL for demo mode MappedLUNs 685 * This pointer will be NULL for demo mode MappedLUNs
686 * that have not been make explict via a ConfigFS 686 * that have not been make explicit via a ConfigFS
687 * MappedLUN group for the SCSI Initiator Node ACL. 687 * MappedLUN group for the SCSI Initiator Node ACL.
688 */ 688 */
689 if (!deve_tmp->se_lun_acl) 689 if (!deve_tmp->se_lun_acl)
@@ -1158,7 +1158,7 @@ static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg)
1158 smp_mb__after_atomic_dec(); 1158 smp_mb__after_atomic_dec();
1159} 1159}
1160 1160
1161static int core_scsi3_check_implict_release( 1161static int core_scsi3_check_implicit_release(
1162 struct se_device *dev, 1162 struct se_device *dev,
1163 struct t10_pr_registration *pr_reg) 1163 struct t10_pr_registration *pr_reg)
1164{ 1164{
@@ -1174,7 +1174,7 @@ static int core_scsi3_check_implict_release(
1174 } 1174 }
1175 if (pr_res_holder == pr_reg) { 1175 if (pr_res_holder == pr_reg) {
1176 /* 1176 /*
1177 * Perform an implict RELEASE if the registration that 1177 * Perform an implicit RELEASE if the registration that
1178 * is being released is holding the reservation. 1178 * is being released is holding the reservation.
1179 * 1179 *
1180 * From spc4r17, section 5.7.11.1: 1180 * From spc4r17, section 5.7.11.1:
@@ -1192,7 +1192,7 @@ static int core_scsi3_check_implict_release(
1192 * For 'All Registrants' reservation types, all existing 1192 * For 'All Registrants' reservation types, all existing
1193 * registrations are still processed as reservation holders 1193 * registrations are still processed as reservation holders
1194 * in core_scsi3_pr_seq_non_holder() after the initial 1194 * in core_scsi3_pr_seq_non_holder() after the initial
1195 * reservation holder is implictly released here. 1195 * reservation holder is implicitly released here.
1196 */ 1196 */
1197 } else if (pr_reg->pr_reg_all_tg_pt && 1197 } else if (pr_reg->pr_reg_all_tg_pt &&
1198 (!strcmp(pr_res_holder->pr_reg_nacl->initiatorname, 1198 (!strcmp(pr_res_holder->pr_reg_nacl->initiatorname,
@@ -2125,7 +2125,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
2125 /* 2125 /*
2126 * sa_res_key=0 Unregister Reservation Key for registered I_T Nexus. 2126 * sa_res_key=0 Unregister Reservation Key for registered I_T Nexus.
2127 */ 2127 */
2128 pr_holder = core_scsi3_check_implict_release( 2128 pr_holder = core_scsi3_check_implicit_release(
2129 cmd->se_dev, pr_reg); 2129 cmd->se_dev, pr_reg);
2130 if (pr_holder < 0) { 2130 if (pr_holder < 0) {
2131 ret = TCM_RESERVATION_CONFLICT; 2131 ret = TCM_RESERVATION_CONFLICT;
@@ -2402,7 +2402,7 @@ static void __core_scsi3_complete_pro_release(
2402 struct se_device *dev, 2402 struct se_device *dev,
2403 struct se_node_acl *se_nacl, 2403 struct se_node_acl *se_nacl,
2404 struct t10_pr_registration *pr_reg, 2404 struct t10_pr_registration *pr_reg,
2405 int explict) 2405 int explicit)
2406{ 2406{
2407 struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo; 2407 struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo;
2408 char i_buf[PR_REG_ISID_ID_LEN]; 2408 char i_buf[PR_REG_ISID_ID_LEN];
@@ -2416,7 +2416,7 @@ static void __core_scsi3_complete_pro_release(
2416 2416
2417 pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared" 2417 pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared"
2418 " reservation holder TYPE: %s ALL_TG_PT: %d\n", 2418 " reservation holder TYPE: %s ALL_TG_PT: %d\n",
2419 tfo->get_fabric_name(), (explict) ? "explict" : "implict", 2419 tfo->get_fabric_name(), (explicit) ? "explicit" : "implicit",
2420 core_scsi3_pr_dump_type(pr_reg->pr_res_type), 2420 core_scsi3_pr_dump_type(pr_reg->pr_res_type),
2421 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); 2421 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
2422 pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n", 2422 pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n",
@@ -2692,7 +2692,7 @@ static void __core_scsi3_complete_pro_preempt(
2692 memset(i_buf, 0, PR_REG_ISID_ID_LEN); 2692 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
2693 core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN); 2693 core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
2694 /* 2694 /*
2695 * Do an implict RELEASE of the existing reservation. 2695 * Do an implicit RELEASE of the existing reservation.
2696 */ 2696 */
2697 if (dev->dev_pr_res_holder) 2697 if (dev->dev_pr_res_holder)
2698 __core_scsi3_complete_pro_release(dev, nacl, 2698 __core_scsi3_complete_pro_release(dev, nacl,
@@ -2845,7 +2845,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
2845 * 5.7.11.4 Preempting, Table 52 and Figure 7. 2845 * 5.7.11.4 Preempting, Table 52 and Figure 7.
2846 * 2846 *
2847 * For a ZERO SA Reservation key, release 2847 * For a ZERO SA Reservation key, release
2848 * all other registrations and do an implict 2848 * all other registrations and do an implicit
2849 * release of active persistent reservation. 2849 * release of active persistent reservation.
2850 * 2850 *
2851 * For a non-ZERO SA Reservation key, only 2851 * For a non-ZERO SA Reservation key, only
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 131327ac7f5b..4ffe5f2ec0e9 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -27,7 +27,6 @@
27#include <linux/string.h> 27#include <linux/string.h>
28#include <linux/parser.h> 28#include <linux/parser.h>
29#include <linux/timer.h> 29#include <linux/timer.h>
30#include <linux/blkdev.h>
31#include <linux/slab.h> 30#include <linux/slab.h>
32#include <linux/spinlock.h> 31#include <linux/spinlock.h>
33#include <scsi/scsi.h> 32#include <scsi/scsi.h>
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index d9b92b2c524d..52ae54e60105 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -105,12 +105,22 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
105 buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff; 105 buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
106 buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff; 106 buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
107 buf[11] = dev->dev_attrib.block_size & 0xff; 107 buf[11] = dev->dev_attrib.block_size & 0xff;
108
109 if (dev->transport->get_lbppbe)
110 buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;
111
112 if (dev->transport->get_alignment_offset_lbas) {
113 u16 lalba = dev->transport->get_alignment_offset_lbas(dev);
114 buf[14] = (lalba >> 8) & 0x3f;
115 buf[15] = lalba & 0xff;
116 }
117
108 /* 118 /*
109 * Set Thin Provisioning Enable bit following sbc3r22 in section 119 * Set Thin Provisioning Enable bit following sbc3r22 in section
110 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled. 120 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
111 */ 121 */
112 if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws) 122 if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
113 buf[14] = 0x80; 123 buf[14] |= 0x80;
114 124
115 rbuf = transport_kmap_data_sg(cmd); 125 rbuf = transport_kmap_data_sg(cmd);
116 if (rbuf) { 126 if (rbuf) {
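
The READ CAPACITY (16) change packs the new topology data into bytes 13-15 of the parameter data: the block-size exponent goes into the low nibble of byte 13, the lowest aligned LBA is split across bytes 14-15, and the existing thin-provisioning enable bit is now OR'd into byte 14 rather than assigned, so it no longer clobbers the upper bits of that field. A short standalone sketch of the same packing with example values:

#include <stdio.h>

int main(void)
{
	unsigned char buf[32] = { 0 };
	unsigned int lbppbe = 3;   /* e.g. 4KiB physical blocks on 512B logical */
	unsigned int lalba = 7;    /* alignment offset expressed in LBAs */
	int emulate_tp = 1;        /* emulate_tpu or emulate_tpws enabled */

	/* Mirrors the sbc_emulate_readcapacity_16() hunk above */
	buf[13] = lbppbe & 0x0f;
	buf[14] = (lalba >> 8) & 0x3f;
	buf[15] = lalba & 0xff;
	if (emulate_tp)
		buf[14] |= 0x80;   /* thin-provisioning enable bit, OR'd in */

	printf("bytes 13-15: %02x %02x %02x\n", buf[13], buf[14], buf[15]);
	return 0;
}
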
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 074539558a54..021c3f4a4f00 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -48,7 +48,7 @@ static void spc_fill_alua_data(struct se_port *port, unsigned char *buf)
48 buf[5] = 0x80; 48 buf[5] = 0x80;
49 49
50 /* 50 /*
51 * Set TPGS field for explict and/or implict ALUA access type 51 * Set TPGS field for explicit and/or implicit ALUA access type
52 * and opteration. 52 * and opteration.
53 * 53 *
54 * See spc4r17 section 6.4.2 Table 135 54 * See spc4r17 section 6.4.2 Table 135
@@ -452,6 +452,7 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
452 struct se_device *dev = cmd->se_dev; 452 struct se_device *dev = cmd->se_dev;
453 u32 max_sectors; 453 u32 max_sectors;
454 int have_tp = 0; 454 int have_tp = 0;
455 int opt, min;
455 456
456 /* 457 /*
457 * Following spc3r22 section 6.5.3 Block Limits VPD page, when 458 * Following spc3r22 section 6.5.3 Block Limits VPD page, when
@@ -475,7 +476,10 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
475 /* 476 /*
476 * Set OPTIMAL TRANSFER LENGTH GRANULARITY 477 * Set OPTIMAL TRANSFER LENGTH GRANULARITY
477 */ 478 */
478 put_unaligned_be16(1, &buf[6]); 479 if (dev->transport->get_io_min && (min = dev->transport->get_io_min(dev)))
480 put_unaligned_be16(min / dev->dev_attrib.block_size, &buf[6]);
481 else
482 put_unaligned_be16(1, &buf[6]);
479 483
480 /* 484 /*
481 * Set MAXIMUM TRANSFER LENGTH 485 * Set MAXIMUM TRANSFER LENGTH
@@ -487,7 +491,10 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
487 /* 491 /*
488 * Set OPTIMAL TRANSFER LENGTH 492 * Set OPTIMAL TRANSFER LENGTH
489 */ 493 */
490 put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]); 494 if (dev->transport->get_io_opt && (opt = dev->transport->get_io_opt(dev)))
495 put_unaligned_be32(opt / dev->dev_attrib.block_size, &buf[12]);
496 else
497 put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);
491 498
492 /* 499 /*
493 * Exit now if we don't support TP. 500 * Exit now if we don't support TP.
@@ -1250,7 +1257,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
1250 *size = (cdb[3] << 8) + cdb[4]; 1257 *size = (cdb[3] << 8) + cdb[4];
1251 1258
1252 /* 1259 /*
1253 * Do implict HEAD_OF_QUEUE processing for INQUIRY. 1260 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
1254 * See spc4r17 section 5.3 1261 * See spc4r17 section 5.3
1255 */ 1262 */
1256 cmd->sam_task_attr = MSG_HEAD_TAG; 1263 cmd->sam_task_attr = MSG_HEAD_TAG;
@@ -1284,7 +1291,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
1284 cmd->execute_cmd = spc_emulate_report_luns; 1291 cmd->execute_cmd = spc_emulate_report_luns;
1285 *size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; 1292 *size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
1286 /* 1293 /*
1287 * Do implict HEAD_OF_QUEUE processing for REPORT_LUNS 1294 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
1288 * See spc4r17 section 5.3 1295 * See spc4r17 section 5.3
1289 */ 1296 */
1290 cmd->sam_task_attr = MSG_HEAD_TAG; 1297 cmd->sam_task_attr = MSG_HEAD_TAG;
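
In the Block Limits VPD page (B0h), OPTIMAL TRANSFER LENGTH GRANULARITY and OPTIMAL TRANSFER LENGTH are expressed in logical blocks, so the byte counts reported by the new get_io_min()/get_io_opt() callbacks are divided by the logical block size before being written out, with a fallback to the old fixed values when a backend does not provide them. A standalone sketch of that conversion with example values:

#include <stdio.h>

int main(void)
{
	/* Example values a RAID or flash block device might report. */
	unsigned int block_size = 512;
	unsigned int io_min = 4096;	/* bdev_io_min(): minimum I/O size in bytes */
	unsigned int io_opt = 1048576;	/* bdev_io_opt(): optimal I/O size in bytes */

	/* Mirrors the spc_emulate_evpd_b0() hunks above; 0 means "use the
	 * existing default" in this sketch.
	 */
	unsigned int granularity = io_min ? io_min / block_size : 1;
	unsigned int optimal_len = io_opt ? io_opt / block_size : 0;

	printf("OPTIMAL TRANSFER LENGTH GRANULARITY = %u blocks\n", granularity);
	printf("OPTIMAL TRANSFER LENGTH             = %u blocks\n", optimal_len);
	return 0;
}
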
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index 9c642e02cba1..03538994d2f7 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -32,7 +32,6 @@
32#include <linux/utsname.h> 32#include <linux/utsname.h>
33#include <linux/proc_fs.h> 33#include <linux/proc_fs.h>
34#include <linux/seq_file.h> 34#include <linux/seq_file.h>
35#include <linux/blkdev.h>
36#include <linux/configfs.h> 35#include <linux/configfs.h>
37#include <scsi/scsi.h> 36#include <scsi/scsi.h>
38#include <scsi/scsi_device.h> 37#include <scsi/scsi_device.h>
@@ -214,7 +213,8 @@ static ssize_t target_stat_scsi_tgt_dev_show_attr_resets(
214 struct se_device *dev = 213 struct se_device *dev =
215 container_of(sgrps, struct se_device, dev_stat_grps); 214 container_of(sgrps, struct se_device, dev_stat_grps);
216 215
217 return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets); 216 return snprintf(page, PAGE_SIZE, "%lu\n",
217 atomic_long_read(&dev->num_resets));
218} 218}
219DEV_STAT_SCSI_TGT_DEV_ATTR_RO(resets); 219DEV_STAT_SCSI_TGT_DEV_ATTR_RO(resets);
220 220
@@ -397,8 +397,8 @@ static ssize_t target_stat_scsi_lu_show_attr_num_cmds(
397 container_of(sgrps, struct se_device, dev_stat_grps); 397 container_of(sgrps, struct se_device, dev_stat_grps);
398 398
399 /* scsiLuNumCommands */ 399 /* scsiLuNumCommands */
400 return snprintf(page, PAGE_SIZE, "%llu\n", 400 return snprintf(page, PAGE_SIZE, "%lu\n",
401 (unsigned long long)dev->num_cmds); 401 atomic_long_read(&dev->num_cmds));
402} 402}
403DEV_STAT_SCSI_LU_ATTR_RO(num_cmds); 403DEV_STAT_SCSI_LU_ATTR_RO(num_cmds);
404 404
@@ -409,7 +409,8 @@ static ssize_t target_stat_scsi_lu_show_attr_read_mbytes(
409 container_of(sgrps, struct se_device, dev_stat_grps); 409 container_of(sgrps, struct se_device, dev_stat_grps);
410 410
411 /* scsiLuReadMegaBytes */ 411 /* scsiLuReadMegaBytes */
412 return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->read_bytes >> 20)); 412 return snprintf(page, PAGE_SIZE, "%lu\n",
413 atomic_long_read(&dev->read_bytes) >> 20);
413} 414}
414DEV_STAT_SCSI_LU_ATTR_RO(read_mbytes); 415DEV_STAT_SCSI_LU_ATTR_RO(read_mbytes);
415 416
@@ -420,7 +421,8 @@ static ssize_t target_stat_scsi_lu_show_attr_write_mbytes(
420 container_of(sgrps, struct se_device, dev_stat_grps); 421 container_of(sgrps, struct se_device, dev_stat_grps);
421 422
422 /* scsiLuWrittenMegaBytes */ 423 /* scsiLuWrittenMegaBytes */
423 return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->write_bytes >> 20)); 424 return snprintf(page, PAGE_SIZE, "%lu\n",
425 atomic_long_read(&dev->write_bytes) >> 20);
424} 426}
425DEV_STAT_SCSI_LU_ATTR_RO(write_mbytes); 427DEV_STAT_SCSI_LU_ATTR_RO(write_mbytes);
426 428
@@ -431,7 +433,7 @@ static ssize_t target_stat_scsi_lu_show_attr_resets(
431 container_of(sgrps, struct se_device, dev_stat_grps); 433 container_of(sgrps, struct se_device, dev_stat_grps);
432 434
433 /* scsiLuInResets */ 435 /* scsiLuInResets */
434 return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets); 436 return snprintf(page, PAGE_SIZE, "%lu\n", atomic_long_read(&dev->num_resets));
435} 437}
436DEV_STAT_SCSI_LU_ATTR_RO(resets); 438DEV_STAT_SCSI_LU_ATTR_RO(resets);
437 439
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 250009909d49..70c638f730af 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -386,9 +386,7 @@ int core_tmr_lun_reset(
386 pr_debug("LUN_RESET: SCSI-2 Released reservation\n"); 386 pr_debug("LUN_RESET: SCSI-2 Released reservation\n");
387 } 387 }
388 388
389 spin_lock_irq(&dev->stats_lock); 389 atomic_long_inc(&dev->num_resets);
390 dev->num_resets++;
391 spin_unlock_irq(&dev->stats_lock);
392 390
393 pr_debug("LUN_RESET: %s for [%s] Complete\n", 391 pr_debug("LUN_RESET: %s for [%s] Complete\n",
394 (preempt_and_abort_list) ? "Preempt" : "TMR", 392 (preempt_and_abort_list) ? "Preempt" : "TMR",
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index b9a6ec0aa5fe..f697f8baec54 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -116,6 +116,7 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
116 116
117 return acl; 117 return acl;
118} 118}
119EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);
119 120
120/* core_tpg_add_node_to_devs(): 121/* core_tpg_add_node_to_devs():
121 * 122 *
@@ -633,6 +634,13 @@ int core_tpg_set_initiator_node_tag(
633} 634}
634EXPORT_SYMBOL(core_tpg_set_initiator_node_tag); 635EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);
635 636
637static void core_tpg_lun_ref_release(struct percpu_ref *ref)
638{
639 struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);
640
641 complete(&lun->lun_ref_comp);
642}
643
636static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg) 644static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
637{ 645{
638 /* Set in core_dev_setup_virtual_lun0() */ 646 /* Set in core_dev_setup_virtual_lun0() */
@@ -646,15 +654,20 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
646 atomic_set(&lun->lun_acl_count, 0); 654 atomic_set(&lun->lun_acl_count, 0);
647 init_completion(&lun->lun_shutdown_comp); 655 init_completion(&lun->lun_shutdown_comp);
648 INIT_LIST_HEAD(&lun->lun_acl_list); 656 INIT_LIST_HEAD(&lun->lun_acl_list);
649 INIT_LIST_HEAD(&lun->lun_cmd_list);
650 spin_lock_init(&lun->lun_acl_lock); 657 spin_lock_init(&lun->lun_acl_lock);
651 spin_lock_init(&lun->lun_cmd_lock);
652 spin_lock_init(&lun->lun_sep_lock); 658 spin_lock_init(&lun->lun_sep_lock);
659 init_completion(&lun->lun_ref_comp);
653 660
654 ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev); 661 ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release);
655 if (ret < 0) 662 if (ret < 0)
656 return ret; 663 return ret;
657 664
665 ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
666 if (ret < 0) {
667 percpu_ref_cancel_init(&lun->lun_ref);
668 return ret;
669 }
670
658 return 0; 671 return 0;
659} 672}
660 673
@@ -691,10 +704,9 @@ int core_tpg_register(
691 atomic_set(&lun->lun_acl_count, 0); 704 atomic_set(&lun->lun_acl_count, 0);
692 init_completion(&lun->lun_shutdown_comp); 705 init_completion(&lun->lun_shutdown_comp);
693 INIT_LIST_HEAD(&lun->lun_acl_list); 706 INIT_LIST_HEAD(&lun->lun_acl_list);
694 INIT_LIST_HEAD(&lun->lun_cmd_list);
695 spin_lock_init(&lun->lun_acl_lock); 707 spin_lock_init(&lun->lun_acl_lock);
696 spin_lock_init(&lun->lun_cmd_lock);
697 spin_lock_init(&lun->lun_sep_lock); 708 spin_lock_init(&lun->lun_sep_lock);
709 init_completion(&lun->lun_ref_comp);
698 } 710 }
699 711
700 se_tpg->se_tpg_type = se_tpg_type; 712 se_tpg->se_tpg_type = se_tpg_type;
@@ -815,10 +827,16 @@ int core_tpg_post_addlun(
815{ 827{
816 int ret; 828 int ret;
817 829
818 ret = core_dev_export(lun_ptr, tpg, lun); 830 ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release);
819 if (ret < 0) 831 if (ret < 0)
820 return ret; 832 return ret;
821 833
834 ret = core_dev_export(lun_ptr, tpg, lun);
835 if (ret < 0) {
836 percpu_ref_cancel_init(&lun->lun_ref);
837 return ret;
838 }
839
822 spin_lock(&tpg->tpg_lun_lock); 840 spin_lock(&tpg->tpg_lun_lock);
823 lun->lun_access = lun_access; 841 lun->lun_access = lun_access;
824 lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE; 842 lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
@@ -827,14 +845,6 @@ int core_tpg_post_addlun(
827 return 0; 845 return 0;
828} 846}
829 847
830static void core_tpg_shutdown_lun(
831 struct se_portal_group *tpg,
832 struct se_lun *lun)
833{
834 core_clear_lun_from_tpg(lun, tpg);
835 transport_clear_lun_from_sessions(lun);
836}
837
838struct se_lun *core_tpg_pre_dellun( 848struct se_lun *core_tpg_pre_dellun(
839 struct se_portal_group *tpg, 849 struct se_portal_group *tpg,
840 u32 unpacked_lun) 850 u32 unpacked_lun)
@@ -869,7 +879,8 @@ int core_tpg_post_dellun(
869 struct se_portal_group *tpg, 879 struct se_portal_group *tpg,
870 struct se_lun *lun) 880 struct se_lun *lun)
871{ 881{
872 core_tpg_shutdown_lun(tpg, lun); 882 core_clear_lun_from_tpg(lun, tpg);
883 transport_clear_lun_ref(lun);
873 884
874 core_dev_unexport(lun->lun_se_dev, tpg, lun); 885 core_dev_unexport(lun->lun_se_dev, tpg, lun);
875 886
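A minimal sketch of the lun_ref lifecycle the target_core_tpg.c hunks above adopt, assuming the 3.13-era two-argument percpu_ref_init(); the example_* names and the example_export() stand-in for core_dev_export() are illustrative, not code from this tree.

#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/percpu-refcount.h>

struct example_lun {
	struct percpu_ref ref;
	struct completion ref_comp;	/* signalled by the release callback */
};

static int example_export(struct example_lun *lun)
{
	return 0;			/* hypothetical stand-in for core_dev_export() */
}

static void example_lun_ref_release(struct percpu_ref *ref)
{
	struct example_lun *lun = container_of(ref, struct example_lun, ref);

	complete(&lun->ref_comp);	/* wakes the shutdown-side waiter */
}

static int example_lun_setup(struct example_lun *lun)
{
	int ret;

	init_completion(&lun->ref_comp);
	ret = percpu_ref_init(&lun->ref, example_lun_ref_release);
	if (ret < 0)
		return ret;

	if (example_export(lun) < 0) {
		/* ref was never published: cancel instead of kill + wait */
		percpu_ref_cancel_init(&lun->ref);
		return -EINVAL;
	}
	return 0;
}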
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 81e945eefbbd..91953da0f623 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -28,7 +28,6 @@
28#include <linux/string.h> 28#include <linux/string.h>
29#include <linux/timer.h> 29#include <linux/timer.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/blkdev.h>
32#include <linux/spinlock.h> 31#include <linux/spinlock.h>
33#include <linux/kthread.h> 32#include <linux/kthread.h>
34#include <linux/in.h> 33#include <linux/in.h>
@@ -473,7 +472,7 @@ void transport_deregister_session(struct se_session *se_sess)
473 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", 472 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
474 se_tpg->se_tpg_tfo->get_fabric_name()); 473 se_tpg->se_tpg_tfo->get_fabric_name());
475 /* 474 /*
476 * If last kref is dropping now for an explict NodeACL, awake sleeping 475 * If last kref is dropping now for an explicit NodeACL, awake sleeping
477 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group 476 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
478 * removal context. 477 * removal context.
479 */ 478 */
@@ -515,23 +514,6 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
515 if (write_pending) 514 if (write_pending)
516 cmd->t_state = TRANSPORT_WRITE_PENDING; 515 cmd->t_state = TRANSPORT_WRITE_PENDING;
517 516
518 /*
519 * Determine if IOCTL context caller in requesting the stopping of this
520 * command for LUN shutdown purposes.
521 */
522 if (cmd->transport_state & CMD_T_LUN_STOP) {
523 pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
524 __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
525
526 cmd->transport_state &= ~CMD_T_ACTIVE;
527 if (remove_from_lists)
528 target_remove_from_state_list(cmd);
529 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
530
531 complete(&cmd->transport_lun_stop_comp);
532 return 1;
533 }
534
535 if (remove_from_lists) { 517 if (remove_from_lists) {
536 target_remove_from_state_list(cmd); 518 target_remove_from_state_list(cmd);
537 519
@@ -585,15 +567,11 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
585static void transport_lun_remove_cmd(struct se_cmd *cmd) 567static void transport_lun_remove_cmd(struct se_cmd *cmd)
586{ 568{
587 struct se_lun *lun = cmd->se_lun; 569 struct se_lun *lun = cmd->se_lun;
588 unsigned long flags;
589 570
590 if (!lun) 571 if (!lun || !cmd->lun_ref_active)
591 return; 572 return;
592 573
593 spin_lock_irqsave(&lun->lun_cmd_lock, flags); 574 percpu_ref_put(&lun->lun_ref);
594 if (!list_empty(&cmd->se_lun_node))
595 list_del_init(&cmd->se_lun_node);
596 spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
597} 575}
598 576
599void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) 577void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
@@ -668,7 +646,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
668 cmd->transport_state |= CMD_T_FAILED; 646 cmd->transport_state |= CMD_T_FAILED;
669 647
670 /* 648 /*
671 * Check for case where an explict ABORT_TASK has been received 649 * Check for case where an explicit ABORT_TASK has been received
672 * and transport_wait_for_tasks() will be waiting for completion.. 650 * and transport_wait_for_tasks() will be waiting for completion..
673 */ 651 */
674 if (cmd->transport_state & CMD_T_ABORTED && 652 if (cmd->transport_state & CMD_T_ABORTED &&
@@ -1092,13 +1070,10 @@ void transport_init_se_cmd(
1092 int task_attr, 1070 int task_attr,
1093 unsigned char *sense_buffer) 1071 unsigned char *sense_buffer)
1094{ 1072{
1095 INIT_LIST_HEAD(&cmd->se_lun_node);
1096 INIT_LIST_HEAD(&cmd->se_delayed_node); 1073 INIT_LIST_HEAD(&cmd->se_delayed_node);
1097 INIT_LIST_HEAD(&cmd->se_qf_node); 1074 INIT_LIST_HEAD(&cmd->se_qf_node);
1098 INIT_LIST_HEAD(&cmd->se_cmd_list); 1075 INIT_LIST_HEAD(&cmd->se_cmd_list);
1099 INIT_LIST_HEAD(&cmd->state_list); 1076 INIT_LIST_HEAD(&cmd->state_list);
1100 init_completion(&cmd->transport_lun_fe_stop_comp);
1101 init_completion(&cmd->transport_lun_stop_comp);
1102 init_completion(&cmd->t_transport_stop_comp); 1077 init_completion(&cmd->t_transport_stop_comp);
1103 init_completion(&cmd->cmd_wait_comp); 1078 init_completion(&cmd->cmd_wait_comp);
1104 init_completion(&cmd->task_stop_comp); 1079 init_completion(&cmd->task_stop_comp);
@@ -1719,29 +1694,14 @@ void target_execute_cmd(struct se_cmd *cmd)
1719 /* 1694 /*
1720 * If the received CDB has aleady been aborted stop processing it here. 1695 * If the received CDB has aleady been aborted stop processing it here.
1721 */ 1696 */
1722 if (transport_check_aborted_status(cmd, 1)) { 1697 if (transport_check_aborted_status(cmd, 1))
1723 complete(&cmd->transport_lun_stop_comp);
1724 return; 1698 return;
1725 }
1726 1699
1727 /* 1700 /*
1728 * Determine if IOCTL context caller in requesting the stopping of this
1729 * command for LUN shutdown purposes.
1730 */
1731 spin_lock_irq(&cmd->t_state_lock);
1732 if (cmd->transport_state & CMD_T_LUN_STOP) {
1733 pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
1734 __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
1735
1736 cmd->transport_state &= ~CMD_T_ACTIVE;
1737 spin_unlock_irq(&cmd->t_state_lock);
1738 complete(&cmd->transport_lun_stop_comp);
1739 return;
1740 }
1741 /*
1742 * Determine if frontend context caller is requesting the stopping of 1701 * Determine if frontend context caller is requesting the stopping of
1743 * this command for frontend exceptions. 1702 * this command for frontend exceptions.
1744 */ 1703 */
1704 spin_lock_irq(&cmd->t_state_lock);
1745 if (cmd->transport_state & CMD_T_STOP) { 1705 if (cmd->transport_state & CMD_T_STOP) {
1746 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n", 1706 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
1747 __func__, __LINE__, 1707 __func__, __LINE__,
@@ -2404,164 +2364,23 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
2404} 2364}
2405EXPORT_SYMBOL(target_wait_for_sess_cmds); 2365EXPORT_SYMBOL(target_wait_for_sess_cmds);
2406 2366
2407/* transport_lun_wait_for_tasks(): 2367static int transport_clear_lun_ref_thread(void *p)
2408 *
2409 * Called from ConfigFS context to stop the passed struct se_cmd to allow
2410 * an struct se_lun to be successfully shutdown.
2411 */
2412static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
2413{
2414 unsigned long flags;
2415 int ret = 0;
2416
2417 /*
2418 * If the frontend has already requested this struct se_cmd to
2419 * be stopped, we can safely ignore this struct se_cmd.
2420 */
2421 spin_lock_irqsave(&cmd->t_state_lock, flags);
2422 if (cmd->transport_state & CMD_T_STOP) {
2423 cmd->transport_state &= ~CMD_T_LUN_STOP;
2424
2425 pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n",
2426 cmd->se_tfo->get_task_tag(cmd));
2427 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2428 transport_cmd_check_stop(cmd, false, false);
2429 return -EPERM;
2430 }
2431 cmd->transport_state |= CMD_T_LUN_FE_STOP;
2432 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2433
2434 // XXX: audit task_flags checks.
2435 spin_lock_irqsave(&cmd->t_state_lock, flags);
2436 if ((cmd->transport_state & CMD_T_BUSY) &&
2437 (cmd->transport_state & CMD_T_SENT)) {
2438 if (!target_stop_cmd(cmd, &flags))
2439 ret++;
2440 }
2441 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2442
2443 pr_debug("ConfigFS: cmd: %p stop tasks ret:"
2444 " %d\n", cmd, ret);
2445 if (!ret) {
2446 pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
2447 cmd->se_tfo->get_task_tag(cmd));
2448 wait_for_completion(&cmd->transport_lun_stop_comp);
2449 pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
2450 cmd->se_tfo->get_task_tag(cmd));
2451 }
2452
2453 return 0;
2454}
2455
2456static void __transport_clear_lun_from_sessions(struct se_lun *lun)
2457{
2458 struct se_cmd *cmd = NULL;
2459 unsigned long lun_flags, cmd_flags;
2460 /*
2461 * Do exception processing and return CHECK_CONDITION status to the
2462 * Initiator Port.
2463 */
2464 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
2465 while (!list_empty(&lun->lun_cmd_list)) {
2466 cmd = list_first_entry(&lun->lun_cmd_list,
2467 struct se_cmd, se_lun_node);
2468 list_del_init(&cmd->se_lun_node);
2469
2470 spin_lock(&cmd->t_state_lock);
2471 pr_debug("SE_LUN[%d] - Setting cmd->transport"
2472 "_lun_stop for ITT: 0x%08x\n",
2473 cmd->se_lun->unpacked_lun,
2474 cmd->se_tfo->get_task_tag(cmd));
2475 cmd->transport_state |= CMD_T_LUN_STOP;
2476 spin_unlock(&cmd->t_state_lock);
2477
2478 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
2479
2480 if (!cmd->se_lun) {
2481 pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
2482 cmd->se_tfo->get_task_tag(cmd),
2483 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
2484 BUG();
2485 }
2486 /*
2487 * If the Storage engine still owns the iscsi_cmd_t, determine
2488 * and/or stop its context.
2489 */
2490 pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
2491 "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
2492 cmd->se_tfo->get_task_tag(cmd));
2493
2494 if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
2495 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
2496 continue;
2497 }
2498
2499 pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
2500 "_wait_for_tasks(): SUCCESS\n",
2501 cmd->se_lun->unpacked_lun,
2502 cmd->se_tfo->get_task_tag(cmd));
2503
2504 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
2505 if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) {
2506 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
2507 goto check_cond;
2508 }
2509 cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
2510 target_remove_from_state_list(cmd);
2511 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
2512
2513 /*
2514 * The Storage engine stopped this struct se_cmd before it was
2515 * send to the fabric frontend for delivery back to the
2516 * Initiator Node. Return this SCSI CDB back with an
2517 * CHECK_CONDITION status.
2518 */
2519check_cond:
2520 transport_send_check_condition_and_sense(cmd,
2521 TCM_NON_EXISTENT_LUN, 0);
2522 /*
2523 * If the fabric frontend is waiting for this iscsi_cmd_t to
2524 * be released, notify the waiting thread now that LU has
2525 * finished accessing it.
2526 */
2527 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
2528 if (cmd->transport_state & CMD_T_LUN_FE_STOP) {
2529 pr_debug("SE_LUN[%d] - Detected FE stop for"
2530 " struct se_cmd: %p ITT: 0x%08x\n",
2531 lun->unpacked_lun,
2532 cmd, cmd->se_tfo->get_task_tag(cmd));
2533
2534 spin_unlock_irqrestore(&cmd->t_state_lock,
2535 cmd_flags);
2536 transport_cmd_check_stop(cmd, false, false);
2537 complete(&cmd->transport_lun_fe_stop_comp);
2538 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
2539 continue;
2540 }
2541 pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
2542 lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));
2543
2544 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
2545 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
2546 }
2547 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
2548}
2549
2550static int transport_clear_lun_thread(void *p)
2551{ 2368{
2552 struct se_lun *lun = p; 2369 struct se_lun *lun = p;
2553 2370
2554 __transport_clear_lun_from_sessions(lun); 2371 percpu_ref_kill(&lun->lun_ref);
2372
2373 wait_for_completion(&lun->lun_ref_comp);
2555 complete(&lun->lun_shutdown_comp); 2374 complete(&lun->lun_shutdown_comp);
2556 2375
2557 return 0; 2376 return 0;
2558} 2377}
2559 2378
2560int transport_clear_lun_from_sessions(struct se_lun *lun) 2379int transport_clear_lun_ref(struct se_lun *lun)
2561{ 2380{
2562 struct task_struct *kt; 2381 struct task_struct *kt;
2563 2382
2564 kt = kthread_run(transport_clear_lun_thread, lun, 2383 kt = kthread_run(transport_clear_lun_ref_thread, lun,
2565 "tcm_cl_%u", lun->unpacked_lun); 2384 "tcm_cl_%u", lun->unpacked_lun);
2566 if (IS_ERR(kt)) { 2385 if (IS_ERR(kt)) {
2567 pr_err("Unable to start clear_lun thread\n"); 2386 pr_err("Unable to start clear_lun thread\n");
@@ -2595,43 +2414,6 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
2595 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2414 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2596 return false; 2415 return false;
2597 } 2416 }
2598 /*
2599 * If we are already stopped due to an external event (ie: LUN shutdown)
2600 * sleep until the connection can have the passed struct se_cmd back.
2601 * The cmd->transport_lun_stopped_sem will be upped by
2602 * transport_clear_lun_from_sessions() once the ConfigFS context caller
2603 * has completed its operation on the struct se_cmd.
2604 */
2605 if (cmd->transport_state & CMD_T_LUN_STOP) {
2606 pr_debug("wait_for_tasks: Stopping"
2607 " wait_for_completion(&cmd->t_tasktransport_lun_fe"
2608 "_stop_comp); for ITT: 0x%08x\n",
2609 cmd->se_tfo->get_task_tag(cmd));
2610 /*
2611 * There is a special case for WRITES where a FE exception +
2612 * LUN shutdown means ConfigFS context is still sleeping on
2613 * transport_lun_stop_comp in transport_lun_wait_for_tasks().
2614 * We go ahead and up transport_lun_stop_comp just to be sure
2615 * here.
2616 */
2617 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2618 complete(&cmd->transport_lun_stop_comp);
2619 wait_for_completion(&cmd->transport_lun_fe_stop_comp);
2620 spin_lock_irqsave(&cmd->t_state_lock, flags);
2621
2622 target_remove_from_state_list(cmd);
2623 /*
2624 * At this point, the frontend who was the originator of this
2625 * struct se_cmd, now owns the structure and can be released through
2626 * normal means below.
2627 */
2628 pr_debug("wait_for_tasks: Stopped"
2629 " wait_for_completion(&cmd->t_tasktransport_lun_fe_"
2630 "stop_comp); for ITT: 0x%08x\n",
2631 cmd->se_tfo->get_task_tag(cmd));
2632
2633 cmd->transport_state &= ~CMD_T_LUN_STOP;
2634 }
2635 2417
2636 if (!(cmd->transport_state & CMD_T_ACTIVE)) { 2418 if (!(cmd->transport_state & CMD_T_ACTIVE)) {
2637 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2419 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@@ -2910,6 +2692,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
2910 cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd)); 2692 cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd));
2911 2693
2912 cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS; 2694 cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
2695 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
2913 trace_target_cmd_complete(cmd); 2696 trace_target_cmd_complete(cmd);
2914 cmd->se_tfo->queue_status(cmd); 2697 cmd->se_tfo->queue_status(cmd);
2915 2698
@@ -2938,6 +2721,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
2938 if (cmd->se_tfo->write_pending_status(cmd) != 0) { 2721 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
2939 cmd->transport_state |= CMD_T_ABORTED; 2722 cmd->transport_state |= CMD_T_ABORTED;
2940 smp_mb__after_atomic_inc(); 2723 smp_mb__after_atomic_inc();
2724 return;
2941 } 2725 }
2942 } 2726 }
2943 cmd->scsi_status = SAM_STAT_TASK_ABORTED; 2727 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
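A hedged sketch of the reference flow that transport_clear_lun_ref() relies on after the hunks above. The shutdown side (kill, then wait on lun_ref_comp) and the put side (guarded by cmd->lun_ref_active) mirror this diff; the get side shown with percpu_ref_tryget() is an assumption about the LUN lookup path, which is not part of these hunks, and the example_* names are illustrative.

#include <linux/percpu-refcount.h>
#include <linux/completion.h>
#include <target/target_core_base.h>

/* lookup path (assumed): take one reference per command while the LUN is live */
static bool example_cmd_get_lun_ref(struct se_cmd *cmd, struct se_lun *lun)
{
	if (!percpu_ref_tryget(&lun->lun_ref))	/* fails once the ref is killed */
		return false;
	cmd->se_lun = lun;
	cmd->lun_ref_active = true;
	return true;
}

/* completion path: matches transport_lun_remove_cmd() above */
static void example_cmd_put_lun_ref(struct se_cmd *cmd)
{
	if (cmd->se_lun && cmd->lun_ref_active)
		percpu_ref_put(&cmd->se_lun->lun_ref);
}

/* shutdown path: matches transport_clear_lun_ref_thread() above */
static void example_lun_shutdown(struct se_lun *lun)
{
	percpu_ref_kill(&lun->lun_ref);		 /* no new tryget succeeds */
	wait_for_completion(&lun->lun_ref_comp); /* completed by the release callback */
}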
diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h
index 0204952fe4d3..be912b36daae 100644
--- a/drivers/target/target_core_ua.h
+++ b/drivers/target/target_core_ua.h
@@ -19,7 +19,7 @@
19#define ASCQ_2AH_RESERVATIONS_RELEASED 0x04 19#define ASCQ_2AH_RESERVATIONS_RELEASED 0x04
20#define ASCQ_2AH_REGISTRATIONS_PREEMPTED 0x05 20#define ASCQ_2AH_REGISTRATIONS_PREEMPTED 0x05
21#define ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED 0x06 21#define ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED 0x06
22#define ASCQ_2AH_IMPLICT_ASYMMETRIC_ACCESS_STATE_TRANSITION_FAILED 0x07 22#define ASCQ_2AH_IMPLICIT_ASYMMETRIC_ACCESS_STATE_TRANSITION_FAILED 0x07
23#define ASCQ_2AH_PRIORITY_CHANGED 0x08 23#define ASCQ_2AH_PRIORITY_CHANGED 0x08
24 24
25#define ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS 0x09 25#define ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS 0x09
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 474cd44fac14..6b88a9958f61 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -405,9 +405,6 @@ static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
405 struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd, 405 struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
406 struct xcopy_pt_cmd, se_cmd); 406 struct xcopy_pt_cmd, se_cmd);
407 407
408 if (xpt_cmd->remote_port)
409 kfree(se_cmd->se_lun);
410
411 kfree(xpt_cmd); 408 kfree(xpt_cmd);
412} 409}
413 410
@@ -572,22 +569,10 @@ static int target_xcopy_init_pt_lun(
572 return 0; 569 return 0;
573 } 570 }
574 571
575 pt_cmd->se_lun = kzalloc(sizeof(struct se_lun), GFP_KERNEL); 572 pt_cmd->se_lun = &se_dev->xcopy_lun;
576 if (!pt_cmd->se_lun) {
577 pr_err("Unable to allocate pt_cmd->se_lun\n");
578 return -ENOMEM;
579 }
580 init_completion(&pt_cmd->se_lun->lun_shutdown_comp);
581 INIT_LIST_HEAD(&pt_cmd->se_lun->lun_cmd_list);
582 INIT_LIST_HEAD(&pt_cmd->se_lun->lun_acl_list);
583 spin_lock_init(&pt_cmd->se_lun->lun_acl_lock);
584 spin_lock_init(&pt_cmd->se_lun->lun_cmd_lock);
585 spin_lock_init(&pt_cmd->se_lun->lun_sep_lock);
586
587 pt_cmd->se_dev = se_dev; 573 pt_cmd->se_dev = se_dev;
588 574
589 pr_debug("Setup emulated se_dev: %p from se_dev\n", pt_cmd->se_dev); 575 pr_debug("Setup emulated se_dev: %p from se_dev\n", pt_cmd->se_dev);
590 pt_cmd->se_lun->lun_se_dev = se_dev;
591 pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH; 576 pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH;
592 577
593 pr_debug("Setup emulated se_dev: %p to pt_cmd->se_lun->lun_se_dev\n", 578 pr_debug("Setup emulated se_dev: %p to pt_cmd->se_lun->lun_se_dev\n",
@@ -658,8 +643,6 @@ static int target_xcopy_setup_pt_cmd(
658 return 0; 643 return 0;
659 644
660out: 645out:
661 if (remote_port == true)
662 kfree(cmd->se_lun);
663 return ret; 646 return ret;
664} 647}
665 648
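A hedged sketch of the embedded-LUN pattern the xcopy hunks switch to: rather than kzalloc()ing a short-lived se_lun per EXTENDED COPY, the backing se_device carries one xcopy_lun. The initialization site shown here (device setup) is an assumption not visible in these hunks, and the example_* names are illustrative.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <target/target_core_base.h>

/* one-time setup, e.g. when the se_device is allocated (assumed location) */
static void example_init_xcopy_lun(struct se_device *dev)
{
	struct se_lun *lun = &dev->xcopy_lun;

	lun->lun_se_dev = dev;
	init_completion(&lun->lun_shutdown_comp);
	init_completion(&lun->lun_ref_comp);
	INIT_LIST_HEAD(&lun->lun_acl_list);
	spin_lock_init(&lun->lun_acl_lock);
	spin_lock_init(&lun->lun_sep_lock);
}

/* per EXTENDED COPY command: just point at the device's embedded LUN */
static void example_xcopy_attach_lun(struct se_cmd *pt_cmd, struct se_device *dev)
{
	pt_cmd->se_lun = &dev->xcopy_lun;
	pt_cmd->se_dev = dev;
}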
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index 0dd54a44abcf..752863acecb8 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -22,6 +22,7 @@
22#define FT_NAMELEN 32 /* length of ASCII WWPNs including pad */ 22#define FT_NAMELEN 32 /* length of ASCII WWPNs including pad */
23#define FT_TPG_NAMELEN 32 /* max length of TPG name */ 23#define FT_TPG_NAMELEN 32 /* max length of TPG name */
24#define FT_LUN_NAMELEN 32 /* max length of LUN name */ 24#define FT_LUN_NAMELEN 32 /* max length of LUN name */
25#define TCM_FC_DEFAULT_TAGS 512 /* tags used for per-session preallocation */
25 26
26struct ft_transport_id { 27struct ft_transport_id {
27 __u8 format; 28 __u8 format;
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 0e5a1caed176..479ec5621a4e 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -28,6 +28,7 @@
28#include <linux/configfs.h> 28#include <linux/configfs.h>
29#include <linux/ctype.h> 29#include <linux/ctype.h>
30#include <linux/hash.h> 30#include <linux/hash.h>
31#include <linux/percpu_ida.h>
31#include <asm/unaligned.h> 32#include <asm/unaligned.h>
32#include <scsi/scsi.h> 33#include <scsi/scsi.h>
33#include <scsi/scsi_host.h> 34#include <scsi/scsi_host.h>
@@ -89,16 +90,18 @@ static void ft_free_cmd(struct ft_cmd *cmd)
89{ 90{
90 struct fc_frame *fp; 91 struct fc_frame *fp;
91 struct fc_lport *lport; 92 struct fc_lport *lport;
93 struct se_session *se_sess;
92 94
93 if (!cmd) 95 if (!cmd)
94 return; 96 return;
97 se_sess = cmd->sess->se_sess;
95 fp = cmd->req_frame; 98 fp = cmd->req_frame;
96 lport = fr_dev(fp); 99 lport = fr_dev(fp);
97 if (fr_seq(fp)) 100 if (fr_seq(fp))
98 lport->tt.seq_release(fr_seq(fp)); 101 lport->tt.seq_release(fr_seq(fp));
99 fc_frame_free(fp); 102 fc_frame_free(fp);
103 percpu_ida_free(&se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
100 ft_sess_put(cmd->sess); /* undo get from lookup at recv */ 104 ft_sess_put(cmd->sess); /* undo get from lookup at recv */
101 kfree(cmd);
102} 105}
103 106
104void ft_release_cmd(struct se_cmd *se_cmd) 107void ft_release_cmd(struct se_cmd *se_cmd)
@@ -432,14 +435,21 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
432{ 435{
433 struct ft_cmd *cmd; 436 struct ft_cmd *cmd;
434 struct fc_lport *lport = sess->tport->lport; 437 struct fc_lport *lport = sess->tport->lport;
438 struct se_session *se_sess = sess->se_sess;
439 int tag;
435 440
436 cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); 441 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
437 if (!cmd) 442 if (tag < 0)
438 goto busy; 443 goto busy;
444
445 cmd = &((struct ft_cmd *)se_sess->sess_cmd_map)[tag];
446 memset(cmd, 0, sizeof(struct ft_cmd));
447
448 cmd->se_cmd.map_tag = tag;
439 cmd->sess = sess; 449 cmd->sess = sess;
440 cmd->seq = lport->tt.seq_assign(lport, fp); 450 cmd->seq = lport->tt.seq_assign(lport, fp);
441 if (!cmd->seq) { 451 if (!cmd->seq) {
442 kfree(cmd); 452 percpu_ida_free(&se_sess->sess_tag_pool, tag);
443 goto busy; 453 goto busy;
444 } 454 }
445 cmd->req_frame = fp; /* hold frame during cmd */ 455 cmd->req_frame = fp; /* hold frame during cmd */
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 4e0050840a72..c6932fb53a8d 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -571,16 +571,16 @@ int ft_register_configfs(void)
571 /* 571 /*
572 * Setup default attribute lists for various fabric->tf_cit_tmpl 572 * Setup default attribute lists for various fabric->tf_cit_tmpl
573 */ 573 */
574 TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = ft_wwn_attrs; 574 fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = ft_wwn_attrs;
575 TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL; 575 fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL;
576 TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; 576 fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
577 TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; 577 fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
578 TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; 578 fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
579 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = 579 fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs =
580 ft_nacl_base_attrs; 580 ft_nacl_base_attrs;
581 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; 581 fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
582 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; 582 fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
583 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; 583 fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
584 /* 584 /*
585 * register the fabric for use within TCM 585 * register the fabric for use within TCM
586 */ 586 */
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 4859505ae2ed..ae52c08dad09 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -210,7 +210,8 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
210 if (!sess) 210 if (!sess)
211 return NULL; 211 return NULL;
212 212
213 sess->se_sess = transport_init_session(); 213 sess->se_sess = transport_init_session_tags(TCM_FC_DEFAULT_TAGS,
214 sizeof(struct ft_cmd));
214 if (IS_ERR(sess->se_sess)) { 215 if (IS_ERR(sess->se_sess)) {
215 kfree(sess); 216 kfree(sess);
216 return NULL; 217 return NULL;
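A hedged sketch of the per-session tag pool the tcm_fc hunks above move to: ft_cmd descriptors are preallocated by transport_init_session_tags() and handed out via percpu_ida tags instead of kzalloc()/kfree() per I/O. The APIs are the ones visible in the hunks; the example_* names and the include list are illustrative.

#include <linux/string.h>
#include <linux/percpu_ida.h>
#include <target/target_core_fabric.h>
#include "tcm_fc.h"

/* session setup: back sess_cmd_map with TCM_FC_DEFAULT_TAGS ft_cmd slots */
static struct se_session *example_fc_session_init(void)
{
	return transport_init_session_tags(TCM_FC_DEFAULT_TAGS,
					   sizeof(struct ft_cmd));
}

/* per-I/O: borrow a preallocated slot, as ft_recv_cmd() now does */
static struct ft_cmd *example_fc_cmd_get(struct se_session *se_sess)
{
	struct ft_cmd *cmd;
	int tag;

	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
	if (tag < 0)
		return NULL;			/* pool exhausted; caller backs off */

	cmd = &((struct ft_cmd *)se_sess->sess_cmd_map)[tag];
	memset(cmd, 0, sizeof(*cmd));
	cmd->se_cmd.map_tag = tag;
	return cmd;
}

/* per-I/O: return the slot, as ft_free_cmd() now does */
static void example_fc_cmd_put(struct se_session *se_sess, struct ft_cmd *cmd)
{
	percpu_ida_free(&se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
}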
diff --git a/drivers/usb/gadget/tcm_usb_gadget.c b/drivers/usb/gadget/tcm_usb_gadget.c
index eccea1df702d..6c3d7950d2a9 100644
--- a/drivers/usb/gadget/tcm_usb_gadget.c
+++ b/drivers/usb/gadget/tcm_usb_gadget.c
@@ -1923,15 +1923,15 @@ static int usbg_register_configfs(void)
1923 } 1923 }
1924 1924
1925 fabric->tf_ops = usbg_ops; 1925 fabric->tf_ops = usbg_ops;
1926 TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = usbg_wwn_attrs; 1926 fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = usbg_wwn_attrs;
1927 TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = usbg_base_attrs; 1927 fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = usbg_base_attrs;
1928 TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; 1928 fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
1929 TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; 1929 fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
1930 TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; 1930 fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
1931 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL; 1931 fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
1932 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; 1932 fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
1933 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; 1933 fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
1934 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; 1934 fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
1935 ret = target_fabric_configfs_register(fabric); 1935 ret = target_fabric_configfs_register(fabric);
1936 if (ret < 0) { 1936 if (ret < 0) {
1937 printk(KERN_ERR "target_fabric_configfs_register() failed" 1937 printk(KERN_ERR "target_fabric_configfs_register() failed"
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index e663921eebb6..f175629513ed 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -2168,15 +2168,15 @@ static int tcm_vhost_register_configfs(void)
2168 /* 2168 /*
2169 * Setup default attribute lists for various fabric->tf_cit_tmpl 2169 * Setup default attribute lists for various fabric->tf_cit_tmpl
2170 */ 2170 */
2171 TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs; 2171 fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
2172 TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs; 2172 fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
2173 TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; 2173 fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
2174 TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; 2174 fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
2175 TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; 2175 fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
2176 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL; 2176 fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
2177 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; 2177 fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
2178 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; 2178 fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
2179 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; 2179 fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
2180 /* 2180 /*
2181 * Register the fabric for use within TCM 2181 * Register the fabric for use within TCM
2182 */ 2182 */
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 5ebe21cd5d1c..39e0114d70c5 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -34,6 +34,11 @@ struct se_subsystem_api {
34 sense_reason_t (*parse_cdb)(struct se_cmd *cmd); 34 sense_reason_t (*parse_cdb)(struct se_cmd *cmd);
35 u32 (*get_device_type)(struct se_device *); 35 u32 (*get_device_type)(struct se_device *);
36 sector_t (*get_blocks)(struct se_device *); 36 sector_t (*get_blocks)(struct se_device *);
37 sector_t (*get_alignment_offset_lbas)(struct se_device *);
38 /* lbppbe = logical blocks per physical block exponent. see SBC-3 */
39 unsigned int (*get_lbppbe)(struct se_device *);
40 unsigned int (*get_io_min)(struct se_device *);
41 unsigned int (*get_io_opt)(struct se_device *);
37 unsigned char *(*get_sense_buffer)(struct se_cmd *); 42 unsigned char *(*get_sense_buffer)(struct se_cmd *);
38 bool (*get_write_cache)(struct se_device *); 43 bool (*get_write_cache)(struct se_device *);
39}; 44};
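A hedged sketch of how a block-device backed backend could satisfy the new topology callbacks, using stock block-layer helpers (bdev_alignment_offset(), bdev_io_min(), bdev_io_opt(), bdev_physical_block_size(), bdev_logical_block_size()). Both the helper choice and taking a struct block_device directly, rather than the struct se_device the ops actually receive, are assumptions made for brevity; this is not the iblock code from this series.

#include <linux/blkdev.h>
#include <linux/log2.h>

static sector_t example_get_alignment_offset_lbas(struct block_device *bd)
{
	int off = bdev_alignment_offset(bd);	/* bytes; may be 0 or -1 */

	return off > 0 ? off / bdev_logical_block_size(bd) : 0;
}

static unsigned int example_get_lbppbe(struct block_device *bd)
{
	/* logical blocks per physical block, as an exponent (SBC-3) */
	return ilog2(bdev_physical_block_size(bd) /
		     bdev_logical_block_size(bd));
}

static unsigned int example_get_io_min(struct block_device *bd)
{
	return bdev_io_min(bd);			/* preferred minimum I/O, in bytes */
}

static unsigned int example_get_io_opt(struct block_device *bd)
{
	return bdev_io_opt(bd);			/* optimal I/O size, in bytes */
}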
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 5bdb8b7d2a69..45412a6afa69 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -227,6 +227,7 @@ enum tcm_tmreq_table {
227 227
228/* fabric independent task management response values */ 228/* fabric independent task management response values */
229enum tcm_tmrsp_table { 229enum tcm_tmrsp_table {
230 TMR_FUNCTION_FAILED = 0,
230 TMR_FUNCTION_COMPLETE = 1, 231 TMR_FUNCTION_COMPLETE = 1,
231 TMR_TASK_DOES_NOT_EXIST = 2, 232 TMR_TASK_DOES_NOT_EXIST = 2,
232 TMR_LUN_DOES_NOT_EXIST = 3, 233 TMR_LUN_DOES_NOT_EXIST = 3,
@@ -282,11 +283,12 @@ struct t10_alua_lu_gp_member {
282struct t10_alua_tg_pt_gp { 283struct t10_alua_tg_pt_gp {
283 u16 tg_pt_gp_id; 284 u16 tg_pt_gp_id;
284 int tg_pt_gp_valid_id; 285 int tg_pt_gp_valid_id;
286 int tg_pt_gp_alua_supported_states;
285 int tg_pt_gp_alua_access_status; 287 int tg_pt_gp_alua_access_status;
286 int tg_pt_gp_alua_access_type; 288 int tg_pt_gp_alua_access_type;
287 int tg_pt_gp_nonop_delay_msecs; 289 int tg_pt_gp_nonop_delay_msecs;
288 int tg_pt_gp_trans_delay_msecs; 290 int tg_pt_gp_trans_delay_msecs;
289 int tg_pt_gp_implict_trans_secs; 291 int tg_pt_gp_implicit_trans_secs;
290 int tg_pt_gp_pref; 292 int tg_pt_gp_pref;
291 int tg_pt_gp_write_metadata; 293 int tg_pt_gp_write_metadata;
292 /* Used by struct t10_alua_tg_pt_gp->tg_pt_gp_md_buf_len */ 294 /* Used by struct t10_alua_tg_pt_gp->tg_pt_gp_md_buf_len */
@@ -442,7 +444,6 @@ struct se_cmd {
442 /* Used for sense data */ 444 /* Used for sense data */
443 void *sense_buffer; 445 void *sense_buffer;
444 struct list_head se_delayed_node; 446 struct list_head se_delayed_node;
445 struct list_head se_lun_node;
446 struct list_head se_qf_node; 447 struct list_head se_qf_node;
447 struct se_device *se_dev; 448 struct se_device *se_dev;
448 struct se_dev_entry *se_deve; 449 struct se_dev_entry *se_deve;
@@ -470,15 +471,11 @@ struct se_cmd {
470#define CMD_T_SENT (1 << 4) 471#define CMD_T_SENT (1 << 4)
471#define CMD_T_STOP (1 << 5) 472#define CMD_T_STOP (1 << 5)
472#define CMD_T_FAILED (1 << 6) 473#define CMD_T_FAILED (1 << 6)
473#define CMD_T_LUN_STOP (1 << 7) 474#define CMD_T_DEV_ACTIVE (1 << 7)
474#define CMD_T_LUN_FE_STOP (1 << 8) 475#define CMD_T_REQUEST_STOP (1 << 8)
475#define CMD_T_DEV_ACTIVE (1 << 9) 476#define CMD_T_BUSY (1 << 9)
476#define CMD_T_REQUEST_STOP (1 << 10)
477#define CMD_T_BUSY (1 << 11)
478 spinlock_t t_state_lock; 477 spinlock_t t_state_lock;
479 struct completion t_transport_stop_comp; 478 struct completion t_transport_stop_comp;
480 struct completion transport_lun_fe_stop_comp;
481 struct completion transport_lun_stop_comp;
482 479
483 struct work_struct work; 480 struct work_struct work;
484 481
@@ -498,6 +495,9 @@ struct se_cmd {
498 495
499 /* backend private data */ 496 /* backend private data */
500 void *priv; 497 void *priv;
498
499 /* Used for lun->lun_ref counting */
500 bool lun_ref_active;
501}; 501};
502 502
503struct se_ua { 503struct se_ua {
@@ -628,6 +628,34 @@ struct se_dev_attrib {
628 struct config_group da_group; 628 struct config_group da_group;
629}; 629};
630 630
631struct se_port_stat_grps {
632 struct config_group stat_group;
633 struct config_group scsi_port_group;
634 struct config_group scsi_tgt_port_group;
635 struct config_group scsi_transport_group;
636};
637
638struct se_lun {
639#define SE_LUN_LINK_MAGIC 0xffff7771
640 u32 lun_link_magic;
641 /* See transport_lun_status_table */
642 enum transport_lun_status_table lun_status;
643 u32 lun_access;
644 u32 lun_flags;
645 u32 unpacked_lun;
646 atomic_t lun_acl_count;
647 spinlock_t lun_acl_lock;
648 spinlock_t lun_sep_lock;
649 struct completion lun_shutdown_comp;
650 struct list_head lun_acl_list;
651 struct se_device *lun_se_dev;
652 struct se_port *lun_sep;
653 struct config_group lun_group;
654 struct se_port_stat_grps port_stat_grps;
655 struct completion lun_ref_comp;
656 struct percpu_ref lun_ref;
657};
658
631struct se_dev_stat_grps { 659struct se_dev_stat_grps {
632 struct config_group stat_group; 660 struct config_group stat_group;
633 struct config_group scsi_dev_group; 661 struct config_group scsi_dev_group;
@@ -656,11 +684,10 @@ struct se_device {
656 /* Pointer to transport specific device structure */ 684 /* Pointer to transport specific device structure */
657 u32 dev_index; 685 u32 dev_index;
658 u64 creation_time; 686 u64 creation_time;
659 u32 num_resets; 687 atomic_long_t num_resets;
660 u64 num_cmds; 688 atomic_long_t num_cmds;
661 u64 read_bytes; 689 atomic_long_t read_bytes;
662 u64 write_bytes; 690 atomic_long_t write_bytes;
663 spinlock_t stats_lock;
664 /* Active commands on this virtual SE device */ 691 /* Active commands on this virtual SE device */
665 atomic_t simple_cmds; 692 atomic_t simple_cmds;
666 atomic_t dev_ordered_id; 693 atomic_t dev_ordered_id;
@@ -711,6 +738,7 @@ struct se_device {
711 struct se_subsystem_api *transport; 738 struct se_subsystem_api *transport;
712 /* Linked list for struct se_hba struct se_device list */ 739 /* Linked list for struct se_hba struct se_device list */
713 struct list_head dev_list; 740 struct list_head dev_list;
741 struct se_lun xcopy_lun;
714}; 742};
715 743
716struct se_hba { 744struct se_hba {
@@ -730,34 +758,6 @@ struct se_hba {
730 struct se_subsystem_api *transport; 758 struct se_subsystem_api *transport;
731}; 759};
732 760
733struct se_port_stat_grps {
734 struct config_group stat_group;
735 struct config_group scsi_port_group;
736 struct config_group scsi_tgt_port_group;
737 struct config_group scsi_transport_group;
738};
739
740struct se_lun {
741#define SE_LUN_LINK_MAGIC 0xffff7771
742 u32 lun_link_magic;
743 /* See transport_lun_status_table */
744 enum transport_lun_status_table lun_status;
745 u32 lun_access;
746 u32 lun_flags;
747 u32 unpacked_lun;
748 atomic_t lun_acl_count;
749 spinlock_t lun_acl_lock;
750 spinlock_t lun_cmd_lock;
751 spinlock_t lun_sep_lock;
752 struct completion lun_shutdown_comp;
753 struct list_head lun_cmd_list;
754 struct list_head lun_acl_list;
755 struct se_device *lun_se_dev;
756 struct se_port *lun_sep;
757 struct config_group lun_group;
758 struct se_port_stat_grps port_stat_grps;
759};
760
761struct scsi_port_stats { 761struct scsi_port_stats {
762 u64 cmd_pdus; 762 u64 cmd_pdus;
763 u64 tx_data_octets; 763 u64 tx_data_octets;
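A brief sketch of what the se_device counter conversion above enables: hot-path accounting updates become single atomic operations instead of taking stats_lock around plain u64 fields. The example_* helper names are illustrative.

#include <linux/atomic.h>
#include <target/target_core_base.h>

/* I/O completion path: no spinlock needed around the counters any more */
static void example_account_read(struct se_device *dev, long bytes)
{
	atomic_long_inc(&dev->num_cmds);
	atomic_long_add(bytes, &dev->read_bytes);
}

/* statistics readout: a point-in-time snapshot of a single counter */
static long example_read_bytes(struct se_device *dev)
{
	return atomic_long_read(&dev->read_bytes);
}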
diff --git a/include/target/target_core_configfs.h b/include/target/target_core_configfs.h
index 713c5004f4ae..e0801386e4dc 100644
--- a/include/target/target_core_configfs.h
+++ b/include/target/target_core_configfs.h
@@ -54,4 +54,3 @@ struct target_fabric_configfs {
54 struct target_fabric_configfs_template tf_cit_tmpl; 54 struct target_fabric_configfs_template tf_cit_tmpl;
55}; 55};
56 56
57#define TF_CIT_TMPL(tf) (&(tf)->tf_cit_tmpl)
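With the TF_CIT_TMPL() wrapper gone, the open-coded member access used by the fabric drivers converted above looks like this (taken directly from the tfc_conf.c hunk):

/* before */ TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = ft_wwn_attrs;
/* after  */ fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = ft_wwn_attrs;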
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 882b650e32be..4cf4fda404a3 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -137,6 +137,8 @@ void transport_generic_request_failure(struct se_cmd *, sense_reason_t);
137void __target_execute_cmd(struct se_cmd *); 137void __target_execute_cmd(struct se_cmd *);
138int transport_lookup_tmr_lun(struct se_cmd *, u32); 138int transport_lookup_tmr_lun(struct se_cmd *, u32);
139 139
140struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
141 unsigned char *);
140struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *, 142struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *,
141 unsigned char *); 143 unsigned char *);
142void core_tpg_clear_object_luns(struct se_portal_group *); 144void core_tpg_clear_object_luns(struct se_portal_group *);
diff --git a/lib/Makefile b/lib/Makefile
index b46065fd67a4..a459c31e8c6b 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -13,7 +13,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
13 sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \ 13 sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \
14 proportions.o flex_proportions.o prio_heap.o ratelimit.o show_mem.o \ 14 proportions.o flex_proportions.o prio_heap.o ratelimit.o show_mem.o \
15 is_single_threaded.o plist.o decompress.o kobject_uevent.o \ 15 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
16 earlycpio.o percpu-refcount.o percpu_ida.o 16 earlycpio.o
17 17
18obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o 18obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o
19lib-$(CONFIG_MMU) += ioremap.o 19lib-$(CONFIG_MMU) += ioremap.o
@@ -26,7 +26,7 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
26 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ 26 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
27 gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \ 27 gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \
28 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \ 28 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
29 percpu_ida.o 29 percpu-refcount.o percpu_ida.o
30obj-y += string_helpers.o 30obj-y += string_helpers.o
31obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o 31obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
32obj-y += kstrtox.o 32obj-y += kstrtox.o
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
index b0698ea972c6..9d054bf91d0f 100644
--- a/lib/percpu_ida.c
+++ b/lib/percpu_ida.c
@@ -117,8 +117,7 @@ static inline void alloc_global_tags(struct percpu_ida *pool,
117 min(pool->nr_free, pool->percpu_batch_size)); 117 min(pool->nr_free, pool->percpu_batch_size));
118} 118}
119 119
120static inline unsigned alloc_local_tag(struct percpu_ida *pool, 120static inline unsigned alloc_local_tag(struct percpu_ida_cpu *tags)
121 struct percpu_ida_cpu *tags)
122{ 121{
123 int tag = -ENOSPC; 122 int tag = -ENOSPC;
124 123
@@ -159,7 +158,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp)
159 tags = this_cpu_ptr(pool->tag_cpu); 158 tags = this_cpu_ptr(pool->tag_cpu);
160 159
161 /* Fastpath */ 160 /* Fastpath */
162 tag = alloc_local_tag(pool, tags); 161 tag = alloc_local_tag(tags);
163 if (likely(tag >= 0)) { 162 if (likely(tag >= 0)) {
164 local_irq_restore(flags); 163 local_irq_restore(flags);
165 return tag; 164 return tag;
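For context on the library change above, a small standalone sketch of the percpu_ida API as this series uses it elsewhere (pool-managed small-integer tags with a per-CPU fast path); the pool size and function name are illustrative.

#include <linux/percpu_ida.h>

static int example_tag_pool_demo(void)
{
	struct percpu_ida pool;
	int tag, ret;

	ret = percpu_ida_init(&pool, 512);		/* tags 0..511 */
	if (ret)
		return ret;

	tag = percpu_ida_alloc(&pool, GFP_KERNEL);	/* GFP_KERNEL may wait for a free tag */
	if (tag >= 0)
		percpu_ida_free(&pool, tag);

	percpu_ida_destroy(&pool);
	return 0;
}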