Diffstat (limited to 'drivers/target')
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 821
-rw-r--r--  drivers/target/iscsi/iscsi_target.h | 3
-rw-r--r--  drivers/target/iscsi/iscsi_target_configfs.c | 2
-rw-r--r--  drivers/target/iscsi/iscsi_target_core.h | 12
-rw-r--r--  drivers/target/iscsi/iscsi_target_datain_values.c | 35
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl0.c | 31
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl1.c | 23
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl2.c | 28
-rw-r--r--  drivers/target/iscsi/iscsi_target_parameters.c | 17
-rw-r--r--  drivers/target/iscsi/iscsi_target_seq_pdu_list.c | 145
-rw-r--r--  drivers/target/iscsi/iscsi_target_seq_pdu_list.h | 2
-rw-r--r--  drivers/target/iscsi/iscsi_target_tmr.c | 15
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.c | 192
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.h | 3
-rw-r--r--  drivers/target/loopback/tcm_loop.c | 4
-rw-r--r--  drivers/target/target_core_alua.c | 100
-rw-r--r--  drivers/target/target_core_alua.h | 14
-rw-r--r--  drivers/target/target_core_cdb.c | 118
-rw-r--r--  drivers/target/target_core_configfs.c | 25
-rw-r--r--  drivers/target/target_core_device.c | 84
-rw-r--r--  drivers/target/target_core_file.c | 131
-rw-r--r--  drivers/target/target_core_file.h | 4
-rw-r--r--  drivers/target/target_core_iblock.c | 152
-rw-r--r--  drivers/target/target_core_iblock.h | 1
-rw-r--r--  drivers/target/target_core_internal.h | 25
-rw-r--r--  drivers/target/target_core_pr.c | 67
-rw-r--r--  drivers/target/target_core_pr.h | 8
-rw-r--r--  drivers/target/target_core_pscsi.c | 179
-rw-r--r--  drivers/target/target_core_pscsi.h | 1
-rw-r--r--  drivers/target/target_core_rd.c | 168
-rw-r--r--  drivers/target/target_core_rd.h | 20
-rw-r--r--  drivers/target/target_core_tmr.c | 71
-rw-r--r--  drivers/target/target_core_tpg.c | 5
-rw-r--r--  drivers/target/target_core_transport.c | 994
-rw-r--r--  drivers/target/tcm_fc/tfc_cmd.c | 18
-rw-r--r--  drivers/target/tcm_fc/tfc_conf.c | 3
-rw-r--r--  drivers/target/tcm_fc/tfc_io.c | 2
37 files changed, 1286 insertions, 2237 deletions
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 8b1d5e62ed40..d57d10cb2e47 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -27,8 +27,10 @@
 #include <asm/unaligned.h>
 #include <scsi/scsi_device.h>
 #include <scsi/iscsi_proto.h>
+#include <scsi/scsi_tcq.h>
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
+#include <target/target_core_configfs.h>
 
 #include "iscsi_target_core.h"
 #include "iscsi_target_parameters.h"
@@ -593,7 +595,7 @@ static void __exit iscsi_target_cleanup_module(void)
 	kfree(iscsit_global);
 }
 
-int iscsit_add_reject(
+static int iscsit_add_reject(
 	u8 reason,
 	int fail_conn,
 	unsigned char *buf,
@@ -622,7 +624,7 @@ int iscsit_add_reject(
 	}
 
 	spin_lock_bh(&conn->cmd_lock);
-	list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
 	spin_unlock_bh(&conn->cmd_lock);
 
 	cmd->i_state = ISTATE_SEND_REJECT;
@@ -669,7 +671,7 @@ int iscsit_add_reject_from_cmd(
 
 	if (add_to_conn) {
 		spin_lock_bh(&conn->cmd_lock);
-		list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+		list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
 		spin_unlock_bh(&conn->cmd_lock);
 	}
 
@@ -685,9 +687,7 @@ int iscsit_add_reject_from_cmd(
 
 /*
  * Map some portion of the allocated scatterlist to an iovec, suitable for
- * kernel sockets to copy data in/out. This handles both pages and slab-allocated
- * buffers, since we have been tricky and mapped t_mem_sg to the buffer in
- * either case (see iscsit_alloc_buffs)
+ * kernel sockets to copy data in/out.
  */
 static int iscsit_map_iovec(
 	struct iscsi_cmd *cmd,
@@ -700,10 +700,9 @@ static int iscsit_map_iovec(
 	unsigned int page_off;
 
 	/*
-	 * We have a private mapping of the allocated pages in t_mem_sg.
-	 * At this point, we also know each contains a page.
+	 * We know each entry in t_data_sg contains a page.
 	 */
-	sg = &cmd->t_mem_sg[data_offset / PAGE_SIZE];
+	sg = &cmd->se_cmd.t_data_sg[data_offset / PAGE_SIZE];
 	page_off = (data_offset % PAGE_SIZE);
 
 	cmd->first_data_sg = sg;
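[Annotation: with one page per scatterlist entry, the lookup above is plain index arithmetic. A standalone userspace sketch of the same math, assuming a 4 KiB PAGE_SIZE (an assumption of this example, not taken from the patch):]

#include <stdio.h>

#define PAGE_SIZE 4096u		/* assumed page size for this sketch */

int main(void)
{
	unsigned int data_offset = 10240;	/* byte offset into the payload */

	/* One page per sg entry: the offset selects an entry index and
	 * an offset within that entry's page. */
	unsigned int sg_index = data_offset / PAGE_SIZE;	/* -> 2 */
	unsigned int page_off = data_offset % PAGE_SIZE;	/* -> 2048 */

	printf("sg entry %u, page offset %u\n", sg_index, page_off);
	return 0;
}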
@@ -744,7 +743,7 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
 	conn->exp_statsn = exp_statsn;
 
 	spin_lock_bh(&conn->cmd_lock);
-	list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
+	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
 		spin_lock(&cmd->istate_lock);
 		if ((cmd->i_state == ISTATE_SENT_STATUS) &&
 		    (cmd->stat_sn < exp_statsn)) {
@@ -761,8 +760,7 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
 
 static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd)
 {
-	u32 iov_count = (cmd->se_cmd.t_data_nents == 0) ? 1 :
-				cmd->se_cmd.t_data_nents;
+	u32 iov_count = max(1UL, DIV_ROUND_UP(cmd->se_cmd.data_length, PAGE_SIZE));
 
 	iov_count += ISCSI_IOV_DATA_BUFFER;
 
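[Annotation: the new bound can be checked with a small userspace sketch of the same expression, with DIV_ROUND_UP open-coded and PAGE_SIZE assumed:]

#include <stdio.h>

#define PAGE_SIZE 4096u			/* assumed for the example */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int data_length = 10240;	/* 2.5 pages of payload */

	/* One kvec per page of payload; a zero-length command still
	 * gets one, mirroring the max(1UL, ...) above. */
	unsigned long iov_count = DIV_ROUND_UP(data_length, PAGE_SIZE);
	if (iov_count < 1)
		iov_count = 1;

	printf("%lu iovecs for %u bytes\n", iov_count, data_length);	/* 3 */
	return 0;
}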
@@ -776,64 +774,6 @@ static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd)
 	return 0;
 }
 
-static int iscsit_alloc_buffs(struct iscsi_cmd *cmd)
-{
-	struct scatterlist *sgl;
-	u32 length = cmd->se_cmd.data_length;
-	int nents = DIV_ROUND_UP(length, PAGE_SIZE);
-	int i = 0, j = 0, ret;
-	/*
-	 * If no SCSI payload is present, allocate the default iovecs used for
-	 * iSCSI PDU Header
-	 */
-	if (!length)
-		return iscsit_allocate_iovecs(cmd);
-
-	sgl = kzalloc(sizeof(*sgl) * nents, GFP_KERNEL);
-	if (!sgl)
-		return -ENOMEM;
-
-	sg_init_table(sgl, nents);
-
-	while (length) {
-		int buf_size = min_t(int, length, PAGE_SIZE);
-		struct page *page;
-
-		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-		if (!page)
-			goto page_alloc_failed;
-
-		sg_set_page(&sgl[i], page, buf_size, 0);
-
-		length -= buf_size;
-		i++;
-	}
-
-	cmd->t_mem_sg = sgl;
-	cmd->t_mem_sg_nents = nents;
-
-	/* BIDI ops not supported */
-
-	/* Tell the core about our preallocated memory */
-	transport_generic_map_mem_to_cmd(&cmd->se_cmd, sgl, nents, NULL, 0);
-	/*
-	 * Allocate iovecs for SCSI payload after transport_generic_map_mem_to_cmd
-	 * so that cmd->se_cmd.t_tasks_se_num has been set.
-	 */
-	ret = iscsit_allocate_iovecs(cmd);
-	if (ret < 0)
-		return -ENOMEM;
-
-	return 0;
-
-page_alloc_failed:
-	while (j < i)
-		__free_page(sg_page(&sgl[j++]));
-
-	kfree(sgl);
-	return -ENOMEM;
-}
-
 static int iscsit_handle_scsi_cmd(
 	struct iscsi_conn *conn,
 	unsigned char *buf)
@@ -842,6 +782,8 @@ static int iscsit_handle_scsi_cmd(
 	int dump_immediate_data = 0, send_check_condition = 0, payload_length;
 	struct iscsi_cmd *cmd = NULL;
 	struct iscsi_scsi_req *hdr;
+	int iscsi_task_attr;
+	int sam_task_attr;
 
 	spin_lock_bh(&conn->sess->session_stats_lock);
 	conn->sess->cmd_pdus++;
@@ -958,15 +900,30 @@ done:
 		(hdr->flags & ISCSI_FLAG_CMD_READ) ? DMA_FROM_DEVICE :
 		 DMA_NONE;
 
-	cmd = iscsit_allocate_se_cmd(conn, hdr->data_length, data_direction,
-				(hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK));
+	cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
 	if (!cmd)
 		return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1,
 					buf, conn);
 
-	pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x,"
-		" ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
-		hdr->cmdsn, hdr->data_length, payload_length, conn->cid);
+	cmd->data_direction = data_direction;
+	iscsi_task_attr = hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK;
+	/*
+	 * Figure out the SAM Task Attribute for the incoming SCSI CDB
+	 */
+	if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) ||
+	    (iscsi_task_attr == ISCSI_ATTR_SIMPLE))
+		sam_task_attr = MSG_SIMPLE_TAG;
+	else if (iscsi_task_attr == ISCSI_ATTR_ORDERED)
+		sam_task_attr = MSG_ORDERED_TAG;
+	else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE)
+		sam_task_attr = MSG_HEAD_TAG;
+	else if (iscsi_task_attr == ISCSI_ATTR_ACA)
+		sam_task_attr = MSG_ACA_TAG;
+	else {
+		pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using"
+			" MSG_SIMPLE_TAG\n", iscsi_task_attr);
+		sam_task_attr = MSG_SIMPLE_TAG;
+	}
 
 	cmd->iscsi_opcode = ISCSI_OP_SCSI_CMD;
 	cmd->i_state = ISTATE_NEW_CMD;
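[Annotation: the attribute comes off the wire in RFC 3720's ATTR field; a compact userspace sketch of the same mapping, with the numeric codes mirrored from the RFC as assumptions rather than taken from the kernel headers:]

#include <stdio.h>

/* RFC 3720 task attribute codes, assumed to match the ISCSI_ATTR_* values */
enum { UNTAGGED = 0, SIMPLE = 1, ORDERED = 2, HEAD_OF_QUEUE = 3, ACA = 4 };

static const char *sam_task_attr(int iscsi_attr)
{
	switch (iscsi_attr) {
	case UNTAGGED:
	case SIMPLE:		return "MSG_SIMPLE_TAG";
	case ORDERED:		return "MSG_ORDERED_TAG";
	case HEAD_OF_QUEUE:	return "MSG_HEAD_TAG";
	case ACA:		return "MSG_ACA_TAG";
	default:		return "MSG_SIMPLE_TAG";	/* fallback, as above */
	}
}

int main(void)
{
	for (int attr = 0; attr <= 5; attr++)
		printf("ATTR %d -> %s\n", attr, sam_task_attr(attr));
	return 0;
}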
@@ -1003,6 +960,17 @@ done:
 	}
 
 	/*
+	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
+	 */
+	transport_init_se_cmd(&cmd->se_cmd, &lio_target_fabric_configfs->tf_ops,
+			conn->sess->se_sess, hdr->data_length, cmd->data_direction,
+			sam_task_attr, &cmd->sense_buffer[0]);
+
+	pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x,"
+		" ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
+		hdr->cmdsn, hdr->data_length, payload_length, conn->cid);
+
+	/*
 	 * The CDB is going to an se_device_t.
 	 */
 	ret = transport_lookup_cmd_lun(&cmd->se_cmd,
@@ -1016,13 +984,8 @@ done:
 		send_check_condition = 1;
 		goto attach_cmd;
 	}
-	/*
-	 * The Initiator Node has access to the LUN (the addressing method
-	 * is handled inside of iscsit_get_lun_for_cmd()). Now it's time to
-	 * allocate 1->N transport tasks (depending on sector count and
-	 * maximum request size the physical HBA(s) can handle.
-	 */
-	transport_ret = transport_generic_allocate_tasks(&cmd->se_cmd, hdr->cdb);
+
+	transport_ret = target_setup_cmd_from_cdb(&cmd->se_cmd, hdr->cdb);
 	if (transport_ret == -ENOMEM) {
 		return iscsit_add_reject_from_cmd(
 				ISCSI_REASON_BOOKMARK_NO_RESOURCES,
@@ -1035,9 +998,7 @@ done:
 		 */
 		send_check_condition = 1;
 	} else {
-		cmd->data_length = cmd->se_cmd.data_length;
-
-		if (iscsit_decide_list_to_build(cmd, payload_length) < 0)
+		if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0)
 			return iscsit_add_reject_from_cmd(
 				ISCSI_REASON_BOOKMARK_NO_RESOURCES,
 				1, 1, buf, cmd);
@@ -1045,18 +1006,15 @@ done:
 
 attach_cmd:
 	spin_lock_bh(&conn->cmd_lock);
-	list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
 	spin_unlock_bh(&conn->cmd_lock);
 	/*
 	 * Check if we need to delay processing because of ALUA
 	 * Active/NonOptimized primary access state..
 	 */
 	core_alua_check_nonop_delay(&cmd->se_cmd);
-	/*
-	 * Allocate and setup SGL used with transport_generic_map_mem_to_cmd().
-	 * also call iscsit_allocate_iovecs()
-	 */
-	ret = iscsit_alloc_buffs(cmd);
+
+	ret = iscsit_allocate_iovecs(cmd);
 	if (ret < 0)
 		return iscsit_add_reject_from_cmd(
 				ISCSI_REASON_BOOKMARK_NO_RESOURCES,
@@ -1303,10 +1261,10 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
 	se_cmd = &cmd->se_cmd;
 	iscsit_mod_dataout_timer(cmd);
 
-	if ((hdr->offset + payload_length) > cmd->data_length) {
+	if ((hdr->offset + payload_length) > cmd->se_cmd.data_length) {
 		pr_err("DataOut Offset: %u, Length %u greater than"
 			" iSCSI Command EDTL %u, protocol error.\n",
-			hdr->offset, payload_length, cmd->data_length);
+			hdr->offset, payload_length, cmd->se_cmd.data_length);
 		return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
 				1, 0, buf, cmd);
 	}
@@ -1442,7 +1400,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
 		return 0;
 	else if (ret == DATAOUT_SEND_R2T) {
 		iscsit_set_dataout_sequence_values(cmd);
-		iscsit_build_r2ts_for_cmd(cmd, conn, 0);
+		iscsit_build_r2ts_for_cmd(cmd, conn, false);
 	} else if (ret == DATAOUT_SEND_TO_TRANSPORT) {
 		/*
 		 * Handle extra special case for out of order
@@ -1617,7 +1575,7 @@ static int iscsit_handle_nop_out(
 	 * Initiator is expecting a NopIN ping reply,
 	 */
 	spin_lock_bh(&conn->cmd_lock);
-	list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
 	spin_unlock_bh(&conn->cmd_lock);
 
 	iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
@@ -1723,10 +1681,75 @@ static int iscsit_handle_task_mgt_cmd(
 	    (hdr->refcmdsn != ISCSI_RESERVED_TAG))
 		hdr->refcmdsn = ISCSI_RESERVED_TAG;
 
-	cmd = iscsit_allocate_se_cmd_for_tmr(conn, function);
+	cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
 	if (!cmd)
 		return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
 					1, buf, conn);
+
+	cmd->data_direction = DMA_NONE;
+
+	cmd->tmr_req = kzalloc(sizeof(struct iscsi_tmr_req), GFP_KERNEL);
+	if (!cmd->tmr_req) {
+		pr_err("Unable to allocate memory for"
+			" Task Management command!\n");
+		return iscsit_add_reject_from_cmd(
+			ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+			1, 1, buf, cmd);
+	}
+
+	/*
+	 * TASK_REASSIGN for ERL=2 / connection stays inside of
+	 * LIO-Target $FABRIC_MOD
+	 */
+	if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
+
+		u8 tcm_function;
+		int ret;
+
+		transport_init_se_cmd(&cmd->se_cmd,
+				&lio_target_fabric_configfs->tf_ops,
+				conn->sess->se_sess, 0, DMA_NONE,
+				MSG_SIMPLE_TAG, &cmd->sense_buffer[0]);
+
+		switch (function) {
+		case ISCSI_TM_FUNC_ABORT_TASK:
+			tcm_function = TMR_ABORT_TASK;
+			break;
+		case ISCSI_TM_FUNC_ABORT_TASK_SET:
+			tcm_function = TMR_ABORT_TASK_SET;
+			break;
+		case ISCSI_TM_FUNC_CLEAR_ACA:
+			tcm_function = TMR_CLEAR_ACA;
+			break;
+		case ISCSI_TM_FUNC_CLEAR_TASK_SET:
+			tcm_function = TMR_CLEAR_TASK_SET;
+			break;
+		case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
+			tcm_function = TMR_LUN_RESET;
+			break;
+		case ISCSI_TM_FUNC_TARGET_WARM_RESET:
+			tcm_function = TMR_TARGET_WARM_RESET;
+			break;
+		case ISCSI_TM_FUNC_TARGET_COLD_RESET:
+			tcm_function = TMR_TARGET_COLD_RESET;
+			break;
+		default:
+			pr_err("Unknown iSCSI TMR Function:"
+				" 0x%02x\n", function);
+			return iscsit_add_reject_from_cmd(
+				ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+				1, 1, buf, cmd);
+		}
+
+		ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req,
+					 tcm_function, GFP_KERNEL);
+		if (ret < 0)
+			return iscsit_add_reject_from_cmd(
+				ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+				1, 1, buf, cmd);
+
+		cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req;
+	}
 
 	cmd->iscsi_opcode = ISCSI_OP_SCSI_TMFUNC;
 	cmd->i_state = ISTATE_SEND_TASKMGTRSP;
@@ -1804,7 +1827,7 @@ static int iscsit_handle_task_mgt_cmd(
 	se_tmr->call_transport = 1;
 attach:
 	spin_lock_bh(&conn->cmd_lock);
-	list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
 	spin_unlock_bh(&conn->cmd_lock);
 
 	if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
@@ -1980,7 +2003,7 @@ static int iscsit_handle_text_cmd(
 	cmd->data_direction = DMA_NONE;
 
 	spin_lock_bh(&conn->cmd_lock);
-	list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
 	spin_unlock_bh(&conn->cmd_lock);
 
 	iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
@@ -2168,7 +2191,7 @@ static int iscsit_handle_logout_cmd(
 		logout_remove = 1;
 
 	spin_lock_bh(&conn->cmd_lock);
-	list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
 	spin_unlock_bh(&conn->cmd_lock);
 
 	if (reason_code != ISCSI_LOGOUT_REASON_RECOVERY)
@@ -2178,7 +2201,7 @@ static int iscsit_handle_logout_cmd(
 	 * Immediate commands are executed, well, immediately.
 	 * Non-Immediate Logout Commands are executed in CmdSN order.
 	 */
-	if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
+	if (cmd->immediate_cmd) {
 		int ret = iscsit_execute_cmd(cmd, 0);
 
 		if (ret < 0)
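[Annotation: the immediate-delivery test the old code performed on the wire header is a single bit check; a sketch with the opcode constants written out as assumptions (RFC 3720's "I" bit and Logout opcode):]

#include <stdio.h>

#define ISCSI_OP_IMMEDIATE	0x40	/* RFC 3720 "I" bit, assumed value */
#define ISCSI_OP_LOGOUT		0x06	/* Logout request opcode, assumed */

int main(void)
{
	unsigned char opcode = ISCSI_OP_LOGOUT | ISCSI_OP_IMMEDIATE;	/* 0x46 */

	/* The patch caches this result in cmd->immediate_cmd when the
	 * command is set up, instead of re-reading the header here. */
	printf("immediate logout: %s\n",
	       (opcode & ISCSI_OP_IMMEDIATE) ? "yes" : "no");
	return 0;
}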
@@ -2336,7 +2359,7 @@ static int iscsit_handle_immediate_data(
 
 	cmd->write_data_done += length;
 
-	if (cmd->write_data_done == cmd->data_length) {
+	if (cmd->write_data_done == cmd->se_cmd.data_length) {
 		spin_lock_bh(&cmd->istate_lock);
 		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
 		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
@@ -2381,7 +2404,7 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
 		cmd->i_state = ISTATE_SEND_ASYNCMSG;
 
 		spin_lock_bh(&conn_p->cmd_lock);
-		list_add_tail(&cmd->i_list, &conn_p->conn_cmd_list);
+		list_add_tail(&cmd->i_conn_node, &conn_p->conn_cmd_list);
 		spin_unlock_bh(&conn_p->cmd_lock);
 
 		iscsit_add_cmd_to_response_queue(cmd, conn_p, cmd->i_state);
@@ -2434,10 +2457,19 @@ static int iscsit_send_conn_drop_async_message(
 	return 0;
 }
 
+static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn)
+{
+	if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
+	    (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
+		wait_for_completion_interruptible_timeout(
+					&conn->tx_half_close_comp,
+					ISCSI_TX_THREAD_TCP_TIMEOUT * HZ);
+	}
+}
+
 static int iscsit_send_data_in(
 	struct iscsi_cmd *cmd,
-	struct iscsi_conn *conn,
-	int *eodr)
+	struct iscsi_conn *conn)
 {
 	int iov_ret = 0, set_statsn = 0;
 	u32 iov_count = 0, tx_size = 0;
@@ -2445,6 +2477,8 @@ static int iscsit_send_data_in(
 	struct iscsi_datain_req *dr;
 	struct iscsi_data_rsp *hdr;
 	struct kvec *iov;
+	int eodr = 0;
+	int ret;
 
 	memset(&datain, 0, sizeof(struct iscsi_datain));
 	dr = iscsit_get_datain_values(cmd, &datain);
@@ -2457,11 +2491,11 @@ static int iscsit_send_data_in(
 	/*
 	 * Be paranoid and double check the logic for now.
 	 */
-	if ((datain.offset + datain.length) > cmd->data_length) {
+	if ((datain.offset + datain.length) > cmd->se_cmd.data_length) {
 		pr_err("Command ITT: 0x%08x, datain.offset: %u and"
 			" datain.length: %u exceeds cmd->data_length: %u\n",
 			cmd->init_task_tag, datain.offset, datain.length,
-			cmd->data_length);
+			cmd->se_cmd.data_length);
 		return -1;
 	}
 
@@ -2577,13 +2611,26 @@ static int iscsit_send_data_in(
 		cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn),
 		ntohl(hdr->offset), datain.length, conn->cid);
 
+	/* sendpage is preferred but can't insert markers */
+	if (!conn->conn_ops->IFMarker)
+		ret = iscsit_fe_sendpage_sg(cmd, conn);
+	else
+		ret = iscsit_send_tx_data(cmd, conn, 0);
+
+	iscsit_unmap_iovec(cmd);
+
+	if (ret < 0) {
+		iscsit_tx_thread_wait_for_tcp(conn);
+		return ret;
+	}
+
 	if (dr->dr_complete) {
-		*eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ?
+		eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ?
 				2 : 1;
 		iscsit_free_datain_req(cmd, dr);
 	}
 
-	return 0;
+	return eodr;
 }
 
 static int iscsit_send_logout_response(
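[Annotation: with the eodr out-parameter gone, iscsit_send_data_in() folds its TX path in and reports progress through the return value. A sketch of the contract, derived from the new response-queue handler later in this patch:]

/*
 * Return-value contract for iscsit_send_data_in() after this change:
 *   < 0  transport error; the caller tears the connection down
 *     0  more DataIN PDUs remain for this command
 *     1  sequence complete; status was carried by the final DataIN
 *     2  sequence complete, but SCF_TRANSPORT_TASK_SENSE means a
 *        separate SCSI status PDU must still be sent
 */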
@@ -2715,6 +2762,7 @@ static int iscsit_send_unsolicited_nopin(
 {
 	int tx_size = ISCSI_HDR_LEN;
 	struct iscsi_nopin *hdr;
+	int ret;
 
 	hdr = (struct iscsi_nopin *) cmd->pdu;
 	memset(hdr, 0, ISCSI_HDR_LEN);
@@ -2747,6 +2795,17 @@ static int iscsit_send_unsolicited_nopin(
 	pr_debug("Sending Unsolicited NOPIN TTT: 0x%08x StatSN:"
 		" 0x%08x CID: %hu\n", hdr->ttt, cmd->stat_sn, conn->cid);
 
+	ret = iscsit_send_tx_data(cmd, conn, 1);
+	if (ret < 0) {
+		iscsit_tx_thread_wait_for_tcp(conn);
+		return ret;
+	}
+
+	spin_lock_bh(&cmd->istate_lock);
+	cmd->i_state = want_response ?
+		ISTATE_SENT_NOPIN_WANT_RESPONSE : ISTATE_SENT_STATUS;
+	spin_unlock_bh(&cmd->istate_lock);
+
 	return 0;
 }
 
@@ -2837,13 +2896,14 @@ static int iscsit_send_nopin_response(
 	return 0;
 }
 
-int iscsit_send_r2t(
+static int iscsit_send_r2t(
 	struct iscsi_cmd *cmd,
 	struct iscsi_conn *conn)
 {
 	int tx_size = 0;
 	struct iscsi_r2t *r2t;
 	struct iscsi_r2t_rsp *hdr;
+	int ret;
 
 	r2t = iscsit_get_r2t_from_list(cmd);
 	if (!r2t)
@@ -2899,19 +2959,27 @@ int iscsit_send_r2t(
 	r2t->sent_r2t = 1;
 	spin_unlock_bh(&cmd->r2t_lock);
 
+	ret = iscsit_send_tx_data(cmd, conn, 1);
+	if (ret < 0) {
+		iscsit_tx_thread_wait_for_tcp(conn);
+		return ret;
+	}
+
+	spin_lock_bh(&cmd->dataout_timeout_lock);
+	iscsit_start_dataout_timer(cmd, conn);
+	spin_unlock_bh(&cmd->dataout_timeout_lock);
+
 	return 0;
 }
 
 /*
- * type 0: Normal Operation.
- * type 1: Called from Storage Transport.
- * type 2: Called from iscsi_task_reassign_complete_write() for
- *         connection recovery.
+ * @recovery: If called from iscsi_task_reassign_complete_write() for
+ *            connection recovery.
  */
 int iscsit_build_r2ts_for_cmd(
 	struct iscsi_cmd *cmd,
 	struct iscsi_conn *conn,
-	int type)
+	bool recovery)
 {
 	int first_r2t = 1;
 	u32 offset = 0, xfer_len = 0;
@@ -2922,32 +2990,37 @@ int iscsit_build_r2ts_for_cmd(
 		return 0;
 	}
 
-	if (conn->sess->sess_ops->DataSequenceInOrder && (type != 2))
-		if (cmd->r2t_offset < cmd->write_data_done)
-			cmd->r2t_offset = cmd->write_data_done;
+	if (conn->sess->sess_ops->DataSequenceInOrder &&
+	    !recovery)
+		cmd->r2t_offset = max(cmd->r2t_offset, cmd->write_data_done);
 
 	while (cmd->outstanding_r2ts < conn->sess->sess_ops->MaxOutstandingR2T) {
 		if (conn->sess->sess_ops->DataSequenceInOrder) {
 			offset = cmd->r2t_offset;
 
-			if (first_r2t && (type == 2)) {
-				xfer_len = ((offset +
-					(conn->sess->sess_ops->MaxBurstLength -
-					cmd->next_burst_len) >
-					cmd->data_length) ?
-					(cmd->data_length - offset) :
-					(conn->sess->sess_ops->MaxBurstLength -
-					cmd->next_burst_len));
+			if (first_r2t && recovery) {
+				int new_data_end = offset +
+					conn->sess->sess_ops->MaxBurstLength -
+					cmd->next_burst_len;
+
+				if (new_data_end > cmd->se_cmd.data_length)
+					xfer_len = cmd->se_cmd.data_length - offset;
+				else
+					xfer_len =
+						conn->sess->sess_ops->MaxBurstLength -
+						cmd->next_burst_len;
 			} else {
-				xfer_len = ((offset +
-					conn->sess->sess_ops->MaxBurstLength) >
-					cmd->data_length) ?
-					(cmd->data_length - offset) :
-					conn->sess->sess_ops->MaxBurstLength;
+				int new_data_end = offset +
+					conn->sess->sess_ops->MaxBurstLength;
+
+				if (new_data_end > cmd->se_cmd.data_length)
+					xfer_len = cmd->se_cmd.data_length - offset;
+				else
+					xfer_len = conn->sess->sess_ops->MaxBurstLength;
 			}
 			cmd->r2t_offset += xfer_len;
 
-			if (cmd->r2t_offset == cmd->data_length)
+			if (cmd->r2t_offset == cmd->se_cmd.data_length)
 				cmd->cmd_flags |= ICF_SENT_LAST_R2T;
 		} else {
 			struct iscsi_seq *seq;
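[Annotation: the rewritten branch is a clamp of each R2T to MaxBurstLength, which is what the new_data_end comparison computes. A runnable sketch with example numbers:]

#include <stdio.h>

int main(void)
{
	unsigned int data_length = 196608;	/* 192 KiB WRITE, example EDTL */
	unsigned int max_burst   = 65536;	/* MaxBurstLength=65536 */
	unsigned int offset = 0;

	/* Each R2T requests min(MaxBurstLength, bytes remaining). */
	while (offset < data_length) {
		unsigned int xfer_len = data_length - offset;

		if (xfer_len > max_burst)
			xfer_len = max_burst;
		printf("R2T offset=%u length=%u\n", offset, xfer_len);
		offset += xfer_len;
	}
	return 0;
}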
@@ -3179,6 +3252,8 @@ static bool iscsit_check_inaddr_any(struct iscsi_np *np)
 	return ret;
 }
 
+#define SENDTARGETS_BUF_LIMIT 32768U
+
 static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
 {
 	char *payload = NULL;
3184 char *payload = NULL; 3259 char *payload = NULL;
@@ -3187,12 +3262,10 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
3187 struct iscsi_tiqn *tiqn; 3262 struct iscsi_tiqn *tiqn;
3188 struct iscsi_tpg_np *tpg_np; 3263 struct iscsi_tpg_np *tpg_np;
3189 int buffer_len, end_of_buf = 0, len = 0, payload_len = 0; 3264 int buffer_len, end_of_buf = 0, len = 0, payload_len = 0;
3190 unsigned char buf[256]; 3265 unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */
3191 3266
3192 buffer_len = (conn->conn_ops->MaxRecvDataSegmentLength > 32768) ? 3267 buffer_len = max(conn->conn_ops->MaxRecvDataSegmentLength,
3193 32768 : conn->conn_ops->MaxRecvDataSegmentLength; 3268 SENDTARGETS_BUF_LIMIT);
3194
3195 memset(buf, 0, 256);
3196 3269
3197 payload = kzalloc(buffer_len, GFP_KERNEL); 3270 payload = kzalloc(buffer_len, GFP_KERNEL);
3198 if (!payload) { 3271 if (!payload) {
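[Annotation: the shrunken stack buffer is sized from the worst-case key/value pair it holds; the arithmetic, sketched with ISCSI_IQN_LEN assumed to be 224 for this example:]

#include <stdio.h>
#include <string.h>

#define ISCSI_IQN_LEN 224	/* assumed value for this sketch */

int main(void)
{
	/* "TargetName=" is 11 characters; +1 for the terminating NUL
	 * gives the +12 in the declaration above. */
	char buf[ISCSI_IQN_LEN + 12];

	snprintf(buf, sizeof(buf), "TargetName=%s",
		 "iqn.2003-01.org.linux-iscsi.example:target0");
	printf("%zu of %zu bytes used\n", strlen(buf) + 1, sizeof(buf));
	return 0;
}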
@@ -3408,18 +3481,6 @@ static int iscsit_send_reject(
 	return 0;
 }
 
-static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn)
-{
-	if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
-	    (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
-		wait_for_completion_interruptible_timeout(
-					&conn->tx_half_close_comp,
-					ISCSI_TX_THREAD_TCP_TIMEOUT * HZ);
-	}
-}
-
-#ifdef CONFIG_SMP
-
 void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
 {
 	struct iscsi_thread_set *ts = conn->thread_set;
@@ -3433,10 +3494,6 @@ void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
 	 * execute upon.
 	 */
 	ord = ts->thread_id % cpumask_weight(cpu_online_mask);
-#if 0
-	pr_debug(">>>>>>>>>>>>>>>>>>>> Generated ord: %d from"
-			" thread_id: %d\n", ord, ts->thread_id);
-#endif
 	for_each_online_cpu(cpu) {
 		if (ord-- == 0) {
 			cpumask_set_cpu(cpu, conn->conn_cpumask);
@@ -3476,34 +3533,196 @@ static inline void iscsit_thread_check_cpumask(
 	 */
 	memset(buf, 0, 128);
 	cpumask_scnprintf(buf, 128, conn->conn_cpumask);
-#if 0
-	pr_debug(">>>>>>>>>>>>>> Calling set_cpus_allowed_ptr():"
-			" %s for %s\n", buf, p->comm);
-#endif
 	set_cpus_allowed_ptr(p, conn->conn_cpumask);
 }
 
-#else
-
-void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
+static int handle_immediate_queue(struct iscsi_conn *conn)
 {
-	return;
+	struct iscsi_queue_req *qr;
+	struct iscsi_cmd *cmd;
+	u8 state;
+	int ret;
+
+	while ((qr = iscsit_get_cmd_from_immediate_queue(conn))) {
+		atomic_set(&conn->check_immediate_queue, 0);
+		cmd = qr->cmd;
+		state = qr->state;
+		kmem_cache_free(lio_qr_cache, qr);
+
+		switch (state) {
+		case ISTATE_SEND_R2T:
+			ret = iscsit_send_r2t(cmd, conn);
+			if (ret < 0)
+				goto err;
+			break;
+		case ISTATE_REMOVE:
+			if (cmd->data_direction == DMA_TO_DEVICE)
+				iscsit_stop_dataout_timer(cmd);
+
+			spin_lock_bh(&conn->cmd_lock);
+			list_del(&cmd->i_conn_node);
+			spin_unlock_bh(&conn->cmd_lock);
+
+			iscsit_free_cmd(cmd);
+			continue;
+		case ISTATE_SEND_NOPIN_WANT_RESPONSE:
+			iscsit_mod_nopin_response_timer(conn);
+			ret = iscsit_send_unsolicited_nopin(cmd,
+					conn, 1);
+			if (ret < 0)
+				goto err;
+			break;
+		case ISTATE_SEND_NOPIN_NO_RESPONSE:
+			ret = iscsit_send_unsolicited_nopin(cmd,
+					conn, 0);
+			if (ret < 0)
+				goto err;
+			break;
+		default:
+			pr_err("Unknown Opcode: 0x%02x ITT:"
+			       " 0x%08x, i_state: %d on CID: %hu\n",
+			       cmd->iscsi_opcode, cmd->init_task_tag, state,
+			       conn->cid);
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	return -1;
 }
 
-#define iscsit_thread_check_cpumask(X, Y, Z) ({})
-#endif /* CONFIG_SMP */
+static int handle_response_queue(struct iscsi_conn *conn)
+{
+	struct iscsi_queue_req *qr;
+	struct iscsi_cmd *cmd;
+	u8 state;
+	int ret;
+
+	while ((qr = iscsit_get_cmd_from_response_queue(conn))) {
+		cmd = qr->cmd;
+		state = qr->state;
+		kmem_cache_free(lio_qr_cache, qr);
+
+check_rsp_state:
+		switch (state) {
+		case ISTATE_SEND_DATAIN:
+			ret = iscsit_send_data_in(cmd, conn);
+			if (ret < 0)
+				goto err;
+			else if (!ret)
+				/* more drs */
+				goto check_rsp_state;
+			else if (ret == 1) {
+				/* all done */
+				spin_lock_bh(&cmd->istate_lock);
+				cmd->i_state = ISTATE_SENT_STATUS;
+				spin_unlock_bh(&cmd->istate_lock);
+				continue;
+			} else if (ret == 2) {
+				/* Still must send status,
+				   SCF_TRANSPORT_TASK_SENSE was set */
+				spin_lock_bh(&cmd->istate_lock);
+				cmd->i_state = ISTATE_SEND_STATUS;
+				spin_unlock_bh(&cmd->istate_lock);
+				state = ISTATE_SEND_STATUS;
+				goto check_rsp_state;
+			}
+
+			break;
+		case ISTATE_SEND_STATUS:
+		case ISTATE_SEND_STATUS_RECOVERY:
+			ret = iscsit_send_status(cmd, conn);
+			break;
+		case ISTATE_SEND_LOGOUTRSP:
+			ret = iscsit_send_logout_response(cmd, conn);
+			break;
+		case ISTATE_SEND_ASYNCMSG:
+			ret = iscsit_send_conn_drop_async_message(
+					cmd, conn);
+			break;
+		case ISTATE_SEND_NOPIN:
+			ret = iscsit_send_nopin_response(cmd, conn);
+			break;
+		case ISTATE_SEND_REJECT:
+			ret = iscsit_send_reject(cmd, conn);
+			break;
+		case ISTATE_SEND_TASKMGTRSP:
+			ret = iscsit_send_task_mgt_rsp(cmd, conn);
+			if (ret != 0)
+				break;
+			ret = iscsit_tmr_post_handler(cmd, conn);
+			if (ret != 0)
+				iscsit_fall_back_to_erl0(conn->sess);
+			break;
+		case ISTATE_SEND_TEXTRSP:
+			ret = iscsit_send_text_rsp(cmd, conn);
+			break;
+		default:
+			pr_err("Unknown Opcode: 0x%02x ITT:"
+			       " 0x%08x, i_state: %d on CID: %hu\n",
+			       cmd->iscsi_opcode, cmd->init_task_tag,
+			       state, conn->cid);
+			goto err;
+		}
+		if (ret < 0)
+			goto err;
+
+		if (iscsit_send_tx_data(cmd, conn, 1) < 0) {
+			iscsit_tx_thread_wait_for_tcp(conn);
+			iscsit_unmap_iovec(cmd);
+			goto err;
+		}
+		iscsit_unmap_iovec(cmd);
+
+		switch (state) {
+		case ISTATE_SEND_LOGOUTRSP:
+			if (!iscsit_logout_post_handler(cmd, conn))
+				goto restart;
+			/* fall through */
+		case ISTATE_SEND_STATUS:
+		case ISTATE_SEND_ASYNCMSG:
+		case ISTATE_SEND_NOPIN:
+		case ISTATE_SEND_STATUS_RECOVERY:
+		case ISTATE_SEND_TEXTRSP:
+		case ISTATE_SEND_TASKMGTRSP:
+			spin_lock_bh(&cmd->istate_lock);
+			cmd->i_state = ISTATE_SENT_STATUS;
+			spin_unlock_bh(&cmd->istate_lock);
+			break;
+		case ISTATE_SEND_REJECT:
+			if (cmd->cmd_flags & ICF_REJECT_FAIL_CONN) {
+				cmd->cmd_flags &= ~ICF_REJECT_FAIL_CONN;
+				complete(&cmd->reject_comp);
+				goto err;
+			}
+			complete(&cmd->reject_comp);
+			break;
+		default:
+			pr_err("Unknown Opcode: 0x%02x ITT:"
+			       " 0x%08x, i_state: %d on CID: %hu\n",
+			       cmd->iscsi_opcode, cmd->init_task_tag,
+			       cmd->i_state, conn->cid);
+			goto err;
+		}
+
+		if (atomic_read(&conn->check_immediate_queue))
+			break;
+	}
+
+	return 0;
+
+err:
+	return -1;
+restart:
+	return -EAGAIN;
+}
 
 int iscsi_target_tx_thread(void *arg)
 {
-	u8 state;
-	int eodr = 0;
 	int ret = 0;
-	int sent_status = 0;
-	int use_misc = 0;
-	int map_sg = 0;
-	struct iscsi_cmd *cmd = NULL;
 	struct iscsi_conn *conn;
-	struct iscsi_queue_req *qr = NULL;
 	struct iscsi_thread_set *ts = arg;
 	/*
 	 * Allow ourselves to be interrupted by SIGINT so that a
@@ -3516,7 +3735,7 @@ restart:
 	if (!conn)
 		goto out;
 
-	eodr = map_sg = ret = sent_status = use_misc = 0;
+	ret = 0;
 
 	while (!kthread_should_stop()) {
 		/*
@@ -3531,251 +3750,15 @@ restart:
 		    signal_pending(current))
 			goto transport_err;
 
-get_immediate:
-		qr = iscsit_get_cmd_from_immediate_queue(conn);
-		if (qr) {
-			atomic_set(&conn->check_immediate_queue, 0);
-			cmd = qr->cmd;
-			state = qr->state;
-			kmem_cache_free(lio_qr_cache, qr);
-
-			spin_lock_bh(&cmd->istate_lock);
-			switch (state) {
-			case ISTATE_SEND_R2T:
-				spin_unlock_bh(&cmd->istate_lock);
-				ret = iscsit_send_r2t(cmd, conn);
-				break;
-			case ISTATE_REMOVE:
-				spin_unlock_bh(&cmd->istate_lock);
-
-				if (cmd->data_direction == DMA_TO_DEVICE)
-					iscsit_stop_dataout_timer(cmd);
-
-				spin_lock_bh(&conn->cmd_lock);
-				list_del(&cmd->i_list);
-				spin_unlock_bh(&conn->cmd_lock);
-
-				iscsit_free_cmd(cmd);
-				goto get_immediate;
-			case ISTATE_SEND_NOPIN_WANT_RESPONSE:
-				spin_unlock_bh(&cmd->istate_lock);
-				iscsit_mod_nopin_response_timer(conn);
-				ret = iscsit_send_unsolicited_nopin(cmd,
-						conn, 1);
-				break;
-			case ISTATE_SEND_NOPIN_NO_RESPONSE:
-				spin_unlock_bh(&cmd->istate_lock);
-				ret = iscsit_send_unsolicited_nopin(cmd,
-						conn, 0);
-				break;
-			default:
-				pr_err("Unknown Opcode: 0x%02x ITT:"
-				       " 0x%08x, i_state: %d on CID: %hu\n",
-				       cmd->iscsi_opcode, cmd->init_task_tag, state,
-				       conn->cid);
-				spin_unlock_bh(&cmd->istate_lock);
-				goto transport_err;
-			}
-			if (ret < 0) {
-				conn->tx_immediate_queue = 0;
-				goto transport_err;
-			}
-
-			if (iscsit_send_tx_data(cmd, conn, 1) < 0) {
-				conn->tx_immediate_queue = 0;
-				iscsit_tx_thread_wait_for_tcp(conn);
-				goto transport_err;
-			}
-
-			spin_lock_bh(&cmd->istate_lock);
-			switch (state) {
-			case ISTATE_SEND_R2T:
-				spin_unlock_bh(&cmd->istate_lock);
-				spin_lock_bh(&cmd->dataout_timeout_lock);
-				iscsit_start_dataout_timer(cmd, conn);
-				spin_unlock_bh(&cmd->dataout_timeout_lock);
-				break;
-			case ISTATE_SEND_NOPIN_WANT_RESPONSE:
-				cmd->i_state = ISTATE_SENT_NOPIN_WANT_RESPONSE;
-				spin_unlock_bh(&cmd->istate_lock);
-				break;
-			case ISTATE_SEND_NOPIN_NO_RESPONSE:
-				cmd->i_state = ISTATE_SENT_STATUS;
-				spin_unlock_bh(&cmd->istate_lock);
-				break;
-			default:
-				pr_err("Unknown Opcode: 0x%02x ITT:"
-				       " 0x%08x, i_state: %d on CID: %hu\n",
-				       cmd->iscsi_opcode, cmd->init_task_tag,
-				       state, conn->cid);
-				spin_unlock_bh(&cmd->istate_lock);
-				goto transport_err;
-			}
-			goto get_immediate;
-		} else
-			conn->tx_immediate_queue = 0;
-
-get_response:
-		qr = iscsit_get_cmd_from_response_queue(conn);
-		if (qr) {
-			cmd = qr->cmd;
-			state = qr->state;
-			kmem_cache_free(lio_qr_cache, qr);
-
-			spin_lock_bh(&cmd->istate_lock);
-check_rsp_state:
-			switch (state) {
-			case ISTATE_SEND_DATAIN:
-				spin_unlock_bh(&cmd->istate_lock);
-				ret = iscsit_send_data_in(cmd, conn,
-						&eodr);
-				map_sg = 1;
-				break;
-			case ISTATE_SEND_STATUS:
-			case ISTATE_SEND_STATUS_RECOVERY:
-				spin_unlock_bh(&cmd->istate_lock);
-				use_misc = 1;
-				ret = iscsit_send_status(cmd, conn);
-				break;
-			case ISTATE_SEND_LOGOUTRSP:
-				spin_unlock_bh(&cmd->istate_lock);
-				use_misc = 1;
-				ret = iscsit_send_logout_response(cmd, conn);
-				break;
-			case ISTATE_SEND_ASYNCMSG:
-				spin_unlock_bh(&cmd->istate_lock);
-				use_misc = 1;
-				ret = iscsit_send_conn_drop_async_message(
-						cmd, conn);
-				break;
-			case ISTATE_SEND_NOPIN:
-				spin_unlock_bh(&cmd->istate_lock);
-				use_misc = 1;
-				ret = iscsit_send_nopin_response(cmd, conn);
-				break;
-			case ISTATE_SEND_REJECT:
-				spin_unlock_bh(&cmd->istate_lock);
-				use_misc = 1;
-				ret = iscsit_send_reject(cmd, conn);
-				break;
-			case ISTATE_SEND_TASKMGTRSP:
-				spin_unlock_bh(&cmd->istate_lock);
-				use_misc = 1;
-				ret = iscsit_send_task_mgt_rsp(cmd, conn);
-				if (ret != 0)
-					break;
-				ret = iscsit_tmr_post_handler(cmd, conn);
-				if (ret != 0)
-					iscsit_fall_back_to_erl0(conn->sess);
-				break;
-			case ISTATE_SEND_TEXTRSP:
-				spin_unlock_bh(&cmd->istate_lock);
-				use_misc = 1;
-				ret = iscsit_send_text_rsp(cmd, conn);
-				break;
-			default:
-				pr_err("Unknown Opcode: 0x%02x ITT:"
-				       " 0x%08x, i_state: %d on CID: %hu\n",
-				       cmd->iscsi_opcode, cmd->init_task_tag,
-				       state, conn->cid);
-				spin_unlock_bh(&cmd->istate_lock);
-				goto transport_err;
-			}
-			if (ret < 0) {
-				conn->tx_response_queue = 0;
-				goto transport_err;
-			}
-
-			if (map_sg && !conn->conn_ops->IFMarker) {
-				if (iscsit_fe_sendpage_sg(cmd, conn) < 0) {
-					conn->tx_response_queue = 0;
-					iscsit_tx_thread_wait_for_tcp(conn);
-					iscsit_unmap_iovec(cmd);
-					goto transport_err;
-				}
-			} else {
-				if (iscsit_send_tx_data(cmd, conn, use_misc) < 0) {
-					conn->tx_response_queue = 0;
-					iscsit_tx_thread_wait_for_tcp(conn);
-					iscsit_unmap_iovec(cmd);
-					goto transport_err;
-				}
-			}
-			map_sg = 0;
-			iscsit_unmap_iovec(cmd);
-
-			spin_lock_bh(&cmd->istate_lock);
-			switch (state) {
-			case ISTATE_SEND_DATAIN:
-				if (!eodr)
-					goto check_rsp_state;
-
-				if (eodr == 1) {
-					cmd->i_state = ISTATE_SENT_LAST_DATAIN;
-					sent_status = 1;
-					eodr = use_misc = 0;
-				} else if (eodr == 2) {
-					cmd->i_state = state =
-							ISTATE_SEND_STATUS;
-					sent_status = 0;
-					eodr = use_misc = 0;
-					goto check_rsp_state;
-				}
-				break;
-			case ISTATE_SEND_STATUS:
-				use_misc = 0;
-				sent_status = 1;
-				break;
-			case ISTATE_SEND_ASYNCMSG:
-			case ISTATE_SEND_NOPIN:
-			case ISTATE_SEND_STATUS_RECOVERY:
-			case ISTATE_SEND_TEXTRSP:
-				use_misc = 0;
-				sent_status = 1;
-				break;
-			case ISTATE_SEND_REJECT:
-				use_misc = 0;
-				if (cmd->cmd_flags & ICF_REJECT_FAIL_CONN) {
-					cmd->cmd_flags &= ~ICF_REJECT_FAIL_CONN;
-					spin_unlock_bh(&cmd->istate_lock);
-					complete(&cmd->reject_comp);
-					goto transport_err;
-				}
-				complete(&cmd->reject_comp);
-				break;
-			case ISTATE_SEND_TASKMGTRSP:
-				use_misc = 0;
-				sent_status = 1;
-				break;
-			case ISTATE_SEND_LOGOUTRSP:
-				spin_unlock_bh(&cmd->istate_lock);
-				if (!iscsit_logout_post_handler(cmd, conn))
-					goto restart;
-				spin_lock_bh(&cmd->istate_lock);
-				use_misc = 0;
-				sent_status = 1;
-				break;
-			default:
-				pr_err("Unknown Opcode: 0x%02x ITT:"
-				       " 0x%08x, i_state: %d on CID: %hu\n",
-				       cmd->iscsi_opcode, cmd->init_task_tag,
-				       cmd->i_state, conn->cid);
-				spin_unlock_bh(&cmd->istate_lock);
-				goto transport_err;
-			}
-
-			if (sent_status) {
-				cmd->i_state = ISTATE_SENT_STATUS;
-				sent_status = 0;
-			}
-			spin_unlock_bh(&cmd->istate_lock);
-
-			if (atomic_read(&conn->check_immediate_queue))
-				goto get_immediate;
+		ret = handle_immediate_queue(conn);
+		if (ret < 0)
+			goto transport_err;
 
-			goto get_response;
-		} else
-			conn->tx_response_queue = 0;
+		ret = handle_response_queue(conn);
+		if (ret == -EAGAIN)
+			goto restart;
+		else if (ret < 0)
+			goto transport_err;
 	}
 
 transport_err:
@@ -3952,9 +3935,9 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
 	 * has been reset -> returned sleeping pre-handler state.
 	 */
 	spin_lock_bh(&conn->cmd_lock);
-	list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) {
+	list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
 
-		list_del(&cmd->i_list);
+		list_del(&cmd->i_conn_node);
 		spin_unlock_bh(&conn->cmd_lock);
 
 		iscsit_increment_maxcmdsn(cmd, sess);
@@ -3972,7 +3955,7 @@ static void iscsit_stop_timers_for_cmds(
 	struct iscsi_cmd *cmd;
 
 	spin_lock_bh(&conn->cmd_lock);
-	list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
+	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
 		if (cmd->data_direction == DMA_TO_DEVICE)
 			iscsit_stop_dataout_timer(cmd);
 	}
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
index 5db2ddeed5eb..12abb4c9e34e 100644
--- a/drivers/target/iscsi/iscsi_target.h
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -18,8 +18,7 @@ extern int iscsit_logout_closesession(struct iscsi_cmd *, struct iscsi_conn *);
 extern int iscsit_logout_closeconnection(struct iscsi_cmd *, struct iscsi_conn *);
 extern int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *, struct iscsi_conn *);
 extern int iscsit_send_async_msg(struct iscsi_conn *, u16, u8, u8);
-extern int iscsit_send_r2t(struct iscsi_cmd *, struct iscsi_conn *);
-extern int iscsit_build_r2ts_for_cmd(struct iscsi_cmd *, struct iscsi_conn *, int);
+extern int iscsit_build_r2ts_for_cmd(struct iscsi_cmd *, struct iscsi_conn *, bool recovery);
 extern void iscsit_thread_get_cpumask(struct iscsi_conn *);
 extern int iscsi_target_tx_thread(void *);
 extern int iscsi_target_rx_thread(void *);
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 00c58cc82c85..69dc8e35c03a 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1538,7 +1538,7 @@ static int lio_write_pending(struct se_cmd *se_cmd)
 	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
 
 	if (!cmd->immediate_data && !cmd->unsolicited_data)
-		return iscsit_build_r2ts_for_cmd(cmd, cmd->conn, 1);
+		return iscsit_build_r2ts_for_cmd(cmd, cmd->conn, false);
 
 	return 0;
 }
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 2aaee7efa683..1c70144cdaf1 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -296,12 +296,11 @@ struct iscsi_datain_req {
 	u32			runlength;
 	u32			data_length;
 	u32			data_offset;
-	u32			data_offset_end;
 	u32			data_sn;
 	u32			next_burst_len;
 	u32			read_data_done;
 	u32			seq_send_order;
-	struct list_head	dr_list;
+	struct list_head	cmd_datain_node;
 } ____cacheline_aligned;
 
 struct iscsi_ooo_cmdsn {
@@ -381,8 +380,6 @@ struct iscsi_cmd {
 	u32			buf_ptr_size;
 	/* Used to store DataDigest */
 	u32			data_crc;
-	/* Total size in bytes associated with command */
-	u32			data_length;
 	/* Counter for MaxOutstandingR2T */
 	u32			outstanding_r2ts;
 	/* Next R2T Offset when DataSequenceInOrder=Yes */
@@ -464,16 +461,13 @@ struct iscsi_cmd {
 	/* Session the command is part of, used for connection recovery */
 	struct iscsi_session	*sess;
 	/* list_head for connection list */
-	struct list_head	i_list;
+	struct list_head	i_conn_node;
 	/* The TCM I/O descriptor that is accessed via container_of() */
 	struct se_cmd		se_cmd;
 	/* Sense buffer that will be mapped into outgoing status */
 #define ISCSI_SENSE_BUFFER_LEN (TRANSPORT_SENSE_BUFFER + 2)
 	unsigned char		sense_buffer[ISCSI_SENSE_BUFFER_LEN];
 
-	struct scatterlist	*t_mem_sg;
-	u32			t_mem_sg_nents;
-
 	u32			padding;
 	u8			pad_bytes[4];
 
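[Annotation: the embedded se_cmd kept here is what the container_of() comment refers to; a userspace mock of the recovery pattern, with stand-in structs rather than the kernel definitions:]

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct se_cmd { int opaque; };			/* stand-in */
struct iscsi_cmd {
	int		i_state;
	struct se_cmd	se_cmd;			/* embedded TCM descriptor */
};

int main(void)
{
	struct iscsi_cmd cmd = { .i_state = 7 };
	struct se_cmd *se = &cmd.se_cmd;	/* what TCM callbacks receive */

	/* Recover the wrapping iscsi_cmd, as lio_write_pending() does. */
	struct iscsi_cmd *back = container_of(se, struct iscsi_cmd, se_cmd);

	printf("round trip %s, i_state=%d\n",
	       back == &cmd ? "ok" : "broken", back->i_state);
	return 0;
}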
@@ -500,8 +494,6 @@ struct iscsi_conn {
 	u8			network_transport;
 	enum iscsi_timer_flags_table nopin_timer_flags;
 	enum iscsi_timer_flags_table nopin_response_timer_flags;
-	u8			tx_immediate_queue;
-	u8			tx_response_queue;
 	/* Used to know what thread encountered a transport failure */
 	u8			which_thread;
 	/* connection id assigned by the Initiator */
diff --git a/drivers/target/iscsi/iscsi_target_datain_values.c b/drivers/target/iscsi/iscsi_target_datain_values.c
index 8c0495129513..848fee768948 100644
--- a/drivers/target/iscsi/iscsi_target_datain_values.c
+++ b/drivers/target/iscsi/iscsi_target_datain_values.c
@@ -37,7 +37,7 @@ struct iscsi_datain_req *iscsit_allocate_datain_req(void)
 			" struct iscsi_datain_req\n");
 		return NULL;
 	}
-	INIT_LIST_HEAD(&dr->dr_list);
+	INIT_LIST_HEAD(&dr->cmd_datain_node);
 
 	return dr;
 }
@@ -45,14 +45,14 @@ struct iscsi_datain_req *iscsit_allocate_datain_req(void)
 void iscsit_attach_datain_req(struct iscsi_cmd *cmd, struct iscsi_datain_req *dr)
 {
 	spin_lock(&cmd->datain_lock);
-	list_add_tail(&dr->dr_list, &cmd->datain_list);
+	list_add_tail(&dr->cmd_datain_node, &cmd->datain_list);
 	spin_unlock(&cmd->datain_lock);
 }
 
 void iscsit_free_datain_req(struct iscsi_cmd *cmd, struct iscsi_datain_req *dr)
 {
 	spin_lock(&cmd->datain_lock);
-	list_del(&dr->dr_list);
+	list_del(&dr->cmd_datain_node);
 	spin_unlock(&cmd->datain_lock);
 
 	kmem_cache_free(lio_dr_cache, dr);
@@ -63,8 +63,8 @@ void iscsit_free_all_datain_reqs(struct iscsi_cmd *cmd)
 	struct iscsi_datain_req *dr, *dr_tmp;
 
 	spin_lock(&cmd->datain_lock);
-	list_for_each_entry_safe(dr, dr_tmp, &cmd->datain_list, dr_list) {
-		list_del(&dr->dr_list);
+	list_for_each_entry_safe(dr, dr_tmp, &cmd->datain_list, cmd_datain_node) {
+		list_del(&dr->cmd_datain_node);
 		kmem_cache_free(lio_dr_cache, dr);
 	}
 	spin_unlock(&cmd->datain_lock);
@@ -72,17 +72,14 @@ void iscsit_free_all_datain_reqs(struct iscsi_cmd *cmd)
 
 struct iscsi_datain_req *iscsit_get_datain_req(struct iscsi_cmd *cmd)
 {
-	struct iscsi_datain_req *dr;
-
 	if (list_empty(&cmd->datain_list)) {
 		pr_err("cmd->datain_list is empty for ITT:"
 			" 0x%08x\n", cmd->init_task_tag);
 		return NULL;
 	}
-	list_for_each_entry(dr, &cmd->datain_list, dr_list)
-		break;
 
-	return dr;
+	return list_first_entry(&cmd->datain_list, struct iscsi_datain_req,
+				cmd_datain_node);
 }
 
 /*
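[Annotation: list_first_entry() in <linux/list.h> is list_entry() applied to head->next, so the replacement is exactly equivalent to the removed loop-and-break idiom. Open-coded for comparison (a kernel-context sketch, not compilable on its own):]

/* What the new call expands to, modulo macro hygiene: */
struct iscsi_datain_req *dr =
	list_entry(cmd->datain_list.next, struct iscsi_datain_req,
		   cmd_datain_node);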
@@ -113,7 +110,7 @@ static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_yes(
 	read_data_done = (!dr->recovery) ?
 	      cmd->read_data_done : dr->read_data_done;
 
-	read_data_left = (cmd->data_length - read_data_done);
+	read_data_left = (cmd->se_cmd.data_length - read_data_done);
 	if (!read_data_left) {
 		pr_err("ITT: 0x%08x read_data_left is zero!\n",
 			cmd->init_task_tag);
@@ -212,7 +209,7 @@ static struct iscsi_datain_req *iscsit_set_datain_values_no_and_yes(
 	seq_send_order = (!dr->recovery) ?
 	      cmd->seq_send_order : dr->seq_send_order;
 
-	read_data_left = (cmd->data_length - read_data_done);
+	read_data_left = (cmd->se_cmd.data_length - read_data_done);
 	if (!read_data_left) {
 		pr_err("ITT: 0x%08x read_data_left is zero!\n",
 			cmd->init_task_tag);
@@ -231,8 +228,8 @@ static struct iscsi_datain_req *iscsit_set_datain_values_no_and_yes(
 		offset = (seq->offset + seq->next_burst_len);
 
 		if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >=
-		     cmd->data_length) {
-			datain->length = (cmd->data_length - offset);
+		     cmd->se_cmd.data_length) {
+			datain->length = (cmd->se_cmd.data_length - offset);
 			datain->offset = offset;
 
 			datain->flags |= ISCSI_FLAG_CMD_FINAL;
@@ -264,7 +261,7 @@ static struct iscsi_datain_req *iscsit_set_datain_values_no_and_yes(
 		}
 	}
 
-	if ((read_data_done + datain->length) == cmd->data_length)
+	if ((read_data_done + datain->length) == cmd->se_cmd.data_length)
 		datain->flags |= ISCSI_FLAG_DATA_STATUS;
 
 	datain->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
@@ -333,7 +330,7 @@ static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_no(
333 read_data_done = (!dr->recovery) ? 330 read_data_done = (!dr->recovery) ?
334 cmd->read_data_done : dr->read_data_done; 331 cmd->read_data_done : dr->read_data_done;
335 332
336 read_data_left = (cmd->data_length - read_data_done); 333 read_data_left = (cmd->se_cmd.data_length - read_data_done);
337 if (!read_data_left) { 334 if (!read_data_left) {
338 pr_err("ITT: 0x%08x read_data_left is zero!\n", 335 pr_err("ITT: 0x%08x read_data_left is zero!\n",
339 cmd->init_task_tag); 336 cmd->init_task_tag);
@@ -344,7 +341,7 @@ static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_no(
344 if (!pdu) 341 if (!pdu)
345 return dr; 342 return dr;
346 343
347 if ((read_data_done + pdu->length) == cmd->data_length) { 344 if ((read_data_done + pdu->length) == cmd->se_cmd.data_length) {
348 pdu->flags |= (ISCSI_FLAG_CMD_FINAL | ISCSI_FLAG_DATA_STATUS); 345 pdu->flags |= (ISCSI_FLAG_CMD_FINAL | ISCSI_FLAG_DATA_STATUS);
349 if (conn->sess->sess_ops->ErrorRecoveryLevel > 0) 346 if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
350 pdu->flags |= ISCSI_FLAG_DATA_ACK; 347 pdu->flags |= ISCSI_FLAG_DATA_ACK;
@@ -433,7 +430,7 @@ static struct iscsi_datain_req *iscsit_set_datain_values_no_and_no(
433 seq_send_order = (!dr->recovery) ? 430 seq_send_order = (!dr->recovery) ?
434 cmd->seq_send_order : dr->seq_send_order; 431 cmd->seq_send_order : dr->seq_send_order;
435 432
436 read_data_left = (cmd->data_length - read_data_done); 433 read_data_left = (cmd->se_cmd.data_length - read_data_done);
437 if (!read_data_left) { 434 if (!read_data_left) {
438 pr_err("ITT: 0x%08x read_data_left is zero!\n", 435 pr_err("ITT: 0x%08x read_data_left is zero!\n",
439 cmd->init_task_tag); 436 cmd->init_task_tag);
@@ -463,7 +460,7 @@ static struct iscsi_datain_req *iscsit_set_datain_values_no_and_no(
463 } else 460 } else
464 seq->next_burst_len += pdu->length; 461 seq->next_burst_len += pdu->length;
465 462
466 if ((read_data_done + pdu->length) == cmd->data_length) 463 if ((read_data_done + pdu->length) == cmd->se_cmd.data_length)
467 pdu->flags |= ISCSI_FLAG_DATA_STATUS; 464 pdu->flags |= ISCSI_FLAG_DATA_STATUS;
468 465
469 pdu->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++; 466 pdu->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
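
[Editor's note] The iscsit_get_datain_req() rewrite above swaps an open-coded "iterate once and break" loop for list_first_entry(). A minimal sketch of the idiom follows, using a hypothetical demo_req type so it stands alone; only list_empty() and list_first_entry() are real kernel APIs here.

#include <linux/list.h>

struct demo_req {
        struct list_head node;          /* plays the role of cmd_datain_node */
};

/* Return the oldest queued request, or NULL when the queue is empty. */
static struct demo_req *demo_first_req(struct list_head *queue)
{
        if (list_empty(queue))
                return NULL;
        /*
         * list_first_entry() maps the first list_head back to its
         * containing structure; it is only valid after the emptiness
         * check above, exactly as in the hunk it illustrates.
         */
        return list_first_entry(queue, struct demo_req, node);
}
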
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 1ab0560b0924..1a02016ecdab 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -48,9 +48,9 @@ void iscsit_set_dataout_sequence_values(
48 if (cmd->unsolicited_data) { 48 if (cmd->unsolicited_data) {
49 cmd->seq_start_offset = cmd->write_data_done; 49 cmd->seq_start_offset = cmd->write_data_done;
50 cmd->seq_end_offset = (cmd->write_data_done + 50 cmd->seq_end_offset = (cmd->write_data_done +
51 (cmd->data_length > 51 (cmd->se_cmd.data_length >
52 conn->sess->sess_ops->FirstBurstLength) ? 52 conn->sess->sess_ops->FirstBurstLength) ?
53 conn->sess->sess_ops->FirstBurstLength : cmd->data_length); 53 conn->sess->sess_ops->FirstBurstLength : cmd->se_cmd.data_length);
54 return; 54 return;
55 } 55 }
56 56
@@ -59,15 +59,15 @@ void iscsit_set_dataout_sequence_values(
59 59
60 if (!cmd->seq_start_offset && !cmd->seq_end_offset) { 60 if (!cmd->seq_start_offset && !cmd->seq_end_offset) {
61 cmd->seq_start_offset = cmd->write_data_done; 61 cmd->seq_start_offset = cmd->write_data_done;
62 cmd->seq_end_offset = (cmd->data_length > 62 cmd->seq_end_offset = (cmd->se_cmd.data_length >
63 conn->sess->sess_ops->MaxBurstLength) ? 63 conn->sess->sess_ops->MaxBurstLength) ?
64 (cmd->write_data_done + 64 (cmd->write_data_done +
65 conn->sess->sess_ops->MaxBurstLength) : cmd->data_length; 65 conn->sess->sess_ops->MaxBurstLength) : cmd->se_cmd.data_length;
66 } else { 66 } else {
67 cmd->seq_start_offset = cmd->seq_end_offset; 67 cmd->seq_start_offset = cmd->seq_end_offset;
68 cmd->seq_end_offset = ((cmd->seq_end_offset + 68 cmd->seq_end_offset = ((cmd->seq_end_offset +
69 conn->sess->sess_ops->MaxBurstLength) >= 69 conn->sess->sess_ops->MaxBurstLength) >=
70 cmd->data_length) ? cmd->data_length : 70 cmd->se_cmd.data_length) ? cmd->se_cmd.data_length :
71 (cmd->seq_end_offset + 71 (cmd->seq_end_offset +
72 conn->sess->sess_ops->MaxBurstLength); 72 conn->sess->sess_ops->MaxBurstLength);
73 } 73 }
@@ -182,13 +182,13 @@ static int iscsit_dataout_check_unsolicited_sequence(
182 if (!conn->sess->sess_ops->DataPDUInOrder) 182 if (!conn->sess->sess_ops->DataPDUInOrder)
183 goto out; 183 goto out;
184 184
185 if ((first_burst_len != cmd->data_length) && 185 if ((first_burst_len != cmd->se_cmd.data_length) &&
186 (first_burst_len != conn->sess->sess_ops->FirstBurstLength)) { 186 (first_burst_len != conn->sess->sess_ops->FirstBurstLength)) {
187 pr_err("Unsolicited non-immediate data" 187 pr_err("Unsolicited non-immediate data"
188 " received %u does not equal FirstBurstLength: %u, and" 188 " received %u does not equal FirstBurstLength: %u, and"
189 " does not equal ExpXferLen %u.\n", first_burst_len, 189 " does not equal ExpXferLen %u.\n", first_burst_len,
190 conn->sess->sess_ops->FirstBurstLength, 190 conn->sess->sess_ops->FirstBurstLength,
191 cmd->data_length); 191 cmd->se_cmd.data_length);
192 transport_send_check_condition_and_sense(&cmd->se_cmd, 192 transport_send_check_condition_and_sense(&cmd->se_cmd,
193 TCM_INCORRECT_AMOUNT_OF_DATA, 0); 193 TCM_INCORRECT_AMOUNT_OF_DATA, 0);
194 return DATAOUT_CANNOT_RECOVER; 194 return DATAOUT_CANNOT_RECOVER;
@@ -201,10 +201,10 @@ static int iscsit_dataout_check_unsolicited_sequence(
201 conn->sess->sess_ops->FirstBurstLength); 201 conn->sess->sess_ops->FirstBurstLength);
202 return DATAOUT_CANNOT_RECOVER; 202 return DATAOUT_CANNOT_RECOVER;
203 } 203 }
204 if (first_burst_len == cmd->data_length) { 204 if (first_burst_len == cmd->se_cmd.data_length) {
205 pr_err("Command ITT: 0x%08x reached" 205 pr_err("Command ITT: 0x%08x reached"
206 " ExpXferLen: %u, but ISCSI_FLAG_CMD_FINAL is not set. protocol" 206 " ExpXferLen: %u, but ISCSI_FLAG_CMD_FINAL is not set. protocol"
207 " error.\n", cmd->init_task_tag, cmd->data_length); 207 " error.\n", cmd->init_task_tag, cmd->se_cmd.data_length);
208 return DATAOUT_CANNOT_RECOVER; 208 return DATAOUT_CANNOT_RECOVER;
209 } 209 }
210 } 210 }
@@ -294,7 +294,7 @@ static int iscsit_dataout_check_sequence(
294 if ((next_burst_len < 294 if ((next_burst_len <
295 conn->sess->sess_ops->MaxBurstLength) && 295 conn->sess->sess_ops->MaxBurstLength) &&
296 ((cmd->write_data_done + payload_length) < 296 ((cmd->write_data_done + payload_length) <
297 cmd->data_length)) { 297 cmd->se_cmd.data_length)) {
298 pr_err("Command ITT: 0x%08x set ISCSI_FLAG_CMD_FINAL" 298 pr_err("Command ITT: 0x%08x set ISCSI_FLAG_CMD_FINAL"
299 " before end of DataOUT sequence, protocol" 299 " before end of DataOUT sequence, protocol"
300 " error.\n", cmd->init_task_tag); 300 " error.\n", cmd->init_task_tag);
@@ -319,7 +319,7 @@ static int iscsit_dataout_check_sequence(
319 return DATAOUT_CANNOT_RECOVER; 319 return DATAOUT_CANNOT_RECOVER;
320 } 320 }
321 if ((cmd->write_data_done + payload_length) == 321 if ((cmd->write_data_done + payload_length) ==
322 cmd->data_length) { 322 cmd->se_cmd.data_length) {
323 pr_err("Command ITT: 0x%08x reached" 323 pr_err("Command ITT: 0x%08x reached"
324 " last DataOUT PDU in sequence but ISCSI_FLAG_" 324 " last DataOUT PDU in sequence but ISCSI_FLAG_"
325 "CMD_FINAL is not set, protocol error.\n", 325 "CMD_FINAL is not set, protocol error.\n",
@@ -640,9 +640,12 @@ static int iscsit_dataout_post_crc_passed(
640 640
641 cmd->write_data_done += payload_length; 641 cmd->write_data_done += payload_length;
642 642
643 return (cmd->write_data_done == cmd->data_length) ? 643 if (cmd->write_data_done == cmd->se_cmd.data_length)
644 DATAOUT_SEND_TO_TRANSPORT : (send_r2t) ? 644 return DATAOUT_SEND_TO_TRANSPORT;
645 DATAOUT_SEND_R2T : DATAOUT_NORMAL; 645 else if (send_r2t)
646 return DATAOUT_SEND_R2T;
647 else
648 return DATAOUT_NORMAL;
646} 649}
647 650
648static int iscsit_dataout_post_crc_failed( 651static int iscsit_dataout_post_crc_failed(
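
[Editor's note] The erl0.c hunks above repeatedly clamp a burst boundary to the total transfer length with chained ternaries. The same computation reads more directly with min_t(); this is a hedged sketch with illustrative names, not a drop-in replacement for the driver code.

#include <linux/kernel.h>       /* min_t() */
#include <linux/types.h>

/*
 * End of a DataOUT sequence: the next MaxBurstLength boundary or the
 * end of the transfer, whichever comes first.
 */
static u32 demo_seq_end(u32 start, u32 max_burst_len, u32 data_length)
{
        return min_t(u32, start + max_burst_len, data_length);
}
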
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 006f605edb08..ecdd46deedda 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -279,11 +279,9 @@ int iscsit_create_recovery_datain_values_datasequenceinorder_no(
279 * seq->first_datasn and seq->last_datasn have not been set. 279 * seq->first_datasn and seq->last_datasn have not been set.
280 */ 280 */
281 if (!seq->sent) { 281 if (!seq->sent) {
282#if 0
283 pr_err("Ignoring non-sent sequence 0x%08x ->" 282 pr_err("Ignoring non-sent sequence 0x%08x ->"
284 " 0x%08x\n\n", seq->first_datasn, 283 " 0x%08x\n\n", seq->first_datasn,
285 seq->last_datasn); 284 seq->last_datasn);
286#endif
287 continue; 285 continue;
288 } 286 }
289 287
@@ -294,11 +292,10 @@ int iscsit_create_recovery_datain_values_datasequenceinorder_no(
294 */ 292 */
295 if ((seq->first_datasn < begrun) && 293 if ((seq->first_datasn < begrun) &&
296 (seq->last_datasn < begrun)) { 294 (seq->last_datasn < begrun)) {
297#if 0
298 pr_err("Pre BegRun sequence 0x%08x ->" 295 pr_err("Pre BegRun sequence 0x%08x ->"
299 " 0x%08x\n", seq->first_datasn, 296 " 0x%08x\n", seq->first_datasn,
300 seq->last_datasn); 297 seq->last_datasn);
301#endif 298
302 read_data_done += cmd->seq_list[i].xfer_len; 299 read_data_done += cmd->seq_list[i].xfer_len;
303 seq->next_burst_len = seq->pdu_send_order = 0; 300 seq->next_burst_len = seq->pdu_send_order = 0;
304 continue; 301 continue;
@@ -309,11 +306,10 @@ int iscsit_create_recovery_datain_values_datasequenceinorder_no(
309 */ 306 */
310 if ((seq->first_datasn <= begrun) && 307 if ((seq->first_datasn <= begrun) &&
311 (seq->last_datasn >= begrun)) { 308 (seq->last_datasn >= begrun)) {
312#if 0
313 pr_err("Found sequence begrun: 0x%08x in" 309 pr_err("Found sequence begrun: 0x%08x in"
314 " 0x%08x -> 0x%08x\n", begrun, 310 " 0x%08x -> 0x%08x\n", begrun,
315 seq->first_datasn, seq->last_datasn); 311 seq->first_datasn, seq->last_datasn);
316#endif 312
317 seq_send_order = seq->seq_send_order; 313 seq_send_order = seq->seq_send_order;
318 data_sn = seq->first_datasn; 314 data_sn = seq->first_datasn;
319 seq->next_burst_len = seq->pdu_send_order = 0; 315 seq->next_burst_len = seq->pdu_send_order = 0;
@@ -369,10 +365,9 @@ int iscsit_create_recovery_datain_values_datasequenceinorder_no(
369 */ 365 */
370 if ((seq->first_datasn > begrun) || 366 if ((seq->first_datasn > begrun) ||
371 (seq->last_datasn > begrun)) { 367 (seq->last_datasn > begrun)) {
372#if 0
373 pr_err("Post BegRun sequence 0x%08x -> 0x%08x\n", 368 pr_err("Post BegRun sequence 0x%08x -> 0x%08x\n",
374 seq->first_datasn, seq->last_datasn); 369 seq->first_datasn, seq->last_datasn);
375#endif 370
376 seq->next_burst_len = seq->pdu_send_order = 0; 371 seq->next_burst_len = seq->pdu_send_order = 0;
377 continue; 372 continue;
378 } 373 }
@@ -526,7 +521,7 @@ int iscsit_handle_status_snack(
526 found_cmd = 0; 521 found_cmd = 0;
527 522
528 spin_lock_bh(&conn->cmd_lock); 523 spin_lock_bh(&conn->cmd_lock);
529 list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) { 524 list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
530 if (cmd->stat_sn == begrun) { 525 if (cmd->stat_sn == begrun) {
531 found_cmd = 1; 526 found_cmd = 1;
532 break; 527 break;
@@ -987,7 +982,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
987 return 0; 982 return 0;
988 983
989 iscsit_set_dataout_sequence_values(cmd); 984 iscsit_set_dataout_sequence_values(cmd);
990 iscsit_build_r2ts_for_cmd(cmd, cmd->conn, 0); 985 iscsit_build_r2ts_for_cmd(cmd, cmd->conn, false);
991 } 986 }
992 return 0; 987 return 0;
993 } 988 }
@@ -1121,8 +1116,8 @@ static int iscsit_set_dataout_timeout_values(
1121 if (cmd->unsolicited_data) { 1116 if (cmd->unsolicited_data) {
1122 *offset = 0; 1117 *offset = 0;
1123 *length = (conn->sess->sess_ops->FirstBurstLength > 1118 *length = (conn->sess->sess_ops->FirstBurstLength >
1124 cmd->data_length) ? 1119 cmd->se_cmd.data_length) ?
1125 cmd->data_length : 1120 cmd->se_cmd.data_length :
1126 conn->sess->sess_ops->FirstBurstLength; 1121 conn->sess->sess_ops->FirstBurstLength;
1127 return 0; 1122 return 0;
1128 } 1123 }
@@ -1193,8 +1188,8 @@ static void iscsit_handle_dataout_timeout(unsigned long data)
1193 if (conn->sess->sess_ops->DataPDUInOrder) { 1188 if (conn->sess->sess_ops->DataPDUInOrder) {
1194 pdu_offset = cmd->write_data_done; 1189 pdu_offset = cmd->write_data_done;
1195 if ((pdu_offset + (conn->sess->sess_ops->MaxBurstLength - 1190 if ((pdu_offset + (conn->sess->sess_ops->MaxBurstLength -
1196 cmd->next_burst_len)) > cmd->data_length) 1191 cmd->next_burst_len)) > cmd->se_cmd.data_length)
1197 pdu_length = (cmd->data_length - 1192 pdu_length = (cmd->se_cmd.data_length -
1198 cmd->write_data_done); 1193 cmd->write_data_done);
1199 else 1194 else
1200 pdu_length = (conn->sess->sess_ops->MaxBurstLength - 1195 pdu_length = (conn->sess->sess_ops->MaxBurstLength -
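
[Editor's note] Call sites in erl1.c (and in tmr.c further below) change the last argument of iscsit_build_r2ts_for_cmd() from the magic integers 0 and 2 to false/true. The stand-in below is illustrative only; just the calling convention mirrors the driver, and the body is a placeholder.

#include <linux/types.h>

struct iscsi_cmd;
struct iscsi_conn;

/* 'recovery' selects R2T rebuild for connection recovery; a bool
 * documents intent at the call site where a bare int did not. */
static int demo_build_r2ts(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
                           bool recovery)
{
        return 0;       /* placeholder; the real logic lives in iscsi_target.c */
}

/* Usage, matching the hunks above:
 *      demo_build_r2ts(cmd, conn, false);      normal DataOUT path
 *      demo_build_r2ts(cmd, conn, true);       TMR TASK_REASSIGN path
 */
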
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index 1af1f21af21f..65aac14fd831 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -138,9 +138,9 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
138 138
139 spin_lock(&cr->conn_recovery_cmd_lock); 139 spin_lock(&cr->conn_recovery_cmd_lock);
140 list_for_each_entry_safe(cmd, cmd_tmp, 140 list_for_each_entry_safe(cmd, cmd_tmp,
141 &cr->conn_recovery_cmd_list, i_list) { 141 &cr->conn_recovery_cmd_list, i_conn_node) {
142 142
143 list_del(&cmd->i_list); 143 list_del(&cmd->i_conn_node);
144 cmd->conn = NULL; 144 cmd->conn = NULL;
145 spin_unlock(&cr->conn_recovery_cmd_lock); 145 spin_unlock(&cr->conn_recovery_cmd_lock);
146 iscsit_free_cmd(cmd); 146 iscsit_free_cmd(cmd);
@@ -160,9 +160,9 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
160 160
161 spin_lock(&cr->conn_recovery_cmd_lock); 161 spin_lock(&cr->conn_recovery_cmd_lock);
162 list_for_each_entry_safe(cmd, cmd_tmp, 162 list_for_each_entry_safe(cmd, cmd_tmp,
163 &cr->conn_recovery_cmd_list, i_list) { 163 &cr->conn_recovery_cmd_list, i_conn_node) {
164 164
165 list_del(&cmd->i_list); 165 list_del(&cmd->i_conn_node);
166 cmd->conn = NULL; 166 cmd->conn = NULL;
167 spin_unlock(&cr->conn_recovery_cmd_lock); 167 spin_unlock(&cr->conn_recovery_cmd_lock);
168 iscsit_free_cmd(cmd); 168 iscsit_free_cmd(cmd);
@@ -220,7 +220,7 @@ int iscsit_remove_cmd_from_connection_recovery(
220 } 220 }
221 cr = cmd->cr; 221 cr = cmd->cr;
222 222
223 list_del(&cmd->i_list); 223 list_del(&cmd->i_conn_node);
224 return --cr->cmd_count; 224 return --cr->cmd_count;
225} 225}
226 226
@@ -234,7 +234,7 @@ void iscsit_discard_cr_cmds_by_expstatsn(
234 234
235 spin_lock(&cr->conn_recovery_cmd_lock); 235 spin_lock(&cr->conn_recovery_cmd_lock);
236 list_for_each_entry_safe(cmd, cmd_tmp, 236 list_for_each_entry_safe(cmd, cmd_tmp,
237 &cr->conn_recovery_cmd_list, i_list) { 237 &cr->conn_recovery_cmd_list, i_conn_node) {
238 238
239 if (((cmd->deferred_i_state != ISTATE_SENT_STATUS) && 239 if (((cmd->deferred_i_state != ISTATE_SENT_STATUS) &&
240 (cmd->deferred_i_state != ISTATE_REMOVE)) || 240 (cmd->deferred_i_state != ISTATE_REMOVE)) ||
@@ -297,11 +297,11 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
297 mutex_unlock(&sess->cmdsn_mutex); 297 mutex_unlock(&sess->cmdsn_mutex);
298 298
299 spin_lock_bh(&conn->cmd_lock); 299 spin_lock_bh(&conn->cmd_lock);
300 list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) { 300 list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
301 if (!(cmd->cmd_flags & ICF_OOO_CMDSN)) 301 if (!(cmd->cmd_flags & ICF_OOO_CMDSN))
302 continue; 302 continue;
303 303
304 list_del(&cmd->i_list); 304 list_del(&cmd->i_conn_node);
305 305
306 spin_unlock_bh(&conn->cmd_lock); 306 spin_unlock_bh(&conn->cmd_lock);
307 iscsit_free_cmd(cmd); 307 iscsit_free_cmd(cmd);
@@ -339,14 +339,14 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
339 /* 339 /*
340 * Only perform connection recovery on ISCSI_OP_SCSI_CMD or 340 * Only perform connection recovery on ISCSI_OP_SCSI_CMD or
341 * ISCSI_OP_NOOP_OUT opcodes. For all other opcodes call 341 * ISCSI_OP_NOOP_OUT opcodes. For all other opcodes call
342 * list_del(&cmd->i_list); to release the command to the 342 * list_del(&cmd->i_conn_node); to release the command to the
343 * session pool and remove it from the connection's list. 343 * session pool and remove it from the connection's list.
344 * 344 *
345 * Also stop the DataOUT timer, which will be restarted after 345 * Also stop the DataOUT timer, which will be restarted after
346 * sending the TMR response. 346 * sending the TMR response.
347 */ 347 */
348 spin_lock_bh(&conn->cmd_lock); 348 spin_lock_bh(&conn->cmd_lock);
349 list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) { 349 list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
350 350
351 if ((cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD) && 351 if ((cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD) &&
352 (cmd->iscsi_opcode != ISCSI_OP_NOOP_OUT)) { 352 (cmd->iscsi_opcode != ISCSI_OP_NOOP_OUT)) {
@@ -355,7 +355,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
355 " CID: %hu\n", cmd->iscsi_opcode, 355 " CID: %hu\n", cmd->iscsi_opcode,
356 cmd->init_task_tag, cmd->cmd_sn, conn->cid); 356 cmd->init_task_tag, cmd->cmd_sn, conn->cid);
357 357
358 list_del(&cmd->i_list); 358 list_del(&cmd->i_conn_node);
359 spin_unlock_bh(&conn->cmd_lock); 359 spin_unlock_bh(&conn->cmd_lock);
360 iscsit_free_cmd(cmd); 360 iscsit_free_cmd(cmd);
361 spin_lock_bh(&conn->cmd_lock); 361 spin_lock_bh(&conn->cmd_lock);
@@ -375,7 +375,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
375 */ 375 */
376 if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd && 376 if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
377 (cmd->cmd_sn >= conn->sess->exp_cmd_sn)) { 377 (cmd->cmd_sn >= conn->sess->exp_cmd_sn)) {
378 list_del(&cmd->i_list); 378 list_del(&cmd->i_conn_node);
379 spin_unlock_bh(&conn->cmd_lock); 379 spin_unlock_bh(&conn->cmd_lock);
380 iscsit_free_cmd(cmd); 380 iscsit_free_cmd(cmd);
381 spin_lock_bh(&conn->cmd_lock); 381 spin_lock_bh(&conn->cmd_lock);
@@ -397,7 +397,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
397 397
398 cmd->sess = conn->sess; 398 cmd->sess = conn->sess;
399 399
400 list_del(&cmd->i_list); 400 list_del(&cmd->i_conn_node);
401 spin_unlock_bh(&conn->cmd_lock); 401 spin_unlock_bh(&conn->cmd_lock);
402 402
403 iscsit_free_all_datain_reqs(cmd); 403 iscsit_free_all_datain_reqs(cmd);
@@ -407,7 +407,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
407 * Add the struct iscsi_cmd to the connection recovery cmd list 407 * Add the struct iscsi_cmd to the connection recovery cmd list
408 */ 408 */
409 spin_lock(&cr->conn_recovery_cmd_lock); 409 spin_lock(&cr->conn_recovery_cmd_lock);
410 list_add_tail(&cmd->i_list, &cr->conn_recovery_cmd_list); 410 list_add_tail(&cmd->i_conn_node, &cr->conn_recovery_cmd_list);
411 spin_unlock(&cr->conn_recovery_cmd_lock); 411 spin_unlock(&cr->conn_recovery_cmd_lock);
412 412
413 spin_lock_bh(&conn->cmd_lock); 413 spin_lock_bh(&conn->cmd_lock);
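
[Editor's note] The connection-recovery loops above delete entries while walking the list and drop the spinlock around command release. A condensed sketch of that pattern, with illustrative names: list_for_each_entry_safe() caches the next entry so the current one can be unlinked, and the lock is dropped around the free because release may sleep. This is only safe because nothing else can unlink the cached next entry while the lock is dropped, which the recovery path guarantees by construction.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_cmd {
        struct list_head node;          /* plays the role of i_conn_node */
};

static void demo_drain(struct list_head *queue, spinlock_t *lock)
{
        struct demo_cmd *cmd, *tmp;

        spin_lock_bh(lock);
        list_for_each_entry_safe(cmd, tmp, queue, node) {
                list_del(&cmd->node);
                spin_unlock_bh(lock);
                kfree(cmd);             /* stands in for iscsit_free_cmd() */
                spin_lock_bh(lock);
        }
        spin_unlock_bh(lock);
}
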
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index eb05c9d751ea..ed5241e7f12a 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -803,14 +803,6 @@ static int iscsi_check_numerical_value(struct iscsi_param *param, char *value_pt
803 803
804 value = simple_strtoul(value_ptr, &tmpptr, 0); 804 value = simple_strtoul(value_ptr, &tmpptr, 0);
805 805
806/* #warning FIXME: Fix this */
807#if 0
808 if (strspn(endptr, WHITE_SPACE) != strlen(endptr)) {
809 pr_err("Illegal value \"%s\" for \"%s\".\n",
810 value, param->name);
811 return -1;
812 }
813#endif
814 if (IS_TYPERANGE_0_TO_2(param)) { 806 if (IS_TYPERANGE_0_TO_2(param)) {
815 if ((value < 0) || (value > 2)) { 807 if ((value < 0) || (value > 2)) {
816 pr_err("Illegal value for \"%s\", must be" 808 pr_err("Illegal value for \"%s\", must be"
@@ -1045,13 +1037,6 @@ static char *iscsi_check_valuelist_for_support(
1045 tmp2 = strchr(acceptor_values, ','); 1037 tmp2 = strchr(acceptor_values, ',');
1046 if (tmp2) 1038 if (tmp2)
1047 *tmp2 = '\0'; 1039 *tmp2 = '\0';
1048 if (!acceptor_values || !proposer_values) {
1049 if (tmp1)
1050 *tmp1 = ',';
1051 if (tmp2)
1052 *tmp2 = ',';
1053 return NULL;
1054 }
1055 if (!strcmp(acceptor_values, proposer_values)) { 1040 if (!strcmp(acceptor_values, proposer_values)) {
1056 if (tmp2) 1041 if (tmp2)
1057 *tmp2 = ','; 1042 *tmp2 = ',';
@@ -1061,8 +1046,6 @@ static char *iscsi_check_valuelist_for_support(
1061 *tmp2++ = ','; 1046 *tmp2++ = ',';
1062 1047
1063 acceptor_values = tmp2; 1048 acceptor_values = tmp2;
1064 if (!acceptor_values)
1065 break;
1066 } while (acceptor_values); 1049 } while (acceptor_values);
1067 if (tmp1) 1050 if (tmp1)
1068 *tmp1++ = ','; 1051 *tmp1++ = ',';
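
[Editor's note] The checks removed from iscsi_check_valuelist_for_support() above were dead code: the do/while condition already terminates when acceptor_values goes NULL, and strchr() handles the final element. The surviving loop scans a comma-separated value list by temporarily NUL-terminating each element. A standalone sketch of that technique, with illustrative names:

#include <linux/string.h>
#include <linux/types.h>

/* Return true if 'want' appears in the comma-separated list 'list'.
 * The list is modified in place during the scan and restored before
 * returning, mirroring the parameter-negotiation code above. */
static bool demo_list_contains(char *list, const char *want)
{
        char *cur = list, *comma;
        bool found = false;

        while (cur && !found) {
                comma = strchr(cur, ',');
                if (comma)
                        *comma = '\0';
                found = (strcmp(cur, want) == 0);
                if (comma)
                        *comma++ = ',';         /* restore separator */
                cur = comma;
        }
        return found;
}
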
diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
index fc694082bfc0..85a306e067ba 100644
--- a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
+++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
@@ -24,11 +24,13 @@
24 24
25#include "iscsi_target_core.h" 25#include "iscsi_target_core.h"
26#include "iscsi_target_util.h" 26#include "iscsi_target_util.h"
27#include "iscsi_target_tpg.h"
27#include "iscsi_target_seq_pdu_list.h" 28#include "iscsi_target_seq_pdu_list.h"
28 29
29#define OFFLOAD_BUF_SIZE 32768 30#define OFFLOAD_BUF_SIZE 32768
30 31
31void iscsit_dump_seq_list(struct iscsi_cmd *cmd) 32#ifdef DEBUG
33static void iscsit_dump_seq_list(struct iscsi_cmd *cmd)
32{ 34{
33 int i; 35 int i;
34 struct iscsi_seq *seq; 36 struct iscsi_seq *seq;
@@ -46,7 +48,7 @@ void iscsit_dump_seq_list(struct iscsi_cmd *cmd)
46 } 48 }
47} 49}
48 50
49void iscsit_dump_pdu_list(struct iscsi_cmd *cmd) 51static void iscsit_dump_pdu_list(struct iscsi_cmd *cmd)
50{ 52{
51 int i; 53 int i;
52 struct iscsi_pdu *pdu; 54 struct iscsi_pdu *pdu;
@@ -61,6 +63,10 @@ void iscsit_dump_pdu_list(struct iscsi_cmd *cmd)
61 pdu->length, pdu->pdu_send_order, pdu->seq_no); 63 pdu->length, pdu->pdu_send_order, pdu->seq_no);
62 } 64 }
63} 65}
66#else
67static void iscsit_dump_seq_list(struct iscsi_cmd *cmd) {}
68static void iscsit_dump_pdu_list(struct iscsi_cmd *cmd) {}
69#endif
64 70
65static void iscsit_ordered_seq_lists( 71static void iscsit_ordered_seq_lists(
66 struct iscsi_cmd *cmd, 72 struct iscsi_cmd *cmd,
@@ -135,11 +141,11 @@ redo:
135 seq_count++; 141 seq_count++;
136 continue; 142 continue;
137 } 143 }
138 array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL); 144 array = kcalloc(seq_count, sizeof(u32), GFP_KERNEL);
139 if (!array) { 145 if (!array) {
140 pr_err("Unable to allocate memory" 146 pr_err("Unable to allocate memory"
141 " for random array.\n"); 147 " for random array.\n");
142 return -1; 148 return -ENOMEM;
143 } 149 }
144 iscsit_create_random_array(array, seq_count); 150 iscsit_create_random_array(array, seq_count);
145 151
@@ -155,11 +161,11 @@ redo:
155 } 161 }
156 162
157 if (seq_count) { 163 if (seq_count) {
158 array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL); 164 array = kcalloc(seq_count, sizeof(u32), GFP_KERNEL);
159 if (!array) { 165 if (!array) {
160 pr_err("Unable to allocate memory for" 166 pr_err("Unable to allocate memory for"
161 " random array.\n"); 167 " random array.\n");
162 return -1; 168 return -ENOMEM;
163 } 169 }
164 iscsit_create_random_array(array, seq_count); 170 iscsit_create_random_array(array, seq_count);
165 171
@@ -187,10 +193,10 @@ static int iscsit_randomize_seq_lists(
187 if (!seq_count) 193 if (!seq_count)
188 return 0; 194 return 0;
189 195
190 array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL); 196 array = kcalloc(seq_count, sizeof(u32), GFP_KERNEL);
191 if (!array) { 197 if (!array) {
192 pr_err("Unable to allocate memory for random array.\n"); 198 pr_err("Unable to allocate memory for random array.\n");
193 return -1; 199 return -ENOMEM;
194 } 200 }
195 iscsit_create_random_array(array, seq_count); 201 iscsit_create_random_array(array, seq_count);
196 202
@@ -221,11 +227,10 @@ static void iscsit_determine_counts_for_list(
221 227
222 if ((bl->type == PDULIST_UNSOLICITED) || 228 if ((bl->type == PDULIST_UNSOLICITED) ||
223 (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED)) 229 (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
224 unsolicited_data_length = (cmd->data_length > 230 unsolicited_data_length = min(cmd->se_cmd.data_length,
225 conn->sess->sess_ops->FirstBurstLength) ? 231 conn->sess->sess_ops->FirstBurstLength);
226 conn->sess->sess_ops->FirstBurstLength : cmd->data_length;
227 232
228 while (offset < cmd->data_length) { 233 while (offset < cmd->se_cmd.data_length) {
229 *pdu_count += 1; 234 *pdu_count += 1;
230 235
231 if (check_immediate) { 236 if (check_immediate) {
@@ -239,10 +244,10 @@ static void iscsit_determine_counts_for_list(
239 } 244 }
240 if (unsolicited_data_length > 0) { 245 if (unsolicited_data_length > 0) {
241 if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) 246 if ((offset + conn->conn_ops->MaxRecvDataSegmentLength)
242 >= cmd->data_length) { 247 >= cmd->se_cmd.data_length) {
243 unsolicited_data_length -= 248 unsolicited_data_length -=
244 (cmd->data_length - offset); 249 (cmd->se_cmd.data_length - offset);
245 offset += (cmd->data_length - offset); 250 offset += (cmd->se_cmd.data_length - offset);
246 continue; 251 continue;
247 } 252 }
248 if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) 253 if ((offset + conn->conn_ops->MaxRecvDataSegmentLength)
@@ -263,8 +268,8 @@ static void iscsit_determine_counts_for_list(
263 continue; 268 continue;
264 } 269 }
265 if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >= 270 if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >=
266 cmd->data_length) { 271 cmd->se_cmd.data_length) {
267 offset += (cmd->data_length - offset); 272 offset += (cmd->se_cmd.data_length - offset);
268 continue; 273 continue;
269 } 274 }
270 if ((burstlength + conn->conn_ops->MaxRecvDataSegmentLength) >= 275 if ((burstlength + conn->conn_ops->MaxRecvDataSegmentLength) >=
@@ -283,10 +288,10 @@ static void iscsit_determine_counts_for_list(
283 288
284 289
285/* 290/*
286 * Builds PDU and/or Sequence list, called while DataSequenceInOrder=No 291 * Builds PDU and/or Sequence list, called while DataSequenceInOrder=No
287 * and DataPDUInOrder=No. 292 * or DataPDUInOrder=No.
288 */ 293 */
289static int iscsit_build_pdu_and_seq_list( 294static int iscsit_do_build_pdu_and_seq_lists(
290 struct iscsi_cmd *cmd, 295 struct iscsi_cmd *cmd,
291 struct iscsi_build_list *bl) 296 struct iscsi_build_list *bl)
292{ 297{
@@ -306,11 +311,10 @@ static int iscsit_build_pdu_and_seq_list(
306 311
307 if ((bl->type == PDULIST_UNSOLICITED) || 312 if ((bl->type == PDULIST_UNSOLICITED) ||
308 (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED)) 313 (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
309 unsolicited_data_length = (cmd->data_length > 314 unsolicited_data_length = min(cmd->se_cmd.data_length,
310 conn->sess->sess_ops->FirstBurstLength) ? 315 conn->sess->sess_ops->FirstBurstLength);
311 conn->sess->sess_ops->FirstBurstLength : cmd->data_length;
312 316
313 while (offset < cmd->data_length) { 317 while (offset < cmd->se_cmd.data_length) {
314 pdu_count++; 318 pdu_count++;
315 if (!datapduinorder) { 319 if (!datapduinorder) {
316 pdu[i].offset = offset; 320 pdu[i].offset = offset;
@@ -346,21 +350,21 @@ static int iscsit_build_pdu_and_seq_list(
346 if (unsolicited_data_length > 0) { 350 if (unsolicited_data_length > 0) {
347 if ((offset + 351 if ((offset +
348 conn->conn_ops->MaxRecvDataSegmentLength) >= 352 conn->conn_ops->MaxRecvDataSegmentLength) >=
349 cmd->data_length) { 353 cmd->se_cmd.data_length) {
350 if (!datapduinorder) { 354 if (!datapduinorder) {
351 pdu[i].type = PDUTYPE_UNSOLICITED; 355 pdu[i].type = PDUTYPE_UNSOLICITED;
352 pdu[i].length = 356 pdu[i].length =
353 (cmd->data_length - offset); 357 (cmd->se_cmd.data_length - offset);
354 } 358 }
355 if (!datasequenceinorder) { 359 if (!datasequenceinorder) {
356 seq[seq_no].type = SEQTYPE_UNSOLICITED; 360 seq[seq_no].type = SEQTYPE_UNSOLICITED;
357 seq[seq_no].pdu_count = pdu_count; 361 seq[seq_no].pdu_count = pdu_count;
358 seq[seq_no].xfer_len = (burstlength + 362 seq[seq_no].xfer_len = (burstlength +
359 (cmd->data_length - offset)); 363 (cmd->se_cmd.data_length - offset));
360 } 364 }
361 unsolicited_data_length -= 365 unsolicited_data_length -=
362 (cmd->data_length - offset); 366 (cmd->se_cmd.data_length - offset);
363 offset += (cmd->data_length - offset); 367 offset += (cmd->se_cmd.data_length - offset);
364 continue; 368 continue;
365 } 369 }
366 if ((offset + 370 if ((offset +
@@ -402,18 +406,18 @@ static int iscsit_build_pdu_and_seq_list(
402 continue; 406 continue;
403 } 407 }
404 if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >= 408 if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >=
405 cmd->data_length) { 409 cmd->se_cmd.data_length) {
406 if (!datapduinorder) { 410 if (!datapduinorder) {
407 pdu[i].type = PDUTYPE_NORMAL; 411 pdu[i].type = PDUTYPE_NORMAL;
408 pdu[i].length = (cmd->data_length - offset); 412 pdu[i].length = (cmd->se_cmd.data_length - offset);
409 } 413 }
410 if (!datasequenceinorder) { 414 if (!datasequenceinorder) {
411 seq[seq_no].type = SEQTYPE_NORMAL; 415 seq[seq_no].type = SEQTYPE_NORMAL;
412 seq[seq_no].pdu_count = pdu_count; 416 seq[seq_no].pdu_count = pdu_count;
413 seq[seq_no].xfer_len = (burstlength + 417 seq[seq_no].xfer_len = (burstlength +
414 (cmd->data_length - offset)); 418 (cmd->se_cmd.data_length - offset));
415 } 419 }
416 offset += (cmd->data_length - offset); 420 offset += (cmd->se_cmd.data_length - offset);
417 continue; 421 continue;
418 } 422 }
419 if ((burstlength + conn->conn_ops->MaxRecvDataSegmentLength) >= 423 if ((burstlength + conn->conn_ops->MaxRecvDataSegmentLength) >=
@@ -464,9 +468,8 @@ static int iscsit_build_pdu_and_seq_list(
464 } else 468 } else
465 iscsit_ordered_seq_lists(cmd, bl->type); 469 iscsit_ordered_seq_lists(cmd, bl->type);
466 } 470 }
467#if 0 471
468 iscsit_dump_seq_list(cmd); 472 iscsit_dump_seq_list(cmd);
469#endif
470 } 473 }
471 if (!datapduinorder) { 474 if (!datapduinorder) {
472 if (bl->data_direction & ISCSI_PDU_WRITE) { 475 if (bl->data_direction & ISCSI_PDU_WRITE) {
@@ -484,50 +487,86 @@ static int iscsit_build_pdu_and_seq_list(
484 } else 487 } else
485 iscsit_ordered_pdu_lists(cmd, bl->type); 488 iscsit_ordered_pdu_lists(cmd, bl->type);
486 } 489 }
487#if 0 490
488 iscsit_dump_pdu_list(cmd); 491 iscsit_dump_pdu_list(cmd);
489#endif
490 } 492 }
491 493
492 return 0; 494 return 0;
493} 495}
494 496
495/* 497int iscsit_build_pdu_and_seq_lists(
496 * Only called while DataSequenceInOrder=No or DataPDUInOrder=No.
497 */
498int iscsit_do_build_list(
499 struct iscsi_cmd *cmd, 498 struct iscsi_cmd *cmd,
500 struct iscsi_build_list *bl) 499 u32 immediate_data_length)
501{ 500{
501 struct iscsi_build_list bl;
502 u32 pdu_count = 0, seq_count = 1; 502 u32 pdu_count = 0, seq_count = 1;
503 struct iscsi_conn *conn = cmd->conn; 503 struct iscsi_conn *conn = cmd->conn;
504 struct iscsi_pdu *pdu = NULL; 504 struct iscsi_pdu *pdu = NULL;
505 struct iscsi_seq *seq = NULL; 505 struct iscsi_seq *seq = NULL;
506 506
507 iscsit_determine_counts_for_list(cmd, bl, &seq_count, &pdu_count); 507 struct iscsi_session *sess = conn->sess;
508 struct iscsi_node_attrib *na;
509
510 /*
 511	 * Do nothing if there is no out-of-order PDU or sequence work to do
512 */
513 if (sess->sess_ops->DataSequenceInOrder &&
514 sess->sess_ops->DataPDUInOrder)
515 return 0;
516
517 if (cmd->data_direction == DMA_NONE)
518 return 0;
519
520 na = iscsit_tpg_get_node_attrib(sess);
521 memset(&bl, 0, sizeof(struct iscsi_build_list));
522
523 if (cmd->data_direction == DMA_FROM_DEVICE) {
524 bl.data_direction = ISCSI_PDU_READ;
525 bl.type = PDULIST_NORMAL;
526 if (na->random_datain_pdu_offsets)
527 bl.randomize |= RANDOM_DATAIN_PDU_OFFSETS;
528 if (na->random_datain_seq_offsets)
529 bl.randomize |= RANDOM_DATAIN_SEQ_OFFSETS;
530 } else {
531 bl.data_direction = ISCSI_PDU_WRITE;
532 bl.immediate_data_length = immediate_data_length;
533 if (na->random_r2t_offsets)
534 bl.randomize |= RANDOM_R2T_OFFSETS;
535
536 if (!cmd->immediate_data && !cmd->unsolicited_data)
537 bl.type = PDULIST_NORMAL;
538 else if (cmd->immediate_data && !cmd->unsolicited_data)
539 bl.type = PDULIST_IMMEDIATE;
540 else if (!cmd->immediate_data && cmd->unsolicited_data)
541 bl.type = PDULIST_UNSOLICITED;
542 else if (cmd->immediate_data && cmd->unsolicited_data)
543 bl.type = PDULIST_IMMEDIATE_AND_UNSOLICITED;
544 }
545
546 iscsit_determine_counts_for_list(cmd, &bl, &seq_count, &pdu_count);
508 547
509 if (!conn->sess->sess_ops->DataSequenceInOrder) { 548 if (!conn->sess->sess_ops->DataSequenceInOrder) {
510 seq = kzalloc(seq_count * sizeof(struct iscsi_seq), GFP_ATOMIC); 549 seq = kcalloc(seq_count, sizeof(struct iscsi_seq), GFP_ATOMIC);
511 if (!seq) { 550 if (!seq) {
512 pr_err("Unable to allocate struct iscsi_seq list\n"); 551 pr_err("Unable to allocate struct iscsi_seq list\n");
513 return -1; 552 return -ENOMEM;
514 } 553 }
515 cmd->seq_list = seq; 554 cmd->seq_list = seq;
516 cmd->seq_count = seq_count; 555 cmd->seq_count = seq_count;
517 } 556 }
518 557
519 if (!conn->sess->sess_ops->DataPDUInOrder) { 558 if (!conn->sess->sess_ops->DataPDUInOrder) {
520 pdu = kzalloc(pdu_count * sizeof(struct iscsi_pdu), GFP_ATOMIC); 559 pdu = kcalloc(pdu_count, sizeof(struct iscsi_pdu), GFP_ATOMIC);
521 if (!pdu) { 560 if (!pdu) {
522 pr_err("Unable to allocate struct iscsi_pdu list.\n"); 561 pr_err("Unable to allocate struct iscsi_pdu list.\n");
523 kfree(seq); 562 kfree(seq);
524 return -1; 563 return -ENOMEM;
525 } 564 }
526 cmd->pdu_list = pdu; 565 cmd->pdu_list = pdu;
527 cmd->pdu_count = pdu_count; 566 cmd->pdu_count = pdu_count;
528 } 567 }
529 568
530 return iscsit_build_pdu_and_seq_list(cmd, bl); 569 return iscsit_do_build_pdu_and_seq_lists(cmd, &bl);
531} 570}
532 571
533struct iscsi_pdu *iscsit_get_pdu_holder( 572struct iscsi_pdu *iscsit_get_pdu_holder(
@@ -572,13 +611,12 @@ redo:
572 pdu = &cmd->pdu_list[cmd->pdu_start]; 611 pdu = &cmd->pdu_list[cmd->pdu_start];
573 612
574 for (i = 0; pdu[i].seq_no != cmd->seq_no; i++) { 613 for (i = 0; pdu[i].seq_no != cmd->seq_no; i++) {
575#if 0
576 pr_debug("pdu[i].seq_no: %d, pdu[i].pdu" 614 pr_debug("pdu[i].seq_no: %d, pdu[i].pdu"
577 "_send_order: %d, pdu[i].offset: %d," 615 "_send_order: %d, pdu[i].offset: %d,"
578 " pdu[i].length: %d\n", pdu[i].seq_no, 616 " pdu[i].length: %d\n", pdu[i].seq_no,
579 pdu[i].pdu_send_order, pdu[i].offset, 617 pdu[i].pdu_send_order, pdu[i].offset,
580 pdu[i].length); 618 pdu[i].length);
581#endif 619
582 if (pdu[i].pdu_send_order == cmd->pdu_send_order) { 620 if (pdu[i].pdu_send_order == cmd->pdu_send_order) {
583 cmd->pdu_send_order++; 621 cmd->pdu_send_order++;
584 return &pdu[i]; 622 return &pdu[i];
@@ -601,11 +639,11 @@ redo:
601 pr_err("struct iscsi_seq is NULL!\n"); 639 pr_err("struct iscsi_seq is NULL!\n");
602 return NULL; 640 return NULL;
603 } 641 }
604#if 0 642
605 pr_debug("seq->pdu_start: %d, seq->pdu_count: %d," 643 pr_debug("seq->pdu_start: %d, seq->pdu_count: %d,"
606 " seq->seq_no: %d\n", seq->pdu_start, seq->pdu_count, 644 " seq->seq_no: %d\n", seq->pdu_start, seq->pdu_count,
607 seq->seq_no); 645 seq->seq_no);
608#endif 646
609 pdu = &cmd->pdu_list[seq->pdu_start]; 647 pdu = &cmd->pdu_list[seq->pdu_start];
610 648
611 if (seq->pdu_send_order == seq->pdu_count) { 649 if (seq->pdu_send_order == seq->pdu_count) {
@@ -645,12 +683,11 @@ struct iscsi_seq *iscsit_get_seq_holder(
645 } 683 }
646 684
647 for (i = 0; i < cmd->seq_count; i++) { 685 for (i = 0; i < cmd->seq_count; i++) {
648#if 0
649 pr_debug("seq_list[i].orig_offset: %d, seq_list[i]." 686 pr_debug("seq_list[i].orig_offset: %d, seq_list[i]."
650 "xfer_len: %d, seq_list[i].seq_no %u\n", 687 "xfer_len: %d, seq_list[i].seq_no %u\n",
651 cmd->seq_list[i].orig_offset, cmd->seq_list[i].xfer_len, 688 cmd->seq_list[i].orig_offset, cmd->seq_list[i].xfer_len,
652 cmd->seq_list[i].seq_no); 689 cmd->seq_list[i].seq_no);
653#endif 690
654 if ((cmd->seq_list[i].orig_offset + 691 if ((cmd->seq_list[i].orig_offset +
655 cmd->seq_list[i].xfer_len) >= 692 cmd->seq_list[i].xfer_len) >=
656 (offset + length)) 693 (offset + length))
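
[Editor's note] The allocations above move from kzalloc(n * size, ...) to kcalloc(n, size, ...), and the error returns move from -1 to -ENOMEM so callers can propagate a real errno. kcalloc() performs the same zeroed allocation but returns NULL if n * size would overflow, where the open-coded multiplication could silently allocate a short buffer. Minimal sketch:

#include <linux/slab.h>

/* Illustrative only: a zeroed array allocation that rejects
 * count * size overflow instead of truncating it. */
static void *demo_alloc_array(size_t count, size_t size)
{
        return kcalloc(count, size, GFP_KERNEL);
}
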
diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.h b/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
index 0d52a10e3069..d5b153751a8d 100644
--- a/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
+++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
@@ -78,7 +78,7 @@ struct iscsi_seq {
78 u32 xfer_len; 78 u32 xfer_len;
79} ____cacheline_aligned; 79} ____cacheline_aligned;
80 80
81extern int iscsit_do_build_list(struct iscsi_cmd *, struct iscsi_build_list *); 81extern int iscsit_build_pdu_and_seq_lists(struct iscsi_cmd *, u32);
82extern struct iscsi_pdu *iscsit_get_pdu_holder(struct iscsi_cmd *, u32, u32); 82extern struct iscsi_pdu *iscsit_get_pdu_holder(struct iscsi_cmd *, u32, u32);
83extern struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(struct iscsi_cmd *, struct iscsi_seq *); 83extern struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(struct iscsi_cmd *, struct iscsi_seq *);
84extern struct iscsi_seq *iscsit_get_seq_holder(struct iscsi_cmd *, u32, u32); 84extern struct iscsi_seq *iscsit_get_seq_holder(struct iscsi_cmd *, u32, u32);
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
index e01da9d2b37e..f4e640b51fd1 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.c
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -78,10 +78,7 @@ int iscsit_tmr_task_warm_reset(
78{ 78{
79 struct iscsi_session *sess = conn->sess; 79 struct iscsi_session *sess = conn->sess;
80 struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess); 80 struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
81#if 0 81
82 struct iscsi_init_task_mgt_cmnd *hdr =
83 (struct iscsi_init_task_mgt_cmnd *) buf;
84#endif
85 if (!na->tmr_warm_reset) { 82 if (!na->tmr_warm_reset) {
86 pr_err("TMR Opcode TARGET_WARM_RESET authorization" 83 pr_err("TMR Opcode TARGET_WARM_RESET authorization"
87 " failed for Initiator Node: %s\n", 84 " failed for Initiator Node: %s\n",
@@ -216,7 +213,7 @@ static int iscsit_task_reassign_complete_nop_out(
216 iscsit_task_reassign_remove_cmd(cmd, cr, conn->sess); 213 iscsit_task_reassign_remove_cmd(cmd, cr, conn->sess);
217 214
218 spin_lock_bh(&conn->cmd_lock); 215 spin_lock_bh(&conn->cmd_lock);
219 list_add_tail(&cmd->i_list, &conn->conn_cmd_list); 216 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
220 spin_unlock_bh(&conn->cmd_lock); 217 spin_unlock_bh(&conn->cmd_lock);
221 218
222 cmd->i_state = ISTATE_SEND_NOPIN; 219 cmd->i_state = ISTATE_SEND_NOPIN;
@@ -272,9 +269,9 @@ static int iscsit_task_reassign_complete_write(
272 offset = cmd->next_burst_len = cmd->write_data_done; 269 offset = cmd->next_burst_len = cmd->write_data_done;
273 270
274 if ((conn->sess->sess_ops->FirstBurstLength - offset) >= 271 if ((conn->sess->sess_ops->FirstBurstLength - offset) >=
275 cmd->data_length) { 272 cmd->se_cmd.data_length) {
276 no_build_r2ts = 1; 273 no_build_r2ts = 1;
277 length = (cmd->data_length - offset); 274 length = (cmd->se_cmd.data_length - offset);
278 } else 275 } else
279 length = (conn->sess->sess_ops->FirstBurstLength - offset); 276 length = (conn->sess->sess_ops->FirstBurstLength - offset);
280 277
@@ -292,7 +289,7 @@ static int iscsit_task_reassign_complete_write(
292 /* 289 /*
293 * iscsit_build_r2ts_for_cmd() can handle the rest from here. 290 * iscsit_build_r2ts_for_cmd() can handle the rest from here.
294 */ 291 */
295 return iscsit_build_r2ts_for_cmd(cmd, conn, 2); 292 return iscsit_build_r2ts_for_cmd(cmd, conn, true);
296} 293}
297 294
298static int iscsit_task_reassign_complete_read( 295static int iscsit_task_reassign_complete_read(
@@ -385,7 +382,7 @@ static int iscsit_task_reassign_complete_scsi_cmnd(
385 iscsit_task_reassign_remove_cmd(cmd, cr, conn->sess); 382 iscsit_task_reassign_remove_cmd(cmd, cr, conn->sess);
386 383
387 spin_lock_bh(&conn->cmd_lock); 384 spin_lock_bh(&conn->cmd_lock);
388 list_add_tail(&cmd->i_list, &conn->conn_cmd_list); 385 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
389 spin_unlock_bh(&conn->cmd_lock); 386 spin_unlock_bh(&conn->cmd_lock);
390 387
391 if (se_cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { 388 if (se_cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 4eba86d2bd82..b42cdeb153df 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -163,7 +163,7 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
163 } 163 }
164 164
165 cmd->conn = conn; 165 cmd->conn = conn;
166 INIT_LIST_HEAD(&cmd->i_list); 166 INIT_LIST_HEAD(&cmd->i_conn_node);
167 INIT_LIST_HEAD(&cmd->datain_list); 167 INIT_LIST_HEAD(&cmd->datain_list);
168 INIT_LIST_HEAD(&cmd->cmd_r2t_list); 168 INIT_LIST_HEAD(&cmd->cmd_r2t_list);
169 init_completion(&cmd->reject_comp); 169 init_completion(&cmd->reject_comp);
@@ -176,174 +176,6 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
176 return cmd; 176 return cmd;
177} 177}
178 178
179/*
180 * Called from iscsi_handle_scsi_cmd()
181 */
182struct iscsi_cmd *iscsit_allocate_se_cmd(
183 struct iscsi_conn *conn,
184 u32 data_length,
185 int data_direction,
186 int iscsi_task_attr)
187{
188 struct iscsi_cmd *cmd;
189 struct se_cmd *se_cmd;
190 int sam_task_attr;
191
192 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
193 if (!cmd)
194 return NULL;
195
196 cmd->data_direction = data_direction;
197 cmd->data_length = data_length;
198 /*
199 * Figure out the SAM Task Attribute for the incoming SCSI CDB
200 */
201 if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) ||
202 (iscsi_task_attr == ISCSI_ATTR_SIMPLE))
203 sam_task_attr = MSG_SIMPLE_TAG;
204 else if (iscsi_task_attr == ISCSI_ATTR_ORDERED)
205 sam_task_attr = MSG_ORDERED_TAG;
206 else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE)
207 sam_task_attr = MSG_HEAD_TAG;
208 else if (iscsi_task_attr == ISCSI_ATTR_ACA)
209 sam_task_attr = MSG_ACA_TAG;
210 else {
211 pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using"
212 " MSG_SIMPLE_TAG\n", iscsi_task_attr);
213 sam_task_attr = MSG_SIMPLE_TAG;
214 }
215
216 se_cmd = &cmd->se_cmd;
217 /*
218 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
219 */
220 transport_init_se_cmd(se_cmd, &lio_target_fabric_configfs->tf_ops,
221 conn->sess->se_sess, data_length, data_direction,
222 sam_task_attr, &cmd->sense_buffer[0]);
223 return cmd;
224}
225
226struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
227 struct iscsi_conn *conn,
228 u8 function)
229{
230 struct iscsi_cmd *cmd;
231 struct se_cmd *se_cmd;
232 int rc;
233 u8 tcm_function;
234
235 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
236 if (!cmd)
237 return NULL;
238
239 cmd->data_direction = DMA_NONE;
240
241 cmd->tmr_req = kzalloc(sizeof(struct iscsi_tmr_req), GFP_KERNEL);
242 if (!cmd->tmr_req) {
243 pr_err("Unable to allocate memory for"
244 " Task Management command!\n");
245 goto out;
246 }
247 /*
248 * TASK_REASSIGN for ERL=2 / connection stays inside of
249 * LIO-Target $FABRIC_MOD
250 */
251 if (function == ISCSI_TM_FUNC_TASK_REASSIGN)
252 return cmd;
253
254 se_cmd = &cmd->se_cmd;
255 /*
256 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
257 */
258 transport_init_se_cmd(se_cmd, &lio_target_fabric_configfs->tf_ops,
259 conn->sess->se_sess, 0, DMA_NONE,
260 MSG_SIMPLE_TAG, &cmd->sense_buffer[0]);
261
262 switch (function) {
263 case ISCSI_TM_FUNC_ABORT_TASK:
264 tcm_function = TMR_ABORT_TASK;
265 break;
266 case ISCSI_TM_FUNC_ABORT_TASK_SET:
267 tcm_function = TMR_ABORT_TASK_SET;
268 break;
269 case ISCSI_TM_FUNC_CLEAR_ACA:
270 tcm_function = TMR_CLEAR_ACA;
271 break;
272 case ISCSI_TM_FUNC_CLEAR_TASK_SET:
273 tcm_function = TMR_CLEAR_TASK_SET;
274 break;
275 case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
276 tcm_function = TMR_LUN_RESET;
277 break;
278 case ISCSI_TM_FUNC_TARGET_WARM_RESET:
279 tcm_function = TMR_TARGET_WARM_RESET;
280 break;
281 case ISCSI_TM_FUNC_TARGET_COLD_RESET:
282 tcm_function = TMR_TARGET_COLD_RESET;
283 break;
284 default:
285 pr_err("Unknown iSCSI TMR Function:"
286 " 0x%02x\n", function);
287 goto out;
288 }
289
290 rc = core_tmr_alloc_req(se_cmd, cmd->tmr_req, tcm_function, GFP_KERNEL);
291 if (rc < 0)
292 goto out;
293
294 cmd->tmr_req->se_tmr_req = se_cmd->se_tmr_req;
295
296 return cmd;
297out:
298 iscsit_release_cmd(cmd);
299 return NULL;
300}
301
302int iscsit_decide_list_to_build(
303 struct iscsi_cmd *cmd,
304 u32 immediate_data_length)
305{
306 struct iscsi_build_list bl;
307 struct iscsi_conn *conn = cmd->conn;
308 struct iscsi_session *sess = conn->sess;
309 struct iscsi_node_attrib *na;
310
311 if (sess->sess_ops->DataSequenceInOrder &&
312 sess->sess_ops->DataPDUInOrder)
313 return 0;
314
315 if (cmd->data_direction == DMA_NONE)
316 return 0;
317
318 na = iscsit_tpg_get_node_attrib(sess);
319 memset(&bl, 0, sizeof(struct iscsi_build_list));
320
321 if (cmd->data_direction == DMA_FROM_DEVICE) {
322 bl.data_direction = ISCSI_PDU_READ;
323 bl.type = PDULIST_NORMAL;
324 if (na->random_datain_pdu_offsets)
325 bl.randomize |= RANDOM_DATAIN_PDU_OFFSETS;
326 if (na->random_datain_seq_offsets)
327 bl.randomize |= RANDOM_DATAIN_SEQ_OFFSETS;
328 } else {
329 bl.data_direction = ISCSI_PDU_WRITE;
330 bl.immediate_data_length = immediate_data_length;
331 if (na->random_r2t_offsets)
332 bl.randomize |= RANDOM_R2T_OFFSETS;
333
334 if (!cmd->immediate_data && !cmd->unsolicited_data)
335 bl.type = PDULIST_NORMAL;
336 else if (cmd->immediate_data && !cmd->unsolicited_data)
337 bl.type = PDULIST_IMMEDIATE;
338 else if (!cmd->immediate_data && cmd->unsolicited_data)
339 bl.type = PDULIST_UNSOLICITED;
340 else if (cmd->immediate_data && cmd->unsolicited_data)
341 bl.type = PDULIST_IMMEDIATE_AND_UNSOLICITED;
342 }
343
344 return iscsit_do_build_list(cmd, &bl);
345}
346
347struct iscsi_seq *iscsit_get_seq_holder_for_datain( 179struct iscsi_seq *iscsit_get_seq_holder_for_datain(
348 struct iscsi_cmd *cmd, 180 struct iscsi_cmd *cmd,
349 u32 seq_send_order) 181 u32 seq_send_order)
@@ -502,14 +334,14 @@ int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf)
502 if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL)) 334 if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))
503 return 0; 335 return 0;
504 336
505 if (((cmd->first_burst_len + payload_length) != cmd->data_length) && 337 if (((cmd->first_burst_len + payload_length) != cmd->se_cmd.data_length) &&
506 ((cmd->first_burst_len + payload_length) != 338 ((cmd->first_burst_len + payload_length) !=
507 conn->sess->sess_ops->FirstBurstLength)) { 339 conn->sess->sess_ops->FirstBurstLength)) {
508 pr_err("Unsolicited non-immediate data received %u" 340 pr_err("Unsolicited non-immediate data received %u"
509 " does not equal FirstBurstLength: %u, and does" 341 " does not equal FirstBurstLength: %u, and does"
510 " not equal ExpXferLen %u.\n", 342 " not equal ExpXferLen %u.\n",
511 (cmd->first_burst_len + payload_length), 343 (cmd->first_burst_len + payload_length),
512 conn->sess->sess_ops->FirstBurstLength, cmd->data_length); 344 conn->sess->sess_ops->FirstBurstLength, cmd->se_cmd.data_length);
513 transport_send_check_condition_and_sense(se_cmd, 345 transport_send_check_condition_and_sense(se_cmd,
514 TCM_INCORRECT_AMOUNT_OF_DATA, 0); 346 TCM_INCORRECT_AMOUNT_OF_DATA, 0);
515 return -1; 347 return -1;
@@ -524,7 +356,7 @@ struct iscsi_cmd *iscsit_find_cmd_from_itt(
524 struct iscsi_cmd *cmd; 356 struct iscsi_cmd *cmd;
525 357
526 spin_lock_bh(&conn->cmd_lock); 358 spin_lock_bh(&conn->cmd_lock);
527 list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) { 359 list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
528 if (cmd->init_task_tag == init_task_tag) { 360 if (cmd->init_task_tag == init_task_tag) {
529 spin_unlock_bh(&conn->cmd_lock); 361 spin_unlock_bh(&conn->cmd_lock);
530 return cmd; 362 return cmd;
@@ -545,7 +377,7 @@ struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
545 struct iscsi_cmd *cmd; 377 struct iscsi_cmd *cmd;
546 378
547 spin_lock_bh(&conn->cmd_lock); 379 spin_lock_bh(&conn->cmd_lock);
548 list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) { 380 list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
549 if (cmd->init_task_tag == init_task_tag) { 381 if (cmd->init_task_tag == init_task_tag) {
550 spin_unlock_bh(&conn->cmd_lock); 382 spin_unlock_bh(&conn->cmd_lock);
551 return cmd; 383 return cmd;
@@ -568,7 +400,7 @@ struct iscsi_cmd *iscsit_find_cmd_from_ttt(
568 struct iscsi_cmd *cmd = NULL; 400 struct iscsi_cmd *cmd = NULL;
569 401
570 spin_lock_bh(&conn->cmd_lock); 402 spin_lock_bh(&conn->cmd_lock);
571 list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) { 403 list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
572 if (cmd->targ_xfer_tag == targ_xfer_tag) { 404 if (cmd->targ_xfer_tag == targ_xfer_tag) {
573 spin_unlock_bh(&conn->cmd_lock); 405 spin_unlock_bh(&conn->cmd_lock);
574 return cmd; 406 return cmd;
@@ -596,7 +428,7 @@ int iscsit_find_cmd_for_recovery(
596 spin_lock(&sess->cr_i_lock); 428 spin_lock(&sess->cr_i_lock);
597 list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) { 429 list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
598 spin_lock(&cr->conn_recovery_cmd_lock); 430 spin_lock(&cr->conn_recovery_cmd_lock);
599 list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_list) { 431 list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) {
600 if (cmd->init_task_tag == init_task_tag) { 432 if (cmd->init_task_tag == init_task_tag) {
601 spin_unlock(&cr->conn_recovery_cmd_lock); 433 spin_unlock(&cr->conn_recovery_cmd_lock);
602 spin_unlock(&sess->cr_i_lock); 434 spin_unlock(&sess->cr_i_lock);
@@ -616,7 +448,7 @@ int iscsit_find_cmd_for_recovery(
616 spin_lock(&sess->cr_a_lock); 448 spin_lock(&sess->cr_a_lock);
617 list_for_each_entry(cr, &sess->cr_active_list, cr_list) { 449 list_for_each_entry(cr, &sess->cr_active_list, cr_list) {
618 spin_lock(&cr->conn_recovery_cmd_lock); 450 spin_lock(&cr->conn_recovery_cmd_lock);
619 list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_list) { 451 list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) {
620 if (cmd->init_task_tag == init_task_tag) { 452 if (cmd->init_task_tag == init_task_tag) {
621 spin_unlock(&cr->conn_recovery_cmd_lock); 453 spin_unlock(&cr->conn_recovery_cmd_lock);
622 spin_unlock(&sess->cr_a_lock); 454 spin_unlock(&sess->cr_a_lock);
@@ -813,7 +645,6 @@ void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
813void iscsit_release_cmd(struct iscsi_cmd *cmd) 645void iscsit_release_cmd(struct iscsi_cmd *cmd)
814{ 646{
815 struct iscsi_conn *conn = cmd->conn; 647 struct iscsi_conn *conn = cmd->conn;
816 int i;
817 648
818 iscsit_free_r2ts_from_list(cmd); 649 iscsit_free_r2ts_from_list(cmd);
819 iscsit_free_all_datain_reqs(cmd); 650 iscsit_free_all_datain_reqs(cmd);
@@ -824,11 +655,6 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd)
824 kfree(cmd->tmr_req); 655 kfree(cmd->tmr_req);
825 kfree(cmd->iov_data); 656 kfree(cmd->iov_data);
826 657
827 for (i = 0; i < cmd->t_mem_sg_nents; i++)
828 __free_page(sg_page(&cmd->t_mem_sg[i]));
829
830 kfree(cmd->t_mem_sg);
831
832 if (conn) { 658 if (conn) {
833 iscsit_remove_cmd_from_immediate_queue(cmd, conn); 659 iscsit_remove_cmd_from_immediate_queue(cmd, conn);
834 iscsit_remove_cmd_from_response_queue(cmd, conn); 660 iscsit_remove_cmd_from_response_queue(cmd, conn);
@@ -1038,7 +864,7 @@ static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
1038 spin_unlock_bh(&conn->sess->ttt_lock); 864 spin_unlock_bh(&conn->sess->ttt_lock);
1039 865
1040 spin_lock_bh(&conn->cmd_lock); 866 spin_lock_bh(&conn->cmd_lock);
1041 list_add_tail(&cmd->i_list, &conn->conn_cmd_list); 867 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
1042 spin_unlock_bh(&conn->cmd_lock); 868 spin_unlock_bh(&conn->cmd_lock);
1043 869
1044 if (want_response) 870 if (want_response)
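
[Editor's note] The roughly 170 lines removed here are relocated rather than lost: iscsit_decide_list_to_build() was folded into iscsit_build_pdu_and_seq_lists() in iscsi_target_seq_pdu_list.c (see above), and the se_cmd/TMR setup presumably moved to the PDU handlers in iscsi_target.c, whose hunks are not shown in this excerpt. A hypothetical call site under the new API, assuming the driver headers above:

static int demo_setup_lists(struct iscsi_cmd *cmd, u32 payload_length)
{
        /* Returns 0 when DataSequenceInOrder=Yes and DataPDUInOrder=Yes
         * (nothing to build), or -ENOMEM if the list allocations fail. */
        return iscsit_build_pdu_and_seq_lists(cmd, payload_length);
}
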
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index 835bf7de0281..e1c729b8a1c5 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -9,9 +9,6 @@ extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *);
9extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsi_cmd *); 9extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsi_cmd *);
10extern void iscsit_free_r2ts_from_list(struct iscsi_cmd *); 10extern void iscsit_free_r2ts_from_list(struct iscsi_cmd *);
11extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t); 11extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t);
12extern struct iscsi_cmd *iscsit_allocate_se_cmd(struct iscsi_conn *, u32, int, int);
13extern struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(struct iscsi_conn *, u8);
14extern int iscsit_decide_list_to_build(struct iscsi_cmd *, u32);
15extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32); 12extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32);
16extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *); 13extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *);
17extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32); 14extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32);
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index a9b4eeefe9fc..38dfac2b0a1c 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -213,7 +213,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
213 * associated read buffers, go ahead and do that here for type 213 * associated read buffers, go ahead and do that here for type
214 * SCF_SCSI_CONTROL_SG_IO_CDB. Also note that this is currently 214 * SCF_SCSI_CONTROL_SG_IO_CDB. Also note that this is currently
215 * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB 215 * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB
216 * by target core in transport_generic_allocate_tasks() -> 216 * by target core in target_setup_cmd_from_cdb() ->
217 * transport_generic_cmd_sequencer(). 217 * transport_generic_cmd_sequencer().
218 */ 218 */
219 if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB && 219 if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB &&
@@ -227,7 +227,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
227 } 227 }
228 } 228 }
229 229
230 ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd); 230 ret = target_setup_cmd_from_cdb(se_cmd, sc->cmnd);
231 if (ret == -ENOMEM) { 231 if (ret == -ENOMEM) {
232 transport_send_check_condition_and_sense(se_cmd, 232 transport_send_check_condition_and_sense(se_cmd,
233 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); 233 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index c7746a3339d4..e624b836469c 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -59,26 +59,31 @@ struct t10_alua_lu_gp *default_lu_gp;
59 * 59 *
60 * See spc4r17 section 6.27 60 * See spc4r17 section 6.27
61 */ 61 */
62int target_emulate_report_target_port_groups(struct se_task *task) 62int target_emulate_report_target_port_groups(struct se_cmd *cmd)
63{ 63{
64 struct se_cmd *cmd = task->task_se_cmd;
65 struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev; 64 struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
66 struct se_port *port; 65 struct se_port *port;
67 struct t10_alua_tg_pt_gp *tg_pt_gp; 66 struct t10_alua_tg_pt_gp *tg_pt_gp;
68 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 67 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
69 unsigned char *buf; 68 unsigned char *buf;
70 u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first 69 u32 rd_len = 0, off;
71 Target port group descriptor */ 70 int ext_hdr = (cmd->t_task_cdb[1] & 0x20);
72 /* 71 /*
73 * Need at least 4 bytes of response data or else we can't 72 * Skip over RESERVED area to first Target port group descriptor
 74	 * even fit the return data length.	 74	 * depending on the PARAMETER DATA FORMAT type.
75 */ 74 */
76 if (cmd->data_length < 4) { 75 if (ext_hdr != 0)
77 pr_warn("REPORT TARGET PORT GROUPS allocation length %u" 76 off = 8;
78 " too small\n", cmd->data_length); 77 else
78 off = 4;
79
80 if (cmd->data_length < off) {
81 pr_warn("REPORT TARGET PORT GROUPS allocation length %u too"
82 " small for %s header\n", cmd->data_length,
83 (ext_hdr) ? "extended" : "normal");
84 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
79 return -EINVAL; 85 return -EINVAL;
80 } 86 }
81
82 buf = transport_kmap_data_sg(cmd); 87 buf = transport_kmap_data_sg(cmd);
83 88
84 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); 89 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
@@ -159,15 +164,34 @@ int target_emulate_report_target_port_groups(struct se_task *task)
         /*
          * Set the RETURN DATA LENGTH set in the header of the DataIN Payload
          */
-        buf[0] = ((rd_len >> 24) & 0xff);
-        buf[1] = ((rd_len >> 16) & 0xff);
-        buf[2] = ((rd_len >> 8) & 0xff);
-        buf[3] = (rd_len & 0xff);
+        put_unaligned_be32(rd_len, &buf[0]);
 
+        /*
+         * Fill in the Extended header parameter data format if requested
+         */
+        if (ext_hdr != 0) {
+                buf[4] = 0x10;
+                /*
+                 * Set the implict transition time (in seconds) for the application
+                 * client to use as a base for it's transition timeout value.
+                 *
+                 * Use the current tg_pt_gp_mem -> tg_pt_gp membership from the LUN
+                 * this CDB was received upon to determine this value individually
+                 * for ALUA target port group.
+                 */
+                port = cmd->se_lun->lun_sep;
+                tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+                if (tg_pt_gp_mem) {
+                        spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+                        tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+                        if (tg_pt_gp)
+                                buf[5] = tg_pt_gp->tg_pt_gp_implict_trans_secs;
+                        spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+                }
+        }
         transport_kunmap_data_sg(cmd);
 
-        task->task_scsi_status = GOOD;
-        transport_complete_task(task, 1);
+        target_complete_cmd(cmd, GOOD);
         return 0;
 }
 
@@ -176,9 +200,8 @@ int target_emulate_report_target_port_groups(struct se_task *task)
  *
  * See spc4r17 section 6.35
  */
-int target_emulate_set_target_port_groups(struct se_task *task)
+int target_emulate_set_target_port_groups(struct se_cmd *cmd)
 {
-        struct se_cmd *cmd = task->task_se_cmd;
         struct se_device *dev = cmd->se_dev;
         struct se_subsystem_dev *su_dev = dev->se_sub_dev;
         struct se_port *port, *l_port = cmd->se_lun->lun_sep;
@@ -351,8 +374,7 @@ int target_emulate_set_target_port_groups(struct se_task *task)
 
 out:
         transport_kunmap_data_sg(cmd);
-        task->task_scsi_status = GOOD;
-        transport_complete_task(task, 1);
+        target_complete_cmd(cmd, GOOD);
         return 0;
 }
 
@@ -391,7 +413,7 @@ static inline int core_alua_state_standby(
         case RECEIVE_DIAGNOSTIC:
         case SEND_DIAGNOSTIC:
         case MAINTENANCE_IN:
-                switch (cdb[1]) {
+                switch (cdb[1] & 0x1f) {
                 case MI_REPORT_TARGET_PGS:
                         return 0;
                 default:
@@ -433,7 +455,7 @@ static inline int core_alua_state_unavailable(
         case INQUIRY:
         case REPORT_LUNS:
         case MAINTENANCE_IN:
-                switch (cdb[1]) {
+                switch (cdb[1] & 0x1f) {
                 case MI_REPORT_TARGET_PGS:
                         return 0;
                 default:
@@ -473,7 +495,7 @@ static inline int core_alua_state_transition(
         case INQUIRY:
         case REPORT_LUNS:
         case MAINTENANCE_IN:
-                switch (cdb[1]) {
+                switch (cdb[1] & 0x1f) {
                 case MI_REPORT_TARGET_PGS:
                         return 0;
                 default:
@@ -1359,6 +1381,7 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
          */
         tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
         tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
+        tg_pt_gp->tg_pt_gp_implict_trans_secs = ALUA_DEFAULT_IMPLICT_TRANS_SECS;
 
         if (def_group) {
                 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
@@ -1855,6 +1878,37 @@ ssize_t core_alua_store_trans_delay_msecs(
         return count;
 }
 
+ssize_t core_alua_show_implict_trans_secs(
+        struct t10_alua_tg_pt_gp *tg_pt_gp,
+        char *page)
+{
+        return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implict_trans_secs);
+}
+
+ssize_t core_alua_store_implict_trans_secs(
+        struct t10_alua_tg_pt_gp *tg_pt_gp,
+        const char *page,
+        size_t count)
+{
+        unsigned long tmp;
+        int ret;
+
+        ret = strict_strtoul(page, 0, &tmp);
+        if (ret < 0) {
+                pr_err("Unable to extract implict_trans_secs\n");
+                return -EINVAL;
+        }
+        if (tmp > ALUA_MAX_IMPLICT_TRANS_SECS) {
+                pr_err("Passed implict_trans_secs: %lu, exceeds"
+                        " ALUA_MAX_IMPLICT_TRANS_SECS: %d\n", tmp,
+                        ALUA_MAX_IMPLICT_TRANS_SECS);
+                return -EINVAL;
+        }
+        tg_pt_gp->tg_pt_gp_implict_trans_secs = (int)tmp;
+
+        return count;
+}
+
 ssize_t core_alua_show_preferred_bit(
         struct t10_alua_tg_pt_gp *tg_pt_gp,
         char *page)
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
index c5b4ecd3e745..f920c170d47b 100644
--- a/drivers/target/target_core_alua.h
+++ b/drivers/target/target_core_alua.h
@@ -52,6 +52,12 @@
 #define ALUA_DEFAULT_TRANS_DELAY_MSECS		0
 #define ALUA_MAX_TRANS_DELAY_MSECS		30000 /* 30 seconds */
 /*
+ * Used for the recommended application client implict transition timeout
+ * in seconds, returned by the REPORT_TARGET_PORT_GROUPS w/ extended header.
+ */
+#define ALUA_DEFAULT_IMPLICT_TRANS_SECS		0
+#define ALUA_MAX_IMPLICT_TRANS_SECS		255
+/*
  * Used by core_alua_update_tpg_primary_metadata() and
  * core_alua_update_tpg_secondary_metadata()
  */
@@ -66,8 +72,8 @@ extern struct kmem_cache *t10_alua_lu_gp_mem_cache;
 extern struct kmem_cache *t10_alua_tg_pt_gp_cache;
 extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
 
-extern int target_emulate_report_target_port_groups(struct se_task *);
-extern int target_emulate_set_target_port_groups(struct se_task *);
+extern int target_emulate_report_target_port_groups(struct se_cmd *);
+extern int target_emulate_set_target_port_groups(struct se_cmd *);
 extern int core_alua_check_nonop_delay(struct se_cmd *);
 extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
                                 struct se_device *, struct se_port *,
@@ -107,6 +113,10 @@ extern ssize_t core_alua_show_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
                                         char *);
 extern ssize_t core_alua_store_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
                                         const char *, size_t);
+extern ssize_t core_alua_show_implict_trans_secs(struct t10_alua_tg_pt_gp *,
+                                        char *);
+extern ssize_t core_alua_store_implict_trans_secs(struct t10_alua_tg_pt_gp *,
+                                        const char *, size_t);
 extern ssize_t core_alua_show_preferred_bit(struct t10_alua_tg_pt_gp *,
                                         char *);
 extern ssize_t core_alua_store_preferred_bit(struct t10_alua_tg_pt_gp *,
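
Note: the extended header is selected by bit 5 of CDB byte 1 (the 0x20 mask tested above). A hedged sketch of how the first bytes of the RTPG response are laid out by target_core_alua.c (offsets taken from the diff; fill_rtpg_ext_header is a hypothetical helper, not a function in this commit):

    /* Hedged sketch of the RTPG header fill-in mirrored from the diff. */
    static void fill_rtpg_ext_header(unsigned char *buf, u32 rd_len, u8 trans_secs)
    {
            put_unaligned_be32(rd_len, &buf[0]);    /* RETURN DATA LENGTH */
            buf[4] = 0x10;                          /* extended header format */
            buf[5] = trans_secs;                    /* implicit transition time,
                                                     * 0..255 seconds */
            /* target port group descriptors start at offset 8 (else 4) */
    }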
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 30a67707036f..9888693a18fe 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -432,6 +432,7 @@ static int
 target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
 {
         struct se_device *dev = cmd->se_dev;
+        u32 max_sectors;
         int have_tp = 0;
 
         /*
@@ -456,7 +457,9 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
         /*
          * Set MAXIMUM TRANSFER LENGTH
          */
-        put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.fabric_max_sectors, &buf[8]);
+        max_sectors = min(dev->se_sub_dev->se_dev_attrib.fabric_max_sectors,
+                          dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
+        put_unaligned_be32(max_sectors, &buf[8]);
 
         /*
          * Set OPTIMAL TRANSFER LENGTH
@@ -598,9 +601,8 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
         return 0;
 }
 
-int target_emulate_inquiry(struct se_task *task)
+int target_emulate_inquiry(struct se_cmd *cmd)
 {
-        struct se_cmd *cmd = task->task_se_cmd;
         struct se_device *dev = cmd->se_dev;
         struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg;
         unsigned char *buf, *map_buf;
@@ -664,16 +666,13 @@ out:
         }
         transport_kunmap_data_sg(cmd);
 
-        if (!ret) {
-                task->task_scsi_status = GOOD;
-                transport_complete_task(task, 1);
-        }
+        if (!ret)
+                target_complete_cmd(cmd, GOOD);
         return ret;
 }
 
-int target_emulate_readcapacity(struct se_task *task)
+int target_emulate_readcapacity(struct se_cmd *cmd)
 {
-        struct se_cmd *cmd = task->task_se_cmd;
         struct se_device *dev = cmd->se_dev;
         unsigned char *buf;
         unsigned long long blocks_long = dev->transport->get_blocks(dev);
@@ -697,14 +696,12 @@ int target_emulate_readcapacity(struct se_task *task)
 
         transport_kunmap_data_sg(cmd);
 
-        task->task_scsi_status = GOOD;
-        transport_complete_task(task, 1);
+        target_complete_cmd(cmd, GOOD);
         return 0;
 }
 
-int target_emulate_readcapacity_16(struct se_task *task)
+int target_emulate_readcapacity_16(struct se_cmd *cmd)
 {
-        struct se_cmd *cmd = task->task_se_cmd;
         struct se_device *dev = cmd->se_dev;
         unsigned char *buf;
         unsigned long long blocks = dev->transport->get_blocks(dev);
@@ -732,8 +729,7 @@ int target_emulate_readcapacity_16(struct se_task *task)
 
         transport_kunmap_data_sg(cmd);
 
-        task->task_scsi_status = GOOD;
-        transport_complete_task(task, 1);
+        target_complete_cmd(cmd, GOOD);
         return 0;
 }
 
@@ -872,9 +868,8 @@ target_modesense_dpofua(unsigned char *buf, int type)
         }
 }
 
-int target_emulate_modesense(struct se_task *task)
+int target_emulate_modesense(struct se_cmd *cmd)
 {
-        struct se_cmd *cmd = task->task_se_cmd;
         struct se_device *dev = cmd->se_dev;
         char *cdb = cmd->t_task_cdb;
         unsigned char *rbuf;
@@ -947,14 +942,12 @@ int target_emulate_modesense(struct se_task *task)
         memcpy(rbuf, buf, offset);
         transport_kunmap_data_sg(cmd);
 
-        task->task_scsi_status = GOOD;
-        transport_complete_task(task, 1);
+        target_complete_cmd(cmd, GOOD);
         return 0;
 }
 
-int target_emulate_request_sense(struct se_task *task)
+int target_emulate_request_sense(struct se_cmd *cmd)
 {
-        struct se_cmd *cmd = task->task_se_cmd;
         unsigned char *cdb = cmd->t_task_cdb;
         unsigned char *buf;
         u8 ua_asc = 0, ua_ascq = 0;
@@ -1008,8 +1001,7 @@ int target_emulate_request_sense(struct se_task *task)
 
 end:
         transport_kunmap_data_sg(cmd);
-        task->task_scsi_status = GOOD;
-        transport_complete_task(task, 1);
+        target_complete_cmd(cmd, GOOD);
         return 0;
 }
 
@@ -1017,9 +1009,8 @@ end:
  * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
  * Note this is not used for TCM/pSCSI passthrough
  */
-int target_emulate_unmap(struct se_task *task)
+int target_emulate_unmap(struct se_cmd *cmd)
 {
-        struct se_cmd *cmd = task->task_se_cmd;
         struct se_device *dev = cmd->se_dev;
         unsigned char *buf, *ptr = NULL;
         unsigned char *cdb = &cmd->t_task_cdb[0];
@@ -1066,10 +1057,8 @@ int target_emulate_unmap(struct se_task *task)
 
 err:
         transport_kunmap_data_sg(cmd);
-        if (!ret) {
-                task->task_scsi_status = GOOD;
-                transport_complete_task(task, 1);
-        }
+        if (!ret)
+                target_complete_cmd(cmd, GOOD);
         return ret;
 }
 
@@ -1077,9 +1066,8 @@ err:
  * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
  * Note this is not used for TCM/pSCSI passthrough
  */
-int target_emulate_write_same(struct se_task *task)
+int target_emulate_write_same(struct se_cmd *cmd)
 {
-        struct se_cmd *cmd = task->task_se_cmd;
         struct se_device *dev = cmd->se_dev;
         sector_t range;
         sector_t lba = cmd->t_task_lba;
@@ -1118,79 +1106,25 @@ int target_emulate_write_same(struct se_task *task)
                 return ret;
         }
 
-        task->task_scsi_status = GOOD;
-        transport_complete_task(task, 1);
+        target_complete_cmd(cmd, GOOD);
         return 0;
 }
 
-int target_emulate_synchronize_cache(struct se_task *task)
+int target_emulate_synchronize_cache(struct se_cmd *cmd)
 {
-        struct se_device *dev = task->task_se_cmd->se_dev;
-        struct se_cmd *cmd = task->task_se_cmd;
-
-        if (!dev->transport->do_sync_cache) {
+        if (!cmd->se_dev->transport->do_sync_cache) {
                 pr_err("SYNCHRONIZE_CACHE emulation not supported"
-                        " for: %s\n", dev->transport->name);
+                        " for: %s\n", cmd->se_dev->transport->name);
                 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
                 return -ENOSYS;
         }
 
-        dev->transport->do_sync_cache(task);
+        cmd->se_dev->transport->do_sync_cache(cmd);
         return 0;
 }
 
-int target_emulate_noop(struct se_task *task)
+int target_emulate_noop(struct se_cmd *cmd)
 {
-        task->task_scsi_status = GOOD;
-        transport_complete_task(task, 1);
+        target_complete_cmd(cmd, GOOD);
         return 0;
 }
-
-/*
- * Write a CDB into @cdb that is based on the one the intiator sent us,
- * but updated to only cover the sectors that the current task handles.
- */
-void target_get_task_cdb(struct se_task *task, unsigned char *cdb)
-{
-        struct se_cmd *cmd = task->task_se_cmd;
-        unsigned int cdb_len = scsi_command_size(cmd->t_task_cdb);
-
-        memcpy(cdb, cmd->t_task_cdb, cdb_len);
-        if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
-                unsigned long long lba = task->task_lba;
-                u32 sectors = task->task_sectors;
-
-                switch (cdb_len) {
-                case 6:
-                        /* 21-bit LBA and 8-bit sectors */
-                        cdb[1] = (lba >> 16) & 0x1f;
-                        cdb[2] = (lba >> 8) & 0xff;
-                        cdb[3] = lba & 0xff;
-                        cdb[4] = sectors & 0xff;
-                        break;
-                case 10:
-                        /* 32-bit LBA and 16-bit sectors */
-                        put_unaligned_be32(lba, &cdb[2]);
-                        put_unaligned_be16(sectors, &cdb[7]);
-                        break;
-                case 12:
-                        /* 32-bit LBA and 32-bit sectors */
-                        put_unaligned_be32(lba, &cdb[2]);
-                        put_unaligned_be32(sectors, &cdb[6]);
-                        break;
-                case 16:
-                        /* 64-bit LBA and 32-bit sectors */
-                        put_unaligned_be64(lba, &cdb[2]);
-                        put_unaligned_be32(sectors, &cdb[10]);
-                        break;
-                case 32:
-                        /* 64-bit LBA and 32-bit sectors, extended CDB */
-                        put_unaligned_be64(lba, &cdb[12]);
-                        put_unaligned_be32(sectors, &cdb[28]);
-                        break;
-                default:
-                        BUG();
-                }
-        }
-}
-EXPORT_SYMBOL(target_get_task_cdb);
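
Note: with struct se_task gone, every emulation handler in target_core_cdb.c converges on one shape: operate on the se_cmd directly, then report status through target_complete_cmd(). A hedged template (target_emulate_example is illustrative only, not a function in this diff):

    /* Illustrative template of the post-conversion handler shape. */
    int target_emulate_example(struct se_cmd *cmd)
    {
            unsigned char *buf;

            buf = transport_kmap_data_sg(cmd);      /* map the data-in SGL */
            /* ... fill buf[] from device state ... */
            transport_kunmap_data_sg(cmd);

            /* replaces task->task_scsi_status = GOOD +
             * transport_complete_task(task, 1) */
            target_complete_cmd(cmd, GOOD);
            return 0;
    }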
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index cbb66537d230..801efa892046 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -683,9 +683,6 @@ SE_DEV_ATTR(block_size, S_IRUGO | S_IWUSR);
 DEF_DEV_ATTRIB_RO(hw_max_sectors);
 SE_DEV_ATTR_RO(hw_max_sectors);
 
-DEF_DEV_ATTRIB(max_sectors);
-SE_DEV_ATTR(max_sectors, S_IRUGO | S_IWUSR);
-
 DEF_DEV_ATTRIB(fabric_max_sectors);
 SE_DEV_ATTR(fabric_max_sectors, S_IRUGO | S_IWUSR);
 
@@ -727,7 +724,6 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
         &target_core_dev_attrib_hw_block_size.attr,
         &target_core_dev_attrib_block_size.attr,
         &target_core_dev_attrib_hw_max_sectors.attr,
-        &target_core_dev_attrib_max_sectors.attr,
         &target_core_dev_attrib_fabric_max_sectors.attr,
         &target_core_dev_attrib_optimal_sectors.attr,
         &target_core_dev_attrib_hw_queue_depth.attr,
@@ -2451,6 +2447,26 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_trans_delay_msecs(
 SE_DEV_ALUA_TG_PT_ATTR(trans_delay_msecs, S_IRUGO | S_IWUSR);
 
 /*
+ * implict_trans_secs
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_implict_trans_secs(
+        struct t10_alua_tg_pt_gp *tg_pt_gp,
+        char *page)
+{
+        return core_alua_show_implict_trans_secs(tg_pt_gp, page);
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_implict_trans_secs(
+        struct t10_alua_tg_pt_gp *tg_pt_gp,
+        const char *page,
+        size_t count)
+{
+        return core_alua_store_implict_trans_secs(tg_pt_gp, page, count);
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(implict_trans_secs, S_IRUGO | S_IWUSR);
+
+/*
  * preferred
  */
 
@@ -2574,6 +2590,7 @@ static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
         &target_core_alua_tg_pt_gp_alua_write_metadata.attr,
         &target_core_alua_tg_pt_gp_nonop_delay_msecs.attr,
         &target_core_alua_tg_pt_gp_trans_delay_msecs.attr,
+        &target_core_alua_tg_pt_gp_implict_trans_secs.attr,
         &target_core_alua_tg_pt_gp_preferred.attr,
         &target_core_alua_tg_pt_gp_tg_pt_gp_id.attr,
         &target_core_alua_tg_pt_gp_members.attr,
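
Note: the new implict_trans_secs attribute is an ordinary configfs file. A hedged userspace sketch; the directory layout under /sys/kernel/config/target/ is an assumption about the usual ALUA target-port-group path and is not shown in this diff:

    /* Hedged usage sketch: write the attribute from userspace. */
    #include <stdio.h>

    static int set_implict_trans_secs(const char *tg_pt_gp_dir, int secs)
    {
            char path[512];
            FILE *f;

            snprintf(path, sizeof(path), "%s/implict_trans_secs", tg_pt_gp_dir);
            f = fopen(path, "w");
            if (!f)
                    return -1;
            fprintf(f, "%d\n", secs);  /* values > 255 are rejected by the
                                        * store handler above */
            return fclose(f);
    }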
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index aa6267746383..5ad972856a8d 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -643,9 +643,8 @@ void core_dev_unexport(
         lun->lun_se_dev = NULL;
 }
 
-int target_report_luns(struct se_task *se_task)
+int target_report_luns(struct se_cmd *se_cmd)
 {
-        struct se_cmd *se_cmd = se_task->task_se_cmd;
         struct se_dev_entry *deve;
         struct se_session *se_sess = se_cmd->se_sess;
         unsigned char *buf;
@@ -696,8 +695,7 @@ done:
         buf[3] = (lun_count & 0xff);
         transport_kunmap_data_sg(se_cmd);
 
-        se_task->task_scsi_status = GOOD;
-        transport_complete_task(se_task, 1);
+        target_complete_cmd(se_cmd, GOOD);
         return 0;
 }
 
@@ -878,15 +876,12 @@ void se_dev_set_default_attribs(
         dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size;
         dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size;
         /*
-         * max_sectors is based on subsystem plugin dependent requirements.
+         * Align max_hw_sectors down to PAGE_SIZE I/O transfers
          */
-        dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
-        /*
-         * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
-         */
-        limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors,
+        limits->max_hw_sectors = se_dev_align_max_sectors(limits->max_hw_sectors,
                 limits->logical_block_size);
-        dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
+        dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
+
         /*
          * Set fabric_max_sectors, which is reported in block limits
          * VPD page (B0h).
@@ -1170,64 +1165,6 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
         return 0;
 }
 
-int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
-{
-        int force = 0; /* Force setting for VDEVS */
-
-        if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
-                pr_err("dev[%p]: Unable to change SE Device"
-                        " max_sectors while dev_export_obj: %d count exists\n",
-                        dev, atomic_read(&dev->dev_export_obj.obj_access_count));
-                return -EINVAL;
-        }
-        if (!max_sectors) {
-                pr_err("dev[%p]: Illegal ZERO value for"
-                        " max_sectors\n", dev);
-                return -EINVAL;
-        }
-        if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
-                pr_err("dev[%p]: Passed max_sectors: %u less than"
-                        " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
-                        DA_STATUS_MAX_SECTORS_MIN);
-                return -EINVAL;
-        }
-        if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
-                if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
-                        pr_err("dev[%p]: Passed max_sectors: %u"
-                                " greater than TCM/SE_Device max_sectors:"
-                                " %u\n", dev, max_sectors,
-                                dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
-                        return -EINVAL;
-                }
-        } else {
-                if (!force && (max_sectors >
-                                dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) {
-                        pr_err("dev[%p]: Passed max_sectors: %u"
-                                " greater than TCM/SE_Device max_sectors"
-                                ": %u, use force=1 to override.\n", dev,
-                                max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
-                        return -EINVAL;
-                }
-                if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
-                        pr_err("dev[%p]: Passed max_sectors: %u"
-                                " greater than DA_STATUS_MAX_SECTORS_MAX:"
-                                " %u\n", dev, max_sectors,
-                                DA_STATUS_MAX_SECTORS_MAX);
-                        return -EINVAL;
-                }
-        }
-        /*
-         * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
-         */
-        max_sectors = se_dev_align_max_sectors(max_sectors,
-                dev->se_sub_dev->se_dev_attrib.block_size);
-
-        dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
-        pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
-                dev, max_sectors);
-        return 0;
-}
-
 int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
 {
         if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
@@ -1341,7 +1278,6 @@ struct se_lun *core_dev_add_lun(
         u32 lun)
 {
         struct se_lun *lun_p;
-        u32 lun_access = 0;
         int rc;
 
         if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
@@ -1354,12 +1290,8 @@ struct se_lun *core_dev_add_lun(
         if (IS_ERR(lun_p))
                 return lun_p;
 
-        if (dev->dev_flags & DF_READ_ONLY)
-                lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
-        else
-                lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
-
-        rc = core_tpg_post_addlun(tpg, lun_p, lun_access, dev);
+        rc = core_tpg_post_addlun(tpg, lun_p,
+                                TRANSPORT_LUNFLAGS_READ_WRITE, dev);
         if (rc < 0)
                 return ERR_PTR(rc);
 
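
Note: se_dev_set_max_sectors() is removed outright; only hw_max_sectors and fabric_max_sectors remain, and the PAGE_SIZE alignment now applies to max_hw_sectors. se_dev_align_max_sectors() itself is not shown in this diff; a hedged sketch of the arithmetic it is expected to perform, under that assumption:

    /* Hedged sketch (assumed implementation): round the sector limit down
     * to a whole number of PAGE_SIZE worth of logical blocks. */
    static u32 example_align_max_sectors(u32 max_sectors, u32 block_size)
    {
            u32 blocks_per_page = PAGE_SIZE / block_size;   /* e.g. 8 for 512B */

            if (blocks_per_page > 1)
                    max_sectors -= max_sectors % blocks_per_page;
            return max_sectors;
    }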
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index f286955331a2..686dba189f8e 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -133,15 +133,10 @@ static struct se_device *fd_create_virtdevice(
                 ret = PTR_ERR(dev_p);
                 goto fail;
         }
-#if 0
-        if (di->no_create_file)
-                flags = O_RDWR | O_LARGEFILE;
-        else
-                flags = O_RDWR | O_CREAT | O_LARGEFILE;
-#else
+
+        /* O_DIRECT too? */
         flags = O_RDWR | O_CREAT | O_LARGEFILE;
-#endif
-/* flags |= O_DIRECT; */
+
         /*
          * If fd_buffered_io=1 has not been set explicitly (the default),
          * use O_SYNC to force FILEIO writes to disk.
@@ -249,53 +244,33 @@ static void fd_free_device(void *p)
         kfree(fd_dev);
 }
 
-static inline struct fd_request *FILE_REQ(struct se_task *task)
-{
-        return container_of(task, struct fd_request, fd_task);
-}
-
-
-static struct se_task *
-fd_alloc_task(unsigned char *cdb)
+static int fd_do_readv(struct se_cmd *cmd, struct scatterlist *sgl,
+                u32 sgl_nents)
 {
-        struct fd_request *fd_req;
-
-        fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL);
-        if (!fd_req) {
-                pr_err("Unable to allocate struct fd_request\n");
-                return NULL;
-        }
-
-        return &fd_req->fd_task;
-}
-
-static int fd_do_readv(struct se_task *task)
-{
-        struct fd_request *req = FILE_REQ(task);
-        struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev;
+        struct se_device *se_dev = cmd->se_dev;
         struct fd_dev *dev = se_dev->dev_ptr;
         struct file *fd = dev->fd_file;
-        struct scatterlist *sg = task->task_sg;
+        struct scatterlist *sg;
         struct iovec *iov;
         mm_segment_t old_fs;
-        loff_t pos = (task->task_lba *
+        loff_t pos = (cmd->t_task_lba *
                 se_dev->se_sub_dev->se_dev_attrib.block_size);
         int ret = 0, i;
 
-        iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
+        iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
         if (!iov) {
                 pr_err("Unable to allocate fd_do_readv iov[]\n");
                 return -ENOMEM;
         }
 
-        for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+        for_each_sg(sgl, sg, sgl_nents, i) {
                 iov[i].iov_len = sg->length;
                 iov[i].iov_base = sg_virt(sg);
         }
 
         old_fs = get_fs();
         set_fs(get_ds());
-        ret = vfs_readv(fd, &iov[0], task->task_sg_nents, &pos);
+        ret = vfs_readv(fd, &iov[0], sgl_nents, &pos);
         set_fs(old_fs);
 
         kfree(iov);
@@ -305,10 +280,10 @@ static int fd_do_readv(struct se_task *task)
          * block_device.
          */
         if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
-                if (ret < 0 || ret != task->task_size) {
+                if (ret < 0 || ret != cmd->data_length) {
                         pr_err("vfs_readv() returned %d,"
                                 " expecting %d for S_ISBLK\n", ret,
-                                (int)task->task_size);
+                                (int)cmd->data_length);
                         return (ret < 0 ? ret : -EINVAL);
                 }
         } else {
@@ -322,38 +297,38 @@ static int fd_do_readv(struct se_task *task)
         return 1;
 }
 
-static int fd_do_writev(struct se_task *task)
+static int fd_do_writev(struct se_cmd *cmd, struct scatterlist *sgl,
+                u32 sgl_nents)
 {
-        struct fd_request *req = FILE_REQ(task);
-        struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev;
+        struct se_device *se_dev = cmd->se_dev;
         struct fd_dev *dev = se_dev->dev_ptr;
         struct file *fd = dev->fd_file;
-        struct scatterlist *sg = task->task_sg;
+        struct scatterlist *sg;
         struct iovec *iov;
         mm_segment_t old_fs;
-        loff_t pos = (task->task_lba *
+        loff_t pos = (cmd->t_task_lba *
                 se_dev->se_sub_dev->se_dev_attrib.block_size);
         int ret, i = 0;
 
-        iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
+        iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
         if (!iov) {
                 pr_err("Unable to allocate fd_do_writev iov[]\n");
                 return -ENOMEM;
         }
 
-        for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+        for_each_sg(sgl, sg, sgl_nents, i) {
                 iov[i].iov_len = sg->length;
                 iov[i].iov_base = sg_virt(sg);
         }
 
         old_fs = get_fs();
         set_fs(get_ds());
-        ret = vfs_writev(fd, &iov[0], task->task_sg_nents, &pos);
+        ret = vfs_writev(fd, &iov[0], sgl_nents, &pos);
         set_fs(old_fs);
 
         kfree(iov);
 
-        if (ret < 0 || ret != task->task_size) {
+        if (ret < 0 || ret != cmd->data_length) {
                 pr_err("vfs_writev() returned %d\n", ret);
                 return (ret < 0 ? ret : -EINVAL);
         }
@@ -361,9 +336,8 @@ static int fd_do_writev(struct se_task *task)
         return 1;
 }
 
-static void fd_emulate_sync_cache(struct se_task *task)
+static void fd_emulate_sync_cache(struct se_cmd *cmd)
 {
-        struct se_cmd *cmd = task->task_se_cmd;
         struct se_device *dev = cmd->se_dev;
         struct fd_dev *fd_dev = dev->dev_ptr;
         int immed = (cmd->t_task_cdb[1] & 0x2);
@@ -375,7 +349,7 @@ static void fd_emulate_sync_cache(struct se_task *task)
          * for this SYNCHRONIZE_CACHE op
          */
         if (immed)
-                transport_complete_sync_cache(cmd, 1);
+                target_complete_cmd(cmd, SAM_STAT_GOOD);
 
         /*
          * Determine if we will be flushing the entire device.
@@ -395,33 +369,37 @@ static void fd_emulate_sync_cache(struct se_task *task)
         if (ret != 0)
                 pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
 
-        if (!immed)
-                transport_complete_sync_cache(cmd, ret == 0);
+        if (immed)
+                return;
+
+        if (ret) {
+                cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
+        } else {
+                target_complete_cmd(cmd, SAM_STAT_GOOD);
+        }
 }
 
-/*
- * WRITE Force Unit Access (FUA) emulation on a per struct se_task
- * LBA range basis..
- */
-static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task)
+static void fd_emulate_write_fua(struct se_cmd *cmd)
 {
         struct se_device *dev = cmd->se_dev;
         struct fd_dev *fd_dev = dev->dev_ptr;
-        loff_t start = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
-        loff_t end = start + task->task_size;
+        loff_t start = cmd->t_task_lba *
+                dev->se_sub_dev->se_dev_attrib.block_size;
+        loff_t end = start + cmd->data_length;
         int ret;
 
         pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
-                task->task_lba, task->task_size);
+                cmd->t_task_lba, cmd->data_length);
 
         ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
         if (ret != 0)
                 pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
 }
 
-static int fd_do_task(struct se_task *task)
+static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
+                u32 sgl_nents, enum dma_data_direction data_direction)
 {
-        struct se_cmd *cmd = task->task_se_cmd;
         struct se_device *dev = cmd->se_dev;
         int ret = 0;
 
@@ -429,10 +407,10 @@ static int fd_do_task(struct se_task *task)
          * Call vectorized fileio functions to map struct scatterlist
          * physical memory addresses to struct iovec virtual memory.
          */
-        if (task->task_data_direction == DMA_FROM_DEVICE) {
-                ret = fd_do_readv(task);
+        if (data_direction == DMA_FROM_DEVICE) {
+                ret = fd_do_readv(cmd, sgl, sgl_nents);
         } else {
-                ret = fd_do_writev(task);
+                ret = fd_do_writev(cmd, sgl, sgl_nents);
 
                 if (ret > 0 &&
                     dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
@@ -443,7 +421,7 @@ static int fd_do_task(struct se_task *task)
                          * and return some sense data to let the initiator
                          * know the FUA WRITE cache sync failed..?
                          */
-                        fd_emulate_write_fua(cmd, task);
+                        fd_emulate_write_fua(cmd);
                 }
 
         }
@@ -452,24 +430,11 @@ static int fd_do_task(struct se_task *task)
                 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                 return ret;
         }
-        if (ret) {
-                task->task_scsi_status = GOOD;
-                transport_complete_task(task, 1);
-        }
+        if (ret)
+                target_complete_cmd(cmd, SAM_STAT_GOOD);
         return 0;
 }
 
-/* fd_free_task(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static void fd_free_task(struct se_task *task)
-{
-        struct fd_request *req = FILE_REQ(task);
-
-        kfree(req);
-}
-
 enum {
         Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
 };
@@ -632,10 +597,8 @@ static struct se_subsystem_api fileio_template = {
         .allocate_virtdevice    = fd_allocate_virtdevice,
         .create_virtdevice      = fd_create_virtdevice,
         .free_device            = fd_free_device,
-        .alloc_task             = fd_alloc_task,
-        .do_task                = fd_do_task,
+        .execute_cmd            = fd_execute_cmd,
         .do_sync_cache          = fd_emulate_sync_cache,
-        .free_task              = fd_free_task,
         .check_configfs_dev_params = fd_check_configfs_dev_params,
         .set_configfs_dev_params = fd_set_configfs_dev_params,
         .show_configfs_dev_params = fd_show_configfs_dev_params,
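
Note: the alloc_task/do_task/free_task triplet collapses into a single execute_cmd hook that receives the command, its full scatterlist, and the data direction. A hedged sketch of the contract a backend now implements (the signature comes from the diff; backend_read/backend_write are hypothetical placeholders):

    /* Hedged sketch of the new se_subsystem_api execution contract. */
    static int example_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
                    u32 sgl_nents, enum dma_data_direction data_direction)
    {
            int ret;

            if (data_direction == DMA_FROM_DEVICE)
                    ret = backend_read(cmd, sgl, sgl_nents);   /* hypothetical */
            else
                    ret = backend_write(cmd, sgl, sgl_nents);  /* hypothetical */

            if (ret < 0) {
                    cmd->scsi_sense_reason =
                            TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                    return ret;
            }
            target_complete_cmd(cmd, SAM_STAT_GOOD);
            return 0;
    }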
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index 59e6e73106c2..fbd59ef7d8be 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -12,10 +12,6 @@
 #define RRF_EMULATE_CDB		0x01
 #define RRF_GOT_LBA		0x02
 
-struct fd_request {
-        struct se_task fd_task;
-};
-
 #define FBDF_HAS_PATH		0x01
 #define FBDF_HAS_SIZE		0x02
 #define FDBD_USE_BUFFERED_IO	0x04
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 2ec299e8a73e..fd47950727b4 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -189,26 +189,6 @@ static void iblock_free_device(void *p)
         kfree(ib_dev);
 }
 
-static inline struct iblock_req *IBLOCK_REQ(struct se_task *task)
-{
-        return container_of(task, struct iblock_req, ib_task);
-}
-
-static struct se_task *
-iblock_alloc_task(unsigned char *cdb)
-{
-        struct iblock_req *ib_req;
-
-        ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
-        if (!ib_req) {
-                pr_err("Unable to allocate memory for struct iblock_req\n");
-                return NULL;
-        }
-
-        atomic_set(&ib_req->pending, 1);
-        return &ib_req->ib_task;
-}
-
 static unsigned long long iblock_emulate_read_cap_with_block_size(
         struct se_device *dev,
         struct block_device *bd,
@@ -295,8 +275,16 @@ static void iblock_end_io_flush(struct bio *bio, int err)
         if (err)
                 pr_err("IBLOCK: cache flush failed: %d\n", err);
 
-        if (cmd)
-                transport_complete_sync_cache(cmd, err == 0);
+        if (cmd) {
+                if (err) {
+                        cmd->scsi_sense_reason =
+                                TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                        target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
+                } else {
+                        target_complete_cmd(cmd, SAM_STAT_GOOD);
+                }
+        }
+
         bio_put(bio);
 }
 
@@ -304,9 +292,8 @@ static void iblock_end_io_flush(struct bio *bio, int err)
  * Implement SYCHRONIZE CACHE.  Note that we can't handle lba ranges and must
  * always flush the whole cache.
  */
-static void iblock_emulate_sync_cache(struct se_task *task)
+static void iblock_emulate_sync_cache(struct se_cmd *cmd)
 {
-        struct se_cmd *cmd = task->task_se_cmd;
         struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
         int immed = (cmd->t_task_cdb[1] & 0x2);
         struct bio *bio;
@@ -316,7 +303,7 @@ static void iblock_emulate_sync_cache(struct se_task *task)
          * for this SYNCHRONIZE_CACHE op.
          */
         if (immed)
-                transport_complete_sync_cache(cmd, 1);
+                target_complete_cmd(cmd, SAM_STAT_GOOD);
 
         bio = bio_alloc(GFP_KERNEL, 0);
         bio->bi_end_io = iblock_end_io_flush;
@@ -335,11 +322,6 @@ static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
         return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier);
 }
 
-static void iblock_free_task(struct se_task *task)
-{
-        kfree(IBLOCK_REQ(task));
-}
-
 enum {
         Opt_udev_path, Opt_force, Opt_err
 };
@@ -448,19 +430,35 @@ static ssize_t iblock_show_configfs_dev_params(
         return bl;
 }
 
+static void iblock_complete_cmd(struct se_cmd *cmd)
+{
+        struct iblock_req *ibr = cmd->priv;
+        u8 status;
+
+        if (!atomic_dec_and_test(&ibr->pending))
+                return;
+
+        if (atomic_read(&ibr->ib_bio_err_cnt))
+                status = SAM_STAT_CHECK_CONDITION;
+        else
+                status = SAM_STAT_GOOD;
+
+        target_complete_cmd(cmd, status);
+        kfree(ibr);
+}
+
 static void iblock_bio_destructor(struct bio *bio)
 {
-        struct se_task *task = bio->bi_private;
-        struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;
+        struct se_cmd *cmd = bio->bi_private;
+        struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
 
         bio_free(bio, ib_dev->ibd_bio_set);
 }
 
 static struct bio *
-iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num)
+iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
 {
-        struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;
-        struct iblock_req *ib_req = IBLOCK_REQ(task);
+        struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
         struct bio *bio;
 
         /*
@@ -476,19 +474,11 @@ iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num)
                 return NULL;
         }
 
-        pr_debug("Allocated bio: %p task_sg_nents: %u using ibd_bio_set:"
-                " %p\n", bio, task->task_sg_nents, ib_dev->ibd_bio_set);
-        pr_debug("Allocated bio: %p task_size: %u\n", bio, task->task_size);
-
         bio->bi_bdev = ib_dev->ibd_bd;
-        bio->bi_private = task;
+        bio->bi_private = cmd;
         bio->bi_destructor = iblock_bio_destructor;
         bio->bi_end_io = &iblock_bio_done;
         bio->bi_sector = lba;
-        atomic_inc(&ib_req->pending);
-
-        pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector);
-        pr_debug("Set ib_req->pending: %d\n", atomic_read(&ib_req->pending));
         return bio;
 }
 
@@ -503,20 +493,21 @@ static void iblock_submit_bios(struct bio_list *list, int rw)
         blk_finish_plug(&plug);
 }
 
-static int iblock_do_task(struct se_task *task)
+static int iblock_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
+                u32 sgl_nents, enum dma_data_direction data_direction)
 {
-        struct se_cmd *cmd = task->task_se_cmd;
         struct se_device *dev = cmd->se_dev;
-        struct iblock_req *ibr = IBLOCK_REQ(task);
+        struct iblock_req *ibr;
         struct bio *bio;
         struct bio_list list;
         struct scatterlist *sg;
-        u32 i, sg_num = task->task_sg_nents;
+        u32 sg_num = sgl_nents;
         sector_t block_lba;
         unsigned bio_cnt;
         int rw;
+        int i;
 
-        if (task->task_data_direction == DMA_TO_DEVICE) {
+        if (data_direction == DMA_TO_DEVICE) {
                 /*
                  * Force data to disk if we pretend to not have a volatile
                  * write cache, or the initiator set the Force Unit Access bit.
@@ -532,17 +523,17 @@ static int iblock_do_task(struct se_task *task)
         }
 
         /*
-         * Do starting conversion up from non 512-byte blocksize with
-         * struct se_task SCSI blocksize into Linux/Block 512 units for BIO.
+         * Convert the blocksize advertised to the initiator to the 512 byte
+         * units unconditionally used by the Linux block layer.
          */
         if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
-                block_lba = (task->task_lba << 3);
+                block_lba = (cmd->t_task_lba << 3);
         else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048)
-                block_lba = (task->task_lba << 2);
+                block_lba = (cmd->t_task_lba << 2);
         else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024)
-                block_lba = (task->task_lba << 1);
+                block_lba = (cmd->t_task_lba << 1);
         else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
-                block_lba = task->task_lba;
+                block_lba = cmd->t_task_lba;
         else {
                 pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
                         " %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
@@ -550,17 +541,22 @@ static int iblock_do_task(struct se_task *task)
                 return -ENOSYS;
         }
 
-        bio = iblock_get_bio(task, block_lba, sg_num);
-        if (!bio) {
-                cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-                return -ENOMEM;
-        }
+        ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
+        if (!ibr)
+                goto fail;
+        cmd->priv = ibr;
+
+        bio = iblock_get_bio(cmd, block_lba, sgl_nents);
+        if (!bio)
+                goto fail_free_ibr;
 
         bio_list_init(&list);
         bio_list_add(&list, bio);
+
+        atomic_set(&ibr->pending, 2);
         bio_cnt = 1;
 
-        for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+        for_each_sg(sgl, sg, sgl_nents, i) {
                 /*
                  * XXX: if the length the device accepts is shorter than the
                  * length of the S/G list entry this will cause and
@@ -573,9 +569,11 @@ static int iblock_do_task(struct se_task *task)
                         bio_cnt = 0;
                 }
 
-                bio = iblock_get_bio(task, block_lba, sg_num);
+                bio = iblock_get_bio(cmd, block_lba, sg_num);
                 if (!bio)
-                        goto fail;
+                        goto fail_put_bios;
+
+                atomic_inc(&ibr->pending);
                 bio_list_add(&list, bio);
                 bio_cnt++;
         }
@@ -586,17 +584,16 @@ static int iblock_do_task(struct se_task *task)
         }
 
         iblock_submit_bios(&list, rw);
-
-        if (atomic_dec_and_test(&ibr->pending)) {
-                transport_complete_task(task,
-                        !atomic_read(&ibr->ib_bio_err_cnt));
-        }
+        iblock_complete_cmd(cmd);
         return 0;
 
-fail:
+fail_put_bios:
         while ((bio = bio_list_pop(&list)))
                 bio_put(bio);
+fail_free_ibr:
+        kfree(ibr);
         cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+fail:
         return -ENOMEM;
 }
 
@@ -621,8 +618,8 @@ static sector_t iblock_get_blocks(struct se_device *dev)
 
 static void iblock_bio_done(struct bio *bio, int err)
 {
-        struct se_task *task = bio->bi_private;
-        struct iblock_req *ibr = IBLOCK_REQ(task);
+        struct se_cmd *cmd = bio->bi_private;
+        struct iblock_req *ibr = cmd->priv;
 
         /*
          * Set -EIO if !BIO_UPTODATE and the passed is still err=0
@@ -642,14 +639,7 @@ static void iblock_bio_done(struct bio *bio, int err)
 
         bio_put(bio);
 
-        if (!atomic_dec_and_test(&ibr->pending))
-                return;
-
-        pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
-                task, bio, task->task_lba,
-                (unsigned long long)bio->bi_sector, err);
-
-        transport_complete_task(task, !atomic_read(&ibr->ib_bio_err_cnt));
+        iblock_complete_cmd(cmd);
 }
 
 static struct se_subsystem_api iblock_template = {
@@ -663,11 +653,9 @@ static struct se_subsystem_api iblock_template = {
         .allocate_virtdevice    = iblock_allocate_virtdevice,
         .create_virtdevice      = iblock_create_virtdevice,
         .free_device            = iblock_free_device,
-        .alloc_task             = iblock_alloc_task,
-        .do_task                = iblock_do_task,
+        .execute_cmd            = iblock_execute_cmd,
         .do_discard             = iblock_do_discard,
         .do_sync_cache          = iblock_emulate_sync_cache,
-        .free_task              = iblock_free_task,
         .check_configfs_dev_params = iblock_check_configfs_dev_params,
         .set_configfs_dev_params = iblock_set_configfs_dev_params,
         .show_configfs_dev_params = iblock_show_configfs_dev_params,
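
Note: iblock completion moves from per-task to a per-command reference count: ibr->pending starts at 2 (the submitting context plus the first bio), each additional bio takes another reference, and whoever drops the count to zero calls target_complete_cmd() and frees the iblock_req. A hedged distillation of that pattern (example_submit is illustrative only):

    /* Hedged distillation of the iblock refcount scheme above: the
     * submitter's reference keeps the iblock_req alive until every bio
     * has been queued, even if some of them complete immediately. */
    static void example_submit(struct se_cmd *cmd, struct iblock_req *ibr,
                    struct bio_list *list, int rw)
    {
            atomic_set(&ibr->pending, 2);   /* submitter + first bio */
            /* for each extra bio queued: atomic_inc(&ibr->pending); */

            iblock_submit_bios(list, rw);
            iblock_complete_cmd(cmd);       /* drop the submitter's reference;
                                             * the last dropper completes the
                                             * command and frees ibr */
    }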
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
index e929370b6fd3..66cf7b9e205e 100644
--- a/drivers/target/target_core_iblock.h
+++ b/drivers/target/target_core_iblock.h
@@ -7,7 +7,6 @@
 #define IBLOCK_LBA_SHIFT	9
 
 struct iblock_req {
-        struct se_task ib_task;
         atomic_t pending;
         atomic_t ib_bio_err_cnt;
 } ____cacheline_aligned;
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 21c05638f158..165e82429687 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -5,15 +5,15 @@
 extern struct t10_alua_lu_gp *default_lu_gp;
 
 /* target_core_cdb.c */
-int target_emulate_inquiry(struct se_task *task);
-int target_emulate_readcapacity(struct se_task *task);
-int target_emulate_readcapacity_16(struct se_task *task);
-int target_emulate_modesense(struct se_task *task);
-int target_emulate_request_sense(struct se_task *task);
-int target_emulate_unmap(struct se_task *task);
-int target_emulate_write_same(struct se_task *task);
-int target_emulate_synchronize_cache(struct se_task *task);
-int target_emulate_noop(struct se_task *task);
+int target_emulate_inquiry(struct se_cmd *cmd);
+int target_emulate_readcapacity(struct se_cmd *cmd);
+int target_emulate_readcapacity_16(struct se_cmd *cmd);
+int target_emulate_modesense(struct se_cmd *cmd);
+int target_emulate_request_sense(struct se_cmd *cmd);
+int target_emulate_unmap(struct se_cmd *cmd);
+int target_emulate_write_same(struct se_cmd *cmd);
+int target_emulate_synchronize_cache(struct se_cmd *cmd);
+int target_emulate_noop(struct se_cmd *cmd);
 
 /* target_core_device.c */
 struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
@@ -28,7 +28,7 @@ int core_dev_export(struct se_device *, struct se_portal_group *,
                 struct se_lun *);
 void core_dev_unexport(struct se_device *, struct se_portal_group *,
                 struct se_lun *);
-int target_report_luns(struct se_task *);
+int target_report_luns(struct se_cmd *);
 void se_release_device_for_hba(struct se_device *);
 void se_release_vpd_for_dev(struct se_device *);
 int se_free_virtual_device(struct se_device *, struct se_hba *);
@@ -104,8 +104,7 @@ void release_se_kmem_caches(void);
 u32 scsi_get_new_index(scsi_index_t);
 void transport_subsystem_check_init(void);
 void transport_cmd_finish_abort(struct se_cmd *, int);
-void __transport_remove_task_from_execute_queue(struct se_task *,
-                struct se_device *);
+void __target_remove_from_execute_list(struct se_cmd *);
 unsigned char *transport_dump_cmd_direction(struct se_cmd *);
 void transport_dump_dev_state(struct se_device *, char *, int *);
 void transport_dump_dev_info(struct se_device *, struct se_lun *,
@@ -114,7 +113,7 @@ void transport_dump_vpd_proto_id(struct t10_vpd *, unsigned char *, int);
 int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int);
 int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int);
 int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
-bool target_stop_task(struct se_task *task, unsigned long *flags);
+bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags);
 int transport_clear_lun_from_sessions(struct se_lun *);
 void transport_send_task_abort(struct se_cmd *);
 
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index c3148b10b4b3..85564998500a 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -193,9 +193,8 @@ static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd)
193 return 0; 193 return 0;
194} 194}
195 195
196int target_scsi2_reservation_release(struct se_task *task) 196int target_scsi2_reservation_release(struct se_cmd *cmd)
197{ 197{
198 struct se_cmd *cmd = task->task_se_cmd;
199 struct se_device *dev = cmd->se_dev; 198 struct se_device *dev = cmd->se_dev;
200 struct se_session *sess = cmd->se_sess; 199 struct se_session *sess = cmd->se_sess;
201 struct se_portal_group *tpg = sess->se_tpg; 200 struct se_portal_group *tpg = sess->se_tpg;
@@ -237,16 +236,13 @@ int target_scsi2_reservation_release(struct se_task *task)
237out_unlock: 236out_unlock:
238 spin_unlock(&dev->dev_reservation_lock); 237 spin_unlock(&dev->dev_reservation_lock);
239out: 238out:
240 if (!ret) { 239 if (!ret)
241 task->task_scsi_status = GOOD; 240 target_complete_cmd(cmd, GOOD);
242 transport_complete_task(task, 1);
243 }
244 return ret; 241 return ret;
245} 242}
246 243
247int target_scsi2_reservation_reserve(struct se_task *task) 244int target_scsi2_reservation_reserve(struct se_cmd *cmd)
248{ 245{
249 struct se_cmd *cmd = task->task_se_cmd;
250 struct se_device *dev = cmd->se_dev; 246 struct se_device *dev = cmd->se_dev;
251 struct se_session *sess = cmd->se_sess; 247 struct se_session *sess = cmd->se_sess;
252 struct se_portal_group *tpg = sess->se_tpg; 248 struct se_portal_group *tpg = sess->se_tpg;
@@ -307,10 +303,8 @@ int target_scsi2_reservation_reserve(struct se_task *task)
307out_unlock: 303out_unlock:
308 spin_unlock(&dev->dev_reservation_lock); 304 spin_unlock(&dev->dev_reservation_lock);
309out: 305out:
310 if (!ret) { 306 if (!ret)
311 task->task_scsi_status = GOOD; 307 target_complete_cmd(cmd, GOOD);
312 transport_complete_task(task, 1);
313 }
314 return ret; 308 return ret;
315} 309}
316 310
@@ -503,11 +497,10 @@ static int core_scsi3_pr_seq_non_holder(
503 * statement. 497 * statement.
504 */ 498 */
505 if (!ret && !other_cdb) { 499 if (!ret && !other_cdb) {
506#if 0
507 pr_debug("Allowing explict CDB: 0x%02x for %s" 500 pr_debug("Allowing explict CDB: 0x%02x for %s"
508 " reservation holder\n", cdb[0], 501 " reservation holder\n", cdb[0],
509 core_scsi3_pr_dump_type(pr_reg_type)); 502 core_scsi3_pr_dump_type(pr_reg_type));
510#endif 503
511 return ret; 504 return ret;
512 } 505 }
513 /* 506 /*
@@ -535,14 +528,14 @@ static int core_scsi3_pr_seq_non_holder(
535 * as we expect registered non-reservation holding 528 * as we expect registered non-reservation holding
536 * nexuses to issue CDBs. 529 * nexuses to issue CDBs.
537 */ 530 */
538#if 0 531
539 if (!registered_nexus) { 532 if (!registered_nexus) {
540 pr_debug("Allowing implict CDB: 0x%02x" 533 pr_debug("Allowing implict CDB: 0x%02x"
541 " for %s reservation on unregistered" 534 " for %s reservation on unregistered"
542 " nexus\n", cdb[0], 535 " nexus\n", cdb[0],
543 core_scsi3_pr_dump_type(pr_reg_type)); 536 core_scsi3_pr_dump_type(pr_reg_type));
544 } 537 }
545#endif 538
546 return 0; 539 return 0;
547 } 540 }
548 } else if ((reg_only) || (all_reg)) { 541 } else if ((reg_only) || (all_reg)) {
@@ -551,11 +544,11 @@ static int core_scsi3_pr_seq_non_holder(
551 * For PR_*_REG_ONLY and PR_*_ALL_REG reservations, 544 * For PR_*_REG_ONLY and PR_*_ALL_REG reservations,
552 * allow commands from registered nexuses. 545 * allow commands from registered nexuses.
553 */ 546 */
554#if 0 547
555 pr_debug("Allowing implict CDB: 0x%02x for %s" 548 pr_debug("Allowing implict CDB: 0x%02x for %s"
556 " reservation\n", cdb[0], 549 " reservation\n", cdb[0],
557 core_scsi3_pr_dump_type(pr_reg_type)); 550 core_scsi3_pr_dump_type(pr_reg_type));
558#endif 551
559 return 0; 552 return 0;
560 } 553 }
561 } 554 }
@@ -1669,12 +1662,12 @@ static int core_scsi3_decode_spec_i_port(
1669 ret = -EINVAL; 1662 ret = -EINVAL;
1670 goto out; 1663 goto out;
1671 } 1664 }
1672#if 0 1665
1673 pr_debug("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u" 1666 pr_debug("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u"
1674 " tid_len: %d for %s + %s\n", 1667 " tid_len: %d for %s + %s\n",
1675 dest_tpg->se_tpg_tfo->get_fabric_name(), cmd->data_length, 1668 dest_tpg->se_tpg_tfo->get_fabric_name(), cmd->data_length,
1676 tpdl, tid_len, i_str, iport_ptr); 1669 tpdl, tid_len, i_str, iport_ptr);
1677#endif 1670
1678 if (tid_len > tpdl) { 1671 if (tid_len > tpdl) {
1679 pr_err("SPC-3 PR SPEC_I_PT: Illegal tid_len:" 1672 pr_err("SPC-3 PR SPEC_I_PT: Illegal tid_len:"
1680 " %u for Transport ID: %s\n", tid_len, ptr); 1673 " %u for Transport ID: %s\n", tid_len, ptr);
@@ -1717,12 +1710,12 @@ static int core_scsi3_decode_spec_i_port(
1717 ret = -EINVAL; 1710 ret = -EINVAL;
1718 goto out; 1711 goto out;
1719 } 1712 }
1720#if 0 1713
1721 pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node: %s" 1714 pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node: %s"
1722 " dest_se_deve mapped_lun: %u\n", 1715 " dest_se_deve mapped_lun: %u\n",
1723 dest_tpg->se_tpg_tfo->get_fabric_name(), 1716 dest_tpg->se_tpg_tfo->get_fabric_name(),
1724 dest_node_acl->initiatorname, dest_se_deve->mapped_lun); 1717 dest_node_acl->initiatorname, dest_se_deve->mapped_lun);
1725#endif 1718
1726 /* 1719 /*
1727 * Skip any TransportIDs that already have a registration for 1720 * Skip any TransportIDs that already have a registration for
1728 * this target port. 1721 * this target port.
@@ -3476,10 +3469,10 @@ static int core_scsi3_emulate_pro_register_and_move(
3476 3469
3477 buf = transport_kmap_data_sg(cmd); 3470 buf = transport_kmap_data_sg(cmd);
3478 proto_ident = (buf[24] & 0x0f); 3471 proto_ident = (buf[24] & 0x0f);
3479#if 0 3472
3480 pr_debug("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:" 3473 pr_debug("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:"
3481 " 0x%02x\n", proto_ident); 3474 " 0x%02x\n", proto_ident);
3482#endif 3475
3483 if (proto_ident != dest_tf_ops->get_fabric_proto_ident(dest_se_tpg)) { 3476 if (proto_ident != dest_tf_ops->get_fabric_proto_ident(dest_se_tpg)) {
3484 pr_err("SPC-3 PR REGISTER_AND_MOVE: Received" 3477 pr_err("SPC-3 PR REGISTER_AND_MOVE: Received"
3485 " proto_ident: 0x%02x does not match ident: 0x%02x" 3478 " proto_ident: 0x%02x does not match ident: 0x%02x"
@@ -3578,11 +3571,11 @@ after_iport_check:
3578 ret = -EINVAL; 3571 ret = -EINVAL;
3579 goto out; 3572 goto out;
3580 } 3573 }
3581#if 0 3574
3582 pr_debug("SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:" 3575 pr_debug("SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:"
3583 " %s from TransportID\n", dest_tf_ops->get_fabric_name(), 3576 " %s from TransportID\n", dest_tf_ops->get_fabric_name(),
3584 dest_node_acl->initiatorname); 3577 dest_node_acl->initiatorname);
3585#endif 3578
3586 /* 3579 /*
3587 * Locate the struct se_dev_entry pointer for the matching RELATIVE TARGET 3580 * Locate the struct se_dev_entry pointer for the matching RELATIVE TARGET
3588 * PORT IDENTIFIER. 3581 * PORT IDENTIFIER.
@@ -3606,12 +3599,12 @@ after_iport_check:
3606 ret = -EINVAL; 3599 ret = -EINVAL;
3607 goto out; 3600 goto out;
3608 } 3601 }
3609#if 0 3602
3610 pr_debug("SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN" 3603 pr_debug("SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN"
3611 " ACL for dest_se_deve->mapped_lun: %u\n", 3604 " ACL for dest_se_deve->mapped_lun: %u\n",
3612 dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname, 3605 dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname,
3613 dest_se_deve->mapped_lun); 3606 dest_se_deve->mapped_lun);
3614#endif 3607
3615 /* 3608 /*
3616 * A persistent reservation needs to already exist in order to 3609 * A persistent reservation needs to already exist in order to
3617 * successfully complete the REGISTER_AND_MOVE service action. 3610 * successfully complete the REGISTER_AND_MOVE service action.
@@ -3802,9 +3795,8 @@ static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb)
3802/* 3795/*
3803 * See spc4r17 section 6.14 Table 170 3796 * See spc4r17 section 6.14 Table 170
3804 */ 3797 */
3805int target_scsi3_emulate_pr_out(struct se_task *task) 3798int target_scsi3_emulate_pr_out(struct se_cmd *cmd)
3806{ 3799{
3807 struct se_cmd *cmd = task->task_se_cmd;
3808 unsigned char *cdb = &cmd->t_task_cdb[0]; 3800 unsigned char *cdb = &cmd->t_task_cdb[0];
3809 unsigned char *buf; 3801 unsigned char *buf;
3810 u64 res_key, sa_res_key; 3802 u64 res_key, sa_res_key;
@@ -3944,10 +3936,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
3944 } 3936 }
3945 3937
3946out: 3938out:
3947 if (!ret) { 3939 if (!ret)
3948 task->task_scsi_status = GOOD; 3940 target_complete_cmd(cmd, GOOD);
3949 transport_complete_task(task, 1);
3950 }
3951 return ret; 3941 return ret;
3952} 3942}
3953 3943
@@ -4302,9 +4292,8 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
4302 return 0; 4292 return 0;
4303} 4293}
4304 4294
4305int target_scsi3_emulate_pr_in(struct se_task *task) 4295int target_scsi3_emulate_pr_in(struct se_cmd *cmd)
4306{ 4296{
4307 struct se_cmd *cmd = task->task_se_cmd;
4308 int ret; 4297 int ret;
4309 4298
4310 /* 4299 /*
@@ -4345,10 +4334,8 @@ int target_scsi3_emulate_pr_in(struct se_task *task)
4345 break; 4334 break;
4346 } 4335 }
4347 4336
4348 if (!ret) { 4337 if (!ret)
4349 task->task_scsi_status = GOOD; 4338 target_complete_cmd(cmd, GOOD);
4350 transport_complete_task(task, 1);
4351 }
4352 return ret; 4339 return ret;
4353} 4340}
4354 4341
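The conversion in target_core_pr.c follows a single pattern: each emulation
handler now takes the struct se_cmd directly and reports status through
target_complete_cmd(). A minimal sketch of the new convention, with an
illustrative name and the actual reservation checks elided:

int example_emulate_release(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	int ret = 0;

	spin_lock(&dev->dev_reservation_lock);
	/* SPC-2 reservation checks would run here and set ret */
	spin_unlock(&dev->dev_reservation_lock);

	if (!ret)
		/* replaces task->task_scsi_status = GOOD plus
		 * transport_complete_task(task, 1) */
		target_complete_cmd(cmd, GOOD);
	return ret;
}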
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
index 7a233feb7e99..af6c460d886d 100644
--- a/drivers/target/target_core_pr.h
+++ b/drivers/target/target_core_pr.h
@@ -47,8 +47,8 @@ extern struct kmem_cache *t10_pr_reg_cache;
47 47
48extern int core_pr_dump_initiator_port(struct t10_pr_registration *, 48extern int core_pr_dump_initiator_port(struct t10_pr_registration *,
49 char *, u32); 49 char *, u32);
50extern int target_scsi2_reservation_release(struct se_task *task); 50extern int target_scsi2_reservation_release(struct se_cmd *);
51extern int target_scsi2_reservation_reserve(struct se_task *task); 51extern int target_scsi2_reservation_reserve(struct se_cmd *);
52extern int core_scsi3_alloc_aptpl_registration( 52extern int core_scsi3_alloc_aptpl_registration(
53 struct t10_reservation *, u64, 53 struct t10_reservation *, u64,
54 unsigned char *, unsigned char *, u32, 54 unsigned char *, unsigned char *, u32,
@@ -61,8 +61,8 @@ extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
61extern void core_scsi3_free_all_registrations(struct se_device *); 61extern void core_scsi3_free_all_registrations(struct se_device *);
62extern unsigned char *core_scsi3_pr_dump_type(int); 62extern unsigned char *core_scsi3_pr_dump_type(int);
63 63
64extern int target_scsi3_emulate_pr_in(struct se_task *task); 64extern int target_scsi3_emulate_pr_in(struct se_cmd *);
65extern int target_scsi3_emulate_pr_out(struct se_task *task); 65extern int target_scsi3_emulate_pr_out(struct se_cmd *);
66extern int core_setup_reservations(struct se_device *, int); 66extern int core_setup_reservations(struct se_device *, int);
67 67
68#endif /* TARGET_CORE_PR_H */ 68#endif /* TARGET_CORE_PR_H */
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 94c905fcbceb..4ce2cf642fce 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -663,22 +663,12 @@ static void pscsi_free_device(void *p)
663 kfree(pdv); 663 kfree(pdv);
664} 664}
665 665
666static inline struct pscsi_plugin_task *PSCSI_TASK(struct se_task *task) 666static int pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg)
667{ 667{
668 return container_of(task, struct pscsi_plugin_task, pscsi_task); 668 struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr;
669}
670
671
672/* pscsi_transport_complete():
673 *
674 *
675 */
676static int pscsi_transport_complete(struct se_task *task)
677{
678 struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
679 struct scsi_device *sd = pdv->pdv_sd; 669 struct scsi_device *sd = pdv->pdv_sd;
680 int result; 670 int result;
681 struct pscsi_plugin_task *pt = PSCSI_TASK(task); 671 struct pscsi_plugin_task *pt = cmd->priv;
682 unsigned char *cdb = &pt->pscsi_cdb[0]; 672 unsigned char *cdb = &pt->pscsi_cdb[0];
683 673
684 result = pt->pscsi_result; 674 result = pt->pscsi_result;
@@ -688,12 +678,11 @@ static int pscsi_transport_complete(struct se_task *task)
688 */ 678 */
689 if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) && 679 if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
690 (status_byte(result) << 1) == SAM_STAT_GOOD) { 680 (status_byte(result) << 1) == SAM_STAT_GOOD) {
691 if (!task->task_se_cmd->se_deve) 681 if (!cmd->se_deve)
692 goto after_mode_sense; 682 goto after_mode_sense;
693 683
694 if (task->task_se_cmd->se_deve->lun_flags & 684 if (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) {
695 TRANSPORT_LUNFLAGS_READ_ONLY) { 685 unsigned char *buf = transport_kmap_data_sg(cmd);
696 unsigned char *buf = transport_kmap_data_sg(task->task_se_cmd);
697 686
698 if (cdb[0] == MODE_SENSE_10) { 687 if (cdb[0] == MODE_SENSE_10) {
699 if (!(buf[3] & 0x80)) 688 if (!(buf[3] & 0x80))
@@ -703,7 +692,7 @@ static int pscsi_transport_complete(struct se_task *task)
703 buf[2] |= 0x80; 692 buf[2] |= 0x80;
704 } 693 }
705 694
706 transport_kunmap_data_sg(task->task_se_cmd); 695 transport_kunmap_data_sg(cmd);
707 } 696 }
708 } 697 }
709after_mode_sense: 698after_mode_sense:
@@ -722,7 +711,6 @@ after_mode_sense:
722 if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) && 711 if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) &&
723 (status_byte(result) << 1) == SAM_STAT_GOOD) { 712 (status_byte(result) << 1) == SAM_STAT_GOOD) {
724 unsigned char *buf; 713 unsigned char *buf;
725 struct scatterlist *sg = task->task_sg;
726 u16 bdl; 714 u16 bdl;
727 u32 blocksize; 715 u32 blocksize;
728 716
@@ -757,35 +745,6 @@ after_mode_select:
757 return 0; 745 return 0;
758} 746}
759 747
760static struct se_task *
761pscsi_alloc_task(unsigned char *cdb)
762{
763 struct pscsi_plugin_task *pt;
764
765 /*
766 * Dynamically alloc cdb space, since it may be larger than
767 * TCM_MAX_COMMAND_SIZE
768 */
769 pt = kzalloc(sizeof(*pt) + scsi_command_size(cdb), GFP_KERNEL);
770 if (!pt) {
771 pr_err("Unable to allocate struct pscsi_plugin_task\n");
772 return NULL;
773 }
774
775 return &pt->pscsi_task;
776}
777
778static void pscsi_free_task(struct se_task *task)
779{
780 struct pscsi_plugin_task *pt = PSCSI_TASK(task);
781
782 /*
783 * We do not release the bio(s) here associated with this task, as
784 * this is handled by bio_put() and pscsi_bi_endio().
785 */
786 kfree(pt);
787}
788
789enum { 748enum {
790 Opt_scsi_host_id, Opt_scsi_channel_id, Opt_scsi_target_id, 749 Opt_scsi_host_id, Opt_scsi_channel_id, Opt_scsi_target_id,
791 Opt_scsi_lun_id, Opt_err 750 Opt_scsi_lun_id, Opt_err
@@ -958,26 +917,25 @@ static inline struct bio *pscsi_get_bio(int sg_num)
958 return bio; 917 return bio;
959} 918}
960 919
961static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg, 920static int pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl,
921 u32 sgl_nents, enum dma_data_direction data_direction,
962 struct bio **hbio) 922 struct bio **hbio)
963{ 923{
964 struct se_cmd *cmd = task->task_se_cmd; 924 struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr;
965 struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
966 u32 task_sg_num = task->task_sg_nents;
967 struct bio *bio = NULL, *tbio = NULL; 925 struct bio *bio = NULL, *tbio = NULL;
968 struct page *page; 926 struct page *page;
969 struct scatterlist *sg; 927 struct scatterlist *sg;
970 u32 data_len = task->task_size, i, len, bytes, off; 928 u32 data_len = cmd->data_length, i, len, bytes, off;
971 int nr_pages = (task->task_size + task_sg[0].offset + 929 int nr_pages = (cmd->data_length + sgl[0].offset +
972 PAGE_SIZE - 1) >> PAGE_SHIFT; 930 PAGE_SIZE - 1) >> PAGE_SHIFT;
973 int nr_vecs = 0, rc; 931 int nr_vecs = 0, rc;
974 int rw = (task->task_data_direction == DMA_TO_DEVICE); 932 int rw = (data_direction == DMA_TO_DEVICE);
975 933
976 *hbio = NULL; 934 *hbio = NULL;
977 935
978 pr_debug("PSCSI: nr_pages: %d\n", nr_pages); 936 pr_debug("PSCSI: nr_pages: %d\n", nr_pages);
979 937
980 for_each_sg(task_sg, sg, task_sg_num, i) { 938 for_each_sg(sgl, sg, sgl_nents, i) {
981 page = sg_page(sg); 939 page = sg_page(sg);
982 off = sg->offset; 940 off = sg->offset;
983 len = sg->length; 941 len = sg->length;
@@ -1009,7 +967,7 @@ static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
1009 * Set *hbio pointer to handle the case: 967 * Set *hbio pointer to handle the case:
1010 * nr_pages > BIO_MAX_PAGES, where additional 968 * nr_pages > BIO_MAX_PAGES, where additional
1011 * bios need to be added to complete a given 969 * bios need to be added to complete a given
1012 * struct se_task 970 * command.
1013 */ 971 */
1014 if (!*hbio) 972 if (!*hbio)
1015 *hbio = tbio = bio; 973 *hbio = tbio = bio;
@@ -1049,7 +1007,7 @@ static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
1049 } 1007 }
1050 } 1008 }
1051 1009
1052 return task->task_sg_nents; 1010 return sgl_nents;
1053fail: 1011fail:
1054 while (*hbio) { 1012 while (*hbio) {
1055 bio = *hbio; 1013 bio = *hbio;
@@ -1061,52 +1019,61 @@ fail:
1061 return -ENOMEM; 1019 return -ENOMEM;
1062} 1020}
1063 1021
1064static int pscsi_do_task(struct se_task *task) 1022static int pscsi_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
1023 u32 sgl_nents, enum dma_data_direction data_direction)
1065{ 1024{
1066 struct se_cmd *cmd = task->task_se_cmd; 1025 struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr;
1067 struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr; 1026 struct pscsi_plugin_task *pt;
1068 struct pscsi_plugin_task *pt = PSCSI_TASK(task);
1069 struct request *req; 1027 struct request *req;
1070 struct bio *hbio; 1028 struct bio *hbio;
1071 int ret; 1029 int ret;
1072 1030
1073 target_get_task_cdb(task, pt->pscsi_cdb); 1031 /*
1032 * Dynamically alloc cdb space, since it may be larger than
1033 * TCM_MAX_COMMAND_SIZE
1034 */
1035 pt = kzalloc(sizeof(*pt) + scsi_command_size(cmd->t_task_cdb), GFP_KERNEL);
1036 if (!pt) {
1037 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1038 return -ENOMEM;
1039 }
1040 cmd->priv = pt;
1041
1042 memcpy(pt->pscsi_cdb, cmd->t_task_cdb,
1043 scsi_command_size(cmd->t_task_cdb));
1074 1044
1075 if (task->task_se_cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) { 1045 if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
1076 req = blk_get_request(pdv->pdv_sd->request_queue, 1046 req = blk_get_request(pdv->pdv_sd->request_queue,
1077 (task->task_data_direction == DMA_TO_DEVICE), 1047 (data_direction == DMA_TO_DEVICE),
1078 GFP_KERNEL); 1048 GFP_KERNEL);
1079 if (!req || IS_ERR(req)) { 1049 if (!req || IS_ERR(req)) {
1080 pr_err("PSCSI: blk_get_request() failed: %ld\n", 1050 pr_err("PSCSI: blk_get_request() failed: %ld\n",
1081 req ? IS_ERR(req) : -ENOMEM); 1051 req ? IS_ERR(req) : -ENOMEM);
1082 cmd->scsi_sense_reason = 1052 cmd->scsi_sense_reason =
1083 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1053 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1084 return -ENODEV; 1054 goto fail;
1085 } 1055 }
1086 } else { 1056 } else {
1087 BUG_ON(!task->task_size); 1057 BUG_ON(!cmd->data_length);
1088 1058
1089 /* 1059 ret = pscsi_map_sg(cmd, sgl, sgl_nents, data_direction, &hbio);
1090 * Setup the main struct request for the task->task_sg[] payload
1091 */
1092 ret = pscsi_map_sg(task, task->task_sg, &hbio);
1093 if (ret < 0) { 1060 if (ret < 0) {
1094 cmd->scsi_sense_reason = 1061 cmd->scsi_sense_reason =
1095 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1062 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1096 return ret; 1063 goto fail;
1097 } 1064 }
1098 1065
1099 req = blk_make_request(pdv->pdv_sd->request_queue, hbio, 1066 req = blk_make_request(pdv->pdv_sd->request_queue, hbio,
1100 GFP_KERNEL); 1067 GFP_KERNEL);
1101 if (IS_ERR(req)) { 1068 if (IS_ERR(req)) {
1102 pr_err("pSCSI: blk_make_request() failed\n"); 1069 pr_err("pSCSI: blk_make_request() failed\n");
1103 goto fail; 1070 goto fail_free_bio;
1104 } 1071 }
1105 } 1072 }
1106 1073
1107 req->cmd_type = REQ_TYPE_BLOCK_PC; 1074 req->cmd_type = REQ_TYPE_BLOCK_PC;
1108 req->end_io = pscsi_req_done; 1075 req->end_io = pscsi_req_done;
1109 req->end_io_data = task; 1076 req->end_io_data = cmd;
1110 req->cmd_len = scsi_command_size(pt->pscsi_cdb); 1077 req->cmd_len = scsi_command_size(pt->pscsi_cdb);
1111 req->cmd = &pt->pscsi_cdb[0]; 1078 req->cmd = &pt->pscsi_cdb[0];
1112 req->sense = &pt->pscsi_sense[0]; 1079 req->sense = &pt->pscsi_sense[0];
@@ -1118,12 +1085,12 @@ static int pscsi_do_task(struct se_task *task)
1118 req->retries = PS_RETRY; 1085 req->retries = PS_RETRY;
1119 1086
1120 blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, req, 1087 blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, req,
1121 (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG), 1088 (cmd->sam_task_attr == MSG_HEAD_TAG),
1122 pscsi_req_done); 1089 pscsi_req_done);
1123 1090
1124 return 0; 1091 return 0;
1125 1092
1126fail: 1093fail_free_bio:
1127 while (hbio) { 1094 while (hbio) {
1128 struct bio *bio = hbio; 1095 struct bio *bio = hbio;
1129 hbio = hbio->bi_next; 1096 hbio = hbio->bi_next;
@@ -1131,16 +1098,14 @@ fail:
1131 bio_endio(bio, 0); /* XXX: should be error */ 1098 bio_endio(bio, 0); /* XXX: should be error */
1132 } 1099 }
1133 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1100 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1101fail:
1102 kfree(pt);
1134 return -ENOMEM; 1103 return -ENOMEM;
1135} 1104}
1136 1105
1137/* pscsi_get_sense_buffer(): 1106static unsigned char *pscsi_get_sense_buffer(struct se_cmd *cmd)
1138 *
1139 *
1140 */
1141static unsigned char *pscsi_get_sense_buffer(struct se_task *task)
1142{ 1107{
1143 struct pscsi_plugin_task *pt = PSCSI_TASK(task); 1108 struct pscsi_plugin_task *pt = cmd->priv;
1144 1109
1145 return pt->pscsi_sense; 1110 return pt->pscsi_sense;
1146} 1111}
@@ -1180,48 +1145,36 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
1180 return 0; 1145 return 0;
1181} 1146}
1182 1147
1183/* pscsi_handle_SAM_STATUS_failures(): 1148static void pscsi_req_done(struct request *req, int uptodate)
1184 *
1185 *
1186 */
1187static inline void pscsi_process_SAM_status(
1188 struct se_task *task,
1189 struct pscsi_plugin_task *pt)
1190{ 1149{
1191 task->task_scsi_status = status_byte(pt->pscsi_result); 1150 struct se_cmd *cmd = req->end_io_data;
1192 if (task->task_scsi_status) { 1151 struct pscsi_plugin_task *pt = cmd->priv;
1193 task->task_scsi_status <<= 1; 1152
1194 pr_debug("PSCSI Status Byte exception at task: %p CDB:" 1153 pt->pscsi_result = req->errors;
1195 " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0], 1154 pt->pscsi_resid = req->resid_len;
1155
1156 cmd->scsi_status = status_byte(pt->pscsi_result) << 1;
1157 if (cmd->scsi_status) {
1158 pr_debug("PSCSI Status Byte exception at cmd: %p CDB:"
1159 " 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0],
1196 pt->pscsi_result); 1160 pt->pscsi_result);
1197 } 1161 }
1198 1162
1199 switch (host_byte(pt->pscsi_result)) { 1163 switch (host_byte(pt->pscsi_result)) {
1200 case DID_OK: 1164 case DID_OK:
1201 transport_complete_task(task, (!task->task_scsi_status)); 1165 target_complete_cmd(cmd, cmd->scsi_status);
1202 break; 1166 break;
1203 default: 1167 default:
1204 pr_debug("PSCSI Host Byte exception at task: %p CDB:" 1168 pr_debug("PSCSI Host Byte exception at cmd: %p CDB:"
1205 " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0], 1169 " 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0],
1206 pt->pscsi_result); 1170 pt->pscsi_result);
1207 task->task_scsi_status = SAM_STAT_CHECK_CONDITION; 1171 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1208 task->task_se_cmd->scsi_sense_reason = 1172 target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
1209 TCM_UNSUPPORTED_SCSI_OPCODE;
1210 transport_complete_task(task, 0);
1211 break; 1173 break;
1212 } 1174 }
1213}
1214 1175
1215static void pscsi_req_done(struct request *req, int uptodate)
1216{
1217 struct se_task *task = req->end_io_data;
1218 struct pscsi_plugin_task *pt = PSCSI_TASK(task);
1219
1220 pt->pscsi_result = req->errors;
1221 pt->pscsi_resid = req->resid_len;
1222
1223 pscsi_process_SAM_status(task, pt);
1224 __blk_put_request(req->q, req); 1176 __blk_put_request(req->q, req);
1177 kfree(pt);
1225} 1178}
1226 1179
1227static struct se_subsystem_api pscsi_template = { 1180static struct se_subsystem_api pscsi_template = {
@@ -1235,9 +1188,7 @@ static struct se_subsystem_api pscsi_template = {
1235 .create_virtdevice = pscsi_create_virtdevice, 1188 .create_virtdevice = pscsi_create_virtdevice,
1236 .free_device = pscsi_free_device, 1189 .free_device = pscsi_free_device,
1237 .transport_complete = pscsi_transport_complete, 1190 .transport_complete = pscsi_transport_complete,
1238 .alloc_task = pscsi_alloc_task, 1191 .execute_cmd = pscsi_execute_cmd,
1239 .do_task = pscsi_do_task,
1240 .free_task = pscsi_free_task,
1241 .check_configfs_dev_params = pscsi_check_configfs_dev_params, 1192 .check_configfs_dev_params = pscsi_check_configfs_dev_params,
1242 .set_configfs_dev_params = pscsi_set_configfs_dev_params, 1193 .set_configfs_dev_params = pscsi_set_configfs_dev_params,
1243 .show_configfs_dev_params = pscsi_show_configfs_dev_params, 1194 .show_configfs_dev_params = pscsi_show_configfs_dev_params,
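For comparison, a hypothetical minimal backend under the reworked template:
the single execute_cmd() callback replaces the alloc_task/do_task/free_task
triple, and completion is signalled with target_complete_cmd().

static int example_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
		u32 sgl_nents, enum dma_data_direction data_direction)
{
	/* a real backend moves cmd->data_length bytes through sgl here,
	 * honouring data_direction */
	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

static struct se_subsystem_api example_template = {
	.name			= "example",
	.execute_cmd		= example_execute_cmd,
	/* attach_hba, create_virtdevice, etc. unchanged in shape */
};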
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
index 43f1c419e8e5..bc1e5e11eca0 100644
--- a/drivers/target/target_core_pscsi.h
+++ b/drivers/target/target_core_pscsi.h
@@ -22,7 +22,6 @@
22#include <linux/kobject.h> 22#include <linux/kobject.h>
23 23
24struct pscsi_plugin_task { 24struct pscsi_plugin_task {
25 struct se_task pscsi_task;
26 unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE]; 25 unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE];
27 int pscsi_direction; 26 int pscsi_direction;
28 int pscsi_result; 27 int pscsi_result;
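With the embedded se_task removed from pscsi_plugin_task, per-command
backend state travels in cmd->priv instead. A reduced sketch of the
lifecycle visible in the pscsi hunks above, with hypothetical names:

static int example_alloc_state(struct se_cmd *cmd)
{
	struct pscsi_plugin_task *pt;

	/* the CDB may exceed TCM_MAX_COMMAND_SIZE, so size it dynamically */
	pt = kzalloc(sizeof(*pt) + scsi_command_size(cmd->t_task_cdb),
			GFP_KERNEL);
	if (!pt)
		return -ENOMEM;
	cmd->priv = pt;		/* read back in the completion path */
	return 0;
}

static void example_free_state(struct se_cmd *cmd)
{
	kfree(cmd->priv);	/* takes over from pscsi_free_task() */
}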
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 8b68f7b82631..d0ceb873c0e5 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -64,9 +64,6 @@ static int rd_attach_hba(struct se_hba *hba, u32 host_id)
64 pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on" 64 pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
65 " Generic Target Core Stack %s\n", hba->hba_id, 65 " Generic Target Core Stack %s\n", hba->hba_id,
66 RD_HBA_VERSION, TARGET_CORE_MOD_VERSION); 66 RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
67 pr_debug("CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
68 " MaxSectors: %u\n", hba->hba_id,
69 rd_host->rd_host_id, RD_MAX_SECTORS);
70 67
71 return 0; 68 return 0;
72} 69}
@@ -199,10 +196,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
199 return 0; 196 return 0;
200} 197}
201 198
202static void *rd_allocate_virtdevice( 199static void *rd_allocate_virtdevice(struct se_hba *hba, const char *name)
203 struct se_hba *hba,
204 const char *name,
205 int rd_direct)
206{ 200{
207 struct rd_dev *rd_dev; 201 struct rd_dev *rd_dev;
208 struct rd_host *rd_host = hba->hba_ptr; 202 struct rd_host *rd_host = hba->hba_ptr;
@@ -214,25 +208,12 @@ static void *rd_allocate_virtdevice(
214 } 208 }
215 209
216 rd_dev->rd_host = rd_host; 210 rd_dev->rd_host = rd_host;
217 rd_dev->rd_direct = rd_direct;
218 211
219 return rd_dev; 212 return rd_dev;
220} 213}
221 214
222static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name) 215static struct se_device *rd_create_virtdevice(struct se_hba *hba,
223{ 216 struct se_subsystem_dev *se_dev, void *p)
224 return rd_allocate_virtdevice(hba, name, 0);
225}
226
227/* rd_create_virtdevice():
228 *
229 *
230 */
231static struct se_device *rd_create_virtdevice(
232 struct se_hba *hba,
233 struct se_subsystem_dev *se_dev,
234 void *p,
235 int rd_direct)
236{ 217{
237 struct se_device *dev; 218 struct se_device *dev;
238 struct se_dev_limits dev_limits; 219 struct se_dev_limits dev_limits;
@@ -247,13 +228,12 @@ static struct se_device *rd_create_virtdevice(
247 if (ret < 0) 228 if (ret < 0)
248 goto fail; 229 goto fail;
249 230
250 snprintf(prod, 16, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP"); 231 snprintf(prod, 16, "RAMDISK-MCP");
251 snprintf(rev, 4, "%s", (rd_dev->rd_direct) ? RD_DR_VERSION : 232 snprintf(rev, 4, "%s", RD_MCP_VERSION);
252 RD_MCP_VERSION);
253 233
254 dev_limits.limits.logical_block_size = RD_BLOCKSIZE; 234 dev_limits.limits.logical_block_size = RD_BLOCKSIZE;
255 dev_limits.limits.max_hw_sectors = RD_MAX_SECTORS; 235 dev_limits.limits.max_hw_sectors = UINT_MAX;
256 dev_limits.limits.max_sectors = RD_MAX_SECTORS; 236 dev_limits.limits.max_sectors = UINT_MAX;
257 dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH; 237 dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
258 dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH; 238 dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;
259 239
@@ -264,12 +244,10 @@ static struct se_device *rd_create_virtdevice(
264 goto fail; 244 goto fail;
265 245
266 rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++; 246 rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
267 rd_dev->rd_queue_depth = dev->queue_depth;
268 247
269 pr_debug("CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of" 248 pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of"
270 " %u pages in %u tables, %lu total bytes\n", 249 " %u pages in %u tables, %lu total bytes\n",
271 rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" : 250 rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count,
272 "DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count,
273 rd_dev->sg_table_count, 251 rd_dev->sg_table_count,
274 (unsigned long)(rd_dev->rd_page_count * PAGE_SIZE)); 252 (unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));
275 253
@@ -280,18 +258,6 @@ fail:
280 return ERR_PTR(ret); 258 return ERR_PTR(ret);
281} 259}
282 260
283static struct se_device *rd_MEMCPY_create_virtdevice(
284 struct se_hba *hba,
285 struct se_subsystem_dev *se_dev,
286 void *p)
287{
288 return rd_create_virtdevice(hba, se_dev, p, 0);
289}
290
291/* rd_free_device(): (Part of se_subsystem_api_t template)
292 *
293 *
294 */
295static void rd_free_device(void *p) 261static void rd_free_device(void *p)
296{ 262{
297 struct rd_dev *rd_dev = p; 263 struct rd_dev *rd_dev = p;
@@ -300,29 +266,6 @@ static void rd_free_device(void *p)
300 kfree(rd_dev); 266 kfree(rd_dev);
301} 267}
302 268
303static inline struct rd_request *RD_REQ(struct se_task *task)
304{
305 return container_of(task, struct rd_request, rd_task);
306}
307
308static struct se_task *
309rd_alloc_task(unsigned char *cdb)
310{
311 struct rd_request *rd_req;
312
313 rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL);
314 if (!rd_req) {
315 pr_err("Unable to allocate struct rd_request\n");
316 return NULL;
317 }
318
319 return &rd_req->rd_task;
320}
321
322/* rd_get_sg_table():
323 *
324 *
325 */
326static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page) 269static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
327{ 270{
328 u32 i; 271 u32 i;
@@ -341,31 +284,41 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
341 return NULL; 284 return NULL;
342} 285}
343 286
344static int rd_MEMCPY(struct rd_request *req, u32 read_rd) 287static int rd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
288 u32 sgl_nents, enum dma_data_direction data_direction)
345{ 289{
346 struct se_task *task = &req->rd_task; 290 struct se_device *se_dev = cmd->se_dev;
347 struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr; 291 struct rd_dev *dev = se_dev->dev_ptr;
348 struct rd_dev_sg_table *table; 292 struct rd_dev_sg_table *table;
349 struct scatterlist *rd_sg; 293 struct scatterlist *rd_sg;
350 struct sg_mapping_iter m; 294 struct sg_mapping_iter m;
351 u32 rd_offset = req->rd_offset; 295 u32 rd_offset;
296 u32 rd_size;
297 u32 rd_page;
352 u32 src_len; 298 u32 src_len;
299 u64 tmp;
300
301 tmp = cmd->t_task_lba * se_dev->se_sub_dev->se_dev_attrib.block_size;
302 rd_offset = do_div(tmp, PAGE_SIZE);
303 rd_page = tmp;
304 rd_size = cmd->data_length;
353 305
354 table = rd_get_sg_table(dev, req->rd_page); 306 table = rd_get_sg_table(dev, rd_page);
355 if (!table) 307 if (!table)
356 return -EINVAL; 308 return -EINVAL;
357 309
358 rd_sg = &table->sg_table[req->rd_page - table->page_start_offset]; 310 rd_sg = &table->sg_table[rd_page - table->page_start_offset];
359 311
360 pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n", 312 pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
361 dev->rd_dev_id, read_rd ? "Read" : "Write", 313 dev->rd_dev_id,
362 task->task_lba, req->rd_size, req->rd_page, 314 data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
363 rd_offset); 315 cmd->t_task_lba, rd_size, rd_page, rd_offset);
364 316
365 src_len = PAGE_SIZE - rd_offset; 317 src_len = PAGE_SIZE - rd_offset;
366 sg_miter_start(&m, task->task_sg, task->task_sg_nents, 318 sg_miter_start(&m, sgl, sgl_nents,
367 read_rd ? SG_MITER_TO_SG : SG_MITER_FROM_SG); 319 data_direction == DMA_FROM_DEVICE ?
368 while (req->rd_size) { 320 SG_MITER_TO_SG : SG_MITER_FROM_SG);
321 while (rd_size) {
369 u32 len; 322 u32 len;
370 void *rd_addr; 323 void *rd_addr;
371 324
@@ -375,13 +328,13 @@ static int rd_MEMCPY(struct rd_request *req, u32 read_rd)
375 328
376 rd_addr = sg_virt(rd_sg) + rd_offset; 329 rd_addr = sg_virt(rd_sg) + rd_offset;
377 330
378 if (read_rd) 331 if (data_direction == DMA_FROM_DEVICE)
379 memcpy(m.addr, rd_addr, len); 332 memcpy(m.addr, rd_addr, len);
380 else 333 else
381 memcpy(rd_addr, m.addr, len); 334 memcpy(rd_addr, m.addr, len);
382 335
383 req->rd_size -= len; 336 rd_size -= len;
384 if (!req->rd_size) 337 if (!rd_size)
385 continue; 338 continue;
386 339
387 src_len -= len; 340 src_len -= len;
@@ -391,15 +344,15 @@ static int rd_MEMCPY(struct rd_request *req, u32 read_rd)
391 } 344 }
392 345
393 /* rd page completed, next one please */ 346 /* rd page completed, next one please */
394 req->rd_page++; 347 rd_page++;
395 rd_offset = 0; 348 rd_offset = 0;
396 src_len = PAGE_SIZE; 349 src_len = PAGE_SIZE;
397 if (req->rd_page <= table->page_end_offset) { 350 if (rd_page <= table->page_end_offset) {
398 rd_sg++; 351 rd_sg++;
399 continue; 352 continue;
400 } 353 }
401 354
402 table = rd_get_sg_table(dev, req->rd_page); 355 table = rd_get_sg_table(dev, rd_page);
403 if (!table) { 356 if (!table) {
404 sg_miter_stop(&m); 357 sg_miter_stop(&m);
405 return -EINVAL; 358 return -EINVAL;
@@ -409,43 +362,11 @@ static int rd_MEMCPY(struct rd_request *req, u32 read_rd)
409 rd_sg = table->sg_table; 362 rd_sg = table->sg_table;
410 } 363 }
411 sg_miter_stop(&m); 364 sg_miter_stop(&m);
412 return 0;
413}
414 365
415/* rd_MEMCPY_do_task(): (Part of se_subsystem_api_t template) 366 target_complete_cmd(cmd, SAM_STAT_GOOD);
416 *
417 *
418 */
419static int rd_MEMCPY_do_task(struct se_task *task)
420{
421 struct se_device *dev = task->task_se_cmd->se_dev;
422 struct rd_request *req = RD_REQ(task);
423 u64 tmp;
424 int ret;
425
426 tmp = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
427 req->rd_offset = do_div(tmp, PAGE_SIZE);
428 req->rd_page = tmp;
429 req->rd_size = task->task_size;
430
431 ret = rd_MEMCPY(req, task->task_data_direction == DMA_FROM_DEVICE);
432 if (ret != 0)
433 return ret;
434
435 task->task_scsi_status = GOOD;
436 transport_complete_task(task, 1);
437 return 0; 367 return 0;
438} 368}
439 369
440/* rd_free_task(): (Part of se_subsystem_api_t template)
441 *
442 *
443 */
444static void rd_free_task(struct se_task *task)
445{
446 kfree(RD_REQ(task));
447}
448
449enum { 370enum {
450 Opt_rd_pages, Opt_err 371 Opt_rd_pages, Opt_err
451}; 372};
@@ -512,9 +433,8 @@ static ssize_t rd_show_configfs_dev_params(
512 char *b) 433 char *b)
513{ 434{
514 struct rd_dev *rd_dev = se_dev->se_dev_su_ptr; 435 struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
515 ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: %s\n", 436 ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n",
516 rd_dev->rd_dev_id, (rd_dev->rd_direct) ? 437 rd_dev->rd_dev_id);
517 "rd_direct" : "rd_mcp");
518 bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu" 438 bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
519 " SG_table_count: %u\n", rd_dev->rd_page_count, 439 " SG_table_count: %u\n", rd_dev->rd_page_count,
520 PAGE_SIZE, rd_dev->sg_table_count); 440 PAGE_SIZE, rd_dev->sg_table_count);
@@ -545,12 +465,10 @@ static struct se_subsystem_api rd_mcp_template = {
545 .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV, 465 .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
546 .attach_hba = rd_attach_hba, 466 .attach_hba = rd_attach_hba,
547 .detach_hba = rd_detach_hba, 467 .detach_hba = rd_detach_hba,
548 .allocate_virtdevice = rd_MEMCPY_allocate_virtdevice, 468 .allocate_virtdevice = rd_allocate_virtdevice,
549 .create_virtdevice = rd_MEMCPY_create_virtdevice, 469 .create_virtdevice = rd_create_virtdevice,
550 .free_device = rd_free_device, 470 .free_device = rd_free_device,
551 .alloc_task = rd_alloc_task, 471 .execute_cmd = rd_execute_cmd,
552 .do_task = rd_MEMCPY_do_task,
553 .free_task = rd_free_task,
554 .check_configfs_dev_params = rd_check_configfs_dev_params, 472 .check_configfs_dev_params = rd_check_configfs_dev_params,
555 .set_configfs_dev_params = rd_set_configfs_dev_params, 473 .set_configfs_dev_params = rd_set_configfs_dev_params,
556 .show_configfs_dev_params = rd_show_configfs_dev_params, 474 .show_configfs_dev_params = rd_show_configfs_dev_params,
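The rd_request fields (rd_page, rd_offset, rd_size) are now derived on the
fly at the top of rd_execute_cmd(). A worked example of that do_div()
translation, assuming 512-byte blocks and 4 KiB pages:

	u64 tmp = cmd->t_task_lba * se_dev->se_sub_dev->se_dev_attrib.block_size;
	u32 rd_offset = do_div(tmp, PAGE_SIZE);	/* remainder: offset in page */
	u32 rd_page = tmp;			/* do_div leaves the quotient in tmp */

	/* e.g. LBA 9: 9 * 512 = 4608 bytes, so rd_page = 1, rd_offset = 512 */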
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h
index 784e56a04100..21458125fe51 100644
--- a/drivers/target/target_core_rd.h
+++ b/drivers/target/target_core_rd.h
@@ -2,7 +2,6 @@
2#define TARGET_CORE_RD_H 2#define TARGET_CORE_RD_H
3 3
4#define RD_HBA_VERSION "v4.0" 4#define RD_HBA_VERSION "v4.0"
5#define RD_DR_VERSION "4.0"
6#define RD_MCP_VERSION "4.0" 5#define RD_MCP_VERSION "4.0"
7 6
8/* Largest piece of memory kmalloc can allocate */ 7/* Largest piece of memory kmalloc can allocate */
@@ -10,28 +9,11 @@
10#define RD_DEVICE_QUEUE_DEPTH 32 9#define RD_DEVICE_QUEUE_DEPTH 32
11#define RD_MAX_DEVICE_QUEUE_DEPTH 128 10#define RD_MAX_DEVICE_QUEUE_DEPTH 128
12#define RD_BLOCKSIZE 512 11#define RD_BLOCKSIZE 512
13#define RD_MAX_SECTORS 1024
14 12
15/* Used in target_core_init_configfs() for virtual LUN 0 access */ 13/* Used in target_core_init_configfs() for virtual LUN 0 access */
16int __init rd_module_init(void); 14int __init rd_module_init(void);
17void rd_module_exit(void); 15void rd_module_exit(void);
18 16
19#define RRF_EMULATE_CDB 0x01
20#define RRF_GOT_LBA 0x02
21
22struct rd_request {
23 struct se_task rd_task;
24
25 /* Offset from start of page */
26 u32 rd_offset;
27 /* Starting page in Ramdisk for request */
28 u32 rd_page;
29 /* Total number of pages needed for request */
30 u32 rd_page_count;
31 /* Scatterlist count */
32 u32 rd_size;
33} ____cacheline_aligned;
34
35struct rd_dev_sg_table { 17struct rd_dev_sg_table {
36 u32 page_start_offset; 18 u32 page_start_offset;
37 u32 page_end_offset; 19 u32 page_end_offset;
@@ -42,7 +24,6 @@ struct rd_dev_sg_table {
42#define RDF_HAS_PAGE_COUNT 0x01 24#define RDF_HAS_PAGE_COUNT 0x01
43 25
44struct rd_dev { 26struct rd_dev {
45 int rd_direct;
46 u32 rd_flags; 27 u32 rd_flags;
47 /* Unique Ramdisk Device ID in Ramdisk HBA */ 28 /* Unique Ramdisk Device ID in Ramdisk HBA */
48 u32 rd_dev_id; 29 u32 rd_dev_id;
@@ -50,7 +31,6 @@ struct rd_dev {
50 u32 rd_page_count; 31 u32 rd_page_count;
51 /* Number of SG tables in sg_table_array */ 32 /* Number of SG tables in sg_table_array */
52 u32 sg_table_count; 33 u32 sg_table_count;
53 u32 rd_queue_depth;
54 /* Array of rd_dev_sg_table_t containing scatterlists */ 34 /* Array of rd_dev_sg_table_t containing scatterlists */
55 struct rd_dev_sg_table *sg_table_array; 35 struct rd_dev_sg_table *sg_table_array;
56 /* Ramdisk HBA device is connected to */ 36 /* Ramdisk HBA device is connected to */
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index f015839aef89..84caf1bed9a3 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -244,7 +244,7 @@ static void core_tmr_drain_tmr_list(
244 } 244 }
245} 245}
246 246
247static void core_tmr_drain_task_list( 247static void core_tmr_drain_state_list(
248 struct se_device *dev, 248 struct se_device *dev,
249 struct se_cmd *prout_cmd, 249 struct se_cmd *prout_cmd,
250 struct se_node_acl *tmr_nacl, 250 struct se_node_acl *tmr_nacl,
@@ -252,12 +252,13 @@ static void core_tmr_drain_task_list(
252 struct list_head *preempt_and_abort_list) 252 struct list_head *preempt_and_abort_list)
253{ 253{
254 LIST_HEAD(drain_task_list); 254 LIST_HEAD(drain_task_list);
255 struct se_cmd *cmd; 255 struct se_cmd *cmd, *next;
256 struct se_task *task, *task_tmp;
257 unsigned long flags; 256 unsigned long flags;
258 int fe_count; 257 int fe_count;
258
259 /* 259 /*
260 * Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status. 260 * Complete outstanding commands with TASK_ABORTED SAM status.
261 *
261 * This is following sam4r17, section 5.6 Aborting commands, Table 38 262 * This is following sam4r17, section 5.6 Aborting commands, Table 38
262 * for TMR LUN_RESET: 263 * for TMR LUN_RESET:
263 * 264 *
@@ -278,56 +279,43 @@ static void core_tmr_drain_task_list(
278 * in the Control Mode Page. 279 * in the Control Mode Page.
279 */ 280 */
280 spin_lock_irqsave(&dev->execute_task_lock, flags); 281 spin_lock_irqsave(&dev->execute_task_lock, flags);
281 list_for_each_entry_safe(task, task_tmp, &dev->state_task_list, 282 list_for_each_entry_safe(cmd, next, &dev->state_list, state_list) {
282 t_state_list) {
283 if (!task->task_se_cmd) {
284 pr_err("task->task_se_cmd is NULL!\n");
285 continue;
286 }
287 cmd = task->task_se_cmd;
288
289 /* 283 /*
290 * For PREEMPT_AND_ABORT usage, only process commands 284 * For PREEMPT_AND_ABORT usage, only process commands
291 * with a matching reservation key. 285 * with a matching reservation key.
292 */ 286 */
293 if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd)) 287 if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
294 continue; 288 continue;
289
295 /* 290 /*
296 * Not aborting PROUT PREEMPT_AND_ABORT CDB.. 291 * Not aborting PROUT PREEMPT_AND_ABORT CDB..
297 */ 292 */
298 if (prout_cmd == cmd) 293 if (prout_cmd == cmd)
299 continue; 294 continue;
300 295
301 list_move_tail(&task->t_state_list, &drain_task_list); 296 list_move_tail(&cmd->state_list, &drain_task_list);
302 task->t_state_active = false; 297 cmd->state_active = false;
303 /* 298
304 * Remove from task execute list before processing drain_task_list 299 if (!list_empty(&cmd->execute_list))
305 */ 300 __target_remove_from_execute_list(cmd);
306 if (!list_empty(&task->t_execute_list))
307 __transport_remove_task_from_execute_queue(task, dev);
308 } 301 }
309 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 302 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
310 303
311 while (!list_empty(&drain_task_list)) { 304 while (!list_empty(&drain_task_list)) {
312 task = list_entry(drain_task_list.next, struct se_task, t_state_list); 305 cmd = list_entry(drain_task_list.next, struct se_cmd, state_list);
313 list_del(&task->t_state_list); 306 list_del(&cmd->state_list);
314 cmd = task->task_se_cmd;
315 307
316 pr_debug("LUN_RESET: %s cmd: %p task: %p" 308 pr_debug("LUN_RESET: %s cmd: %p"
317 " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state: %d" 309 " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state: %d"
318 "cdb: 0x%02x\n", 310 "cdb: 0x%02x\n",
319 (preempt_and_abort_list) ? "Preempt" : "", cmd, task, 311 (preempt_and_abort_list) ? "Preempt" : "", cmd,
320 cmd->se_tfo->get_task_tag(cmd), 0, 312 cmd->se_tfo->get_task_tag(cmd), 0,
321 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, 313 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
322 cmd->t_task_cdb[0]); 314 cmd->t_task_cdb[0]);
323 pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx" 315 pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
324 " t_task_cdbs: %d t_task_cdbs_left: %d" 316 " -- CMD_T_ACTIVE: %d"
325 " t_task_cdbs_sent: %d -- CMD_T_ACTIVE: %d"
326 " CMD_T_STOP: %d CMD_T_SENT: %d\n", 317 " CMD_T_STOP: %d CMD_T_SENT: %d\n",
327 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key, 318 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
328 cmd->t_task_list_num,
329 atomic_read(&cmd->t_task_cdbs_left),
330 atomic_read(&cmd->t_task_cdbs_sent),
331 (cmd->transport_state & CMD_T_ACTIVE) != 0, 319 (cmd->transport_state & CMD_T_ACTIVE) != 0,
332 (cmd->transport_state & CMD_T_STOP) != 0, 320 (cmd->transport_state & CMD_T_STOP) != 0,
333 (cmd->transport_state & CMD_T_SENT) != 0); 321 (cmd->transport_state & CMD_T_SENT) != 0);
@@ -343,20 +331,13 @@ static void core_tmr_drain_task_list(
343 cancel_work_sync(&cmd->work); 331 cancel_work_sync(&cmd->work);
344 332
345 spin_lock_irqsave(&cmd->t_state_lock, flags); 333 spin_lock_irqsave(&cmd->t_state_lock, flags);
346 target_stop_task(task, &flags); 334 target_stop_cmd(cmd, &flags);
347 335
348 if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) {
349 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
350 pr_debug("LUN_RESET: Skipping task: %p, dev: %p for"
351 " t_task_cdbs_ex_left: %d\n", task, dev,
352 atomic_read(&cmd->t_task_cdbs_ex_left));
353 continue;
354 }
355 fe_count = atomic_read(&cmd->t_fe_count); 336 fe_count = atomic_read(&cmd->t_fe_count);
356 337
357 if (!(cmd->transport_state & CMD_T_ACTIVE)) { 338 if (!(cmd->transport_state & CMD_T_ACTIVE)) {
358 pr_debug("LUN_RESET: got CMD_T_ACTIVE for" 339 pr_debug("LUN_RESET: got CMD_T_ACTIVE for"
359 " task: %p, t_fe_count: %d dev: %p\n", task, 340 " cdb: %p, t_fe_count: %d dev: %p\n", cmd,
360 fe_count, dev); 341 fe_count, dev);
361 cmd->transport_state |= CMD_T_ABORTED; 342 cmd->transport_state |= CMD_T_ABORTED;
362 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 343 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@@ -364,8 +345,8 @@ static void core_tmr_drain_task_list(
364 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); 345 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
365 continue; 346 continue;
366 } 347 }
367 pr_debug("LUN_RESET: Got !CMD_T_ACTIVE for task: %p," 348 pr_debug("LUN_RESET: Got !CMD_T_ACTIVE for cdb: %p,"
368 " t_fe_count: %d dev: %p\n", task, fe_count, dev); 349 " t_fe_count: %d dev: %p\n", cmd, fe_count, dev);
369 cmd->transport_state |= CMD_T_ABORTED; 350 cmd->transport_state |= CMD_T_ABORTED;
370 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 351 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
371 352
@@ -384,13 +365,11 @@ static void core_tmr_drain_cmd_list(
384 struct se_queue_obj *qobj = &dev->dev_queue_obj; 365 struct se_queue_obj *qobj = &dev->dev_queue_obj;
385 struct se_cmd *cmd, *tcmd; 366 struct se_cmd *cmd, *tcmd;
386 unsigned long flags; 367 unsigned long flags;
368
387 /* 369 /*
388 * Release all commands remaining in the struct se_device cmd queue. 370 * Release all commands remaining in the per-device command queue.
389 * 371 *
390 * This follows the same logic as above for the struct se_device 372 * This follows the same logic as above for the state list.
391 * struct se_task state list, where commands are returned with
392 * TASK_ABORTED status, if there is an outstanding $FABRIC_MOD
393 * reference, otherwise the struct se_cmd is released.
394 */ 373 */
395 spin_lock_irqsave(&qobj->cmd_queue_lock, flags); 374 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
396 list_for_each_entry_safe(cmd, tcmd, &qobj->qobj_list, se_queue_node) { 375 list_for_each_entry_safe(cmd, tcmd, &qobj->qobj_list, se_queue_node) {
@@ -466,7 +445,7 @@ int core_tmr_lun_reset(
466 dev->transport->name, tas); 445 dev->transport->name, tas);
467 446
468 core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list); 447 core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
469 core_tmr_drain_task_list(dev, prout_cmd, tmr_nacl, tas, 448 core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas,
470 preempt_and_abort_list); 449 preempt_and_abort_list);
471 core_tmr_drain_cmd_list(dev, prout_cmd, tmr_nacl, tas, 450 core_tmr_drain_cmd_list(dev, prout_cmd, tmr_nacl, tas,
472 preempt_and_abort_list); 451 preempt_and_abort_list);
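core_tmr_drain_state_list() uses the common two-phase drain: matching
commands are moved to a private list while execute_task_lock is held, then
aborted with the lock dropped. Reduced to its skeleton, names illustrative:

static void example_drain(struct se_device *dev)
{
	LIST_HEAD(drain_list);
	struct se_cmd *cmd, *next;
	unsigned long flags;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	list_for_each_entry_safe(cmd, next, &dev->state_list, state_list) {
		list_move_tail(&cmd->state_list, &drain_list);
		cmd->state_active = false;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);

	while (!list_empty(&drain_list)) {
		cmd = list_entry(drain_list.next, struct se_cmd, state_list);
		list_del(&cmd->state_list);
		/* set CMD_T_ABORTED and complete cmd, without the lock */
	}
}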
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index e320ec24aa1b..8bd58e284185 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -153,10 +153,7 @@ void core_tpg_add_node_to_devs(
153 * demo_mode_write_protect is ON, or READ_ONLY; 153 * demo_mode_write_protect is ON, or READ_ONLY;
154 */ 154 */
155 if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) { 155 if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
156 if (dev->dev_flags & DF_READ_ONLY) 156 lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
157 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
158 else
159 lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
160 } else { 157 } else {
161 /* 158 /*
162 * Allow only optical drives to issue R/W in default RO 159 * Allow only optical drives to issue R/W in default RO
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 443704f84fd5..b05fdc0c05d3 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -72,7 +72,6 @@ static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *);
72static void transport_complete_task_attr(struct se_cmd *cmd); 72static void transport_complete_task_attr(struct se_cmd *cmd);
73static void transport_handle_queue_full(struct se_cmd *cmd, 73static void transport_handle_queue_full(struct se_cmd *cmd,
74 struct se_device *dev); 74 struct se_device *dev);
75static void transport_free_dev_tasks(struct se_cmd *cmd);
76static int transport_generic_get_mem(struct se_cmd *cmd); 75static int transport_generic_get_mem(struct se_cmd *cmd);
77static void transport_put_cmd(struct se_cmd *cmd); 76static void transport_put_cmd(struct se_cmd *cmd);
78static void transport_remove_cmd_from_queue(struct se_cmd *cmd); 77static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
@@ -331,9 +330,9 @@ void target_get_session(struct se_session *se_sess)
331} 330}
332EXPORT_SYMBOL(target_get_session); 331EXPORT_SYMBOL(target_get_session);
333 332
334int target_put_session(struct se_session *se_sess) 333void target_put_session(struct se_session *se_sess)
335{ 334{
336 return kref_put(&se_sess->sess_kref, target_release_session); 335 kref_put(&se_sess->sess_kref, target_release_session);
337} 336}
338EXPORT_SYMBOL(target_put_session); 337EXPORT_SYMBOL(target_put_session);
339 338
@@ -444,31 +443,23 @@ EXPORT_SYMBOL(transport_deregister_session);
444/* 443/*
445 * Called with cmd->t_state_lock held. 444 * Called with cmd->t_state_lock held.
446 */ 445 */
447static void transport_all_task_dev_remove_state(struct se_cmd *cmd) 446static void target_remove_from_state_list(struct se_cmd *cmd)
448{ 447{
449 struct se_device *dev = cmd->se_dev; 448 struct se_device *dev = cmd->se_dev;
450 struct se_task *task;
451 unsigned long flags; 449 unsigned long flags;
452 450
453 if (!dev) 451 if (!dev)
454 return; 452 return;
455 453
456 list_for_each_entry(task, &cmd->t_task_list, t_list) { 454 if (cmd->transport_state & CMD_T_BUSY)
457 if (task->task_flags & TF_ACTIVE) 455 return;
458 continue;
459
460 spin_lock_irqsave(&dev->execute_task_lock, flags);
461 if (task->t_state_active) {
462 pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
463 cmd->se_tfo->get_task_tag(cmd), dev, task);
464 456
465 list_del(&task->t_state_list); 457 spin_lock_irqsave(&dev->execute_task_lock, flags);
466 atomic_dec(&cmd->t_task_cdbs_ex_left); 458 if (cmd->state_active) {
467 task->t_state_active = false; 459 list_del(&cmd->state_list);
468 } 460 cmd->state_active = false;
469 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
470 } 461 }
471 462 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
472} 463}
473 464
474/* transport_cmd_check_stop(): 465/* transport_cmd_check_stop():
@@ -497,7 +488,7 @@ static int transport_cmd_check_stop(
497 488
498 cmd->transport_state &= ~CMD_T_ACTIVE; 489 cmd->transport_state &= ~CMD_T_ACTIVE;
499 if (transport_off == 2) 490 if (transport_off == 2)
500 transport_all_task_dev_remove_state(cmd); 491 target_remove_from_state_list(cmd);
501 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 492 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
502 493
503 complete(&cmd->transport_lun_stop_comp); 494 complete(&cmd->transport_lun_stop_comp);
@@ -513,7 +504,7 @@ static int transport_cmd_check_stop(
513 cmd->se_tfo->get_task_tag(cmd)); 504 cmd->se_tfo->get_task_tag(cmd));
514 505
515 if (transport_off == 2) 506 if (transport_off == 2)
516 transport_all_task_dev_remove_state(cmd); 507 target_remove_from_state_list(cmd);
517 508
518 /* 509 /*
519 * Clear struct se_cmd->se_lun before the transport_off == 2 handoff 510 * Clear struct se_cmd->se_lun before the transport_off == 2 handoff
@@ -529,7 +520,7 @@ static int transport_cmd_check_stop(
529 if (transport_off) { 520 if (transport_off) {
530 cmd->transport_state &= ~CMD_T_ACTIVE; 521 cmd->transport_state &= ~CMD_T_ACTIVE;
531 if (transport_off == 2) { 522 if (transport_off == 2) {
532 transport_all_task_dev_remove_state(cmd); 523 target_remove_from_state_list(cmd);
533 /* 524 /*
534 * Clear struct se_cmd->se_lun before the transport_off == 2 525 * Clear struct se_cmd->se_lun before the transport_off == 2
535 * handoff to fabric module. 526 * handoff to fabric module.
@@ -577,7 +568,7 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
577 spin_lock_irqsave(&cmd->t_state_lock, flags); 568 spin_lock_irqsave(&cmd->t_state_lock, flags);
578 if (cmd->transport_state & CMD_T_DEV_ACTIVE) { 569 if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
579 cmd->transport_state &= ~CMD_T_DEV_ACTIVE; 570 cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
580 transport_all_task_dev_remove_state(cmd); 571 target_remove_from_state_list(cmd);
581 } 572 }
582 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 573 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
583 574
@@ -669,29 +660,6 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd)
669 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); 660 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
670} 661}
671 662
672/*
673 * Completion function used by TCM subsystem plugins (such as FILEIO)
674 * for queueing up response from struct se_subsystem_api->do_task()
675 */
676void transport_complete_sync_cache(struct se_cmd *cmd, int good)
677{
678 struct se_task *task = list_entry(cmd->t_task_list.next,
679 struct se_task, t_list);
680
681 if (good) {
682 cmd->scsi_status = SAM_STAT_GOOD;
683 task->task_scsi_status = GOOD;
684 } else {
685 task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
686 task->task_se_cmd->scsi_sense_reason =
687 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
688
689 }
690
691 transport_complete_task(task, good);
692}
693EXPORT_SYMBOL(transport_complete_sync_cache);
694
695static void target_complete_failure_work(struct work_struct *work) 663static void target_complete_failure_work(struct work_struct *work)
696{ 664{
697 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 665 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
@@ -699,40 +667,32 @@ static void target_complete_failure_work(struct work_struct *work)
699 transport_generic_request_failure(cmd); 667 transport_generic_request_failure(cmd);
700} 668}
701 669
702/* transport_complete_task(): 670void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
703 *
704 * Called from interrupt and non interrupt context depending
705 * on the transport plugin.
706 */
707void transport_complete_task(struct se_task *task, int success)
708{ 671{
709 struct se_cmd *cmd = task->task_se_cmd;
710 struct se_device *dev = cmd->se_dev; 672 struct se_device *dev = cmd->se_dev;
673 int success = scsi_status == GOOD;
711 unsigned long flags; 674 unsigned long flags;
712 675
676 cmd->scsi_status = scsi_status;
677
678
713 spin_lock_irqsave(&cmd->t_state_lock, flags); 679 spin_lock_irqsave(&cmd->t_state_lock, flags);
714 task->task_flags &= ~TF_ACTIVE; 680 cmd->transport_state &= ~CMD_T_BUSY;
715 681
716 /*
717 * See if any sense data exists, if so set the TASK_SENSE flag.
718 * Also check for any other post completion work that needs to be
719 * done by the plugins.
720 */
721 if (dev && dev->transport->transport_complete) { 682 if (dev && dev->transport->transport_complete) {
722 if (dev->transport->transport_complete(task) != 0) { 683 if (dev->transport->transport_complete(cmd,
684 cmd->t_data_sg) != 0) {
723 cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE; 685 cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
724 task->task_flags |= TF_HAS_SENSE;
725 success = 1; 686 success = 1;
726 } 687 }
727 } 688 }
728 689
729 /* 690 /*
730 * See if we are waiting for outstanding struct se_task 691 * See if we are waiting to complete for an exception condition.
731 * to complete for an exception condition
732 */ 692 */
733 if (task->task_flags & TF_REQUEST_STOP) { 693 if (cmd->transport_state & CMD_T_REQUEST_STOP) {
734 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 694 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
735 complete(&task->task_stop_comp); 695 complete(&cmd->task_stop_comp);
736 return; 696 return;
737 } 697 }
738 698
@@ -740,15 +700,6 @@ void transport_complete_task(struct se_task *task, int success)
740 cmd->transport_state |= CMD_T_FAILED; 700 cmd->transport_state |= CMD_T_FAILED;
741 701
742 /* 702 /*
743 * Decrement the outstanding t_task_cdbs_left count. The last
744 * struct se_task from struct se_cmd will complete itself into the
745 * device queue depending upon int success.
746 */
747 if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
748 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
749 return;
750 }
751 /*
752 * Check for case where an explicit ABORT_TASK has been received 703
753 * and transport_wait_for_tasks() will be waiting for completion.. 704 * and transport_wait_for_tasks() will be waiting for completion..
754 */ 705 */
@@ -770,157 +721,77 @@ void transport_complete_task(struct se_task *task, int success)
770 721
771 queue_work(target_completion_wq, &cmd->work); 722 queue_work(target_completion_wq, &cmd->work);
772} 723}
773EXPORT_SYMBOL(transport_complete_task); 724EXPORT_SYMBOL(target_complete_cmd);
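With struct se_task gone, a backend now reports completion directly on the struct se_cmd via target_complete_cmd(). A minimal sketch of how a plugin that previously used the removed transport_complete_sync_cache() might complete after this change; the example_ helper name is hypothetical, and the sense-reason assignment mirrors the removed function above:

	/* Hedged sketch only -- names other than target_complete_cmd(),
	 * SAM_STAT_* and TCM_* are illustrative. */
	static void example_backend_complete(struct se_cmd *cmd, bool good)
	{
		if (good) {
			target_complete_cmd(cmd, SAM_STAT_GOOD);
			return;
		}
		/* Any status other than GOOD marks the command CMD_T_FAILED
		 * and routes it through target_complete_failure_work(). */
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
	}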
774
775/*
776 * Called by transport_add_tasks_from_cmd() once a struct se_cmd's
777 * struct se_task list are ready to be added to the active execution list
778 * struct se_device
779 725
780 * Called with se_dev_t->execute_task_lock held. 726 static void target_add_to_state_list(struct se_cmd *cmd)
781 */
782static inline int transport_add_task_check_sam_attr(
783 struct se_task *task,
784 struct se_task *task_prev,
785 struct se_device *dev)
786{ 727{
787 /* 728 struct se_device *dev = cmd->se_dev;
788 * No SAM Task attribute emulation enabled, add to tail of 729 unsigned long flags;
789 * execution queue 730
790 */ 731 spin_lock_irqsave(&dev->execute_task_lock, flags);
791 if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) { 732 if (!cmd->state_active) {
792 list_add_tail(&task->t_execute_list, &dev->execute_task_list); 733 list_add_tail(&cmd->state_list, &dev->state_list);
793 return 0; 734 cmd->state_active = true;
794 }
795 /*
796 * HEAD_OF_QUEUE attribute for received CDB, which means
797 * the first task that is associated with a struct se_cmd goes to
798 * head of the struct se_device->execute_task_list, and task_prev
799 * after that for each subsequent task
800 */
801 if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) {
802 list_add(&task->t_execute_list,
803 (task_prev != NULL) ?
804 &task_prev->t_execute_list :
805 &dev->execute_task_list);
806
807 pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
808 " in execution queue\n",
809 task->task_se_cmd->t_task_cdb[0]);
810 return 1;
811 } 735 }
812 /* 736 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
813 * For ORDERED, SIMPLE or UNTAGGED attribute tasks once they have been
814 * transitioned from Dormant -> Active state, and are added to the end
815 * of the struct se_device->execute_task_list
816 */
817 list_add_tail(&task->t_execute_list, &dev->execute_task_list);
818 return 0;
819} 737}
820 738
821/* __transport_add_task_to_execute_queue(): 739static void __target_add_to_execute_list(struct se_cmd *cmd)
822 *
823 * Called with se_dev_t->execute_task_lock held.
824 */
825static void __transport_add_task_to_execute_queue(
826 struct se_task *task,
827 struct se_task *task_prev,
828 struct se_device *dev)
829{ 740{
830 int head_of_queue; 741 struct se_device *dev = cmd->se_dev;
831 742 bool head_of_queue = false;
832 head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
833 atomic_inc(&dev->execute_tasks);
834 743
835 if (task->t_state_active) 744 if (!list_empty(&cmd->execute_list))
836 return; 745 return;
837 /*
838 * Determine if this task needs to go to HEAD_OF_QUEUE for the
839 * state list as well. Running with SAM Task Attribute emulation
840 * will always return head_of_queue == 0 here
841 */
842 if (head_of_queue)
843 list_add(&task->t_state_list, (task_prev) ?
844 &task_prev->t_state_list :
845 &dev->state_task_list);
846 else
847 list_add_tail(&task->t_state_list, &dev->state_task_list);
848 746
849 task->t_state_active = true; 747 if (dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED &&
748 cmd->sam_task_attr == MSG_HEAD_TAG)
749 head_of_queue = true;
850 750
851 pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n", 751 if (head_of_queue)
852 task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd), 752 list_add(&cmd->execute_list, &dev->execute_list);
853 task, dev); 753 else
854} 754 list_add_tail(&cmd->execute_list, &dev->execute_list);
855 755
856static void transport_add_tasks_to_state_queue(struct se_cmd *cmd) 756 atomic_inc(&dev->execute_tasks);
857{
858 struct se_device *dev = cmd->se_dev;
859 struct se_task *task;
860 unsigned long flags;
861 757
862 spin_lock_irqsave(&cmd->t_state_lock, flags); 758 if (cmd->state_active)
863 list_for_each_entry(task, &cmd->t_task_list, t_list) { 759 return;
864 spin_lock(&dev->execute_task_lock);
865 if (!task->t_state_active) {
866 list_add_tail(&task->t_state_list,
867 &dev->state_task_list);
868 task->t_state_active = true;
869
870 pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
871 task->task_se_cmd->se_tfo->get_task_tag(
872 task->task_se_cmd), task, dev);
873 }
874 spin_unlock(&dev->execute_task_lock);
875 }
876 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
877}
878 760
879static void __transport_add_tasks_from_cmd(struct se_cmd *cmd) 761 if (head_of_queue)
880{ 762 list_add(&cmd->state_list, &dev->state_list);
881 struct se_device *dev = cmd->se_dev; 763 else
882 struct se_task *task, *task_prev = NULL; 764 list_add_tail(&cmd->state_list, &dev->state_list);
883 765
884 list_for_each_entry(task, &cmd->t_task_list, t_list) { 766 cmd->state_active = true;
885 if (!list_empty(&task->t_execute_list))
886 continue;
887 /*
888 * __transport_add_task_to_execute_queue() handles the
889 * SAM Task Attribute emulation if enabled
890 */
891 __transport_add_task_to_execute_queue(task, task_prev, dev);
892 task_prev = task;
893 }
894} 767}
895 768
896static void transport_add_tasks_from_cmd(struct se_cmd *cmd) 769static void target_add_to_execute_list(struct se_cmd *cmd)
897{ 770{
898 unsigned long flags; 771 unsigned long flags;
899 struct se_device *dev = cmd->se_dev; 772 struct se_device *dev = cmd->se_dev;
900 773
901 spin_lock_irqsave(&dev->execute_task_lock, flags); 774 spin_lock_irqsave(&dev->execute_task_lock, flags);
902 __transport_add_tasks_from_cmd(cmd); 775 __target_add_to_execute_list(cmd);
903 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 776 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
904} 777}
905 778
906void __transport_remove_task_from_execute_queue(struct se_task *task, 779void __target_remove_from_execute_list(struct se_cmd *cmd)
907 struct se_device *dev)
908{ 780{
909 list_del_init(&task->t_execute_list); 781 list_del_init(&cmd->execute_list);
910 atomic_dec(&dev->execute_tasks); 782 atomic_dec(&cmd->se_dev->execute_tasks);
911} 783}
912 784
913static void transport_remove_task_from_execute_queue( 785static void target_remove_from_execute_list(struct se_cmd *cmd)
914 struct se_task *task,
915 struct se_device *dev)
916{ 786{
787 struct se_device *dev = cmd->se_dev;
917 unsigned long flags; 788 unsigned long flags;
918 789
919 if (WARN_ON(list_empty(&task->t_execute_list))) 790 if (WARN_ON(list_empty(&cmd->execute_list)))
920 return; 791 return;
921 792
922 spin_lock_irqsave(&dev->execute_task_lock, flags); 793 spin_lock_irqsave(&dev->execute_task_lock, flags);
923 __transport_remove_task_from_execute_queue(task, dev); 794 __target_remove_from_execute_list(cmd);
924 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 795 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
925} 796}
926 797
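Queueing is now per command rather than per task: cmd->execute_list feeds dispatch and cmd->state_list tracks the command for TMR and shutdown handling. The insertion rule above reduces to a two-way choice, sketched here with the field names from this patch; the dispatcher in __transport_execute_tasks() always pops the list head, so a HEAD_OF_QUEUE command runs next:

	/* Sketch: insertion order under SAM task-attribute emulation.
	 * Caller holds dev->execute_task_lock. */
	if (dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED &&
	    cmd->sam_task_attr == MSG_HEAD_TAG)
		list_add(&cmd->execute_list, &dev->execute_list);	/* run next */
	else
		list_add_tail(&cmd->execute_list, &dev->execute_list);	/* run in order */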
@@ -999,8 +870,9 @@ void transport_dump_dev_state(
999 870
1000 *bl += sprintf(b + *bl, " Execute/Max Queue Depth: %d/%d", 871 *bl += sprintf(b + *bl, " Execute/Max Queue Depth: %d/%d",
1001 atomic_read(&dev->execute_tasks), dev->queue_depth); 872 atomic_read(&dev->execute_tasks), dev->queue_depth);
1002 *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n", 873 *bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n",
1003 dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors); 874 dev->se_sub_dev->se_dev_attrib.block_size,
875 dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
1004 *bl += sprintf(b + *bl, " "); 876 *bl += sprintf(b + *bl, " ");
1005} 877}
1006 878
@@ -1344,9 +1216,9 @@ struct se_device *transport_add_device_to_core_hba(
1344 INIT_LIST_HEAD(&dev->dev_list); 1216 INIT_LIST_HEAD(&dev->dev_list);
1345 INIT_LIST_HEAD(&dev->dev_sep_list); 1217 INIT_LIST_HEAD(&dev->dev_sep_list);
1346 INIT_LIST_HEAD(&dev->dev_tmr_list); 1218 INIT_LIST_HEAD(&dev->dev_tmr_list);
1347 INIT_LIST_HEAD(&dev->execute_task_list); 1219 INIT_LIST_HEAD(&dev->execute_list);
1348 INIT_LIST_HEAD(&dev->delayed_cmd_list); 1220 INIT_LIST_HEAD(&dev->delayed_cmd_list);
1349 INIT_LIST_HEAD(&dev->state_task_list); 1221 INIT_LIST_HEAD(&dev->state_list);
1350 INIT_LIST_HEAD(&dev->qf_cmd_list); 1222 INIT_LIST_HEAD(&dev->qf_cmd_list);
1351 spin_lock_init(&dev->execute_task_lock); 1223 spin_lock_init(&dev->execute_task_lock);
1352 spin_lock_init(&dev->delayed_cmd_lock); 1224 spin_lock_init(&dev->delayed_cmd_lock);
@@ -1457,6 +1329,7 @@ static inline void transport_generic_prepare_cdb(
1457 case VERIFY_16: /* SBC - VRProtect */ 1329 case VERIFY_16: /* SBC - VRProtect */
1458 case WRITE_VERIFY: /* SBC - VRProtect */ 1330 case WRITE_VERIFY: /* SBC - VRProtect */
1459 case WRITE_VERIFY_12: /* SBC - VRProtect */ 1331 case WRITE_VERIFY_12: /* SBC - VRProtect */
1332 case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
1460 break; 1333 break;
1461 default: 1334 default:
1462 cdb[1] &= 0x1f; /* clear logical unit number */ 1335 cdb[1] &= 0x1f; /* clear logical unit number */
@@ -1464,29 +1337,6 @@ static inline void transport_generic_prepare_cdb(
1464 } 1337 }
1465} 1338}
1466 1339
1467static struct se_task *
1468transport_generic_get_task(struct se_cmd *cmd,
1469 enum dma_data_direction data_direction)
1470{
1471 struct se_task *task;
1472 struct se_device *dev = cmd->se_dev;
1473
1474 task = dev->transport->alloc_task(cmd->t_task_cdb);
1475 if (!task) {
1476 pr_err("Unable to allocate struct se_task\n");
1477 return NULL;
1478 }
1479
1480 INIT_LIST_HEAD(&task->t_list);
1481 INIT_LIST_HEAD(&task->t_execute_list);
1482 INIT_LIST_HEAD(&task->t_state_list);
1483 init_completion(&task->task_stop_comp);
1484 task->task_se_cmd = cmd;
1485 task->task_data_direction = data_direction;
1486
1487 return task;
1488}
1489
1490static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *); 1340static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);
1491 1341
1492/* 1342/*
@@ -1507,11 +1357,13 @@ void transport_init_se_cmd(
1507 INIT_LIST_HEAD(&cmd->se_qf_node); 1357 INIT_LIST_HEAD(&cmd->se_qf_node);
1508 INIT_LIST_HEAD(&cmd->se_queue_node); 1358 INIT_LIST_HEAD(&cmd->se_queue_node);
1509 INIT_LIST_HEAD(&cmd->se_cmd_list); 1359 INIT_LIST_HEAD(&cmd->se_cmd_list);
1510 INIT_LIST_HEAD(&cmd->t_task_list); 1360 INIT_LIST_HEAD(&cmd->execute_list);
1361 INIT_LIST_HEAD(&cmd->state_list);
1511 init_completion(&cmd->transport_lun_fe_stop_comp); 1362 init_completion(&cmd->transport_lun_fe_stop_comp);
1512 init_completion(&cmd->transport_lun_stop_comp); 1363 init_completion(&cmd->transport_lun_stop_comp);
1513 init_completion(&cmd->t_transport_stop_comp); 1364 init_completion(&cmd->t_transport_stop_comp);
1514 init_completion(&cmd->cmd_wait_comp); 1365 init_completion(&cmd->cmd_wait_comp);
1366 init_completion(&cmd->task_stop_comp);
1515 spin_lock_init(&cmd->t_state_lock); 1367 spin_lock_init(&cmd->t_state_lock);
1516 cmd->transport_state = CMD_T_DEV_ACTIVE; 1368 cmd->transport_state = CMD_T_DEV_ACTIVE;
1517 1369
@@ -1521,6 +1373,8 @@ void transport_init_se_cmd(
1521 cmd->data_direction = data_direction; 1373 cmd->data_direction = data_direction;
1522 cmd->sam_task_attr = task_attr; 1374 cmd->sam_task_attr = task_attr;
1523 cmd->sense_buffer = sense_buffer; 1375 cmd->sense_buffer = sense_buffer;
1376
1377 cmd->state_active = false;
1524} 1378}
1525EXPORT_SYMBOL(transport_init_se_cmd); 1379EXPORT_SYMBOL(transport_init_se_cmd);
1526 1380
@@ -1550,11 +1404,11 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
1550 return 0; 1404 return 0;
1551} 1405}
1552 1406
1553/* transport_generic_allocate_tasks(): 1407/* target_setup_cmd_from_cdb():
1554 * 1408 *
1555 * Called from fabric RX Thread. 1409 * Called from fabric RX Thread.
1556 */ 1410 */
1557int transport_generic_allocate_tasks( 1411int target_setup_cmd_from_cdb(
1558 struct se_cmd *cmd, 1412 struct se_cmd *cmd,
1559 unsigned char *cdb) 1413 unsigned char *cdb)
1560{ 1414{
@@ -1620,7 +1474,7 @@ int transport_generic_allocate_tasks(
1620 spin_unlock(&cmd->se_lun->lun_sep_lock); 1474 spin_unlock(&cmd->se_lun->lun_sep_lock);
1621 return 0; 1475 return 0;
1622} 1476}
1623EXPORT_SYMBOL(transport_generic_allocate_tasks); 1477EXPORT_SYMBOL(target_setup_cmd_from_cdb);
1624 1478
1625/* 1479/*
1626 * Used by fabric module frontends to queue tasks directly. 1480 * Used by fabric module frontends to queue tasks directly.
@@ -1701,6 +1555,8 @@ void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
1701 */ 1555 */
1702 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 1556 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
1703 data_length, data_dir, task_attr, sense); 1557 data_length, data_dir, task_attr, sense);
1558 if (flags & TARGET_SCF_UNKNOWN_SIZE)
1559 se_cmd->unknown_data_length = 1;
1704 /* 1560 /*
1705 * Obtain struct se_cmd->cmd_kref reference and add new cmd to 1561 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
1706 * se_sess->sess_cmd_list. A second kref_get here is necessary 1562 * se_sess->sess_cmd_list. A second kref_get here is necessary
@@ -1726,11 +1582,18 @@ void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
1726 * Sanitize CDBs via transport_generic_cmd_sequencer() and 1582 * Sanitize CDBs via transport_generic_cmd_sequencer() and
1727 * allocate the necessary tasks to complete the received CDB+data 1583 * allocate the necessary tasks to complete the received CDB+data
1728 */ 1584 */
1729 rc = transport_generic_allocate_tasks(se_cmd, cdb); 1585 rc = target_setup_cmd_from_cdb(se_cmd, cdb);
1730 if (rc != 0) { 1586 if (rc != 0) {
1731 transport_generic_request_failure(se_cmd); 1587 transport_generic_request_failure(se_cmd);
1732 return; 1588 return;
1733 } 1589 }
1590
1591 /*
1592 * Check if we need to delay processing because of ALUA
1593 * Active/NonOptimized primary access state..
1594 */
1595 core_alua_check_nonop_delay(se_cmd);
1596
1734 /* 1597 /*
1735 * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend 1598 * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend
1736 * for immediate execution of READs, otherwise wait for 1599 * for immediate execution of READs, otherwise wait for
@@ -1872,72 +1735,30 @@ int transport_generic_handle_tmr(
1872EXPORT_SYMBOL(transport_generic_handle_tmr); 1735EXPORT_SYMBOL(transport_generic_handle_tmr);
1873 1736
1874/* 1737/*
1875 * If the task is active, request it to be stopped and sleep until it 1738 * If the cmd is active, request it to be stopped and sleep until it
1876 * has completed. 1739 * has completed.
1877 */ 1740 */
1878bool target_stop_task(struct se_task *task, unsigned long *flags) 1741bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
1879{ 1742{
1880 struct se_cmd *cmd = task->task_se_cmd;
1881 bool was_active = false; 1743 bool was_active = false;
1882 1744
1883 if (task->task_flags & TF_ACTIVE) { 1745 if (cmd->transport_state & CMD_T_BUSY) {
1884 task->task_flags |= TF_REQUEST_STOP; 1746 cmd->transport_state |= CMD_T_REQUEST_STOP;
1885 spin_unlock_irqrestore(&cmd->t_state_lock, *flags); 1747 spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
1886 1748
1887 pr_debug("Task %p waiting to complete\n", task); 1749 pr_debug("cmd %p waiting to complete\n", cmd);
1888 wait_for_completion(&task->task_stop_comp); 1750 wait_for_completion(&cmd->task_stop_comp);
1889 pr_debug("Task %p stopped successfully\n", task); 1751 pr_debug("cmd %p stopped successfully\n", cmd);
1890 1752
1891 spin_lock_irqsave(&cmd->t_state_lock, *flags); 1753 spin_lock_irqsave(&cmd->t_state_lock, *flags);
1892 atomic_dec(&cmd->t_task_cdbs_left); 1754 cmd->transport_state &= ~CMD_T_REQUEST_STOP;
1893 task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP); 1755 cmd->transport_state &= ~CMD_T_BUSY;
1894 was_active = true; 1756 was_active = true;
1895 } 1757 }
1896 1758
1897 return was_active; 1759 return was_active;
1898} 1760}
1899 1761
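target_stop_cmd() pairs with the CMD_T_REQUEST_STOP check in target_complete_cmd(): the stopping context sets the flag and sleeps, and the completing context fires task_stop_comp instead of queueing the normal completion work. The handshake, condensed from the two sides shown in full above:

	/* Stopper, with cmd->t_state_lock held via *flags: */
	cmd->transport_state |= CMD_T_REQUEST_STOP;
	spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
	wait_for_completion(&cmd->task_stop_comp);

	/* Completer, inside target_complete_cmd(): */
	if (cmd->transport_state & CMD_T_REQUEST_STOP) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&cmd->task_stop_comp);		/* wakes the stopper */
		return;					/* skip normal completion */
	}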
1900static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
1901{
1902 struct se_task *task, *task_tmp;
1903 unsigned long flags;
1904 int ret = 0;
1905
1906 pr_debug("ITT[0x%08x] - Stopping tasks\n",
1907 cmd->se_tfo->get_task_tag(cmd));
1908
1909 /*
1910 * No tasks remain in the execution queue
1911 */
1912 spin_lock_irqsave(&cmd->t_state_lock, flags);
1913 list_for_each_entry_safe(task, task_tmp,
1914 &cmd->t_task_list, t_list) {
1915 pr_debug("Processing task %p\n", task);
1916 /*
1917 * If the struct se_task has not been sent and is not active,
1918 * remove the struct se_task from the execution queue.
1919 */
1920 if (!(task->task_flags & (TF_ACTIVE | TF_SENT))) {
1921 spin_unlock_irqrestore(&cmd->t_state_lock,
1922 flags);
1923 transport_remove_task_from_execute_queue(task,
1924 cmd->se_dev);
1925
1926 pr_debug("Task %p removed from execute queue\n", task);
1927 spin_lock_irqsave(&cmd->t_state_lock, flags);
1928 continue;
1929 }
1930
1931 if (!target_stop_task(task, &flags)) {
1932 pr_debug("Task %p - did nothing\n", task);
1933 ret++;
1934 }
1935 }
1936 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1937
1938 return ret;
1939}
1940
1941/* 1762/*
1942 * Handle SAM-esque emulation for generic transport request failures. 1763 * Handle SAM-esque emulation for generic transport request failures.
1943 */ 1764 */
@@ -1951,13 +1772,7 @@ void transport_generic_request_failure(struct se_cmd *cmd)
1951 pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n", 1772 pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
1952 cmd->se_tfo->get_cmd_state(cmd), 1773 cmd->se_tfo->get_cmd_state(cmd),
1953 cmd->t_state, cmd->scsi_sense_reason); 1774 cmd->t_state, cmd->scsi_sense_reason);
1954 pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d" 1775 pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
1955 " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
1956 " CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
1957 cmd->t_task_list_num,
1958 atomic_read(&cmd->t_task_cdbs_left),
1959 atomic_read(&cmd->t_task_cdbs_sent),
1960 atomic_read(&cmd->t_task_cdbs_ex_left),
1961 (cmd->transport_state & CMD_T_ACTIVE) != 0, 1776 (cmd->transport_state & CMD_T_ACTIVE) != 0,
1962 (cmd->transport_state & CMD_T_STOP) != 0, 1777 (cmd->transport_state & CMD_T_STOP) != 0,
1963 (cmd->transport_state & CMD_T_SENT) != 0); 1778 (cmd->transport_state & CMD_T_SENT) != 0);
@@ -2156,7 +1971,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
2156 * Called from fabric module context in transport_generic_new_cmd() and 1971 * Called from fabric module context in transport_generic_new_cmd() and
2157 * transport_generic_process_write() 1972 * transport_generic_process_write()
2158 */ 1973 */
2159static int transport_execute_tasks(struct se_cmd *cmd) 1974static void transport_execute_tasks(struct se_cmd *cmd)
2160{ 1975{
2161 int add_tasks; 1976 int add_tasks;
2162 struct se_device *se_dev = cmd->se_dev; 1977 struct se_device *se_dev = cmd->se_dev;
@@ -2170,71 +1985,52 @@ static int transport_execute_tasks(struct se_cmd *cmd)
2170 * attribute for the tasks of the received struct se_cmd CDB 1985 * attribute for the tasks of the received struct se_cmd CDB
2171 */ 1986 */
2172 add_tasks = transport_execute_task_attr(cmd); 1987 add_tasks = transport_execute_task_attr(cmd);
2173 if (!add_tasks) 1988 if (add_tasks) {
2174 goto execute_tasks; 1989 __transport_execute_tasks(se_dev, cmd);
2175 /* 1990 return;
2176 * __transport_execute_tasks() -> __transport_add_tasks_from_cmd() 1991 }
2177 * adds associated se_tasks while holding dev->execute_task_lock
2178 * before I/O dispath to avoid a double spinlock access.
2179 */
2180 __transport_execute_tasks(se_dev, cmd);
2181 return 0;
2182 } 1992 }
2183
2184execute_tasks:
2185 __transport_execute_tasks(se_dev, NULL); 1993 __transport_execute_tasks(se_dev, NULL);
2186 return 0;
2187} 1994}
2188 1995
2189/*
2190 * Called to check struct se_device tcq depth window, and once open pull struct se_task
2191 * from struct se_device->execute_task_list for dispatch.
2192 *
2193 * Called from transport_processing_thread()
2194 */
2195static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd) 1996static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd)
2196{ 1997{
2197 int error; 1998 int error;
2198 struct se_cmd *cmd = NULL; 1999 struct se_cmd *cmd = NULL;
2199 struct se_task *task = NULL;
2200 unsigned long flags; 2000 unsigned long flags;
2201 2001
2202check_depth: 2002check_depth:
2203 spin_lock_irq(&dev->execute_task_lock); 2003 spin_lock_irq(&dev->execute_task_lock);
2204 if (new_cmd != NULL) 2004 if (new_cmd != NULL)
2205 __transport_add_tasks_from_cmd(new_cmd); 2005 __target_add_to_execute_list(new_cmd);
2206 2006
2207 if (list_empty(&dev->execute_task_list)) { 2007 if (list_empty(&dev->execute_list)) {
2208 spin_unlock_irq(&dev->execute_task_lock); 2008 spin_unlock_irq(&dev->execute_task_lock);
2209 return 0; 2009 return 0;
2210 } 2010 }
2211 task = list_first_entry(&dev->execute_task_list, 2011 cmd = list_first_entry(&dev->execute_list, struct se_cmd, execute_list);
2212 struct se_task, t_execute_list); 2012 __target_remove_from_execute_list(cmd);
2213 __transport_remove_task_from_execute_queue(task, dev);
2214 spin_unlock_irq(&dev->execute_task_lock); 2013 spin_unlock_irq(&dev->execute_task_lock);
2215 2014
2216 cmd = task->task_se_cmd;
2217 spin_lock_irqsave(&cmd->t_state_lock, flags); 2015 spin_lock_irqsave(&cmd->t_state_lock, flags);
2218 task->task_flags |= (TF_ACTIVE | TF_SENT); 2016 cmd->transport_state |= CMD_T_BUSY;
2219 atomic_inc(&cmd->t_task_cdbs_sent); 2017 cmd->transport_state |= CMD_T_SENT;
2220
2221 if (atomic_read(&cmd->t_task_cdbs_sent) ==
2222 cmd->t_task_list_num)
2223 cmd->transport_state |= CMD_T_SENT;
2224 2018
2225 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2019 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2226 2020
2227 if (cmd->execute_task) 2021 if (cmd->execute_cmd)
2228 error = cmd->execute_task(task); 2022 error = cmd->execute_cmd(cmd);
2229 else 2023 else {
2230 error = dev->transport->do_task(task); 2024 error = dev->transport->execute_cmd(cmd, cmd->t_data_sg,
2025 cmd->t_data_nents, cmd->data_direction);
2026 }
2027
2231 if (error != 0) { 2028 if (error != 0) {
2232 spin_lock_irqsave(&cmd->t_state_lock, flags); 2029 spin_lock_irqsave(&cmd->t_state_lock, flags);
2233 task->task_flags &= ~TF_ACTIVE; 2030 cmd->transport_state &= ~CMD_T_BUSY;
2234 cmd->transport_state &= ~CMD_T_SENT; 2031 cmd->transport_state &= ~CMD_T_SENT;
2235 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2032 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2236 2033
2237 transport_stop_tasks_for_cmd(cmd);
2238 transport_generic_request_failure(cmd); 2034 transport_generic_request_failure(cmd);
2239 } 2035 }
2240 2036
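Dispatch now pops one se_cmd at a time and calls either the per-command handler installed by the sequencer (cmd->execute_cmd, used for emulated opcodes) or the backend's new execute_cmd method. A backend implementing the new entry point might look like the sketch below; the signature follows the call site above, but the body and the example_ name are assumptions:

	/* Hedged sketch of a subsystem-API execute_cmd implementation. */
	static int example_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
				       u32 sgl_nents, enum dma_data_direction dir)
	{
		/* ...submit sgl to the backing store asynchronously... */

		/* On I/O completion, report SCSI status back to the core: */
		target_complete_cmd(cmd, SAM_STAT_GOOD);

		/* A non-zero return here would clear CMD_T_BUSY/CMD_T_SENT
		 * and invoke transport_generic_request_failure(). */
		return 0;
	}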
@@ -2392,12 +2188,12 @@ static inline u32 transport_get_size(
2392 } else /* bytes */ 2188 } else /* bytes */
2393 return sectors; 2189 return sectors;
2394 } 2190 }
2395#if 0 2191
2396 pr_debug("Returning block_size: %u, sectors: %u == %u for" 2192 pr_debug("Returning block_size: %u, sectors: %u == %u for"
2397 " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors, 2193 " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size,
2398 dev->se_sub_dev->se_dev_attrib.block_size * sectors, 2194 sectors, dev->se_sub_dev->se_dev_attrib.block_size * sectors,
2399 dev->transport->name); 2195 dev->transport->name);
2400#endif 2196
2401 return dev->se_sub_dev->se_dev_attrib.block_size * sectors; 2197 return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
2402} 2198}
2403 2199
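With the #if 0 guard dropped, the debug line now always documents the size computation. For a device-type object the result is simply block_size * sectors; for example (values assumed for illustration):

	/* e.g. block_size = 512, sectors = 8:
	 *   transport_get_size() = 512 * 8 = 4096 bytes */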
@@ -2462,7 +2258,6 @@ static int transport_get_sense_data(struct se_cmd *cmd)
2462{ 2258{
2463 unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL; 2259 unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
2464 struct se_device *dev = cmd->se_dev; 2260 struct se_device *dev = cmd->se_dev;
2465 struct se_task *task = NULL, *task_tmp;
2466 unsigned long flags; 2261 unsigned long flags;
2467 u32 offset = 0; 2262 u32 offset = 0;
2468 2263
@@ -2477,44 +2272,37 @@ static int transport_get_sense_data(struct se_cmd *cmd)
2477 return 0; 2272 return 0;
2478 } 2273 }
2479 2274
2480 list_for_each_entry_safe(task, task_tmp, 2275 if (!(cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE))
2481 &cmd->t_task_list, t_list) { 2276 goto out;
2482 if (!(task->task_flags & TF_HAS_SENSE))
2483 continue;
2484
2485 if (!dev->transport->get_sense_buffer) {
2486 pr_err("dev->transport->get_sense_buffer"
2487 " is NULL\n");
2488 continue;
2489 }
2490
2491 sense_buffer = dev->transport->get_sense_buffer(task);
2492 if (!sense_buffer) {
2493 pr_err("ITT[0x%08x]_TASK[%p]: Unable to locate"
2494 " sense buffer for task with sense\n",
2495 cmd->se_tfo->get_task_tag(cmd), task);
2496 continue;
2497 }
2498 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2499 2277
2500 offset = cmd->se_tfo->set_fabric_sense_len(cmd, 2278 if (!dev->transport->get_sense_buffer) {
2501 TRANSPORT_SENSE_BUFFER); 2279 pr_err("dev->transport->get_sense_buffer is NULL\n");
2280 goto out;
2281 }
2502 2282
2503 memcpy(&buffer[offset], sense_buffer, 2283 sense_buffer = dev->transport->get_sense_buffer(cmd);
2504 TRANSPORT_SENSE_BUFFER); 2284 if (!sense_buffer) {
2505 cmd->scsi_status = task->task_scsi_status; 2285 pr_err("ITT 0x%08x cmd %p: Unable to locate"
2506 /* Automatically padded */ 2286 " sense buffer for task with sense\n",
2507 cmd->scsi_sense_length = 2287 cmd->se_tfo->get_task_tag(cmd), cmd);
2508 (TRANSPORT_SENSE_BUFFER + offset); 2288 goto out;
2509
2510 pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
2511 " and sense\n",
2512 dev->se_hba->hba_id, dev->transport->name,
2513 cmd->scsi_status);
2514 return 0;
2515 } 2289 }
2290
2516 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2291 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2517 2292
2293 offset = cmd->se_tfo->set_fabric_sense_len(cmd, TRANSPORT_SENSE_BUFFER);
2294
2295 memcpy(&buffer[offset], sense_buffer, TRANSPORT_SENSE_BUFFER);
2296
2297 /* Automatically padded */
2298 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;
2299
2300 pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x and sense\n",
2301 dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
2302 return 0;
2303
2304out:
2305 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2518 return -1; 2306 return -1;
2519} 2307}
2520 2308
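The per-task sense scan collapses to a single check of SCF_TRANSPORT_TASK_SENSE on the command. The resulting buffer layout is unchanged: the fabric reserves a header in front of the sense bytes via set_fabric_sense_len(), sketched here:

	/* Sketch: cmd->sense_buffer after transport_get_sense_data()
	 *
	 *   [ fabric header: offset bytes ][ sense: TRANSPORT_SENSE_BUFFER ]
	 *
	 * offset = cmd->se_tfo->set_fabric_sense_len(cmd,
	 *					      TRANSPORT_SENSE_BUFFER);
	 * cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;
	 */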
@@ -2581,7 +2369,7 @@ static int target_check_write_same_discard(unsigned char *flags, struct se_devic
2581 * Generic Command Sequencer that should work for most DAS transport 2369 * Generic Command Sequencer that should work for most DAS transport
2582 * drivers. 2370 * drivers.
2583 * 2371 *
2584 * Called from transport_generic_allocate_tasks() in the $FABRIC_MOD 2372 * Called from target_setup_cmd_from_cdb() in the $FABRIC_MOD
2585 * RX Thread. 2373 * RX Thread.
2586 * 2374 *
2587 * FIXME: Need to support other SCSI OPCODES here as well. 2375
@@ -2615,11 +2403,10 @@ static int transport_generic_cmd_sequencer(
2615 * by the ALUA primary or secondary access state.. 2403 * by the ALUA primary or secondary access state..
2616 */ 2404 */
2617 if (ret > 0) { 2405 if (ret > 0) {
2618#if 0
2619 pr_debug("[%s]: ALUA TG Port not available," 2406 pr_debug("[%s]: ALUA TG Port not available,"
2620 " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n", 2407 " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
2621 cmd->se_tfo->get_fabric_name(), alua_ascq); 2408 cmd->se_tfo->get_fabric_name(), alua_ascq);
2622#endif 2409
2623 transport_set_sense_codes(cmd, 0x04, alua_ascq); 2410 transport_set_sense_codes(cmd, 0x04, alua_ascq);
2624 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 2411 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2625 cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY; 2412 cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
@@ -2695,6 +2482,7 @@ static int transport_generic_cmd_sequencer(
2695 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 2482 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2696 break; 2483 break;
2697 case WRITE_10: 2484 case WRITE_10:
2485 case WRITE_VERIFY:
2698 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); 2486 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2699 if (sector_ret) 2487 if (sector_ret)
2700 goto out_unsupported_cdb; 2488 goto out_unsupported_cdb;
@@ -2796,7 +2584,7 @@ static int transport_generic_cmd_sequencer(
2796 if (target_check_write_same_discard(&cdb[10], dev) < 0) 2584 if (target_check_write_same_discard(&cdb[10], dev) < 0)
2797 goto out_unsupported_cdb; 2585 goto out_unsupported_cdb;
2798 if (!passthrough) 2586 if (!passthrough)
2799 cmd->execute_task = target_emulate_write_same; 2587 cmd->execute_cmd = target_emulate_write_same;
2800 break; 2588 break;
2801 default: 2589 default:
2802 pr_err("VARIABLE_LENGTH_CMD service action" 2590 pr_err("VARIABLE_LENGTH_CMD service action"
@@ -2810,9 +2598,9 @@ static int transport_generic_cmd_sequencer(
2810 /* 2598 /*
2811 * Check for emulated MI_REPORT_TARGET_PGS. 2599 * Check for emulated MI_REPORT_TARGET_PGS.
2812 */ 2600 */
2813 if (cdb[1] == MI_REPORT_TARGET_PGS && 2601 if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS &&
2814 su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { 2602 su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
2815 cmd->execute_task = 2603 cmd->execute_cmd =
2816 target_emulate_report_target_port_groups; 2604 target_emulate_report_target_port_groups;
2817 } 2605 }
2818 size = (cdb[6] << 24) | (cdb[7] << 16) | 2606 size = (cdb[6] << 24) | (cdb[7] << 16) |
@@ -2835,13 +2623,13 @@ static int transport_generic_cmd_sequencer(
2835 size = cdb[4]; 2623 size = cdb[4];
2836 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2624 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2837 if (!passthrough) 2625 if (!passthrough)
2838 cmd->execute_task = target_emulate_modesense; 2626 cmd->execute_cmd = target_emulate_modesense;
2839 break; 2627 break;
2840 case MODE_SENSE_10: 2628 case MODE_SENSE_10:
2841 size = (cdb[7] << 8) + cdb[8]; 2629 size = (cdb[7] << 8) + cdb[8];
2842 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2630 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2843 if (!passthrough) 2631 if (!passthrough)
2844 cmd->execute_task = target_emulate_modesense; 2632 cmd->execute_cmd = target_emulate_modesense;
2845 break; 2633 break;
2846 case GPCMD_READ_BUFFER_CAPACITY: 2634 case GPCMD_READ_BUFFER_CAPACITY:
2847 case GPCMD_SEND_OPC: 2635 case GPCMD_SEND_OPC:
@@ -2863,13 +2651,13 @@ static int transport_generic_cmd_sequencer(
2863 break; 2651 break;
2864 case PERSISTENT_RESERVE_IN: 2652 case PERSISTENT_RESERVE_IN:
2865 if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS) 2653 if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
2866 cmd->execute_task = target_scsi3_emulate_pr_in; 2654 cmd->execute_cmd = target_scsi3_emulate_pr_in;
2867 size = (cdb[7] << 8) + cdb[8]; 2655 size = (cdb[7] << 8) + cdb[8];
2868 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2656 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2869 break; 2657 break;
2870 case PERSISTENT_RESERVE_OUT: 2658 case PERSISTENT_RESERVE_OUT:
2871 if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS) 2659 if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
2872 cmd->execute_task = target_scsi3_emulate_pr_out; 2660 cmd->execute_cmd = target_scsi3_emulate_pr_out;
2873 size = (cdb[7] << 8) + cdb[8]; 2661 size = (cdb[7] << 8) + cdb[8];
2874 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2662 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2875 break; 2663 break;
@@ -2890,7 +2678,7 @@ static int transport_generic_cmd_sequencer(
2890 */ 2678 */
2891 if (cdb[1] == MO_SET_TARGET_PGS && 2679 if (cdb[1] == MO_SET_TARGET_PGS &&
2892 su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { 2680 su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
2893 cmd->execute_task = 2681 cmd->execute_cmd =
2894 target_emulate_set_target_port_groups; 2682 target_emulate_set_target_port_groups;
2895 } 2683 }
2896 2684
@@ -2912,7 +2700,7 @@ static int transport_generic_cmd_sequencer(
2912 cmd->sam_task_attr = MSG_HEAD_TAG; 2700 cmd->sam_task_attr = MSG_HEAD_TAG;
2913 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2701 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2914 if (!passthrough) 2702 if (!passthrough)
2915 cmd->execute_task = target_emulate_inquiry; 2703 cmd->execute_cmd = target_emulate_inquiry;
2916 break; 2704 break;
2917 case READ_BUFFER: 2705 case READ_BUFFER:
2918 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; 2706 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
@@ -2922,7 +2710,7 @@ static int transport_generic_cmd_sequencer(
2922 size = READ_CAP_LEN; 2710 size = READ_CAP_LEN;
2923 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2711 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2924 if (!passthrough) 2712 if (!passthrough)
2925 cmd->execute_task = target_emulate_readcapacity; 2713 cmd->execute_cmd = target_emulate_readcapacity;
2926 break; 2714 break;
2927 case READ_MEDIA_SERIAL_NUMBER: 2715 case READ_MEDIA_SERIAL_NUMBER:
2928 case SECURITY_PROTOCOL_IN: 2716 case SECURITY_PROTOCOL_IN:
@@ -2934,7 +2722,7 @@ static int transport_generic_cmd_sequencer(
2934 switch (cmd->t_task_cdb[1] & 0x1f) { 2722 switch (cmd->t_task_cdb[1] & 0x1f) {
2935 case SAI_READ_CAPACITY_16: 2723 case SAI_READ_CAPACITY_16:
2936 if (!passthrough) 2724 if (!passthrough)
2937 cmd->execute_task = 2725 cmd->execute_cmd =
2938 target_emulate_readcapacity_16; 2726 target_emulate_readcapacity_16;
2939 break; 2727 break;
2940 default: 2728 default:
@@ -2977,7 +2765,7 @@ static int transport_generic_cmd_sequencer(
2977 size = cdb[4]; 2765 size = cdb[4];
2978 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2766 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2979 if (!passthrough) 2767 if (!passthrough)
2980 cmd->execute_task = target_emulate_request_sense; 2768 cmd->execute_cmd = target_emulate_request_sense;
2981 break; 2769 break;
2982 case READ_ELEMENT_STATUS: 2770 case READ_ELEMENT_STATUS:
2983 size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9]; 2771 size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
@@ -3006,7 +2794,7 @@ static int transport_generic_cmd_sequencer(
3006 * emulation disabled. 2794 * emulation disabled.
3007 */ 2795 */
3008 if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH) 2796 if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
3009 cmd->execute_task = target_scsi2_reservation_reserve; 2797 cmd->execute_cmd = target_scsi2_reservation_reserve;
3010 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; 2798 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3011 break; 2799 break;
3012 case RELEASE: 2800 case RELEASE:
@@ -3021,7 +2809,7 @@ static int transport_generic_cmd_sequencer(
3021 size = cmd->data_length; 2809 size = cmd->data_length;
3022 2810
3023 if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH) 2811 if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
3024 cmd->execute_task = target_scsi2_reservation_release; 2812 cmd->execute_cmd = target_scsi2_reservation_release;
3025 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; 2813 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3026 break; 2814 break;
3027 case SYNCHRONIZE_CACHE: 2815 case SYNCHRONIZE_CACHE:
@@ -3053,13 +2841,13 @@ static int transport_generic_cmd_sequencer(
3053 if (transport_cmd_get_valid_sectors(cmd) < 0) 2841 if (transport_cmd_get_valid_sectors(cmd) < 0)
3054 goto out_invalid_cdb_field; 2842 goto out_invalid_cdb_field;
3055 } 2843 }
3056 cmd->execute_task = target_emulate_synchronize_cache; 2844 cmd->execute_cmd = target_emulate_synchronize_cache;
3057 break; 2845 break;
3058 case UNMAP: 2846 case UNMAP:
3059 size = get_unaligned_be16(&cdb[7]); 2847 size = get_unaligned_be16(&cdb[7]);
3060 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2848 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3061 if (!passthrough) 2849 if (!passthrough)
3062 cmd->execute_task = target_emulate_unmap; 2850 cmd->execute_cmd = target_emulate_unmap;
3063 break; 2851 break;
3064 case WRITE_SAME_16: 2852 case WRITE_SAME_16:
3065 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); 2853 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
@@ -3079,7 +2867,7 @@ static int transport_generic_cmd_sequencer(
3079 if (target_check_write_same_discard(&cdb[1], dev) < 0) 2867 if (target_check_write_same_discard(&cdb[1], dev) < 0)
3080 goto out_unsupported_cdb; 2868 goto out_unsupported_cdb;
3081 if (!passthrough) 2869 if (!passthrough)
3082 cmd->execute_task = target_emulate_write_same; 2870 cmd->execute_cmd = target_emulate_write_same;
3083 break; 2871 break;
3084 case WRITE_SAME: 2872 case WRITE_SAME:
3085 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); 2873 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
@@ -3102,7 +2890,7 @@ static int transport_generic_cmd_sequencer(
3102 if (target_check_write_same_discard(&cdb[1], dev) < 0) 2890 if (target_check_write_same_discard(&cdb[1], dev) < 0)
3103 goto out_unsupported_cdb; 2891 goto out_unsupported_cdb;
3104 if (!passthrough) 2892 if (!passthrough)
3105 cmd->execute_task = target_emulate_write_same; 2893 cmd->execute_cmd = target_emulate_write_same;
3106 break; 2894 break;
3107 case ALLOW_MEDIUM_REMOVAL: 2895 case ALLOW_MEDIUM_REMOVAL:
3108 case ERASE: 2896 case ERASE:
@@ -3115,7 +2903,7 @@ static int transport_generic_cmd_sequencer(
3115 case WRITE_FILEMARKS: 2903 case WRITE_FILEMARKS:
3116 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; 2904 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3117 if (!passthrough) 2905 if (!passthrough)
3118 cmd->execute_task = target_emulate_noop; 2906 cmd->execute_cmd = target_emulate_noop;
3119 break; 2907 break;
3120 case GPCMD_CLOSE_TRACK: 2908 case GPCMD_CLOSE_TRACK:
3121 case INITIALIZE_ELEMENT_STATUS: 2909 case INITIALIZE_ELEMENT_STATUS:
@@ -3125,7 +2913,7 @@ static int transport_generic_cmd_sequencer(
3125 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; 2913 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3126 break; 2914 break;
3127 case REPORT_LUNS: 2915 case REPORT_LUNS:
3128 cmd->execute_task = target_report_luns; 2916 cmd->execute_cmd = target_report_luns;
3129 size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; 2917 size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
3130 /* 2918 /*
3131 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS 2919
@@ -3135,6 +2923,42 @@ static int transport_generic_cmd_sequencer(
3135 cmd->sam_task_attr = MSG_HEAD_TAG; 2923 cmd->sam_task_attr = MSG_HEAD_TAG;
3136 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2924 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3137 break; 2925 break;
2926 case GET_EVENT_STATUS_NOTIFICATION:
2927 size = (cdb[7] << 8) | cdb[8];
2928 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2929 break;
2930 case ATA_16:
2931 /* Only support ATA passthrough to pSCSI backends.. */
2932 if (!passthrough)
2933 goto out_unsupported_cdb;
2934
2935 /* T_LENGTH */
2936 switch (cdb[2] & 0x3) {
2937 case 0x0:
2938 sectors = 0;
2939 break;
2940 case 0x1:
2941 sectors = (((cdb[1] & 0x1) ? cdb[3] : 0) << 8) | cdb[4];
2942 break;
2943 case 0x2:
2944 sectors = (((cdb[1] & 0x1) ? cdb[5] : 0) << 8) | cdb[6];
2945 break;
2946 case 0x3:
2947 pr_err("T_LENGTH=0x3 not supported for ATA_16\n");
2948 goto out_invalid_cdb_field;
2949 }
2950
2951 /* BYTE_BLOCK */
2952 if (cdb[2] & 0x4) {
2953 /* BLOCK T_TYPE: 512 or sector */
2954 size = sectors * ((cdb[2] & 0x10) ?
2955 dev->se_sub_dev->se_dev_attrib.block_size : 512);
2956 } else {
2957 /* BYTE */
2958 size = sectors;
2959 }
2960 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2961 break;
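The new ATA_16 case sizes the transfer from the passthrough CDB itself: T_LENGTH (cdb[2] bits 0-1) selects which field carries the count, the extend bit (cdb[1] bit 0) enables the high byte, and BYTE_BLOCK/T_TYPE (cdb[2] bits 2 and 4) pick the unit. The same decode as a standalone helper, with a worked example; the helper name is hypothetical:

	/* Sketch mirroring the T_LENGTH/BYTE_BLOCK decode above. */
	static u32 example_ata16_xfer_len(const u8 *cdb, u32 block_size)
	{
		u32 sectors = 0;

		switch (cdb[2] & 0x3) {		/* T_LENGTH */
		case 0x0:
			sectors = 0;
			break;
		case 0x1:			/* count in FEATURES field */
			sectors = (((cdb[1] & 0x1) ? cdb[3] : 0) << 8) | cdb[4];
			break;
		case 0x2:			/* count in SECTOR_COUNT field */
			sectors = (((cdb[1] & 0x1) ? cdb[5] : 0) << 8) | cdb[6];
			break;
		/* 0x3 is rejected above via out_invalid_cdb_field */
		}

		if (cdb[2] & 0x4)	/* BYTE_BLOCK: count is in blocks */
			return sectors * ((cdb[2] & 0x10) ? block_size : 512);
		return sectors;		/* BYTE: count is already in bytes */
	}

	/* e.g. cdb[1] = 0x01, cdb[2] = 0x0e, cdb[5] = 0x00, cdb[6] = 0x08:
	 * T_LENGTH = 0x2, sectors = 8, BYTE_BLOCK = 1, T_TYPE = 0
	 *   -> size = 8 * 512 = 4096 bytes */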
3138 default: 2962 default:
3139 pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode" 2963 pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
3140 " 0x%02x, sending CHECK_CONDITION.\n", 2964 " 0x%02x, sending CHECK_CONDITION.\n",
@@ -3142,6 +2966,9 @@ static int transport_generic_cmd_sequencer(
3142 goto out_unsupported_cdb; 2966 goto out_unsupported_cdb;
3143 } 2967 }
3144 2968
2969 if (cmd->unknown_data_length)
2970 cmd->data_length = size;
2971
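This assignment is the consumer of the TARGET_SCF_UNKNOWN_SIZE flag set in target_submit_cmd() above: a fabric whose wire protocol does not carry the expected transfer length can pass the flag and let the sequencer trust the CDB-derived size. Roughly (the argument list below is abridged and assumed, not quoted from the header):

	/* Fabric side, sketch: data length not known from the transport */
	target_submit_cmd(se_cmd, se_sess, cdb, sense_buf, unpacked_lun,
			  0 /* data_length unknown */, task_attr, data_dir,
			  TARGET_SCF_UNKNOWN_SIZE);
	/* Sequencer side (above): cmd->data_length = size from the CDB */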
3145 if (size != cmd->data_length) { 2972 if (size != cmd->data_length) {
3146 pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" 2973 pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
3147 " %u does not match SCSI CDB Length: %u for SAM Opcode:" 2974 " %u does not match SCSI CDB Length: %u for SAM Opcode:"
@@ -3177,15 +3004,25 @@ static int transport_generic_cmd_sequencer(
3177 cmd->data_length = size; 3004 cmd->data_length = size;
3178 } 3005 }
3179 3006
3180 if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB && 3007 if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
3181 sectors > dev->se_sub_dev->se_dev_attrib.fabric_max_sectors) { 3008 if (sectors > su_dev->se_dev_attrib.fabric_max_sectors) {
3182 printk_ratelimited(KERN_ERR "SCSI OP %02xh with too big sectors %u\n", 3009 printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
3183 cdb[0], sectors); 3010 " big sectors %u exceeds fabric_max_sectors:"
3184 goto out_invalid_cdb_field; 3011 " %u\n", cdb[0], sectors,
3012 su_dev->se_dev_attrib.fabric_max_sectors);
3013 goto out_invalid_cdb_field;
3014 }
3015 if (sectors > su_dev->se_dev_attrib.hw_max_sectors) {
3016 printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
3017 " big sectors %u exceeds backend hw_max_sectors:"
3018 " %u\n", cdb[0], sectors,
3019 su_dev->se_dev_attrib.hw_max_sectors);
3020 goto out_invalid_cdb_field;
3021 }
3185 } 3022 }
3186 3023
3187 /* reject any command that we don't have a handler for */ 3024 /* reject any command that we don't have a handler for */
3188 if (!(passthrough || cmd->execute_task || 3025 if (!(passthrough || cmd->execute_cmd ||
3189 (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))) 3026 (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
3190 goto out_unsupported_cdb; 3027 goto out_unsupported_cdb;
3191 3028
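Because a command is no longer split into tasks, oversized data CDBs must be rejected up front rather than sliced to fit. The check is now two-level, and each failure returns CHECK CONDITION through out_invalid_cdb_field; in outline:

	/* Sketch of the validation order for SCF_SCSI_DATA_SG_IO_CDB: */
	if (sectors > attrib->fabric_max_sectors)	/* fabric-advertised ceiling */
		goto out_invalid_cdb_field;
	if (sectors > attrib->hw_max_sectors)		/* backend hardware ceiling */
		goto out_invalid_cdb_field;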
@@ -3250,7 +3087,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
3250 cmd_p->t_task_cdb[0], 3087 cmd_p->t_task_cdb[0],
3251 cmd_p->sam_task_attr, cmd_p->se_ordered_id); 3088 cmd_p->sam_task_attr, cmd_p->se_ordered_id);
3252 3089
3253 transport_add_tasks_from_cmd(cmd_p); 3090 target_add_to_execute_list(cmd_p);
3254 new_active_tasks++; 3091 new_active_tasks++;
3255 3092
3256 spin_lock(&dev->delayed_cmd_lock); 3093 spin_lock(&dev->delayed_cmd_lock);
@@ -3346,10 +3183,6 @@ static void target_complete_ok_work(struct work_struct *work)
3346 if (transport_get_sense_data(cmd) < 0) 3183 if (transport_get_sense_data(cmd) < 0)
3347 reason = TCM_NON_EXISTENT_LUN; 3184 reason = TCM_NON_EXISTENT_LUN;
3348 3185
3349 /*
3350 * Only set when an struct se_task->task_scsi_status returned
3351 * a non GOOD status.
3352 */
3353 if (cmd->scsi_status) { 3186 if (cmd->scsi_status) {
3354 ret = transport_send_check_condition_and_sense( 3187 ret = transport_send_check_condition_and_sense(
3355 cmd, reason, 1); 3188 cmd, reason, 1);
@@ -3424,33 +3257,6 @@ queue_full:
3424 transport_handle_queue_full(cmd, cmd->se_dev); 3257 transport_handle_queue_full(cmd, cmd->se_dev);
3425} 3258}
3426 3259
3427static void transport_free_dev_tasks(struct se_cmd *cmd)
3428{
3429 struct se_task *task, *task_tmp;
3430 unsigned long flags;
3431 LIST_HEAD(dispose_list);
3432
3433 spin_lock_irqsave(&cmd->t_state_lock, flags);
3434 list_for_each_entry_safe(task, task_tmp,
3435 &cmd->t_task_list, t_list) {
3436 if (!(task->task_flags & TF_ACTIVE))
3437 list_move_tail(&task->t_list, &dispose_list);
3438 }
3439 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3440
3441 while (!list_empty(&dispose_list)) {
3442 task = list_first_entry(&dispose_list, struct se_task, t_list);
3443
3444 if (task->task_sg != cmd->t_data_sg &&
3445 task->task_sg != cmd->t_bidi_data_sg)
3446 kfree(task->task_sg);
3447
3448 list_del(&task->t_list);
3449
3450 cmd->se_dev->transport->free_task(task);
3451 }
3452}
3453
3454static inline void transport_free_sgl(struct scatterlist *sgl, int nents) 3260static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
3455{ 3261{
3456 struct scatterlist *sg; 3262 struct scatterlist *sg;
@@ -3511,7 +3317,6 @@ static void transport_release_cmd(struct se_cmd *cmd)
3511static void transport_put_cmd(struct se_cmd *cmd) 3317static void transport_put_cmd(struct se_cmd *cmd)
3512{ 3318{
3513 unsigned long flags; 3319 unsigned long flags;
3514 int free_tasks = 0;
3515 3320
3516 spin_lock_irqsave(&cmd->t_state_lock, flags); 3321 spin_lock_irqsave(&cmd->t_state_lock, flags);
3517 if (atomic_read(&cmd->t_fe_count)) { 3322 if (atomic_read(&cmd->t_fe_count)) {
@@ -3519,21 +3324,12 @@ static void transport_put_cmd(struct se_cmd *cmd)
3519 goto out_busy; 3324 goto out_busy;
3520 } 3325 }
3521 3326
3522 if (atomic_read(&cmd->t_se_count)) {
3523 if (!atomic_dec_and_test(&cmd->t_se_count))
3524 goto out_busy;
3525 }
3526
3527 if (cmd->transport_state & CMD_T_DEV_ACTIVE) { 3327 if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
3528 cmd->transport_state &= ~CMD_T_DEV_ACTIVE; 3328 cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
3529 transport_all_task_dev_remove_state(cmd); 3329 target_remove_from_state_list(cmd);
3530 free_tasks = 1;
3531 } 3330 }
3532 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3331 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3533 3332
3534 if (free_tasks != 0)
3535 transport_free_dev_tasks(cmd);
3536
3537 transport_free_pages(cmd); 3333 transport_free_pages(cmd);
3538 transport_release_cmd(cmd); 3334 transport_release_cmd(cmd);
3539 return; 3335 return;
@@ -3683,245 +3479,14 @@ out:
3683 return -ENOMEM; 3479 return -ENOMEM;
3684} 3480}
3685 3481
3686/* Reduce sectors if they are too long for the device */
3687static inline sector_t transport_limit_task_sectors(
3688 struct se_device *dev,
3689 unsigned long long lba,
3690 sector_t sectors)
3691{
3692 sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
3693
3694 if (dev->transport->get_device_type(dev) == TYPE_DISK)
3695 if ((lba + sectors) > transport_dev_end_lba(dev))
3696 sectors = ((transport_dev_end_lba(dev) - lba) + 1);
3697
3698 return sectors;
3699}
3700
3701
3702/*
3703 * This function can be used by HW target mode drivers to create a linked
3704 * scatterlist from all contiguously allocated struct se_task->task_sg[].
3705 * This is intended to be called during the completion path by TCM Core
3706 * when struct target_core_fabric_ops->check_task_sg_chaining is enabled.
3707 */
3708void transport_do_task_sg_chain(struct se_cmd *cmd)
3709{
3710 struct scatterlist *sg_first = NULL;
3711 struct scatterlist *sg_prev = NULL;
3712 int sg_prev_nents = 0;
3713 struct scatterlist *sg;
3714 struct se_task *task;
3715 u32 chained_nents = 0;
3716 int i;
3717
3718 BUG_ON(!cmd->se_tfo->task_sg_chaining);
3719
3720 /*
3721 * Walk the struct se_task list and setup scatterlist chains
3722 * for each contiguously allocated struct se_task->task_sg[].
3723 */
3724 list_for_each_entry(task, &cmd->t_task_list, t_list) {
3725 if (!task->task_sg)
3726 continue;
3727
3728 if (!sg_first) {
3729 sg_first = task->task_sg;
3730 chained_nents = task->task_sg_nents;
3731 } else {
3732 sg_chain(sg_prev, sg_prev_nents, task->task_sg);
3733 chained_nents += task->task_sg_nents;
3734 }
3735 /*
3736 * For the padded tasks, use the extra SGL vector allocated
3737 * in transport_allocate_data_tasks() for the sg_prev_nents
3738 * offset into sg_chain() above.
3739 *
3740 * We do not need the padding for the last task (or a single
3741 * task), but in that case we will never use the sg_prev_nents
3742 * value below which would be incorrect.
3743 */
3744 sg_prev_nents = (task->task_sg_nents + 1);
3745 sg_prev = task->task_sg;
3746 }
3747 /*
3748 * Setup the starting pointer and total t_tasks_sg_linked_no including
3749 * padding SGs for linking and to mark the end.
3750 */
3751 cmd->t_tasks_sg_chained = sg_first;
3752 cmd->t_tasks_sg_chained_no = chained_nents;
3753
3754 pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and"
3755 " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained,
3756 cmd->t_tasks_sg_chained_no);
3757
3758 for_each_sg(cmd->t_tasks_sg_chained, sg,
3759 cmd->t_tasks_sg_chained_no, i) {
3760
3761 pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n",
3762 i, sg, sg_page(sg), sg->length, sg->offset);
3763 if (sg_is_chain(sg))
3764 pr_debug("SG: %p sg_is_chain=1\n", sg);
3765 if (sg_is_last(sg))
3766 pr_debug("SG: %p sg_is_last=1\n", sg);
3767 }
3768}
3769EXPORT_SYMBOL(transport_do_task_sg_chain);
3770
3771/*
3772 * Break up cmd into chunks transport can handle
3773 */
3774static int
3775transport_allocate_data_tasks(struct se_cmd *cmd,
3776 enum dma_data_direction data_direction,
3777 struct scatterlist *cmd_sg, unsigned int sgl_nents)
3778{
3779 struct se_device *dev = cmd->se_dev;
3780 int task_count, i;
3781 unsigned long long lba;
3782 sector_t sectors, dev_max_sectors;
3783 u32 sector_size;
3784
3785 if (transport_cmd_get_valid_sectors(cmd) < 0)
3786 return -EINVAL;
3787
3788 dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
3789 sector_size = dev->se_sub_dev->se_dev_attrib.block_size;
3790
3791 WARN_ON(cmd->data_length % sector_size);
3792
3793 lba = cmd->t_task_lba;
3794 sectors = DIV_ROUND_UP(cmd->data_length, sector_size);
3795 task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors);
3796
3797 /*
3798 * If we need just a single task reuse the SG list in the command
3799 * and avoid a lot of work.
3800 */
3801 if (task_count == 1) {
3802 struct se_task *task;
3803 unsigned long flags;
3804
3805 task = transport_generic_get_task(cmd, data_direction);
3806 if (!task)
3807 return -ENOMEM;
3808
3809 task->task_sg = cmd_sg;
3810 task->task_sg_nents = sgl_nents;
3811
3812 task->task_lba = lba;
3813 task->task_sectors = sectors;
3814 task->task_size = task->task_sectors * sector_size;
3815
3816 spin_lock_irqsave(&cmd->t_state_lock, flags);
3817 list_add_tail(&task->t_list, &cmd->t_task_list);
3818 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3819
3820 return task_count;
3821 }
3822
3823 for (i = 0; i < task_count; i++) {
3824 struct se_task *task;
3825 unsigned int task_size, task_sg_nents_padded;
3826 struct scatterlist *sg;
3827 unsigned long flags;
3828 int count;
3829
3830 task = transport_generic_get_task(cmd, data_direction);
3831 if (!task)
3832 return -ENOMEM;
3833
3834 task->task_lba = lba;
3835 task->task_sectors = min(sectors, dev_max_sectors);
3836 task->task_size = task->task_sectors * sector_size;
3837
3838 /*
3839 * This now assumes that passed sg_ents are in PAGE_SIZE chunks
3840 * in order to calculate the number per task SGL entries
3841 */
3842 task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE);
3843 /*
3844 * Check if the fabric module driver is requesting that all
3845 * struct se_task->task_sg[] be chained together.. If so,
3846 * then allocate an extra padding SG entry for linking and
3847 * marking the end of the chained SGL for every task except
3848 * the last one for (task_count > 1) operation, or skipping
3849 * the extra padding for the (task_count == 1) case.
3850 */
3851 if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) {
3852 task_sg_nents_padded = (task->task_sg_nents + 1);
3853 } else
3854 task_sg_nents_padded = task->task_sg_nents;
3855
3856 task->task_sg = kmalloc(sizeof(struct scatterlist) *
3857 task_sg_nents_padded, GFP_KERNEL);
3858 if (!task->task_sg) {
3859 cmd->se_dev->transport->free_task(task);
3860 return -ENOMEM;
3861 }
3862
3863 sg_init_table(task->task_sg, task_sg_nents_padded);
3864
3865 task_size = task->task_size;
3866
3867 /* Build new sgl, only up to task_size */
3868 for_each_sg(task->task_sg, sg, task->task_sg_nents, count) {
3869 if (cmd_sg->length > task_size)
3870 break;
3871
3872 *sg = *cmd_sg;
3873 task_size -= cmd_sg->length;
3874 cmd_sg = sg_next(cmd_sg);
3875 }
3876
3877 lba += task->task_sectors;
3878 sectors -= task->task_sectors;
3879
3880 spin_lock_irqsave(&cmd->t_state_lock, flags);
3881 list_add_tail(&task->t_list, &cmd->t_task_list);
3882 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3883 }
3884
3885 return task_count;
3886}
3887
3888static int
3889transport_allocate_control_task(struct se_cmd *cmd)
3890{
3891 struct se_task *task;
3892 unsigned long flags;
3893
3894 /* Workaround for handling zero-length control CDBs */
3895 if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
3896 !cmd->data_length)
3897 return 0;
3898
3899 task = transport_generic_get_task(cmd, cmd->data_direction);
3900 if (!task)
3901 return -ENOMEM;
3902
3903 task->task_sg = cmd->t_data_sg;
3904 task->task_size = cmd->data_length;
3905 task->task_sg_nents = cmd->t_data_nents;
3906
3907 spin_lock_irqsave(&cmd->t_state_lock, flags);
3908 list_add_tail(&task->t_list, &cmd->t_task_list);
3909 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3910
3911 /* Success! Return number of tasks allocated */
3912 return 1;
3913}
3914
3915/* 3482/*
3916 * Allocate any required ressources to execute the command, and either place 3483 * Allocate any required resources to execute the command. For writes we
3917 * it on the execution queue if possible. For writes we might not have the 3484 * might not have the payload yet, so notify the fabric via a call to
3918 * payload yet, thus notify the fabric via a call to ->write_pending instead. 3485 * ->write_pending instead. Otherwise place it on the execution queue.
3919 */ 3486 */
3920int transport_generic_new_cmd(struct se_cmd *cmd) 3487int transport_generic_new_cmd(struct se_cmd *cmd)
3921{ 3488{
3922 struct se_device *dev = cmd->se_dev; 3489 struct se_device *dev = cmd->se_dev;
3923 int task_cdbs, task_cdbs_bidi = 0;
3924 int set_counts = 1;
3925 int ret = 0; 3490 int ret = 0;
3926 3491
3927 /* 3492 /*
@@ -3936,35 +3501,9 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
3936 goto out_fail; 3501 goto out_fail;
3937 } 3502 }
3938 3503
3939 /* 3504 /* Workaround for handling zero-length control CDBs */
3940 * For BIDI command set up the read tasks first. 3505 if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
3941 */ 3506 !cmd->data_length) {
3942 if (cmd->t_bidi_data_sg &&
3943 dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
3944 BUG_ON(!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB));
3945
3946 task_cdbs_bidi = transport_allocate_data_tasks(cmd,
3947 DMA_FROM_DEVICE, cmd->t_bidi_data_sg,
3948 cmd->t_bidi_data_nents);
3949 if (task_cdbs_bidi <= 0)
3950 goto out_fail;
3951
3952 atomic_inc(&cmd->t_fe_count);
3953 atomic_inc(&cmd->t_se_count);
3954 set_counts = 0;
3955 }
3956
3957 if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
3958 task_cdbs = transport_allocate_data_tasks(cmd,
3959 cmd->data_direction, cmd->t_data_sg,
3960 cmd->t_data_nents);
3961 } else {
3962 task_cdbs = transport_allocate_control_task(cmd);
3963 }
3964
3965 if (task_cdbs < 0)
3966 goto out_fail;
3967 else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
3968 spin_lock_irq(&cmd->t_state_lock); 3507 spin_lock_irq(&cmd->t_state_lock);
3969 cmd->t_state = TRANSPORT_COMPLETE; 3508 cmd->t_state = TRANSPORT_COMPLETE;
3970 cmd->transport_state |= CMD_T_ACTIVE; 3509 cmd->transport_state |= CMD_T_ACTIVE;
@@ -3982,29 +3521,31 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
3982 return 0; 3521 return 0;
3983 } 3522 }
3984 3523
3985 if (set_counts) { 3524 if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
3986 atomic_inc(&cmd->t_fe_count); 3525 struct se_dev_attrib *attr = &dev->se_sub_dev->se_dev_attrib;
3987 atomic_inc(&cmd->t_se_count); 3526
3527 if (transport_cmd_get_valid_sectors(cmd) < 0)
3528 return -EINVAL;
3529
3530 BUG_ON(cmd->data_length % attr->block_size);
3531 BUG_ON(DIV_ROUND_UP(cmd->data_length, attr->block_size) >
3532 attr->hw_max_sectors);
3988 } 3533 }
3989 3534
3990 cmd->t_task_list_num = (task_cdbs + task_cdbs_bidi); 3535 atomic_inc(&cmd->t_fe_count);
3991 atomic_set(&cmd->t_task_cdbs_left, cmd->t_task_list_num);
3992 atomic_set(&cmd->t_task_cdbs_ex_left, cmd->t_task_list_num);
3993 3536
3994 /* 3537 /*
3995 * For WRITEs, let the fabric know its buffer is ready.. 3538 * For WRITEs, let the fabric know its buffer is ready.
3996 * This WRITE struct se_cmd (and all of its associated struct se_task's) 3539 *
3997 * will be added to the struct se_device execution queue after its WRITE 3540 * The command will be added to the execution queue after its write
3998 * data has arrived. (ie: It gets handled by the transport processing 3541 * data has arrived.
3999 * thread a second time)
4000 */ 3542 */
4001 if (cmd->data_direction == DMA_TO_DEVICE) { 3543 if (cmd->data_direction == DMA_TO_DEVICE) {
4002 transport_add_tasks_to_state_queue(cmd); 3544 target_add_to_state_list(cmd);
4003 return transport_generic_write_pending(cmd); 3545 return transport_generic_write_pending(cmd);
4004 } 3546 }
4005 /* 3547 /*
4006 * Everything else but a WRITE, add the struct se_cmd's struct se_task's 3548 * Everything else but a WRITE, add the command to the execution queue.
4007 * to the execution queue.
4008 */ 3549 */
4009 transport_execute_tasks(cmd); 3550 transport_execute_tasks(cmd);
4010 return 0; 3551 return 0;
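
The rewritten transport_generic_new_cmd() replaces the per-task bookkeeping with two sanity checks on DATA CDBs: the payload must be a whole number of logical blocks, and it must fit within the backend's hardware limit. A standalone sketch of those checks, with -EINVAL returns standing in for the commit's BUG_ON() assertions; the function name and parameters are illustrative:

	#include <linux/kernel.h>	/* DIV_ROUND_UP() */
	#include <linux/errno.h>
	#include <linux/types.h>

	/* Sketch: a DATA CDB payload must be whole blocks and fit the HW. */
	static int validate_data_cdb(u32 data_length, u32 block_size,
				     u32 hw_max_sectors)
	{
		if (data_length % block_size)
			return -EINVAL;	/* partial logical block */
		if (DIV_ROUND_UP(data_length, block_size) > hw_max_sectors)
			return -EINVAL;	/* exceeds the hardware limit */
		return 0;
	}
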
@@ -4091,8 +3632,6 @@ void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
4091 if (cmd->se_lun) 3632 if (cmd->se_lun)
4092 transport_lun_remove_cmd(cmd); 3633 transport_lun_remove_cmd(cmd);
4093 3634
4094 transport_free_dev_tasks(cmd);
4095
4096 transport_put_cmd(cmd); 3635 transport_put_cmd(cmd);
4097 } 3636 }
4098} 3637}
@@ -4233,7 +3772,8 @@ EXPORT_SYMBOL(target_wait_for_sess_cmds);
4233static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) 3772static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
4234{ 3773{
4235 unsigned long flags; 3774 unsigned long flags;
4236 int ret; 3775 int ret = 0;
3776
4237 /* 3777 /*
4238 * If the frontend has already requested this struct se_cmd to 3778 * If the frontend has already requested this struct se_cmd to
4239 * be stopped, we can safely ignore this struct se_cmd. 3779 * be stopped, we can safely ignore this struct se_cmd.
@@ -4253,10 +3793,21 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
4253 3793
4254 wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq); 3794 wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
4255 3795
4256 ret = transport_stop_tasks_for_cmd(cmd); 3796 // XXX: audit task_flags checks.
3797 spin_lock_irqsave(&cmd->t_state_lock, flags);
3798 if ((cmd->transport_state & CMD_T_BUSY) &&
3799 (cmd->transport_state & CMD_T_SENT)) {
3800 if (!target_stop_cmd(cmd, &flags))
3801 ret++;
3802 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3803 } else {
3804 spin_unlock_irqrestore(&cmd->t_state_lock,
3805 flags);
3806 target_remove_from_execute_list(cmd);
3807 }
4257 3808
4258 pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:" 3809 pr_debug("ConfigFS: cmd: %p stop tasks ret:"
4259 " %d\n", cmd, cmd->t_task_list_num, ret); 3810 " %d\n", cmd, ret);
4260 if (!ret) { 3811 if (!ret) {
4261 pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n", 3812 pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
4262 cmd->se_tfo->get_task_tag(cmd)); 3813 cmd->se_tfo->get_task_tag(cmd));
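
target_stop_cmd() is handed the saved irq flags by pointer because it must drop t_state_lock to sleep and retake it before returning, so caller and callee share one irqsave context. A sketch consistent with how the helper behaves in this series; flag and field names are assumed from the surrounding hunks and should be checked against the tree:

	#include <linux/spinlock.h>
	#include <linux/completion.h>
	#include <target/target_core_base.h>

	/*
	 * Sketch of the stop handshake: mark the command, drop the lock so
	 * the processing context can finish, sleep, then retake the lock
	 * with the caller's irqsave context. Simplified illustration only.
	 */
	static bool stop_cmd_sketch(struct se_cmd *cmd, unsigned long *flags)
	{
		if (!(cmd->transport_state & CMD_T_BUSY))
			return false;		/* nothing in flight */

		cmd->transport_state |= CMD_T_REQUEST_STOP;
		spin_unlock_irqrestore(&cmd->t_state_lock, *flags);

		wait_for_completion(&cmd->task_stop_comp); /* sleeps unlocked */

		spin_lock_irqsave(&cmd->t_state_lock, *flags);
		cmd->transport_state &= ~(CMD_T_REQUEST_STOP | CMD_T_BUSY);
		return true;			/* command was stopped */
	}
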
@@ -4328,10 +3879,9 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
4328 goto check_cond; 3879 goto check_cond;
4329 } 3880 }
4330 cmd->transport_state &= ~CMD_T_DEV_ACTIVE; 3881 cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
4331 transport_all_task_dev_remove_state(cmd); 3882 target_remove_from_state_list(cmd);
4332 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); 3883 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
4333 3884
4334 transport_free_dev_tasks(cmd);
4335 /* 3885 /*
4336 * The Storage engine stopped this struct se_cmd before it was 3886 * The Storage engine stopped this struct se_cmd before it was
4337 * sent to the fabric frontend for delivery back to the 3887 * sent to the fabric frontend for delivery back to the
@@ -4444,7 +3994,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
4444 wait_for_completion(&cmd->transport_lun_fe_stop_comp); 3994 wait_for_completion(&cmd->transport_lun_fe_stop_comp);
4445 spin_lock_irqsave(&cmd->t_state_lock, flags); 3995 spin_lock_irqsave(&cmd->t_state_lock, flags);
4446 3996
4447 transport_all_task_dev_remove_state(cmd); 3997 target_remove_from_state_list(cmd);
4448 /* 3998 /*
4449 * At this point, the frontend who was the originator of this 3999 * At this point, the frontend who was the originator of this
4450 * struct se_cmd, now owns the structure and can be released through 4000 * struct se_cmd, now owns the structure and can be released through
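
target_remove_from_state_list(), which replaces transport_all_task_dev_remove_state() throughout these hunks, unlinks the command itself (rather than its tasks) from the per-device state list. A hedged sketch of the shape of that helper; the lock and field names follow this series but are assumptions to verify against the tree:

	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include <target/target_core_base.h>

	/* Sketch: commands, not tasks, now sit on the device state list. */
	static void remove_from_state_list_sketch(struct se_cmd *cmd)
	{
		struct se_device *dev = cmd->se_dev;
		unsigned long flags;

		if (!dev)
			return;

		spin_lock_irqsave(&dev->execute_task_lock, flags);
		if (cmd->state_active) {
			list_del(&cmd->state_list);
			cmd->state_active = false;
		}
		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
	}
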
@@ -4710,12 +4260,12 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
4710 if (!send_status || 4260 if (!send_status ||
4711 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) 4261 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
4712 return 1; 4262 return 1;
4713#if 0 4263
4714 pr_debug("Sending delayed SAM_STAT_TASK_ABORTED" 4264 pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
4715 " status for CDB: 0x%02x ITT: 0x%08x\n", 4265 " status for CDB: 0x%02x ITT: 0x%08x\n",
4716 cmd->t_task_cdb[0], 4266 cmd->t_task_cdb[0],
4717 cmd->se_tfo->get_task_tag(cmd)); 4267 cmd->se_tfo->get_task_tag(cmd));
4718#endif 4268
4719 cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS; 4269 cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
4720 cmd->se_tfo->queue_status(cmd); 4270 cmd->se_tfo->queue_status(cmd);
4721 ret = 1; 4271 ret = 1;
@@ -4748,11 +4298,11 @@ void transport_send_task_abort(struct se_cmd *cmd)
4748 } 4298 }
4749 } 4299 }
4750 cmd->scsi_status = SAM_STAT_TASK_ABORTED; 4300 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
4751#if 0 4301
4752 pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x," 4302 pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
4753 " ITT: 0x%08x\n", cmd->t_task_cdb[0], 4303 " ITT: 0x%08x\n", cmd->t_task_cdb[0],
4754 cmd->se_tfo->get_task_tag(cmd)); 4304 cmd->se_tfo->get_task_tag(cmd));
4755#endif 4305
4756 cmd->se_tfo->queue_status(cmd); 4306 cmd->se_tfo->queue_status(cmd);
4757} 4307}
4758 4308
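
Dropping the "#if 0" guards in the two hunks above costs nothing in production builds: pr_debug() is either routed through dynamic debug or compiles to a type-checking no-op. A simplified sketch of the non-dynamic fallback as found in <linux/printk.h>:

	/* Sketch of the fallback when CONFIG_DYNAMIC_DEBUG is not set. */
	#ifdef DEBUG
	#define pr_debug(fmt, ...) \
		printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
	#else
	#define pr_debug(fmt, ...) \
		no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
	#endif
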
@@ -4865,7 +4415,7 @@ get_cmd:
4865 } 4415 }
4866 4416
4867out: 4417out:
4868 WARN_ON(!list_empty(&dev->state_task_list)); 4418 WARN_ON(!list_empty(&dev->state_list));
4869 WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list)); 4419 WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list));
4870 dev->process_thread = NULL; 4420 dev->process_thread = NULL;
4871 return 0; 4421 return 0;
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index a375f257aabc..f03fb9730f5b 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -215,20 +215,10 @@ int ft_write_pending(struct se_cmd *se_cmd)
215 */ 215 */
216 if ((ep->xid <= lport->lro_xid) && 216 if ((ep->xid <= lport->lro_xid) &&
217 (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) { 217 (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) {
218 if (se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { 218 if ((se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) &&
219 /* 219 lport->tt.ddp_target(lport, ep->xid,
220 * cmd may have been broken up into multiple 220 se_cmd->t_data_sg,
221 * tasks. Link their sgs together so we can 221 se_cmd->t_data_nents))
222 * operate on them all at once.
223 */
224 transport_do_task_sg_chain(se_cmd);
225 cmd->sg = se_cmd->t_tasks_sg_chained;
226 cmd->sg_cnt =
227 se_cmd->t_tasks_sg_chained_no;
228 }
229 if (cmd->sg && lport->tt.ddp_target(lport, ep->xid,
230 cmd->sg,
231 cmd->sg_cnt))
232 cmd->was_ddp_setup = 1; 222 cmd->was_ddp_setup = 1;
233 } 223 }
234 } 224 }
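
With a single per-command scatterlist, ft_write_pending() no longer needs transport_do_task_sg_chain() before programming DDP; se_cmd->t_data_sg goes straight to the LLD's ddp_target(). For reference, what the removed chaining step provided, in miniature: sg_chain() is the real scatterlist API, while the wrapper below is a hypothetical illustration.

	#include <linux/scatterlist.h>

	/*
	 * Sketch: sg_chain() links table 'a' to table 'b' so one
	 * for_each_sg() walk covers both. The final entry of 'a' is
	 * consumed as the chain pointer, which is why callers must
	 * reserve a spare slot in the first table.
	 */
	static void chain_tables(struct scatterlist *a, unsigned int a_nents,
				 struct scatterlist *b)
	{
		sg_chain(a, a_nents, b);
	}
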
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 2948dc944619..9501844fae2d 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -576,9 +576,6 @@ int ft_register_configfs(void)
576 } 576 }
577 fabric->tf_ops = ft_fabric_ops; 577 fabric->tf_ops = ft_fabric_ops;
578 578
579 /* Allowing support for task_sg_chaining */
580 fabric->tf_ops.task_sg_chaining = 1;
581
582 /* 579 /*
583 * Setup default attribute lists for various fabric->tf_cit_tmpl 580 * Setup default attribute lists for various fabric->tf_cit_tmpl
584 */ 581 */
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index dc7c0db26e20..071a505f98fc 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -228,7 +228,7 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
228 "payload, Frame will be dropped if" 228 "payload, Frame will be dropped if"
229 "'Sequence Initiative' bit in f_ctl is" 229 "'Sequence Initiative' bit in f_ctl is"
230 "not set\n", __func__, ep->xid, f_ctl, 230 "not set\n", __func__, ep->xid, f_ctl,
231 cmd->sg, cmd->sg_cnt); 231 se_cmd->t_data_sg, se_cmd->t_data_nents);
232 /* 232 /*
233 * Invalidate HW DDP context if it was setup for respective 233 * Invalidate HW DDP context if it was setup for respective
234 * command. Invalidation of HW DDP context is required in both 234 * command. Invalidation of HW DDP context is required in both