author    Andy Grover <agrover@redhat.com>    2011-05-02 20:12:10 -0400
committer Nicholas Bellinger <nab@linux-iscsi.org>    2011-07-22 05:37:44 -0400
commit    a1d8b49abd60ba5d09e7c968731abcb0f8f1cbf6 (patch)
tree      8cbfd54b4829fd5f0ed206e15c81c1e626e7701d
parent    dd3a5ad8e0c8706659f02c4a72b8c87f6f7ab479 (diff)
target: Updates from AGrover and HCH (round 3)
This patch contains a squashed version of the third round of series
cleanups, improvements, and simplifications from Andy and Christoph,
ahead of the heavy lifting between round 3 -> 4 for the target core SGL
conversion. This includes cleanups to the main target I/O path and
other miscellaneous updates.

target: Replace custom sg<->buf functions with lib funcs (sketch below)
target: Simplify sector limiting code
target: get_cdb should never return NULL
target: Simplify transport_memcpy_se_mem_read_contig
target: Use assignment rather than increment for t_task_cdbs
target: Don't pass dma_size to generic_get_mem
target: Pass sg with type scatterlist in transport_map_sg_to_mem
target: Move task_sg_num next to task_sg in struct se_task
target: inline struct se_transport_task into struct se_cmd
target: Change name & semantics of transport_get_sectors()
target: Remove unused members of se_cmd
target: Rename se_cmd.t_task_cdbs to t_task_list_num
target: Fix some spelling
target: Remove unused var from transport_generic_do_tmr
target: map_sg_to_mem: return sg_count in return value
target/pscsi: Use min_t for sector limits (a sketch follows the pscsi diff)
target/pscsi: Unused param for pscsi_get_bio()
target: Rename get_cdb_count to allocate_tasks
target: Make transport_generic_new_cmd() available for iscsi-target
target: Remove fabric callback to allocate iovecs
target: Fix transport_generic_new_cmd WRITE comment

(hch: Use __GFP_ZERO for alloc_pages() usage)

Signed-off-by: Andy Grover <agrover@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
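A minimal sketch of the lib/scatterlist.c helpers that the custom
sg<->buf routines are replaced with; the wrapper function and its name
are illustrative only, not part of this patch:

    #include <linux/scatterlist.h>

    /* Copy a linear kernel buffer into an SG table and back out again,
     * using the generic helpers rather than open-coded sg walking.
     */
    static void example_sg_copy(struct scatterlist *sgl, unsigned int nents,
                                void *buf, size_t buflen)
    {
            /* linear buffer -> scatterlist */
            sg_copy_from_buffer(sgl, nents, buf, buflen);
            /* scatterlist -> linear buffer */
            sg_copy_to_buffer(sgl, nents, buf, buflen);
    }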
-rw-r--r--  drivers/target/loopback/tcm_loop.c          11
-rw-r--r--  drivers/target/target_core_alua.c            4
-rw-r--r--  drivers/target/target_core_cdb.c            39
-rw-r--r--  drivers/target/target_core_device.c          6
-rw-r--r--  drivers/target/target_core_file.c            8
-rw-r--r--  drivers/target/target_core_iblock.c          4
-rw-r--r--  drivers/target/target_core_pr.c             22
-rw-r--r--  drivers/target/target_core_pscsi.c          20
-rw-r--r--  drivers/target/target_core_rd.c              8
-rw-r--r--  drivers/target/target_core_tmr.c            56
-rw-r--r--  drivers/target/target_core_transport.c    1008
-rw-r--r--  drivers/target/target_core_ua.c              2
-rw-r--r--  drivers/target/tcm_fc/tfc_cmd.c             14
-rw-r--r--  drivers/target/tcm_fc/tfc_io.c              21
-rw-r--r--  include/target/target_core_base.h          105
-rw-r--r--  include/target/target_core_fabric_ops.h      5
-rw-r--r--  include/target/target_core_transport.h       3
17 files changed, 583 insertions(+), 753 deletions(-)
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index eeb7ee7ab9f7..7ba2542aabe7 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -118,7 +118,7 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
 	 * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
 	 */
 	if (scsi_bidi_cmnd(sc))
-		se_cmd->t_task.t_tasks_bidi = 1;
+		se_cmd->t_tasks_bidi = 1;
 	/*
 	 * Locate the struct se_lun pointer and attach it to struct se_cmd
 	 */
@@ -169,7 +169,7 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
 	 * For BIDI commands, pass in the extra READ buffer
 	 * to transport_generic_map_mem_to_cmd() below..
 	 */
-	if (se_cmd->t_task.t_tasks_bidi) {
+	if (se_cmd->t_tasks_bidi) {
 		struct scsi_data_buffer *sdb = scsi_in(sc);
 
 		sgl_bidi = sdb->table.sgl;
@@ -1424,13 +1424,6 @@ static int tcm_loop_register_configfs(void)
 		&tcm_loop_tpg_release_fabric_acl;
 	fabric->tf_ops.tpg_get_inst_index = &tcm_loop_get_inst_index;
 	/*
-	 * Since tcm_loop is mapping physical memory from Linux/SCSI
-	 * struct scatterlist arrays for each struct scsi_cmnd I/O,
-	 * we do not need TCM to allocate a iovec array for
-	 * virtual memory address mappings
-	 */
-	fabric->tf_ops.alloc_cmd_iovecs = NULL;
-	/*
 	 * Used for setting up remaining TCM resources in process context
 	 */
 	fabric->tf_ops.new_cmd_map = &tcm_loop_new_cmd_map;
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 76abd86b6a73..76d506fe99e0 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -65,7 +65,7 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
 	struct se_port *port;
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
-	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task_buf;
 	u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
 				    Target port group descriptor */
 
@@ -157,7 +157,7 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
 	struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
 	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
-	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task_buf;
 	unsigned char *ptr = &buf[4]; /* Skip over RESERVED area in header */
 	u32 len = 4; /* Skip over RESERVED area in header */
 	int alua_access_state, primary = 0, rc;
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 8d5a0fc3a220..09ef3f811567 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -66,7 +66,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
 {
 	struct se_lun *lun = cmd->se_lun;
 	struct se_device *dev = cmd->se_dev;
-	unsigned char *buf = cmd->t_task.t_task_buf;
+	unsigned char *buf = cmd->t_task_buf;
 
 	/*
 	 * Make sure we at least have 6 bytes of INQUIRY response
@@ -621,8 +621,8 @@ static int
 target_emulate_inquiry(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
-	unsigned char *buf = cmd->t_task.t_task_buf;
-	unsigned char *cdb = cmd->t_task.t_task_cdb;
+	unsigned char *buf = cmd->t_task_buf;
+	unsigned char *cdb = cmd->t_task_cdb;
 
 	if (!(cdb[1] & 0x1))
 		return target_emulate_inquiry_std(cmd);
@@ -666,7 +666,7 @@ static int
 target_emulate_readcapacity(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
-	unsigned char *buf = cmd->t_task.t_task_buf;
+	unsigned char *buf = cmd->t_task_buf;
 	unsigned long long blocks_long = dev->transport->get_blocks(dev);
 	u32 blocks;
 
@@ -696,7 +696,7 @@ static int
 target_emulate_readcapacity_16(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
-	unsigned char *buf = cmd->t_task.t_task_buf;
+	unsigned char *buf = cmd->t_task_buf;
 	unsigned long long blocks = dev->transport->get_blocks(dev);
 
 	buf[0] = (blocks >> 56) & 0xff;
@@ -831,8 +831,8 @@ static int
 target_emulate_modesense(struct se_cmd *cmd, int ten)
 {
 	struct se_device *dev = cmd->se_dev;
-	char *cdb = cmd->t_task.t_task_cdb;
-	unsigned char *rbuf = cmd->t_task.t_task_buf;
+	char *cdb = cmd->t_task_cdb;
+	unsigned char *rbuf = cmd->t_task_buf;
 	int type = dev->transport->get_device_type(dev);
 	int offset = (ten) ? 8 : 4;
 	int length = 0;
@@ -903,8 +903,8 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
 static int
 target_emulate_request_sense(struct se_cmd *cmd)
 {
-	unsigned char *cdb = cmd->t_task.t_task_cdb;
-	unsigned char *buf = cmd->t_task.t_task_buf;
+	unsigned char *cdb = cmd->t_task_cdb;
+	unsigned char *buf = cmd->t_task_buf;
 	u8 ua_asc = 0, ua_ascq = 0;
 
 	if (cdb[1] & 0x01) {
@@ -965,8 +965,8 @@ target_emulate_unmap(struct se_task *task)
 {
 	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
-	unsigned char *buf = cmd->t_task.t_task_buf, *ptr = NULL;
-	unsigned char *cdb = &cmd->t_task.t_task_cdb[0];
+	unsigned char *buf = cmd->t_task_buf, *ptr = NULL;
+	unsigned char *cdb = &cmd->t_task_cdb[0];
 	sector_t lba;
 	unsigned int size = cmd->data_length, range;
 	int ret, offset;
@@ -1012,7 +1012,8 @@ target_emulate_write_same(struct se_task *task, int write_same32)
 {
 	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
-	sector_t range, lba = cmd->t_task.t_task_lba;
+	sector_t range;
+	sector_t lba = cmd->t_task_lba;
 	unsigned int num_blocks;
 	int ret;
 	/*
@@ -1021,9 +1022,9 @@ target_emulate_write_same(struct se_task *task, int write_same32)
 	 * range based on ->get_blocks() - starting LBA.
 	 */
 	if (write_same32)
-		num_blocks = get_unaligned_be32(&cmd->t_task.t_task_cdb[28]);
+		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);
 	else
-		num_blocks = get_unaligned_be32(&cmd->t_task.t_task_cdb[10]);
+		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
 
 	if (num_blocks != 0)
 		range = num_blocks;
@@ -1052,7 +1053,7 @@ transport_emulate_control_cdb(struct se_task *task)
 	unsigned short service_action;
 	int ret = 0;
 
-	switch (cmd->t_task.t_task_cdb[0]) {
+	switch (cmd->t_task_cdb[0]) {
 	case INQUIRY:
 		ret = target_emulate_inquiry(cmd);
 		break;
@@ -1066,13 +1067,13 @@ transport_emulate_control_cdb(struct se_task *task)
 		ret = target_emulate_modesense(cmd, 1);
 		break;
 	case SERVICE_ACTION_IN:
-		switch (cmd->t_task.t_task_cdb[1] & 0x1f) {
+		switch (cmd->t_task_cdb[1] & 0x1f) {
 		case SAI_READ_CAPACITY_16:
 			ret = target_emulate_readcapacity_16(cmd);
 			break;
 		default:
 			printk(KERN_ERR "Unsupported SA: 0x%02x\n",
-				cmd->t_task.t_task_cdb[1] & 0x1f);
+				cmd->t_task_cdb[1] & 0x1f);
 			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
 		}
 		break;
@@ -1097,7 +1098,7 @@ transport_emulate_control_cdb(struct se_task *task)
 		break;
 	case VARIABLE_LENGTH_CMD:
 		service_action =
-			get_unaligned_be16(&cmd->t_task.t_task_cdb[8]);
+			get_unaligned_be16(&cmd->t_task_cdb[8]);
 		switch (service_action) {
 		case WRITE_SAME_32:
 			if (!dev->transport->do_discard) {
@@ -1136,7 +1137,7 @@ transport_emulate_control_cdb(struct se_task *task)
 		break;
 	default:
 		printk(KERN_ERR "Unsupported SCSI Opcode: 0x%02x for %s\n",
-			cmd->t_task.t_task_cdb[0], dev->transport->name);
+			cmd->t_task_cdb[0], dev->transport->name);
 		return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
 	}
 
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index ea92f75d215e..c674a5d74218 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -168,7 +168,7 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
 	 */
 	spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
 	list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
-	atomic_set(&se_cmd->t_task.transport_lun_active, 1);
+	atomic_set(&se_cmd->transport_lun_active, 1);
 	spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);
 
 	return 0;
@@ -656,10 +656,10 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
 	struct se_lun *se_lun;
 	struct se_session *se_sess = se_cmd->se_sess;
 	struct se_task *se_task;
-	unsigned char *buf = se_cmd->t_task.t_task_buf;
+	unsigned char *buf = se_cmd->t_task_buf;
 	u32 cdb_offset = 0, lun_count = 0, offset = 8, i;
 
-	list_for_each_entry(se_task, &se_cmd->t_task.t_task_list, t_list)
+	list_for_each_entry(se_task, &se_cmd->t_task_list, t_list)
 		break;
 
 	if (!(se_task)) {
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 2e7ea7457501..5c47f4202386 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -377,7 +377,7 @@ static void fd_emulate_sync_cache(struct se_task *task)
 	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
 	struct fd_dev *fd_dev = dev->dev_ptr;
-	int immed = (cmd->t_task.t_task_cdb[1] & 0x2);
+	int immed = (cmd->t_task_cdb[1] & 0x2);
 	loff_t start, end;
 	int ret;
 
@@ -391,11 +391,11 @@ static void fd_emulate_sync_cache(struct se_task *task)
 	/*
 	 * Determine if we will be flushing the entire device.
 	 */
-	if (cmd->t_task.t_task_lba == 0 && cmd->data_length == 0) {
+	if (cmd->t_task_lba == 0 && cmd->data_length == 0) {
 		start = 0;
 		end = LLONG_MAX;
 	} else {
-		start = cmd->t_task.t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
+		start = cmd->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
 		if (cmd->data_length)
 			end = start + cmd->data_length;
 		else
@@ -475,7 +475,7 @@ static int fd_do_task(struct se_task *task)
 	if (ret > 0 &&
 	    dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
 	    dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
-	    cmd->t_task.t_tasks_fua) {
+	    cmd->t_tasks_fua) {
 		/*
 		 * We might need to be a bit smarter here
 		 * and return some sense data to let the initiator
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index c73baefeab8e..814a85b954f0 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -331,7 +331,7 @@ static void iblock_emulate_sync_cache(struct se_task *task)
 {
 	struct se_cmd *cmd = task->task_se_cmd;
 	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
-	int immed = (cmd->t_task.t_task_cdb[1] & 0x2);
+	int immed = (cmd->t_task_cdb[1] & 0x2);
 	sector_t error_sector;
 	int ret;
 
@@ -400,7 +400,7 @@ static int iblock_do_task(struct se_task *task)
 	 */
 	if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
 	    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
-	     task->task_se_cmd->t_task.t_tasks_fua))
+	     task->task_se_cmd->t_tasks_fua))
 		rw = WRITE_FUA;
 	else
 		rw = WRITE;
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 19406a3474c3..4fdede8da0c6 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -157,8 +157,8 @@ static int core_scsi2_reservation_reserve(struct se_cmd *cmd)
 	struct se_session *sess = cmd->se_sess;
 	struct se_portal_group *tpg = sess->se_tpg;
 
-	if ((cmd->t_task.t_task_cdb[1] & 0x01) &&
-	    (cmd->t_task.t_task_cdb[1] & 0x02)) {
+	if ((cmd->t_task_cdb[1] & 0x01) &&
+	    (cmd->t_task_cdb[1] & 0x02)) {
 		printk(KERN_ERR "LongIO and Obselete Bits set, returning"
 				" ILLEGAL_REQUEST\n");
 		return PYX_TRANSPORT_ILLEGAL_REQUEST;
@@ -216,7 +216,7 @@ int core_scsi2_emulate_crh(struct se_cmd *cmd)
 	struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
 	struct t10_pr_registration *pr_reg;
 	struct t10_reservation *pr_tmpl = &su_dev->t10_pr;
-	unsigned char *cdb = &cmd->t_task.t_task_cdb[0];
+	unsigned char *cdb = &cmd->t_task_cdb[0];
 	int crh = (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS);
 	int conflict = 0;
 
@@ -1482,7 +1482,7 @@ static int core_scsi3_decode_spec_i_port(
 	struct list_head tid_dest_list;
 	struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp;
 	struct target_core_fabric_ops *tmp_tf_ops;
-	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task_buf;
 	unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident;
 	char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
 	u32 tpdl, tid_len = 0;
@@ -3307,7 +3307,7 @@ static int core_scsi3_emulate_pro_register_and_move(
 	struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops;
 	struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg;
 	struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
-	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task_buf;
 	unsigned char *initiator_str;
 	char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
 	u32 tid_len, tmp_tid_len;
@@ -3723,7 +3723,7 @@ static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb)
  */
 static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
 {
-	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task_buf;
 	u64 res_key, sa_res_key;
 	int sa, scope, type, aptpl;
 	int spec_i_pt = 0, all_tg_pt = 0, unreg = 0;
@@ -3830,7 +3830,7 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
 	struct se_device *se_dev = cmd->se_dev;
 	struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
 	struct t10_pr_registration *pr_reg;
-	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task_buf;
 	u32 add_len = 0, off = 8;
 
 	if (cmd->data_length < 8) {
@@ -3885,7 +3885,7 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
 	struct se_device *se_dev = cmd->se_dev;
 	struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
 	struct t10_pr_registration *pr_reg;
-	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task_buf;
 	u64 pr_res_key;
 	u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */
 
@@ -3965,7 +3965,7 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
 	struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
-	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task_buf;
 	u16 add_len = 8; /* Hardcoded to 8. */
 
 	if (cmd->data_length < 6) {
@@ -4020,7 +4020,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
 	struct se_portal_group *se_tpg;
 	struct t10_pr_registration *pr_reg, *pr_reg_tmp;
 	struct t10_reservation *pr_tmpl = &se_dev->se_sub_dev->t10_pr;
-	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task_buf;
 	u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len;
 	u32 off = 8; /* off into first Full Status descriptor */
 	int format_code = 0;
@@ -4174,7 +4174,7 @@ static int core_scsi3_emulate_pr_in(struct se_cmd *cmd, unsigned char *cdb)
 
 int core_scsi3_emulate_pr(struct se_cmd *cmd)
 {
-	unsigned char *cdb = &cmd->t_task.t_task_cdb[0];
+	unsigned char *cdb = &cmd->t_task_cdb[0];
 	struct se_device *dev = cmd->se_dev;
 	/*
 	 * Following spc2r20 5.5.1 Reservations overview:
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index ecfe889cb0ce..3574c520a5f4 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -328,10 +328,8 @@ static struct se_device *pscsi_add_device_to_list(
 	q = sd->request_queue;
 	limits = &dev_limits.limits;
 	limits->logical_block_size = sd->sector_size;
-	limits->max_hw_sectors = (sd->host->max_sectors > queue_max_hw_sectors(q)) ?
-				  queue_max_hw_sectors(q) : sd->host->max_sectors;
-	limits->max_sectors = (sd->host->max_sectors > queue_max_sectors(q)) ?
-				  queue_max_sectors(q) : sd->host->max_sectors;
+	limits->max_hw_sectors = min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
+	limits->max_sectors = min_t(int, sd->host->max_sectors, queue_max_sectors(q));
 	dev_limits.hw_queue_depth = sd->queue_depth;
 	dev_limits.queue_depth = sd->queue_depth;
 	/*
@@ -697,7 +695,7 @@ static int pscsi_transport_complete(struct se_task *task)
 
 	if (task->task_se_cmd->se_deve->lun_flags &
 	    TRANSPORT_LUNFLAGS_READ_ONLY) {
-		unsigned char *buf = task->task_se_cmd->t_task.t_task_buf;
+		unsigned char *buf = task->task_se_cmd->t_task_buf;
 
 		if (cdb[0] == MODE_SENSE_10) {
 			if (!(buf[3] & 0x80))
@@ -763,7 +761,7 @@ static struct se_task *
 pscsi_alloc_task(struct se_cmd *cmd)
 {
 	struct pscsi_plugin_task *pt;
-	unsigned char *cdb = cmd->t_task.t_task_cdb;
+	unsigned char *cdb = cmd->t_task_cdb;
 
 	pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL);
 	if (!pt) {
@@ -776,7 +774,7 @@ pscsi_alloc_task(struct se_cmd *cmd)
 	 * allocate the extended CDB buffer for per struct se_task context
 	 * pt->pscsi_cdb now.
 	 */
-	if (cmd->t_task.t_task_cdb != cmd->t_task.__t_task_cdb) {
+	if (cmd->t_task_cdb != cmd->__t_task_cdb) {
 
 		pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL);
 		if (!(pt->pscsi_cdb)) {
@@ -889,7 +887,7 @@ static void pscsi_free_task(struct se_task *task)
 	 * Release the extended CDB allocation from pscsi_alloc_task()
 	 * if one exists.
 	 */
-	if (cmd->t_task.t_task_cdb != cmd->t_task.__t_task_cdb)
+	if (cmd->t_task_cdb != cmd->__t_task_cdb)
 		kfree(pt->pscsi_cdb);
 	/*
 	 * We do not release the bio(s) here associated with this task, as
@@ -1053,7 +1051,7 @@ static void pscsi_bi_endio(struct bio *bio, int error)
 	bio_put(bio);
 }
 
-static inline struct bio *pscsi_get_bio(struct pscsi_dev_virt *pdv, int sg_num)
+static inline struct bio *pscsi_get_bio(int sg_num)
 {
 	struct bio *bio;
 	/*
@@ -1126,7 +1124,7 @@ static int __pscsi_map_task_SG(
 			/*
 			 * Calls bio_kmalloc() and sets bio->bi_end_io()
 			 */
-			bio = pscsi_get_bio(pdv, nr_vecs);
+			bio = pscsi_get_bio(nr_vecs);
 			if (!(bio))
 				goto fail;
 
@@ -1266,7 +1264,7 @@ static int pscsi_map_task_non_SG(struct se_task *task)
 		return 0;
 
 	ret = blk_rq_map_kern(pdv->pdv_sd->request_queue,
-			pt->pscsi_req, cmd->t_task.t_task_buf,
+			pt->pscsi_req, cmd->t_task_buf,
 			task->task_size, GFP_KERNEL);
 	if (ret < 0) {
 		printk(KERN_ERR "PSCSI: blk_rq_map_kern() failed: %d\n", ret);
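The min_t() conversion in the pscsi_add_device_to_list() hunk above
collapses the open-coded ternaries; min_t() casts both operands to the
named type before comparing. A minimal sketch of the pattern (the helper
and its name are illustrative, not from the patch):

    #include <linux/kernel.h>    /* min_t() */

    /* Clamp a host-advertised sector limit against the queue's limit;
     * equivalent to (host_max > queue_max) ? queue_max : host_max.
     */
    static unsigned int clamp_max_sectors(int host_max, unsigned int queue_max)
    {
            return min_t(int, host_max, queue_max);
    }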
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 384a8e2083e3..4f9416d5c028 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -737,7 +737,7 @@ check_eot:
 	}
 
 out:
-	task->task_se_cmd->t_task.t_tasks_se_num += *se_mem_cnt;
+	task->task_se_cmd->t_tasks_se_num += *se_mem_cnt;
 #ifdef DEBUG_RAMDISK_DR
 	printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
 		*se_mem_cnt);
@@ -819,7 +819,7 @@ static int rd_DIRECT_without_offset(
 	}
 
 out:
-	task->task_se_cmd->t_task.t_tasks_se_num += *se_mem_cnt;
+	task->task_se_cmd->t_tasks_se_num += *se_mem_cnt;
 #ifdef DEBUG_RAMDISK_DR
 	printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
 		*se_mem_cnt);
@@ -880,14 +880,14 @@ static int rd_DIRECT_do_se_mem_map(
 	 * across multiple struct se_task->task_sg[].
 	 */
 	ret = transport_init_task_sg(task,
-			list_first_entry(&cmd->t_task.t_mem_list,
+			list_first_entry(&cmd->t_mem_list,
 					 struct se_mem, se_list),
 			task_offset);
 	if (ret <= 0)
 		return ret;
 
 	return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
-			list_first_entry(&cmd->t_task.t_mem_list,
+			list_first_entry(&cmd->t_mem_list,
 					 struct se_mem, se_list),
 			out_se_mem, se_mem_cnt, task_offset_in);
 }
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index e1f99f75ac35..6667e39a35a1 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -178,14 +178,14 @@ int core_tmr_lun_reset(
 			continue;
 		spin_unlock(&dev->se_tmr_lock);
 
-		spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
-		if (!(atomic_read(&cmd->t_task.t_transport_active))) {
-			spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+		spin_lock_irqsave(&cmd->t_state_lock, flags);
+		if (!(atomic_read(&cmd->t_transport_active))) {
+			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 			spin_lock(&dev->se_tmr_lock);
 			continue;
 		}
 		if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
-			spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 			spin_lock(&dev->se_tmr_lock);
 			continue;
 		}
@@ -193,7 +193,7 @@ int core_tmr_lun_reset(
193 " Response: 0x%02x, t_state: %d\n", 193 " Response: 0x%02x, t_state: %d\n",
194 (preempt_and_abort_list) ? "Preempt" : "", tmr_p, 194 (preempt_and_abort_list) ? "Preempt" : "", tmr_p,
195 tmr_p->function, tmr_p->response, cmd->t_state); 195 tmr_p->function, tmr_p->response, cmd->t_state);
196 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); 196 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
197 197
198 transport_cmd_finish_abort_tmr(cmd); 198 transport_cmd_finish_abort_tmr(cmd);
199 spin_lock(&dev->se_tmr_lock); 199 spin_lock(&dev->se_tmr_lock);
@@ -247,38 +247,38 @@ int core_tmr_lun_reset(
 		atomic_set(&task->task_state_active, 0);
 		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 
-		spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+		spin_lock_irqsave(&cmd->t_state_lock, flags);
 		DEBUG_LR("LUN_RESET: %s cmd: %p task: %p"
 			" ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/"
 			"def_t_state: %d/%d cdb: 0x%02x\n",
 			(preempt_and_abort_list) ? "Preempt" : "", cmd, task,
 			cmd->se_tfo->get_task_tag(cmd), 0,
 			cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
-			cmd->deferred_t_state, cmd->t_task.t_task_cdb[0]);
+			cmd->deferred_t_state, cmd->t_task_cdb[0]);
 		DEBUG_LR("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
 			" t_task_cdbs: %d t_task_cdbs_left: %d"
 			" t_task_cdbs_sent: %d -- t_transport_active: %d"
 			" t_transport_stop: %d t_transport_sent: %d\n",
 			cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
-			cmd->t_task.t_task_cdbs,
-			atomic_read(&cmd->t_task.t_task_cdbs_left),
-			atomic_read(&cmd->t_task.t_task_cdbs_sent),
-			atomic_read(&cmd->t_task.t_transport_active),
-			atomic_read(&cmd->t_task.t_transport_stop),
-			atomic_read(&cmd->t_task.t_transport_sent));
+			cmd->t_task_list_num,
+			atomic_read(&cmd->t_task_cdbs_left),
+			atomic_read(&cmd->t_task_cdbs_sent),
+			atomic_read(&cmd->t_transport_active),
+			atomic_read(&cmd->t_transport_stop),
+			atomic_read(&cmd->t_transport_sent));
 
 		if (atomic_read(&task->task_active)) {
 			atomic_set(&task->task_stop, 1);
 			spin_unlock_irqrestore(
-				&cmd->t_task.t_state_lock, flags);
+				&cmd->t_state_lock, flags);
 
 			DEBUG_LR("LUN_RESET: Waiting for task: %p to shutdown"
 				" for dev: %p\n", task, dev);
 			wait_for_completion(&task->task_stop_comp);
 			DEBUG_LR("LUN_RESET Completed task: %p shutdown for"
 				" dev: %p\n", task, dev);
-			spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
-			atomic_dec(&cmd->t_task.t_task_cdbs_left);
+			spin_lock_irqsave(&cmd->t_state_lock, flags);
+			atomic_dec(&cmd->t_task_cdbs_left);
 
 			atomic_set(&task->task_active, 0);
 			atomic_set(&task->task_stop, 0);
@@ -288,24 +288,24 @@ int core_tmr_lun_reset(
 		}
 		__transport_stop_task_timer(task, &flags);
 
-		if (!(atomic_dec_and_test(&cmd->t_task.t_task_cdbs_ex_left))) {
+		if (!(atomic_dec_and_test(&cmd->t_task_cdbs_ex_left))) {
 			spin_unlock_irqrestore(
-					&cmd->t_task.t_state_lock, flags);
+					&cmd->t_state_lock, flags);
 			DEBUG_LR("LUN_RESET: Skipping task: %p, dev: %p for"
 				" t_task_cdbs_ex_left: %d\n", task, dev,
-				atomic_read(&cmd->t_task.t_task_cdbs_ex_left));
+				atomic_read(&cmd->t_task_cdbs_ex_left));
 
 			spin_lock_irqsave(&dev->execute_task_lock, flags);
 			continue;
 		}
-		fe_count = atomic_read(&cmd->t_task.t_fe_count);
+		fe_count = atomic_read(&cmd->t_fe_count);
 
-		if (atomic_read(&cmd->t_task.t_transport_active)) {
+		if (atomic_read(&cmd->t_transport_active)) {
 			DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
 				" task: %p, t_fe_count: %d dev: %p\n", task,
 				fe_count, dev);
-			atomic_set(&cmd->t_task.t_transport_aborted, 1);
-			spin_unlock_irqrestore(&cmd->t_task.t_state_lock,
+			atomic_set(&cmd->t_transport_aborted, 1);
+			spin_unlock_irqrestore(&cmd->t_state_lock,
 						flags);
 			core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
 
@@ -314,8 +314,8 @@ int core_tmr_lun_reset(
 		}
 		DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
 			" t_fe_count: %d dev: %p\n", task, fe_count, dev);
-		atomic_set(&cmd->t_task.t_transport_aborted, 1);
-		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+		atomic_set(&cmd->t_transport_aborted, 1);
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
 
 		spin_lock_irqsave(&dev->execute_task_lock, flags);
@@ -345,7 +345,7 @@ int core_tmr_lun_reset(
 		if (prout_cmd == cmd)
 			continue;
 
-		atomic_dec(&cmd->t_task.t_transport_queue_active);
+		atomic_dec(&cmd->t_transport_queue_active);
 		atomic_dec(&qobj->queue_cnt);
 		list_del(&cmd->se_queue_node);
 		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
@@ -353,7 +353,7 @@ int core_tmr_lun_reset(
 		DEBUG_LR("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
 			" %d t_fe_count: %d\n", (preempt_and_abort_list) ?
 			"Preempt" : "", cmd, cmd->t_state,
-			atomic_read(&cmd->t_task.t_fe_count));
+			atomic_read(&cmd->t_fe_count));
 		/*
 		 * Signal that the command has failed via cmd->se_cmd_flags,
 		 * and call TFO->new_cmd_failure() to wakeup any fabric
@@ -365,7 +365,7 @@ int core_tmr_lun_reset(
 		transport_new_cmd_failure(cmd);
 
 		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
-				atomic_read(&cmd->t_task.t_fe_count));
+				atomic_read(&cmd->t_fe_count));
 		spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
 	}
 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index bf401da05f35..6f2855dac7f8 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -206,20 +206,18 @@ static int __transport_execute_tasks(struct se_device *dev);
 static void transport_complete_task_attr(struct se_cmd *cmd);
 static void transport_direct_request_timeout(struct se_cmd *cmd);
 static void transport_free_dev_tasks(struct se_cmd *cmd);
-static u32 transport_generic_get_cdb_count(struct se_cmd *cmd,
+static u32 transport_allocate_tasks(struct se_cmd *cmd,
 	unsigned long long starting_lba, u32 sectors,
 	enum dma_data_direction data_direction,
 	struct list_head *mem_list, int set_counts);
-static int transport_generic_get_mem(struct se_cmd *cmd, u32 length,
-	u32 dma_size);
+static int transport_generic_get_mem(struct se_cmd *cmd, u32 length);
 static int transport_generic_remove(struct se_cmd *cmd,
 	int release_to_pool, int session_reinstatement);
-static int transport_get_sectors(struct se_cmd *cmd);
+static int transport_cmd_get_valid_sectors(struct se_cmd *cmd);
 static int transport_map_sg_to_mem(struct se_cmd *cmd,
-	struct list_head *se_mem_list, struct scatterlist *sgl,
-	u32 *se_mem_cnt);
-static void transport_memcpy_se_mem_read_contig(struct se_cmd *cmd,
-	unsigned char *dst, struct list_head *se_mem_list);
+	struct list_head *se_mem_list, struct scatterlist *sgl);
+static void transport_memcpy_se_mem_read_contig(unsigned char *dst,
+	struct list_head *se_mem_list, u32 len);
 static void transport_release_fe_cmd(struct se_cmd *cmd);
 static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
 	struct se_queue_obj *qobj);
@@ -573,7 +571,7 @@ void transport_deregister_session(struct se_session *se_sess)
 EXPORT_SYMBOL(transport_deregister_session);
 
 /*
- * Called with cmd->t_task.t_state_lock held.
+ * Called with cmd->t_state_lock held.
  */
 static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
 {
@@ -581,7 +579,7 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
 	struct se_task *task;
 	unsigned long flags;
 
-	list_for_each_entry(task, &cmd->t_task.t_task_list, t_list) {
+	list_for_each_entry(task, &cmd->t_task_list, t_list) {
 		dev = task->se_dev;
 		if (!(dev))
 			continue;
@@ -599,7 +597,7 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
 		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 
 		atomic_set(&task->task_state_active, 0);
-		atomic_dec(&cmd->t_task.t_task_cdbs_ex_left);
+		atomic_dec(&cmd->t_task_cdbs_ex_left);
 	}
 }
 
@@ -618,32 +616,32 @@ static int transport_cmd_check_stop(
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	/*
 	 * Determine if IOCTL context caller in requesting the stopping of this
 	 * command for LUN shutdown purposes.
 	 */
-	if (atomic_read(&cmd->t_task.transport_lun_stop)) {
-		DEBUG_CS("%s:%d atomic_read(&cmd->t_task.transport_lun_stop)"
+	if (atomic_read(&cmd->transport_lun_stop)) {
+		DEBUG_CS("%s:%d atomic_read(&cmd->transport_lun_stop)"
			" == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
			cmd->se_tfo->get_task_tag(cmd));
 
 		cmd->deferred_t_state = cmd->t_state;
 		cmd->t_state = TRANSPORT_DEFERRED_CMD;
-		atomic_set(&cmd->t_task.t_transport_active, 0);
+		atomic_set(&cmd->t_transport_active, 0);
 		if (transport_off == 2)
 			transport_all_task_dev_remove_state(cmd);
-		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
-		complete(&cmd->t_task.transport_lun_stop_comp);
+		complete(&cmd->transport_lun_stop_comp);
 		return 1;
 	}
 	/*
 	 * Determine if frontend context caller is requesting the stopping of
 	 * this command for frontend exceptions.
 	 */
-	if (atomic_read(&cmd->t_task.t_transport_stop)) {
-		DEBUG_CS("%s:%d atomic_read(&cmd->t_task.t_transport_stop) =="
+	if (atomic_read(&cmd->t_transport_stop)) {
+		DEBUG_CS("%s:%d atomic_read(&cmd->t_transport_stop) =="
			" TRUE for ITT: 0x%08x\n", __func__, __LINE__,
			cmd->se_tfo->get_task_tag(cmd));
 
@@ -658,13 +656,13 @@ static int transport_cmd_check_stop(
 		 */
 		if (transport_off == 2)
 			cmd->se_lun = NULL;
-		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
-		complete(&cmd->t_task.t_transport_stop_comp);
+		complete(&cmd->t_transport_stop_comp);
 		return 1;
 	}
 	if (transport_off) {
-		atomic_set(&cmd->t_task.t_transport_active, 0);
+		atomic_set(&cmd->t_transport_active, 0);
 		if (transport_off == 2) {
 			transport_all_task_dev_remove_state(cmd);
 			/*
@@ -679,18 +677,18 @@ static int transport_cmd_check_stop(
 			 */
 			if (cmd->se_tfo->check_stop_free != NULL) {
 				spin_unlock_irqrestore(
-					&cmd->t_task.t_state_lock, flags);
+					&cmd->t_state_lock, flags);
 
 				cmd->se_tfo->check_stop_free(cmd);
 				return 1;
 			}
 		}
-		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 		return 0;
 	} else if (t_state)
 		cmd->t_state = t_state;
-	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 	return 0;
 }
@@ -708,21 +706,21 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
 	if (!lun)
 		return;
 
-	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
-	if (!(atomic_read(&cmd->t_task.transport_dev_active))) {
-		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	if (!(atomic_read(&cmd->transport_dev_active))) {
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		goto check_lun;
 	}
-	atomic_set(&cmd->t_task.transport_dev_active, 0);
+	atomic_set(&cmd->transport_dev_active, 0);
 	transport_all_task_dev_remove_state(cmd);
-	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 
 check_lun:
 	spin_lock_irqsave(&lun->lun_cmd_lock, flags);
-	if (atomic_read(&cmd->t_task.transport_lun_active)) {
+	if (atomic_read(&cmd->transport_lun_active)) {
 		list_del(&cmd->se_lun_node);
-		atomic_set(&cmd->t_task.transport_lun_active, 0);
+		atomic_set(&cmd->transport_lun_active, 0);
 #if 0
 		printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n"
 			cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun);
@@ -763,15 +761,15 @@ static void transport_add_cmd_to_queue(
 	INIT_LIST_HEAD(&cmd->se_queue_node);
 
 	if (t_state) {
-		spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+		spin_lock_irqsave(&cmd->t_state_lock, flags);
 		cmd->t_state = t_state;
-		atomic_set(&cmd->t_task.t_transport_active, 1);
-		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+		atomic_set(&cmd->t_transport_active, 1);
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 	}
 
 	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
 	list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
-	atomic_inc(&cmd->t_task.t_transport_queue_active);
+	atomic_inc(&cmd->t_transport_queue_active);
 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 
 	atomic_inc(&qobj->queue_cnt);
@@ -791,7 +789,7 @@ transport_get_cmd_from_queue(struct se_queue_obj *qobj)
 	}
 	cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);
 
-	atomic_dec(&cmd->t_task.t_transport_queue_active);
+	atomic_dec(&cmd->t_transport_queue_active);
 
 	list_del(&cmd->se_queue_node);
 	atomic_dec(&qobj->queue_cnt);
@@ -807,24 +805,24 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
 	unsigned long flags;
 
 	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
-	if (!(atomic_read(&cmd->t_task.t_transport_queue_active))) {
+	if (!(atomic_read(&cmd->t_transport_queue_active))) {
 		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 		return;
 	}
 
 	list_for_each_entry(t, &qobj->qobj_list, se_queue_node)
 		if (t == cmd) {
-			atomic_dec(&cmd->t_task.t_transport_queue_active);
+			atomic_dec(&cmd->t_transport_queue_active);
 			atomic_dec(&qobj->queue_cnt);
 			list_del(&cmd->se_queue_node);
 			break;
 		}
 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 
-	if (atomic_read(&cmd->t_task.t_transport_queue_active)) {
+	if (atomic_read(&cmd->t_transport_queue_active)) {
 		printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n",
 			cmd->se_tfo->get_task_tag(cmd),
-			atomic_read(&cmd->t_task.t_transport_queue_active));
+			atomic_read(&cmd->t_transport_queue_active));
 	}
 }
 
@@ -834,7 +832,7 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
  */
 void transport_complete_sync_cache(struct se_cmd *cmd, int good)
 {
-	struct se_task *task = list_entry(cmd->t_task.t_task_list.next,
+	struct se_task *task = list_entry(cmd->t_task_list.next,
 				struct se_task, t_list);
 
 	if (good) {
@@ -864,12 +862,12 @@ void transport_complete_task(struct se_task *task, int success)
 	unsigned long flags;
 #if 0
 	printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task,
-		cmd->t_task.t_task_cdb[0], dev);
+		cmd->t_task_cdb[0], dev);
 #endif
 	if (dev)
 		atomic_inc(&dev->depth_left);
 
-	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	atomic_set(&task->task_active, 0);
 
 	/*
@@ -891,14 +889,14 @@ void transport_complete_task(struct se_task *task, int success)
 	 */
 	if (atomic_read(&task->task_stop)) {
 		/*
-		 * Decrement cmd->t_task.t_se_count if this task had
+		 * Decrement cmd->t_se_count if this task had
 		 * previously thrown its timeout exception handler.
 		 */
 		if (atomic_read(&task->task_timeout)) {
-			atomic_dec(&cmd->t_task.t_se_count);
+			atomic_dec(&cmd->t_se_count);
 			atomic_set(&task->task_timeout, 0);
 		}
-		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 		complete(&task->task_stop_comp);
 		return;
@@ -910,33 +908,33 @@ void transport_complete_task(struct se_task *task, int success)
 	 */
 	if (atomic_read(&task->task_timeout)) {
 		if (!(atomic_dec_and_test(
-				&cmd->t_task.t_task_cdbs_timeout_left))) {
-			spin_unlock_irqrestore(&cmd->t_task.t_state_lock,
+				&cmd->t_task_cdbs_timeout_left))) {
+			spin_unlock_irqrestore(&cmd->t_state_lock,
 				flags);
 			return;
 		}
 		t_state = TRANSPORT_COMPLETE_TIMEOUT;
-		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 		transport_add_cmd_to_queue(cmd, t_state);
 		return;
 	}
-	atomic_dec(&cmd->t_task.t_task_cdbs_timeout_left);
+	atomic_dec(&cmd->t_task_cdbs_timeout_left);
 
 	/*
 	 * Decrement the outstanding t_task_cdbs_left count. The last
 	 * struct se_task from struct se_cmd will complete itself into the
 	 * device queue depending upon int success.
 	 */
-	if (!(atomic_dec_and_test(&cmd->t_task.t_task_cdbs_left))) {
+	if (!(atomic_dec_and_test(&cmd->t_task_cdbs_left))) {
 		if (!success)
-			cmd->t_task.t_tasks_failed = 1;
+			cmd->t_tasks_failed = 1;
 
-		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		return;
 	}
 
-	if (!success || cmd->t_task.t_tasks_failed) {
+	if (!success || cmd->t_tasks_failed) {
 		t_state = TRANSPORT_COMPLETE_FAILURE;
 		if (!task->task_error_status) {
 			task->task_error_status =
@@ -945,10 +943,10 @@ void transport_complete_task(struct se_task *task, int success)
945 PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 943 PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
946 } 944 }
947 } else { 945 } else {
948 atomic_set(&cmd->t_task.t_transport_complete, 1); 946 atomic_set(&cmd->t_transport_complete, 1);
949 t_state = TRANSPORT_COMPLETE_OK; 947 t_state = TRANSPORT_COMPLETE_OK;
950 } 948 }
951 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); 949 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
952 950
953 transport_add_cmd_to_queue(cmd, t_state); 951 transport_add_cmd_to_queue(cmd, t_state);
954} 952}
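
The completion path above is reference counting in miniature: every finished struct se_task decrements cmd->t_task_cdbs_left, and only the task that drops the counter to zero queues the command for state-machine processing, with t_tasks_failed recording whether any sibling failed along the way. A rough userspace sketch of the same pattern, assuming illustrative names (my_cmd, my_task_done) rather than kernel API:

    /* Hedged userspace sketch: each task decrements a shared counter;
     * the one that takes it to zero "queues" the parent command. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct my_cmd {
            atomic_int  tasks_left;   /* like cmd->t_task_cdbs_left */
            atomic_bool failed;       /* like cmd->t_tasks_failed   */
    };

    /* Returns true only for the task that finishes the command. */
    static bool my_task_done(struct my_cmd *cmd, bool success)
    {
            if (!success)
                    atomic_store(&cmd->failed, true);
            /* fetch_sub returns the old value: 1 means we were last */
            if (atomic_fetch_sub(&cmd->tasks_left, 1) != 1)
                    return false;
            printf("last task: queue cmd, failed=%d\n",
                   (int)atomic_load(&cmd->failed));
            return true;
    }

    int main(void)
    {
            struct my_cmd cmd = { .tasks_left = 3, .failed = false };

            my_task_done(&cmd, true);
            my_task_done(&cmd, false);
            my_task_done(&cmd, true);   /* reports completion, failed=1 */
            return 0;
    }
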
@@ -1041,8 +1039,8 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
1041 struct se_task *task; 1039 struct se_task *task;
1042 unsigned long flags; 1040 unsigned long flags;
1043 1041
1044 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); 1042 spin_lock_irqsave(&cmd->t_state_lock, flags);
1045 list_for_each_entry(task, &cmd->t_task.t_task_list, t_list) { 1043 list_for_each_entry(task, &cmd->t_task_list, t_list) {
1046 dev = task->se_dev; 1044 dev = task->se_dev;
1047 1045
1048 if (atomic_read(&task->task_state_active)) 1046 if (atomic_read(&task->task_state_active))
@@ -1058,7 +1056,7 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
1058 1056
1059 spin_unlock(&dev->execute_task_lock); 1057 spin_unlock(&dev->execute_task_lock);
1060 } 1058 }
1061 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); 1059 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1062} 1060}
1063 1061
1064static void transport_add_tasks_from_cmd(struct se_cmd *cmd) 1062static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
@@ -1068,7 +1066,7 @@ static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
1068 unsigned long flags; 1066 unsigned long flags;
1069 1067
1070 spin_lock_irqsave(&dev->execute_task_lock, flags); 1068 spin_lock_irqsave(&dev->execute_task_lock, flags);
1071 list_for_each_entry(task, &cmd->t_task.t_task_list, t_list) { 1069 list_for_each_entry(task, &cmd->t_task_list, t_list) {
1072 if (atomic_read(&task->task_execute_queue)) 1070 if (atomic_read(&task->task_execute_queue))
1073 continue; 1071 continue;
1074 /* 1072 /*
@@ -1670,14 +1668,13 @@ transport_generic_get_task(struct se_cmd *cmd,
1670 INIT_LIST_HEAD(&task->t_execute_list); 1668 INIT_LIST_HEAD(&task->t_execute_list);
1671 INIT_LIST_HEAD(&task->t_state_list); 1669 INIT_LIST_HEAD(&task->t_state_list);
1672 init_completion(&task->task_stop_comp); 1670 init_completion(&task->task_stop_comp);
1673 task->task_no = cmd->t_task.t_tasks_no++;
1674 task->task_se_cmd = cmd; 1671 task->task_se_cmd = cmd;
1675 task->se_dev = dev; 1672 task->se_dev = dev;
1676 task->task_data_direction = data_direction; 1673 task->task_data_direction = data_direction;
1677 1674
1678 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); 1675 spin_lock_irqsave(&cmd->t_state_lock, flags);
1679 list_add_tail(&task->t_list, &cmd->t_task.t_task_list); 1676 list_add_tail(&task->t_list, &cmd->t_task_list);
1680 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); 1677 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1681 1678
1682 return task; 1679 return task;
1683} 1680}
@@ -1701,14 +1698,14 @@ void transport_init_se_cmd(
1701 INIT_LIST_HEAD(&cmd->se_delayed_node); 1698 INIT_LIST_HEAD(&cmd->se_delayed_node);
1702 INIT_LIST_HEAD(&cmd->se_ordered_node); 1699 INIT_LIST_HEAD(&cmd->se_ordered_node);
1703 1700
1704 INIT_LIST_HEAD(&cmd->t_task.t_mem_list); 1701 INIT_LIST_HEAD(&cmd->t_mem_list);
1705 INIT_LIST_HEAD(&cmd->t_task.t_mem_bidi_list); 1702 INIT_LIST_HEAD(&cmd->t_mem_bidi_list);
1706 INIT_LIST_HEAD(&cmd->t_task.t_task_list); 1703 INIT_LIST_HEAD(&cmd->t_task_list);
1707 init_completion(&cmd->t_task.transport_lun_fe_stop_comp); 1704 init_completion(&cmd->transport_lun_fe_stop_comp);
1708 init_completion(&cmd->t_task.transport_lun_stop_comp); 1705 init_completion(&cmd->transport_lun_stop_comp);
1709 init_completion(&cmd->t_task.t_transport_stop_comp); 1706 init_completion(&cmd->t_transport_stop_comp);
1710 spin_lock_init(&cmd->t_task.t_state_lock); 1707 spin_lock_init(&cmd->t_state_lock);
1711 atomic_set(&cmd->t_task.transport_dev_active, 1); 1708 atomic_set(&cmd->transport_dev_active, 1);
1712 1709
1713 cmd->se_tfo = tfo; 1710 cmd->se_tfo = tfo;
1714 cmd->se_sess = se_sess; 1711 cmd->se_sess = se_sess;
@@ -1753,8 +1750,8 @@ void transport_free_se_cmd(
1753 /* 1750 /*
1754 * Check and free any extended CDB buffer that was allocated 1751 * Check and free any extended CDB buffer that was allocated
1755 */ 1752 */
1756 if (se_cmd->t_task.t_task_cdb != se_cmd->t_task.__t_task_cdb) 1753 if (se_cmd->t_task_cdb != se_cmd->__t_task_cdb)
1757 kfree(se_cmd->t_task.t_task_cdb); 1754 kfree(se_cmd->t_task_cdb);
1758} 1755}
1759EXPORT_SYMBOL(transport_free_se_cmd); 1756EXPORT_SYMBOL(transport_free_se_cmd);
1760 1757
@@ -1792,26 +1789,26 @@ int transport_generic_allocate_tasks(
1792 * allocate the additional extended CDB buffer now.. Otherwise 1789 * allocate the additional extended CDB buffer now.. Otherwise
1793 * setup the pointer from __t_task_cdb to t_task_cdb. 1790 * setup the pointer from __t_task_cdb to t_task_cdb.
1794 */ 1791 */
1795 if (scsi_command_size(cdb) > sizeof(cmd->t_task.__t_task_cdb)) { 1792 if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
1796 cmd->t_task.t_task_cdb = kzalloc(scsi_command_size(cdb), 1793 cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
1797 GFP_KERNEL); 1794 GFP_KERNEL);
1798 if (!(cmd->t_task.t_task_cdb)) { 1795 if (!(cmd->t_task_cdb)) {
1799 printk(KERN_ERR "Unable to allocate cmd->t_task.t_task_cdb" 1796 printk(KERN_ERR "Unable to allocate cmd->t_task_cdb"
1800 " %u > sizeof(cmd->t_task.__t_task_cdb): %lu ops\n", 1797 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
1801 scsi_command_size(cdb), 1798 scsi_command_size(cdb),
1802 (unsigned long)sizeof(cmd->t_task.__t_task_cdb)); 1799 (unsigned long)sizeof(cmd->__t_task_cdb));
1803 return -ENOMEM; 1800 return -ENOMEM;
1804 } 1801 }
1805 } else 1802 } else
1806 cmd->t_task.t_task_cdb = &cmd->t_task.__t_task_cdb[0]; 1803 cmd->t_task_cdb = &cmd->__t_task_cdb[0];
1807 /* 1804 /*
1808 * Copy the original CDB into cmd->t_task. 1805 * Copy the original CDB into cmd->t_task_cdb.
1809 */ 1806 */
1810 memcpy(cmd->t_task.t_task_cdb, cdb, scsi_command_size(cdb)); 1807 memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
1811 /* 1808 /*
1812 * Setup the received CDB based on SCSI defined opcodes and 1809 * Setup the received CDB based on SCSI defined opcodes and
1813 * perform unit attention, persistent reservations and ALUA 1810 * perform unit attention, persistent reservations and ALUA
1814 * checks for virtual device backends. The cmd->t_task.t_task_cdb 1811 * checks for virtual device backends. The cmd->t_task_cdb
1815 * pointer is expected to be setup before we reach this point. 1812 * pointer is expected to be setup before we reach this point.
1816 */ 1813 */
1817 ret = transport_generic_cmd_sequencer(cmd, cdb); 1814 ret = transport_generic_cmd_sequencer(cmd, cdb);
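
The allocation above (paired with the kfree() in transport_free_se_cmd() earlier in this hunk series) is the classic embedded-buffer-with-heap-fallback idiom: CDBs that fit in __t_task_cdb[] cost no extra allocation, oversized ones get a kzalloc'd buffer, and the free path compares pointers to decide what to release. A minimal userspace sketch, assuming the illustrative struct cdb_holder:

    #include <stdlib.h>
    #include <string.h>

    #define INLINE_CDB_LEN 32            /* like sizeof(__t_task_cdb) */

    struct cdb_holder {
            unsigned char *cdb;                        /* like t_task_cdb   */
            unsigned char  inline_cdb[INLINE_CDB_LEN]; /* like __t_task_cdb */
    };

    static int cdb_store(struct cdb_holder *h, const unsigned char *src,
                         size_t len)
    {
            if (len > sizeof(h->inline_cdb)) {
                    h->cdb = calloc(1, len);  /* fallback, like kzalloc() */
                    if (!h->cdb)
                            return -1;
            } else {
                    h->cdb = h->inline_cdb;   /* common case: no allocation */
            }
            memcpy(h->cdb, src, len);
            return 0;
    }

    static void cdb_release(struct cdb_holder *h)
    {
            /* mirrors transport_free_se_cmd(): only the fallback is freed */
            if (h->cdb != h->inline_cdb)
                    free(h->cdb);
    }
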
@@ -1845,7 +1842,6 @@ int transport_generic_handle_cdb(
1845 printk(KERN_ERR "cmd->se_lun is NULL\n"); 1842 printk(KERN_ERR "cmd->se_lun is NULL\n");
1846 return -EINVAL; 1843 return -EINVAL;
1847 } 1844 }
1848
1849 transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD); 1845 transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD);
1850 return 0; 1846 return 0;
1851} 1847}
@@ -1936,9 +1932,9 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
1936 /* 1932 /*
1937 * No tasks remain in the execution queue 1933 * No tasks remain in the execution queue
1938 */ 1934 */
1939 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); 1935 spin_lock_irqsave(&cmd->t_state_lock, flags);
1940 list_for_each_entry_safe(task, task_tmp, 1936 list_for_each_entry_safe(task, task_tmp,
1941 &cmd->t_task.t_task_list, t_list) { 1937 &cmd->t_task_list, t_list) {
1942 DEBUG_TS("task_no[%d] - Processing task %p\n", 1938 DEBUG_TS("task_no[%d] - Processing task %p\n",
1943 task->task_no, task); 1939 task->task_no, task);
1944 /* 1940 /*
@@ -1947,14 +1943,14 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
1947 */ 1943 */
1948 if (!atomic_read(&task->task_sent) && 1944 if (!atomic_read(&task->task_sent) &&
1949 !atomic_read(&task->task_active)) { 1945 !atomic_read(&task->task_active)) {
1950 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, 1946 spin_unlock_irqrestore(&cmd->t_state_lock,
1951 flags); 1947 flags);
1952 transport_remove_task_from_execute_queue(task, 1948 transport_remove_task_from_execute_queue(task,
1953 task->se_dev); 1949 task->se_dev);
1954 1950
1955 DEBUG_TS("task_no[%d] - Removed from execute queue\n", 1951 DEBUG_TS("task_no[%d] - Removed from execute queue\n",
1956 task->task_no); 1952 task->task_no);
1957 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); 1953 spin_lock_irqsave(&cmd->t_state_lock, flags);
1958 continue; 1954 continue;
1959 } 1955 }
1960 1956
@@ -1964,7 +1960,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
1964 */ 1960 */
1965 if (atomic_read(&task->task_active)) { 1961 if (atomic_read(&task->task_active)) {
1966 atomic_set(&task->task_stop, 1); 1962 atomic_set(&task->task_stop, 1);
1967 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, 1963 spin_unlock_irqrestore(&cmd->t_state_lock,
1968 flags); 1964 flags);
1969 1965
1970 DEBUG_TS("task_no[%d] - Waiting to complete\n", 1966 DEBUG_TS("task_no[%d] - Waiting to complete\n",
@@ -1973,8 +1969,8 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
1973 DEBUG_TS("task_no[%d] - Stopped successfully\n", 1969 DEBUG_TS("task_no[%d] - Stopped successfully\n",
1974 task->task_no); 1970 task->task_no);
1975 1971
1976 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); 1972 spin_lock_irqsave(&cmd->t_state_lock, flags);
1977 atomic_dec(&cmd->t_task.t_task_cdbs_left); 1973 atomic_dec(&cmd->t_task_cdbs_left);
1978 1974
1979 atomic_set(&task->task_active, 0); 1975 atomic_set(&task->task_active, 0);
1980 atomic_set(&task->task_stop, 0); 1976 atomic_set(&task->task_stop, 0);
@@ -1985,7 +1981,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
1985 1981
1986 __transport_stop_task_timer(task, &flags); 1982 __transport_stop_task_timer(task, &flags);
1987 } 1983 }
1988 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); 1984 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1989 1985
1990 return ret; 1986 return ret;
1991} 1987}
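
transport_stop_tasks_for_cmd() shows the standard stop handshake: mark task_stop under t_state_lock, drop the lock, block on task_stop_comp until the running task fires it from transport_complete_task(), then retake the lock and clear the flags. A small userspace sketch of the completion primitive itself, assuming pthreads (illustrative, not the kernel implementation):

    #include <pthread.h>
    #include <stdbool.h>

    struct completion {
            pthread_mutex_t lock;
            pthread_cond_t  cond;
            bool            done;
    };

    #define COMPLETION_INITIALIZER \
            { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false }

    /* Fired by the running task, cf. complete(&task->task_stop_comp) */
    static void sketch_complete(struct completion *c)
    {
            pthread_mutex_lock(&c->lock);
            c->done = true;
            pthread_cond_signal(&c->cond);
            pthread_mutex_unlock(&c->lock);
    }

    /* Blocked on by the stopper, cf. wait_for_completion() */
    static void sketch_wait_for_completion(struct completion *c)
    {
            pthread_mutex_lock(&c->lock);
            while (!c->done)        /* tolerate spurious wakeups */
                    pthread_cond_wait(&c->cond, &c->lock);
            pthread_mutex_unlock(&c->lock);
    }
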
@@ -2001,7 +1997,7 @@ static void transport_generic_request_failure(
2001{ 1997{
2002 DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" 1998 DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
2003 " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), 1999 " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
2004 cmd->t_task.t_task_cdb[0]); 2000 cmd->t_task_cdb[0]);
2005 DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:" 2001 DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:"
2006 " %d/%d transport_error_status: %d\n", 2002 " %d/%d transport_error_status: %d\n",
2007 cmd->se_tfo->get_cmd_state(cmd), 2003 cmd->se_tfo->get_cmd_state(cmd),
@@ -2010,13 +2006,13 @@ static void transport_generic_request_failure(
2010 DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d" 2006 DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d"
2011 " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" 2007 " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
2012 " t_transport_active: %d t_transport_stop: %d" 2008 " t_transport_active: %d t_transport_stop: %d"
2013 " t_transport_sent: %d\n", cmd->t_task.t_task_cdbs, 2009 " t_transport_sent: %d\n", cmd->t_task_cdbs,
2014 atomic_read(&cmd->t_task.t_task_cdbs_left), 2010 atomic_read(&cmd->t_task_cdbs_left),
2015 atomic_read(&cmd->t_task.t_task_cdbs_sent), 2011 atomic_read(&cmd->t_task_cdbs_sent),
2016 atomic_read(&cmd->t_task.t_task_cdbs_ex_left), 2012 atomic_read(&cmd->t_task_cdbs_ex_left),
2017 atomic_read(&cmd->t_task.t_transport_active), 2013 atomic_read(&cmd->t_transport_active),
2018 atomic_read(&cmd->t_task.t_transport_stop), 2014 atomic_read(&cmd->t_transport_stop),
2019 atomic_read(&cmd->t_task.t_transport_sent)); 2015 atomic_read(&cmd->t_transport_sent));
2020 2016
2021 transport_stop_all_task_timers(cmd); 2017 transport_stop_all_task_timers(cmd);
2022 2018
@@ -2098,7 +2094,7 @@ static void transport_generic_request_failure(
2098 break; 2094 break;
2099 default: 2095 default:
2100 printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n", 2096 printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n",
2101 cmd->t_task.t_task_cdb[0], 2097 cmd->t_task_cdb[0],
2102 cmd->transport_error_status); 2098 cmd->transport_error_status);
2103 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 2099 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
2104 break; 2100 break;
@@ -2119,19 +2115,19 @@ static void transport_direct_request_timeout(struct se_cmd *cmd)
2119{ 2115{
2120 unsigned long flags; 2116 unsigned long flags;
2121 2117
2122 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); 2118 spin_lock_irqsave(&cmd->t_state_lock, flags);
2123 if (!(atomic_read(&cmd->t_task.t_transport_timeout))) { 2119 if (!(atomic_read(&cmd->t_transport_timeout))) {
2124 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); 2120 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2125 return; 2121 return;
2126 } 2122 }
2127 if (atomic_read(&cmd->t_task.t_task_cdbs_timeout_left)) { 2123 if (atomic_read(&cmd->t_task_cdbs_timeout_left)) {
2128 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); 2124 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2129 return; 2125 return;
2130 } 2126 }
2131 2127
2132 atomic_sub(atomic_read(&cmd->t_task.t_transport_timeout), 2128 atomic_sub(atomic_read(&cmd->t_transport_timeout),
2133 &cmd->t_task.t_se_count); 2129 &cmd->t_se_count);
2134 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); 2130 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2135} 2131}
2136 2132
2137static void transport_generic_request_timeout(struct se_cmd *cmd) 2133static void transport_generic_request_timeout(struct se_cmd *cmd)
@@ -2139,16 +2135,16 @@ static void transport_generic_request_timeout(struct se_cmd *cmd)
2139 unsigned long flags; 2135 unsigned long flags;
2140 2136
2141 /* 2137 /*
2142 * Reset cmd->t_task.t_se_count to allow transport_generic_remove() 2138 * Reset cmd->t_se_count to allow transport_generic_remove()
2143 * to allow last call to free memory resources. 2139 * to allow last call to free memory resources.
2144 */ 2140 */
2145 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); 2141 spin_lock_irqsave(&cmd->t_state_lock, flags);
2146 if (atomic_read(&cmd->t_task.t_transport_timeout) > 1) { 2142 if (atomic_read(&cmd->t_transport_timeout) > 1) {
2147 int tmp = (atomic_read(&cmd->t_task.t_transport_timeout) - 1); 2143 int tmp = (atomic_read(&cmd->t_transport_timeout) - 1);
2148 2144
2149 atomic_sub(tmp, &cmd->t_task.t_se_count); 2145 atomic_sub(tmp, &cmd->t_se_count);
2150 } 2146 }
2151 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); 2147 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2152 2148
2153 transport_generic_remove(cmd, 0, 0); 2149 transport_generic_remove(cmd, 0, 0);
2154} 2150}
@@ -2164,8 +2160,8 @@ transport_generic_allocate_buf(struct se_cmd *cmd, u32 data_length)
2164 return -ENOMEM; 2160 return -ENOMEM;
2165 } 2161 }
2166 2162
2167 cmd->t_task.t_tasks_se_num = 0; 2163 cmd->t_tasks_se_num = 0;
2168 cmd->t_task.t_task_buf = buf; 2164 cmd->t_task_buf = buf;
2169 2165
2170 return 0; 2166 return 0;
2171} 2167}
@@ -2207,9 +2203,9 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
2207{ 2203{
2208 unsigned long flags; 2204 unsigned long flags;
2209 2205
2210 spin_lock_irqsave(&se_cmd->t_task.t_state_lock, flags); 2206 spin_lock_irqsave(&se_cmd->t_state_lock, flags);
2211 se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; 2207 se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
2212 spin_unlock_irqrestore(&se_cmd->t_task.t_state_lock, flags); 2208 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
2213} 2209}
2214 2210
2215/* 2211/*
@@ -2223,9 +2219,9 @@ static void transport_task_timeout_handler(unsigned long data)
2223 2219
2224 DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd); 2220 DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd);
2225 2221
2226 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); 2222 spin_lock_irqsave(&cmd->t_state_lock, flags);
2227 if (task->task_flags & TF_STOP) { 2223 if (task->task_flags & TF_STOP) {
2228 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); 2224 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2229 return; 2225 return;
2230 } 2226 }
2231 task->task_flags &= ~TF_RUNNING; 2227 task->task_flags &= ~TF_RUNNING;
@@ -2236,13 +2232,13 @@ static void transport_task_timeout_handler(unsigned long data)
2236 if (!(atomic_read(&task->task_active))) { 2232 if (!(atomic_read(&task->task_active))) {
2237 DEBUG_TT("transport task: %p cmd: %p timeout task_active" 2233 DEBUG_TT("transport task: %p cmd: %p timeout task_active"
2238 " == 0\n", task, cmd); 2234 " == 0\n", task, cmd);
2239 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); 2235 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2240 return; 2236 return;
2241 } 2237 }
2242 2238
2243 atomic_inc(&cmd->t_task.t_se_count); 2239 atomic_inc(&cmd->t_se_count);
2244 atomic_inc(&cmd->t_task.t_transport_timeout); 2240 atomic_inc(&cmd->t_transport_timeout);
2245 cmd->t_task.t_tasks_failed = 1; 2241 cmd->t_tasks_failed = 1;
2246 2242
2247 atomic_set(&task->task_timeout, 1); 2243 atomic_set(&task->task_timeout, 1);
2248 task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT; 2244 task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT;
@@ -2251,28 +2247,28 @@ static void transport_task_timeout_handler(unsigned long data)
2251 if (atomic_read(&task->task_stop)) { 2247 if (atomic_read(&task->task_stop)) {
2252 DEBUG_TT("transport task: %p cmd: %p timeout task_stop" 2248 DEBUG_TT("transport task: %p cmd: %p timeout task_stop"
2253 " == 1\n", task, cmd); 2249 " == 1\n", task, cmd);
2254 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); 2250 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2255 complete(&task->task_stop_comp); 2251 complete(&task->task_stop_comp);
2256 return; 2252 return;
2257 } 2253 }
2258 2254
2259 if (!(atomic_dec_and_test(&cmd->t_task.t_task_cdbs_left))) { 2255 if (!(atomic_dec_and_test(&cmd->t_task_cdbs_left))) {
2260 DEBUG_TT("transport task: %p cmd: %p timeout non zero" 2256 DEBUG_TT("transport task: %p cmd: %p timeout non zero"
2261 " t_task_cdbs_left\n", task, cmd); 2257 " t_task_cdbs_left\n", task, cmd);
2262 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); 2258 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2263 return; 2259 return;
2264 } 2260 }
2265 DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n", 2261 DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
2266 task, cmd); 2262 task, cmd);
2267 2263
2268 cmd->t_state = TRANSPORT_COMPLETE_FAILURE; 2264 cmd->t_state = TRANSPORT_COMPLETE_FAILURE;
2269 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); 2265 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2270 2266
2271 transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE); 2267 transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE);
2272} 2268}
2273 2269
2274/* 2270/*
2275 * Called with cmd->t_task.t_state_lock held. 2271 * Called with cmd->t_state_lock held.
2276 */ 2272 */
2277static void transport_start_task_timer(struct se_task *task) 2273static void transport_start_task_timer(struct se_task *task)
2278{ 2274{
@@ -2302,7 +2298,7 @@ static void transport_start_task_timer(struct se_task *task)
2302} 2298}
2303 2299
2304/* 2300/*
2305 * Called with spin_lock_irq(&cmd->t_task.t_state_lock) held. 2301 * Called with spin_lock_irq(&cmd->t_state_lock) held.
2306 */ 2302 */
2307void __transport_stop_task_timer(struct se_task *task, unsigned long *flags) 2303void __transport_stop_task_timer(struct se_task *task, unsigned long *flags)
2308{ 2304{
@@ -2312,11 +2308,11 @@ void __transport_stop_task_timer(struct se_task *task, unsigned long *flags)
2312 return; 2308 return;
2313 2309
2314 task->task_flags |= TF_STOP; 2310 task->task_flags |= TF_STOP;
2315 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, *flags); 2311 spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
2316 2312
2317 del_timer_sync(&task->task_timer); 2313 del_timer_sync(&task->task_timer);
2318 2314
2319 spin_lock_irqsave(&cmd->t_task.t_state_lock, *flags); 2315 spin_lock_irqsave(&cmd->t_state_lock, *flags);
2320 task->task_flags &= ~TF_RUNNING; 2316 task->task_flags &= ~TF_RUNNING;
2321 task->task_flags &= ~TF_STOP; 2317 task->task_flags &= ~TF_STOP;
2322} 2318}
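
Note the lock choreography in __transport_stop_task_timer(): the timer handler takes t_state_lock itself, so del_timer_sync() must never be called with it held, and TF_STOP tells an already-running handler to bail out early. The shape of that dance as a hedged userspace sketch (all names illustrative):

    #include <pthread.h>

    #define TF_RUNNING 0x1
    #define TF_STOP    0x2

    static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Stands in for del_timer_sync(): may block until a handler that
     * itself takes state_lock finishes, so the caller must not hold
     * state_lock across this call. */
    static void blocking_cancel_sketch(void)
    {
    }

    /* Entered and exited with state_lock held, like the function above. */
    static void stop_timer_locked(int *task_flags)
    {
            *task_flags |= TF_STOP;          /* running handler bails out */
            pthread_mutex_unlock(&state_lock);

            blocking_cancel_sketch();        /* safe: lock dropped */

            pthread_mutex_lock(&state_lock);
            *task_flags &= ~(TF_RUNNING | TF_STOP);
    }
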
@@ -2326,11 +2322,11 @@ static void transport_stop_all_task_timers(struct se_cmd *cmd)
2326 struct se_task *task = NULL, *task_tmp; 2322 struct se_task *task = NULL, *task_tmp;
2327 unsigned long flags; 2323 unsigned long flags;
2328 2324
2329 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); 2325 spin_lock_irqsave(&cmd->t_state_lock, flags);
2330 list_for_each_entry_safe(task, task_tmp, 2326 list_for_each_entry_safe(task, task_tmp,
2331 &cmd->t_task.t_task_list, t_list) 2327 &cmd->t_task_list, t_list)
2332 __transport_stop_task_timer(task, &flags); 2328 __transport_stop_task_timer(task, &flags);
2333 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); 2329 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2334} 2330}
2335 2331
2336static inline int transport_tcq_window_closed(struct se_device *dev) 2332static inline int transport_tcq_window_closed(struct se_device *dev)
@@ -2365,7 +2361,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
2365 smp_mb__after_atomic_inc(); 2361 smp_mb__after_atomic_inc();
2366 DEBUG_STA("Added HEAD_OF_QUEUE for CDB:" 2362 DEBUG_STA("Added HEAD_OF_QUEUE for CDB:"
2367 " 0x%02x, se_ordered_id: %u\n", 2363 " 0x%02x, se_ordered_id: %u\n",
2368 cmd->t_task.t_task_cdb[0], 2364 cmd->t_task_cdb[0],
2369 cmd->se_ordered_id); 2365 cmd->se_ordered_id);
2370 return 1; 2366 return 1;
2371 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { 2367 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
@@ -2379,7 +2375,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
2379 2375
2380 DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered" 2376 DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered"
2381 " list, se_ordered_id: %u\n", 2377 " list, se_ordered_id: %u\n",
2382 cmd->t_task.t_task_cdb[0], 2378 cmd->t_task_cdb[0],
2383 cmd->se_ordered_id); 2379 cmd->se_ordered_id);
2384 /* 2380 /*
2385 * Add ORDERED command to tail of execution queue if 2381 * Add ORDERED command to tail of execution queue if
@@ -2413,7 +2409,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
2413 2409
2414 DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to" 2410 DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to"
2415 " delayed CMD list, se_ordered_id: %u\n", 2411 " delayed CMD list, se_ordered_id: %u\n",
2416 cmd->t_task.t_task_cdb[0], cmd->sam_task_attr, 2412 cmd->t_task_cdb[0], cmd->sam_task_attr,
2417 cmd->se_ordered_id); 2413 cmd->se_ordered_id);
2418 /* 2414 /*
2419 * Return zero to let transport_execute_tasks() know 2415 * Return zero to let transport_execute_tasks() know
@@ -2487,7 +2483,7 @@ static int __transport_execute_tasks(struct se_device *dev)
2487 2483
2488 /* 2484 /*
2489 * Check if there is enough room in the device and HBA queue to send 2485 * Check if there is enough room in the device and HBA queue to send
2490 * struct se_transport_task's to the selected transport. 2486 * struct se_task's to the selected transport.
2491 */ 2487 */
2492check_depth: 2488check_depth:
2493 if (!atomic_read(&dev->depth_left)) 2489 if (!atomic_read(&dev->depth_left))
@@ -2511,17 +2507,17 @@ check_depth:
2511 2507
2512 cmd = task->task_se_cmd; 2508 cmd = task->task_se_cmd;
2513 2509
2514 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); 2510 spin_lock_irqsave(&cmd->t_state_lock, flags);
2515 atomic_set(&task->task_active, 1); 2511 atomic_set(&task->task_active, 1);
2516 atomic_set(&task->task_sent, 1); 2512 atomic_set(&task->task_sent, 1);
2517 atomic_inc(&cmd->t_task.t_task_cdbs_sent); 2513 atomic_inc(&cmd->t_task_cdbs_sent);
2518 2514
2519 if (atomic_read(&cmd->t_task.t_task_cdbs_sent) == 2515 if (atomic_read(&cmd->t_task_cdbs_sent) ==
2520 cmd->t_task.t_task_cdbs) 2516 cmd->t_task_list_num)
2521 atomic_set(&cmd->transport_sent, 1); 2517 atomic_set(&cmd->transport_sent, 1);
2522 2518
2523 transport_start_task_timer(task); 2519 transport_start_task_timer(task);
2524 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); 2520 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2525 /* 2521 /*
2526 * The struct se_cmd->transport_emulate_cdb() function pointer is used 2522 * The struct se_cmd->transport_emulate_cdb() function pointer is used
2527 * to grab REPORT_LUNS and other CDBs we want to handle before they hit the 2523 * to grab REPORT_LUNS and other CDBs we want to handle before they hit the
@@ -2586,10 +2582,10 @@ void transport_new_cmd_failure(struct se_cmd *se_cmd)
2586 * Any unsolicited data will get dumped for failed command inside of 2582 * Any unsolicited data will get dumped for failed command inside of
2587 * the fabric plugin 2583 * the fabric plugin
2588 */ 2584 */
2589 spin_lock_irqsave(&se_cmd->t_task.t_state_lock, flags); 2585 spin_lock_irqsave(&se_cmd->t_state_lock, flags);
2590 se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED; 2586 se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
2591 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 2587 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2592 spin_unlock_irqrestore(&se_cmd->t_task.t_state_lock, flags); 2588 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
2593 2589
2594 se_cmd->se_tfo->new_cmd_failure(se_cmd); 2590 se_cmd->se_tfo->new_cmd_failure(se_cmd);
2595} 2591}
@@ -2799,17 +2795,18 @@ static void transport_xor_callback(struct se_cmd *cmd)
2799 return; 2795 return;
2800 } 2796 }
2801 /* 2797 /*
2802 * Copy the scatterlist WRITE buffer located at cmd->t_task.t_mem_list 2798 * Copy the scatterlist WRITE buffer located at cmd->t_mem_list
2803 * into the locally allocated *buf 2799 * into the locally allocated *buf
2804 */ 2800 */
2805 transport_memcpy_se_mem_read_contig(cmd, buf, &cmd->t_task.t_mem_list); 2801 transport_memcpy_se_mem_read_contig(buf, &cmd->t_mem_list,
2802 cmd->data_length);
2806 /* 2803 /*
2807 * Now perform the XOR against the BIDI read memory located at 2804 * Now perform the XOR against the BIDI read memory located at
2808 * cmd->t_task.t_mem_bidi_list 2805 * cmd->t_mem_bidi_list
2809 */ 2806 */
2810 2807
2811 offset = 0; 2808 offset = 0;
2812 list_for_each_entry(se_mem, &cmd->t_task.t_mem_bidi_list, se_list) { 2809 list_for_each_entry(se_mem, &cmd->t_mem_bidi_list, se_list) {
2813 addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0); 2810 addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0);
2814 if (!(addr)) 2811 if (!(addr))
2815 goto out; 2812 goto out;
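
For XDWRITEREAD, the callback above first flattens the WRITE payload into buf and then XORs it, chunk by chunk, into the BIDI READ pages it kmaps, advancing a running offset. The arithmetic in userspace form, assuming an illustrative struct rd_chunk in place of struct se_mem:

    #include <stddef.h>

    struct rd_chunk {               /* stands in for struct se_mem */
            unsigned char *data;    /* like kmap_atomic() + se_off */
            size_t         len;     /* like se_mem->se_len         */
    };

    /* XOR the flattened WRITE payload into each READ chunk in place,
     * keeping a running offset exactly as the loop above does. */
    static void xor_write_into_read(const unsigned char *wbuf,
                                    struct rd_chunk *c, size_t nchunks)
    {
            size_t off = 0;

            for (size_t i = 0; i < nchunks; i++) {
                    for (size_t j = 0; j < c[i].len; j++)
                            c[i].data[j] ^= wbuf[off + j];
                    off += c[i].len;
            }
    }
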
@@ -2837,14 +2834,14 @@ static int transport_get_sense_data(struct se_cmd *cmd)
2837 2834
2838 WARN_ON(!cmd->se_lun); 2835 WARN_ON(!cmd->se_lun);
2839 2836
2840 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); 2837 spin_lock_irqsave(&cmd->t_state_lock, flags);
2841 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { 2838 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
2842 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); 2839 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2843 return 0; 2840 return 0;
2844 } 2841 }
2845 2842
2846 list_for_each_entry_safe(task, task_tmp, 2843 list_for_each_entry_safe(task, task_tmp,
2847 &cmd->t_task.t_task_list, t_list) { 2844 &cmd->t_task_list, t_list) {
2848 2845
2849 if (!task->task_sense) 2846 if (!task->task_sense)
2850 continue; 2847 continue;
@@ -2866,7 +2863,7 @@ static int transport_get_sense_data(struct se_cmd *cmd)
2866 cmd->se_tfo->get_task_tag(cmd), task->task_no); 2863 cmd->se_tfo->get_task_tag(cmd), task->task_no);
2867 continue; 2864 continue;
2868 } 2865 }
2869 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); 2866 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2870 2867
2871 offset = cmd->se_tfo->set_fabric_sense_len(cmd, 2868 offset = cmd->se_tfo->set_fabric_sense_len(cmd,
2872 TRANSPORT_SENSE_BUFFER); 2869 TRANSPORT_SENSE_BUFFER);
@@ -2884,7 +2881,7 @@ static int transport_get_sense_data(struct se_cmd *cmd)
2884 cmd->scsi_status); 2881 cmd->scsi_status);
2885 return 0; 2882 return 0;
2886 } 2883 }
2887 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); 2884 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2888 2885
2889 return -1; 2886 return -1;
2890} 2887}
@@ -2895,7 +2892,7 @@ static int transport_allocate_resources(struct se_cmd *cmd)
2895 2892
2896 if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || 2893 if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
2897 (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) 2894 (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB))
2898 return transport_generic_get_mem(cmd, length, PAGE_SIZE); 2895 return transport_generic_get_mem(cmd, length);
2899 else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) 2896 else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB)
2900 return transport_generic_allocate_buf(cmd, length); 2897 return transport_generic_allocate_buf(cmd, length);
2901 else 2898 else
@@ -2999,7 +2996,7 @@ static int transport_generic_cmd_sequencer(
2999 goto out_unsupported_cdb; 2996 goto out_unsupported_cdb;
3000 size = transport_get_size(sectors, cdb, cmd); 2997 size = transport_get_size(sectors, cdb, cmd);
3001 cmd->transport_split_cdb = &split_cdb_XX_6; 2998 cmd->transport_split_cdb = &split_cdb_XX_6;
3002 cmd->t_task.t_task_lba = transport_lba_21(cdb); 2999 cmd->t_task_lba = transport_lba_21(cdb);
3003 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 3000 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3004 break; 3001 break;
3005 case READ_10: 3002 case READ_10:
@@ -3008,7 +3005,7 @@ static int transport_generic_cmd_sequencer(
3008 goto out_unsupported_cdb; 3005 goto out_unsupported_cdb;
3009 size = transport_get_size(sectors, cdb, cmd); 3006 size = transport_get_size(sectors, cdb, cmd);
3010 cmd->transport_split_cdb = &split_cdb_XX_10; 3007 cmd->transport_split_cdb = &split_cdb_XX_10;
3011 cmd->t_task.t_task_lba = transport_lba_32(cdb); 3008 cmd->t_task_lba = transport_lba_32(cdb);
3012 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 3009 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3013 break; 3010 break;
3014 case READ_12: 3011 case READ_12:
@@ -3017,7 +3014,7 @@ static int transport_generic_cmd_sequencer(
3017 goto out_unsupported_cdb; 3014 goto out_unsupported_cdb;
3018 size = transport_get_size(sectors, cdb, cmd); 3015 size = transport_get_size(sectors, cdb, cmd);
3019 cmd->transport_split_cdb = &split_cdb_XX_12; 3016 cmd->transport_split_cdb = &split_cdb_XX_12;
3020 cmd->t_task.t_task_lba = transport_lba_32(cdb); 3017 cmd->t_task_lba = transport_lba_32(cdb);
3021 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 3018 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3022 break; 3019 break;
3023 case READ_16: 3020 case READ_16:
@@ -3026,7 +3023,7 @@ static int transport_generic_cmd_sequencer(
3026 goto out_unsupported_cdb; 3023 goto out_unsupported_cdb;
3027 size = transport_get_size(sectors, cdb, cmd); 3024 size = transport_get_size(sectors, cdb, cmd);
3028 cmd->transport_split_cdb = &split_cdb_XX_16; 3025 cmd->transport_split_cdb = &split_cdb_XX_16;
3029 cmd->t_task.t_task_lba = transport_lba_64(cdb); 3026 cmd->t_task_lba = transport_lba_64(cdb);
3030 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 3027 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3031 break; 3028 break;
3032 case WRITE_6: 3029 case WRITE_6:
@@ -3035,7 +3032,7 @@ static int transport_generic_cmd_sequencer(
3035 goto out_unsupported_cdb; 3032 goto out_unsupported_cdb;
3036 size = transport_get_size(sectors, cdb, cmd); 3033 size = transport_get_size(sectors, cdb, cmd);
3037 cmd->transport_split_cdb = &split_cdb_XX_6; 3034 cmd->transport_split_cdb = &split_cdb_XX_6;
3038 cmd->t_task.t_task_lba = transport_lba_21(cdb); 3035 cmd->t_task_lba = transport_lba_21(cdb);
3039 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 3036 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3040 break; 3037 break;
3041 case WRITE_10: 3038 case WRITE_10:
@@ -3044,8 +3041,8 @@ static int transport_generic_cmd_sequencer(
3044 goto out_unsupported_cdb; 3041 goto out_unsupported_cdb;
3045 size = transport_get_size(sectors, cdb, cmd); 3042 size = transport_get_size(sectors, cdb, cmd);
3046 cmd->transport_split_cdb = &split_cdb_XX_10; 3043 cmd->transport_split_cdb = &split_cdb_XX_10;
3047 cmd->t_task.t_task_lba = transport_lba_32(cdb); 3044 cmd->t_task_lba = transport_lba_32(cdb);
3048 cmd->t_task.t_tasks_fua = (cdb[1] & 0x8); 3045 cmd->t_tasks_fua = (cdb[1] & 0x8);
3049 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 3046 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3050 break; 3047 break;
3051 case WRITE_12: 3048 case WRITE_12:
@@ -3054,8 +3051,8 @@ static int transport_generic_cmd_sequencer(
3054 goto out_unsupported_cdb; 3051 goto out_unsupported_cdb;
3055 size = transport_get_size(sectors, cdb, cmd); 3052 size = transport_get_size(sectors, cdb, cmd);
3056 cmd->transport_split_cdb = &split_cdb_XX_12; 3053 cmd->transport_split_cdb = &split_cdb_XX_12;
3057 cmd->t_task.t_task_lba = transport_lba_32(cdb); 3054 cmd->t_task_lba = transport_lba_32(cdb);
3058 cmd->t_task.t_tasks_fua = (cdb[1] & 0x8); 3055 cmd->t_tasks_fua = (cdb[1] & 0x8);
3059 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 3056 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3060 break; 3057 break;
3061 case WRITE_16: 3058 case WRITE_16:
@@ -3064,20 +3061,20 @@ static int transport_generic_cmd_sequencer(
3064 goto out_unsupported_cdb; 3061 goto out_unsupported_cdb;
3065 size = transport_get_size(sectors, cdb, cmd); 3062 size = transport_get_size(sectors, cdb, cmd);
3066 cmd->transport_split_cdb = &split_cdb_XX_16; 3063 cmd->transport_split_cdb = &split_cdb_XX_16;
3067 cmd->t_task.t_task_lba = transport_lba_64(cdb); 3064 cmd->t_task_lba = transport_lba_64(cdb);
3068 cmd->t_task.t_tasks_fua = (cdb[1] & 0x8); 3065 cmd->t_tasks_fua = (cdb[1] & 0x8);
3069 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 3066 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3070 break; 3067 break;
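
Each branch above is pure CDB decoding: transport_lba_21/32/64 pull the big-endian LBA out of the fixed offsets of 6/10/16-byte CDBs, and FUA is bit 3 of byte 1 on the 10/12/16-byte writes. A standalone decode of a WRITE_10 CDB, using explicit shifts instead of the kernel's get_unaligned helpers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const uint8_t cdb[10] = {
                    0x2a, 0x08,              /* WRITE_10, FUA bit set  */
                    0x00, 0x00, 0x10, 0x00,  /* LBA = 0x1000           */
                    0x00,                    /* group number           */
                    0x00, 0x08,              /* transfer length = 8    */
                    0x00                     /* control                */
            };
            uint32_t lba = (uint32_t)cdb[2] << 24 | (uint32_t)cdb[3] << 16 |
                           (uint32_t)cdb[4] << 8  | cdb[5];
            uint16_t sectors = (uint16_t)(cdb[7] << 8 | cdb[8]);
            int fua = !!(cdb[1] & 0x8);      /* cf. cmd->t_tasks_fua */

            printf("lba=%u sectors=%u fua=%d\n", lba, sectors, fua);
            return 0;
    }
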
3071 case XDWRITEREAD_10: 3068 case XDWRITEREAD_10:
3072 if ((cmd->data_direction != DMA_TO_DEVICE) || 3069 if ((cmd->data_direction != DMA_TO_DEVICE) ||
3073 !(cmd->t_task.t_tasks_bidi)) 3070 !(cmd->t_tasks_bidi))
3074 goto out_invalid_cdb_field; 3071 goto out_invalid_cdb_field;
3075 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); 3072 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
3076 if (sector_ret) 3073 if (sector_ret)
3077 goto out_unsupported_cdb; 3074 goto out_unsupported_cdb;
3078 size = transport_get_size(sectors, cdb, cmd); 3075 size = transport_get_size(sectors, cdb, cmd);
3079 cmd->transport_split_cdb = &split_cdb_XX_10; 3076 cmd->transport_split_cdb = &split_cdb_XX_10;
3080 cmd->t_task.t_task_lba = transport_lba_32(cdb); 3077 cmd->t_task_lba = transport_lba_32(cdb);
3081 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 3078 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3082 passthrough = (dev->transport->transport_type == 3079 passthrough = (dev->transport->transport_type ==
3083 TRANSPORT_PLUGIN_PHBA_PDEV); 3080 TRANSPORT_PLUGIN_PHBA_PDEV);
@@ -3090,7 +3087,7 @@ static int transport_generic_cmd_sequencer(
3090 * Setup BIDI XOR callback to be run during transport_generic_complete_ok() 3087 * Setup BIDI XOR callback to be run during transport_generic_complete_ok()
3091 */ 3088 */
3092 cmd->transport_complete_callback = &transport_xor_callback; 3089 cmd->transport_complete_callback = &transport_xor_callback;
3093 cmd->t_task.t_tasks_fua = (cdb[1] & 0x8); 3090 cmd->t_tasks_fua = (cdb[1] & 0x8);
3094 break; 3091 break;
3095 case VARIABLE_LENGTH_CMD: 3092 case VARIABLE_LENGTH_CMD:
3096 service_action = get_unaligned_be16(&cdb[8]); 3093 service_action = get_unaligned_be16(&cdb[8]);
@@ -3112,7 +3109,7 @@ static int transport_generic_cmd_sequencer(
3112 * XDWRITE_READ_32 logic. 3109 * XDWRITE_READ_32 logic.
3113 */ 3110 */
3114 cmd->transport_split_cdb = &split_cdb_XX_32; 3111 cmd->transport_split_cdb = &split_cdb_XX_32;
3115 cmd->t_task.t_task_lba = transport_lba_64_ext(cdb); 3112 cmd->t_task_lba = transport_lba_64_ext(cdb);
3116 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 3113 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3117 3114
3118 /* 3115 /*
@@ -3126,7 +3123,7 @@ static int transport_generic_cmd_sequencer(
3126 * transport_generic_complete_ok() 3123 * transport_generic_complete_ok()
3127 */ 3124 */
3128 cmd->transport_complete_callback = &transport_xor_callback; 3125 cmd->transport_complete_callback = &transport_xor_callback;
3129 cmd->t_task.t_tasks_fua = (cdb[10] & 0x8); 3126 cmd->t_tasks_fua = (cdb[10] & 0x8);
3130 break; 3127 break;
3131 case WRITE_SAME_32: 3128 case WRITE_SAME_32:
3132 sectors = transport_get_sectors_32(cdb, cmd, &sector_ret); 3129 sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
@@ -3138,7 +3135,7 @@ static int transport_generic_cmd_sequencer(
3138 else 3135 else
3139 size = dev->se_sub_dev->se_dev_attrib.block_size; 3136 size = dev->se_sub_dev->se_dev_attrib.block_size;
3140 3137
3141 cmd->t_task.t_task_lba = get_unaligned_be64(&cdb[12]); 3138 cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
3142 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 3139 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3143 3140
3144 /* 3141 /*
@@ -3373,10 +3370,10 @@ static int transport_generic_cmd_sequencer(
3373 */ 3370 */
3374 if (cdb[0] == SYNCHRONIZE_CACHE) { 3371 if (cdb[0] == SYNCHRONIZE_CACHE) {
3375 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); 3372 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
3376 cmd->t_task.t_task_lba = transport_lba_32(cdb); 3373 cmd->t_task_lba = transport_lba_32(cdb);
3377 } else { 3374 } else {
3378 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); 3375 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
3379 cmd->t_task.t_task_lba = transport_lba_64(cdb); 3376 cmd->t_task_lba = transport_lba_64(cdb);
3380 } 3377 }
3381 if (sector_ret) 3378 if (sector_ret)
3382 goto out_unsupported_cdb; 3379 goto out_unsupported_cdb;
@@ -3398,7 +3395,7 @@ static int transport_generic_cmd_sequencer(
3398 * Check to ensure that LBA + Range does not exceed past end of 3395 * Check to ensure that LBA + Range does not exceed past end of
3399 * device. 3396 * device.
3400 */ 3397 */
3401 if (transport_get_sectors(cmd) < 0) 3398 if (!transport_cmd_get_valid_sectors(cmd))
3402 goto out_invalid_cdb_field; 3399 goto out_invalid_cdb_field;
3403 break; 3400 break;
3404 case UNMAP: 3401 case UNMAP:
@@ -3427,7 +3424,7 @@ static int transport_generic_cmd_sequencer(
3427 else 3424 else
3428 size = dev->se_sub_dev->se_dev_attrib.block_size; 3425 size = dev->se_sub_dev->se_dev_attrib.block_size;
3429 3426
3430 cmd->t_task.t_task_lba = get_unaligned_be64(&cdb[2]); 3427 cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
3431 passthrough = (dev->transport->transport_type == 3428 passthrough = (dev->transport->transport_type ==
3432 TRANSPORT_PLUGIN_PHBA_PDEV); 3429 TRANSPORT_PLUGIN_PHBA_PDEV);
3433 /* 3430 /*
@@ -3542,88 +3539,22 @@ out_invalid_cdb_field:
3542 3539
3543static inline void transport_release_tasks(struct se_cmd *); 3540static inline void transport_release_tasks(struct se_cmd *);
3544 3541
3545/*
3546 * This function will copy a contiguous *src buffer into a destination
3547 * struct scatterlist array.
3548 */
3549static void transport_memcpy_write_contig(
3550 struct se_cmd *cmd,
3551 struct scatterlist *sg_d,
3552 unsigned char *src)
3553{
3554 u32 i = 0, length = 0, total_length = cmd->data_length;
3555 void *dst;
3556
3557 while (total_length) {
3558 length = sg_d[i].length;
3559
3560 if (length > total_length)
3561 length = total_length;
3562
3563 dst = sg_virt(&sg_d[i]);
3564
3565 memcpy(dst, src, length);
3566
3567 if (!(total_length -= length))
3568 return;
3569
3570 src += length;
3571 i++;
3572 }
3573}
3574
3575/*
3576 * This function will copy a struct scatterlist array *sg_s into a destination
3577 * contiguous *dst buffer.
3578 */
3579static void transport_memcpy_read_contig(
3580 struct se_cmd *cmd,
3581 unsigned char *dst,
3582 struct scatterlist *sg_s)
3583{
3584 u32 i = 0, length = 0, total_length = cmd->data_length;
3585 void *src;
3586
3587 while (total_length) {
3588 length = sg_s[i].length;
3589
3590 if (length > total_length)
3591 length = total_length;
3592
3593 src = sg_virt(&sg_s[i]);
3594
3595 memcpy(dst, src, length);
3596
3597 if (!(total_length -= length))
3598 return;
3599
3600 dst += length;
3601 i++;
3602 }
3603}
3604
3605static void transport_memcpy_se_mem_read_contig( 3542static void transport_memcpy_se_mem_read_contig(
3606 struct se_cmd *cmd,
3607 unsigned char *dst, 3543 unsigned char *dst,
3608 struct list_head *se_mem_list) 3544 struct list_head *se_mem_list,
3545 u32 tot_len)
3609{ 3546{
3610 struct se_mem *se_mem; 3547 struct se_mem *se_mem;
3611 void *src; 3548 void *src;
3612 u32 length = 0, total_length = cmd->data_length; 3549 u32 length;
3613 3550
3614 list_for_each_entry(se_mem, se_mem_list, se_list) { 3551 list_for_each_entry(se_mem, se_mem_list, se_list) {
3615 length = se_mem->se_len; 3552 length = min_t(u32, se_mem->se_len, tot_len);
3616
3617 if (length > total_length)
3618 length = total_length;
3619
3620 src = page_address(se_mem->se_page) + se_mem->se_off; 3553 src = page_address(se_mem->se_page) + se_mem->se_off;
3621
3622 memcpy(dst, src, length); 3554 memcpy(dst, src, length);
3623 3555 tot_len -= length;
3624 if (!(total_length -= length)) 3556 if (!tot_len)
3625 return; 3557 break;
3626
3627 dst += length; 3558 dst += length;
3628 } 3559 }
3629} 3560}
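
The rewritten copy loop is now bounded by the caller-supplied tot_len via min_t() instead of a cached cmd->data_length, which is what let the cmd parameter go away. Its logic as a self-contained userspace function over an illustrative chunk array:

    #include <stddef.h>
    #include <string.h>

    struct mem_chunk {                     /* stands in for struct se_mem */
            const unsigned char *src;      /* page_address() + se_off     */
            size_t               len;      /* se_mem->se_len              */
    };

    /* Copy at most tot_len bytes from the chunk list into dst, trimming
     * the final chunk exactly as min_t() does above. */
    static void chunks_read_contig(unsigned char *dst,
                                   const struct mem_chunk *c,
                                   size_t nchunks, size_t tot_len)
    {
            for (size_t i = 0; i < nchunks && tot_len; i++) {
                    size_t len = c[i].len < tot_len ? c[i].len : tot_len;

                    memcpy(dst, c[i].src, len);
                    tot_len -= len;
                    dst += len;
            }
    }
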
@@ -3744,14 +3675,15 @@ static void transport_generic_complete_ok(struct se_cmd *cmd)
3744 } 3675 }
3745 spin_unlock(&cmd->se_lun->lun_sep_lock); 3676 spin_unlock(&cmd->se_lun->lun_sep_lock);
3746 /* 3677 /*
3747 * If enabled by TCM fabirc module pre-registered SGL 3678 * If enabled by TCM fabric module pre-registered SGL
3748 * memory, perform the memcpy() from the TCM internal 3679 * memory, perform the memcpy() from the TCM internal
3749 * contigious buffer back to the original SGL. 3680 * contiguous buffer back to the original SGL.
3750 */ 3681 */
3751 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG) 3682 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
3752 transport_memcpy_write_contig(cmd, 3683 sg_copy_from_buffer(cmd->t_task_pt_sgl,
3753 cmd->t_task.t_task_pt_sgl, 3684 cmd->t_task_pt_sgl_num,
3754 cmd->t_task.t_task_buf); 3685 cmd->t_task_buf,
3686 cmd->data_length);
3755 3687
3756 cmd->se_tfo->queue_data_in(cmd); 3688 cmd->se_tfo->queue_data_in(cmd);
3757 break; 3689 break;
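
This is the "replace custom sg<->buf functions with lib funcs" item from the changelog in action: the hand-rolled transport_memcpy_write_contig() deleted above gives way to the generic scatterlist helper. For reference, the two lib/scatterlist.c helpers involved had roughly this shape at the time; kernel declarations, not a runnable example:

    /* buf -> SGL: pushes the contiguous TCM buffer back out to the
     * fabric's pre-registered SGL when READ data completes */
    size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
                               void *buf, size_t buflen);

    /* SGL -> buf: the mirror direction for incoming WRITE payloads */
    size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
                             void *buf, size_t buflen);

    /* Both return the number of bytes actually copied, clamped to the
     * smaller of buflen and the total SGL length. */
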
@@ -3765,7 +3697,7 @@ static void transport_generic_complete_ok(struct se_cmd *cmd)
3765 /* 3697 /*
3766 * Check if we need to send READ payload for BIDI-COMMAND 3698 * Check if we need to send READ payload for BIDI-COMMAND
3767 */ 3699 */
3768 if (!list_empty(&cmd->t_task.t_mem_bidi_list)) { 3700 if (!list_empty(&cmd->t_mem_bidi_list)) {
3769 spin_lock(&cmd->se_lun->lun_sep_lock); 3701 spin_lock(&cmd->se_lun->lun_sep_lock);
3770 if (cmd->se_lun->lun_sep) { 3702 if (cmd->se_lun->lun_sep) {
3771 cmd->se_lun->lun_sep->sep_stats.tx_data_octets += 3703 cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
@@ -3792,9 +3724,9 @@ static void transport_free_dev_tasks(struct se_cmd *cmd)
3792 struct se_task *task, *task_tmp; 3724 struct se_task *task, *task_tmp;
3793 unsigned long flags; 3725 unsigned long flags;
3794 3726
3795 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); 3727 spin_lock_irqsave(&cmd->t_state_lock, flags);
3796 list_for_each_entry_safe(task, task_tmp, 3728 list_for_each_entry_safe(task, task_tmp,
3797 &cmd->t_task.t_task_list, t_list) { 3729 &cmd->t_task_list, t_list) {
3798 if (atomic_read(&task->task_active)) 3730 if (atomic_read(&task->task_active))
3799 continue; 3731 continue;
3800 3732
@@ -3803,15 +3735,15 @@ static void transport_free_dev_tasks(struct se_cmd *cmd)
3803 3735
3804 list_del(&task->t_list); 3736 list_del(&task->t_list);
3805 3737
3806 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); 3738 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3807 if (task->se_dev) 3739 if (task->se_dev)
3808 task->se_dev->transport->free_task(task); 3740 task->se_dev->transport->free_task(task);
3809 else 3741 else
3810 printk(KERN_ERR "task[%u] - task->se_dev is NULL\n", 3742 printk(KERN_ERR "task[%u] - task->se_dev is NULL\n",
3811 task->task_no); 3743 task->task_no);
3812 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); 3744 spin_lock_irqsave(&cmd->t_state_lock, flags);
3813 } 3745 }
3814 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); 3746 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3815} 3747}
3816 3748
3817static inline void transport_free_pages(struct se_cmd *cmd) 3749static inline void transport_free_pages(struct se_cmd *cmd)
@@ -3824,9 +3756,9 @@ static inline void transport_free_pages(struct se_cmd *cmd)
3824 if (cmd->se_dev->transport->do_se_mem_map) 3756 if (cmd->se_dev->transport->do_se_mem_map)
3825 free_page = 0; 3757 free_page = 0;
3826 3758
3827 if (cmd->t_task.t_task_buf) { 3759 if (cmd->t_task_buf) {
3828 kfree(cmd->t_task.t_task_buf); 3760 kfree(cmd->t_task_buf);
3829 cmd->t_task.t_task_buf = NULL; 3761 cmd->t_task_buf = NULL;
3830 return; 3762 return;
3831 } 3763 }
3832 3764
@@ -3837,7 +3769,7 @@ static inline void transport_free_pages(struct se_cmd *cmd)
3837 return; 3769 return;
3838 3770
3839 list_for_each_entry_safe(se_mem, se_mem_tmp, 3771 list_for_each_entry_safe(se_mem, se_mem_tmp,
3840 &cmd->t_task.t_mem_list, se_list) { 3772 &cmd->t_mem_list, se_list) {
3841 /* 3773 /*
3842 * We only release call __free_page(struct se_mem->se_page) when 3774 * We only release call __free_page(struct se_mem->se_page) when
3843 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use, 3775 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use,
@@ -3848,10 +3780,10 @@ static inline void transport_free_pages(struct se_cmd *cmd)
3848 list_del(&se_mem->se_list); 3780 list_del(&se_mem->se_list);
3849 kmem_cache_free(se_mem_cache, se_mem); 3781 kmem_cache_free(se_mem_cache, se_mem);
3850 } 3782 }
3851 cmd->t_task.t_tasks_se_num = 0; 3783 cmd->t_tasks_se_num = 0;
3852 3784
3853 list_for_each_entry_safe(se_mem, se_mem_tmp, 3785 list_for_each_entry_safe(se_mem, se_mem_tmp,
3854 &cmd->t_task.t_mem_bidi_list, se_list) { 3786 &cmd->t_mem_bidi_list, se_list) {
3855 /* 3787 /*
3856 * We only release call __free_page(struct se_mem->se_page) when 3788 * We only release call __free_page(struct se_mem->se_page) when
3857 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use, 3789 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use,
@@ -3862,7 +3794,7 @@ static inline void transport_free_pages(struct se_cmd *cmd)
3862 list_del(&se_mem->se_list); 3794 list_del(&se_mem->se_list);
3863 kmem_cache_free(se_mem_cache, se_mem); 3795 kmem_cache_free(se_mem_cache, se_mem);
3864 } 3796 }
3865 cmd->t_task.t_tasks_se_bidi_num = 0; 3797 cmd->t_tasks_se_bidi_num = 0;
3866} 3798}
3867 3799
3868static inline void transport_release_tasks(struct se_cmd *cmd) 3800static inline void transport_release_tasks(struct se_cmd *cmd)
@@ -3874,23 +3806,23 @@ static inline int transport_dec_and_check(struct se_cmd *cmd)
3874{ 3806{
3875 unsigned long flags; 3807 unsigned long flags;
3876 3808
3877 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); 3809 spin_lock_irqsave(&cmd->t_state_lock, flags);
3878 if (atomic_read(&cmd->t_task.t_fe_count)) { 3810 if (atomic_read(&cmd->t_fe_count)) {
3879 if (!(atomic_dec_and_test(&cmd->t_task.t_fe_count))) { 3811 if (!(atomic_dec_and_test(&cmd->t_fe_count))) {
3880 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, 3812 spin_unlock_irqrestore(&cmd->t_state_lock,
3881 flags); 3813 flags);
3882 return 1; 3814 return 1;
3883 } 3815 }
3884 } 3816 }
3885 3817
3886 if (atomic_read(&cmd->t_task.t_se_count)) { 3818 if (atomic_read(&cmd->t_se_count)) {
3887 if (!(atomic_dec_and_test(&cmd->t_task.t_se_count))) { 3819 if (!(atomic_dec_and_test(&cmd->t_se_count))) {
3888 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, 3820 spin_unlock_irqrestore(&cmd->t_state_lock,
3889 flags); 3821 flags);
3890 return 1; 3822 return 1;
3891 } 3823 }
3892 } 3824 }
3893 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); 3825 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3894 3826
3895 return 0; 3827 return 0;
3896} 3828}
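
transport_dec_and_check() folds two reference counts into one teardown gate: a non-zero return means a fabric-side (t_fe_count) or engine-side (t_se_count) holder remains and the caller must not release the command yet. Its decision logic, minus the locking and atomics, as a hedged sketch:

    struct cmd_refs {
            int fe_count;   /* fabric-side references, cf. t_fe_count */
            int se_count;   /* engine-side references, cf. t_se_count */
    };

    /* Returns 1 while another holder remains, 0 when the caller may
     * proceed with teardown. The real code does this under t_state_lock
     * with atomic_dec_and_test(); this keeps only the control flow. */
    static int dec_and_check_sketch(struct cmd_refs *r)
    {
            if (r->fe_count && --r->fe_count)
                    return 1;
            if (r->se_count && --r->se_count)
                    return 1;
            return 0;
    }
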
@@ -3902,14 +3834,14 @@ static void transport_release_fe_cmd(struct se_cmd *cmd)
3902 if (transport_dec_and_check(cmd)) 3834 if (transport_dec_and_check(cmd))
3903 return; 3835 return;
3904 3836
3905 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); 3837 spin_lock_irqsave(&cmd->t_state_lock, flags);
3906 if (!(atomic_read(&cmd->t_task.transport_dev_active))) { 3838 if (!(atomic_read(&cmd->transport_dev_active))) {
3907 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); 3839 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3908 goto free_pages; 3840 goto free_pages;
3909 } 3841 }
3910 atomic_set(&cmd->t_task.transport_dev_active, 0); 3842 atomic_set(&cmd->transport_dev_active, 0);
3911 transport_all_task_dev_remove_state(cmd); 3843 transport_all_task_dev_remove_state(cmd);
3912 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); 3844 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3913 3845
3914 transport_release_tasks(cmd); 3846 transport_release_tasks(cmd);
3915free_pages: 3847free_pages:
@@ -3927,22 +3859,22 @@ static int transport_generic_remove(
3927 3859
3928 if (transport_dec_and_check(cmd)) { 3860 if (transport_dec_and_check(cmd)) {
3929 if (session_reinstatement) { 3861 if (session_reinstatement) {
3930 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); 3862 spin_lock_irqsave(&cmd->t_state_lock, flags);
3931 transport_all_task_dev_remove_state(cmd); 3863 transport_all_task_dev_remove_state(cmd);
3932 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, 3864 spin_unlock_irqrestore(&cmd->t_state_lock,
3933 flags); 3865 flags);
3934 } 3866 }
3935 return 1; 3867 return 1;
3936 } 3868 }
3937 3869
3938 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); 3870 spin_lock_irqsave(&cmd->t_state_lock, flags);
3939 if (!(atomic_read(&cmd->t_task.transport_dev_active))) { 3871 if (!(atomic_read(&cmd->transport_dev_active))) {
3940 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); 3872 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3941 goto free_pages; 3873 goto free_pages;
3942 } 3874 }
3943 atomic_set(&cmd->t_task.transport_dev_active, 0); 3875 atomic_set(&cmd->transport_dev_active, 0);
3944 transport_all_task_dev_remove_state(cmd); 3876 transport_all_task_dev_remove_state(cmd);
3945 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); 3877 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3946 3878
3947 transport_release_tasks(cmd); 3879 transport_release_tasks(cmd);
3948 3880
@@ -3977,7 +3909,6 @@ int transport_generic_map_mem_to_cmd(
3977 struct scatterlist *sgl_bidi, 3909 struct scatterlist *sgl_bidi,
3978 u32 sgl_bidi_count) 3910 u32 sgl_bidi_count)
3979{ 3911{
3980 u32 mapped_sg_count = 0;
3981 int ret; 3912 int ret;
3982 3913
3983 if (!sgl || !sgl_count) 3914 if (!sgl || !sgl_count)
@@ -3993,24 +3924,20 @@ int transport_generic_map_mem_to_cmd(
3993 * processed into a TCM struct se_subsystem_dev, we do the mapping 3924 * processed into a TCM struct se_subsystem_dev, we do the mapping
3994 * from the passed physical memory to struct se_mem->se_page here. 3925 * from the passed physical memory to struct se_mem->se_page here.
3995 */ 3926 */
3996 ret = transport_map_sg_to_mem(cmd, 3927 ret = transport_map_sg_to_mem(cmd, &cmd->t_mem_list, sgl);
3997 &cmd->t_task.t_mem_list, sgl, &mapped_sg_count);
3998 if (ret < 0) 3928 if (ret < 0)
3999 return -ENOMEM; 3929 return -ENOMEM;
4000 3930
4001 cmd->t_task.t_tasks_se_num = mapped_sg_count; 3931 cmd->t_tasks_se_num = ret;
4002 /* 3932 /*
4003 * Setup BIDI READ list of struct se_mem elements 3933 * Setup BIDI READ list of struct se_mem elements
4004 */ 3934 */
4005 if (sgl_bidi && sgl_bidi_count) { 3935 if (sgl_bidi && sgl_bidi_count) {
4006 mapped_sg_count = 0; 3936 ret = transport_map_sg_to_mem(cmd, &cmd->t_mem_bidi_list, sgl_bidi);
4007 ret = transport_map_sg_to_mem(cmd,
4008 &cmd->t_task.t_mem_bidi_list, sgl_bidi,
4009 &mapped_sg_count);
4010 if (ret < 0) 3937 if (ret < 0)
4011 return -ENOMEM; 3938 return -ENOMEM;
4012 3939
4013 cmd->t_task.t_tasks_se_bidi_num = mapped_sg_count; 3940 cmd->t_tasks_se_bidi_num = ret;
4014 } 3941 }
4015 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; 3942 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
4016 3943
@@ -4021,7 +3948,7 @@ int transport_generic_map_mem_to_cmd(
4021 return -ENOSYS; 3948 return -ENOSYS;
4022 } 3949 }
4023 /* 3950 /*
4024 * For incoming CDBs using a contiguous buffer internall with TCM, 3951 * For incoming CDBs using a contiguous buffer internally with TCM,
4025 * save the passed struct scatterlist memory. After TCM storage object 3952 * save the passed struct scatterlist memory. After TCM storage object
4026 * processing has completed for this struct se_cmd, TCM core will call 3953 * processing has completed for this struct se_cmd, TCM core will call
4027 * transport_memcpy_[write,read]_contig() as necessary from 3954 * sg_copy_[from,to]_buffer() as necessary from
@@ -4030,8 +3957,8 @@ int transport_generic_map_mem_to_cmd(
4030 * struct scatterlist format. 3957 * struct scatterlist format.
4031 */ 3958 */
4032 cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG; 3959 cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG;
4033 cmd->t_task.t_task_pt_sgl = sgl; 3960 cmd->t_task_pt_sgl = sgl;
4034 /* don't need sgl count? We assume it contains cmd->data_length data */ 3961 cmd->t_task_pt_sgl_num = sgl_count;
4035 } 3962 }
4036 3963
4037 return 0; 3964 return 0;
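
The hunk above picks up the "map_sg_to_mem: return sg_count in return value" change: the mapped-element count now comes back as a non-negative return instead of through the mapped_sg_count out-parameter. The calling convention in miniature (map_elems() is illustrative, not a kernel function):

    #include <errno.h>

    /* Success: returns how many elements were mapped (>= 0).
     * Failure: returns a negative errno. */
    static int map_elems(const void *sgl, int nents)
    {
            if (!sgl || !nents)
                    return -EINVAL;
            /* ... build one tracking entry per element ... */
            return nents;
    }

    static int caller_sketch(const void *sgl, int nents, int *out_count)
    {
            int ret = map_elems(sgl, nents);

            if (ret < 0)
                    return ret;       /* propagate the error */
            *out_count = ret;         /* cf. cmd->t_tasks_se_num = ret */
            return 0;
    }
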
@@ -4044,54 +3971,51 @@ static inline long long transport_dev_end_lba(struct se_device *dev)
4044 return dev->transport->get_blocks(dev) + 1; 3971 return dev->transport->get_blocks(dev) + 1;
4045} 3972}
4046 3973
4047static int transport_get_sectors(struct se_cmd *cmd) 3974static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
4048{ 3975{
4049 struct se_device *dev = cmd->se_dev; 3976 struct se_device *dev = cmd->se_dev;
4050 3977 u32 sectors;
4051 cmd->t_task.t_tasks_sectors =
4052 (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);
4053 if (!(cmd->t_task.t_tasks_sectors))
4054 cmd->t_task.t_tasks_sectors = 1;
4055 3978
4056 if (dev->transport->get_device_type(dev) != TYPE_DISK) 3979 if (dev->transport->get_device_type(dev) != TYPE_DISK)
4057 return 0; 3980 return 0;
4058 3981
4059 if ((cmd->t_task.t_task_lba + cmd->t_task.t_tasks_sectors) > 3982 sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);
3983
3984 if ((cmd->t_task_lba + sectors) >
4060 transport_dev_end_lba(dev)) { 3985 transport_dev_end_lba(dev)) {
4061 printk(KERN_ERR "LBA: %llu Sectors: %u exceeds" 3986 printk(KERN_ERR "LBA: %llu Sectors: %u exceeds"
4062 " transport_dev_end_lba(): %llu\n", 3987 " transport_dev_end_lba(): %llu\n",
4063 cmd->t_task.t_task_lba, cmd->t_task.t_tasks_sectors, 3988 cmd->t_task_lba, sectors,
4064 transport_dev_end_lba(dev)); 3989 transport_dev_end_lba(dev));
4065 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 3990 return 0;
4066 cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
4067 return PYX_TRANSPORT_REQ_TOO_MANY_SECTORS;
4068 } 3991 }
4069 3992
4070 return 0; 3993 return sectors;
4071} 3994}
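
transport_cmd_get_valid_sectors() now derives the sector count on the fly from data_length and the block size, returning 0 when the range would run off the end of the device and leaving the caller to treat 0 as invalid. The arithmetic as a standalone sketch:

    #include <stdint.h>

    /* Returns the sector count for the command, or 0 if LBA + sectors
     * would pass the end of the device (cf. transport_dev_end_lba()). */
    static uint32_t valid_sectors_sketch(uint64_t lba, uint32_t data_length,
                                         uint32_t block_size,
                                         uint64_t dev_end_lba)
    {
            uint32_t sectors = data_length / block_size;

            if (lba + sectors > dev_end_lba)
                    return 0;
            return sectors;
    }
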
4072 3995
4073static int transport_new_cmd_obj(struct se_cmd *cmd) 3996static int transport_new_cmd_obj(struct se_cmd *cmd)
4074{ 3997{
4075 struct se_device *dev = cmd->se_dev; 3998 struct se_device *dev = cmd->se_dev;
4076 u32 task_cdbs = 0, rc; 3999 u32 task_cdbs;
4000 u32 rc;
4077 4001
4078 if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) { 4002 if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
4079 task_cdbs++; 4003 task_cdbs = 1;
4080 cmd->t_task.t_task_cdbs++; 4004 cmd->t_task_list_num = 1;
4081 } else { 4005 } else {
4082 int set_counts = 1; 4006 int set_counts = 1;
4083 4007
4084 /* 4008 /*
4085 * Setup any BIDI READ tasks and memory from 4009 * Setup any BIDI READ tasks and memory from
4086 * cmd->t_task.t_mem_bidi_list so the READ struct se_tasks 4010 * cmd->t_mem_bidi_list so the READ struct se_tasks
4087 * are queued first for the non pSCSI passthrough case. 4011 * are queued first for the non pSCSI passthrough case.
4088 */ 4012 */
4089 if (!list_empty(&cmd->t_task.t_mem_bidi_list) && 4013 if (!list_empty(&cmd->t_mem_bidi_list) &&
4090 (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) { 4014 (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) {
4091 rc = transport_generic_get_cdb_count(cmd, 4015 rc = transport_allocate_tasks(cmd,
4092 cmd->t_task.t_task_lba, 4016 cmd->t_task_lba,
4093 cmd->t_task.t_tasks_sectors, 4017 transport_cmd_get_valid_sectors(cmd),
4094 DMA_FROM_DEVICE, &cmd->t_task.t_mem_bidi_list, 4018 DMA_FROM_DEVICE, &cmd->t_mem_bidi_list,
4095 set_counts); 4019 set_counts);
4096 if (!(rc)) { 4020 if (!(rc)) {
4097 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 4021 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
@@ -4102,13 +4026,13 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
4102 set_counts = 0; 4026 set_counts = 0;
4103 } 4027 }
4104 /* 4028 /*
4105 * Setup the tasks and memory from cmd->t_task.t_mem_list 4029 * Setup the tasks and memory from cmd->t_mem_list
4106 * Note for BIDI transfers this will contain the WRITE payload 4030 * Note for BIDI transfers this will contain the WRITE payload
4107 */ 4031 */
4108 task_cdbs = transport_generic_get_cdb_count(cmd, 4032 task_cdbs = transport_allocate_tasks(cmd,
4109 cmd->t_task.t_task_lba, 4033 cmd->t_task_lba,
4110 cmd->t_task.t_tasks_sectors, 4034 transport_cmd_get_valid_sectors(cmd),
4111 cmd->data_direction, &cmd->t_task.t_mem_list, 4035 cmd->data_direction, &cmd->t_mem_list,
4112 set_counts); 4036 set_counts);
4113 if (!(task_cdbs)) { 4037 if (!(task_cdbs)) {
4114 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 4038 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
@@ -4116,26 +4040,25 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
4116 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 4040 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
4117 return PYX_TRANSPORT_LU_COMM_FAILURE; 4041 return PYX_TRANSPORT_LU_COMM_FAILURE;
4118 } 4042 }
4119 cmd->t_task.t_task_cdbs += task_cdbs; 4043 cmd->t_task_list_num = task_cdbs;
4120 4044
4121#if 0 4045#if 0
4122 printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:" 4046 printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:"
4123 " %u, t_task_cdbs: %u\n", cmd->data_length, 4047 " %u, t_task_cdbs: %u\n", cmd->data_length,
4124 cmd->t_task.t_task_lba, cmd->t_task.t_tasks_sectors, 4048 cmd->t_task_lba, cmd->t_tasks_sectors,
4125 cmd->t_task.t_task_cdbs); 4049 cmd->t_task_cdbs);
4126#endif 4050#endif
4127 } 4051 }
4128 4052
4129 atomic_set(&cmd->t_task.t_task_cdbs_left, task_cdbs); 4053 atomic_set(&cmd->t_task_cdbs_left, task_cdbs);
4130 atomic_set(&cmd->t_task.t_task_cdbs_ex_left, task_cdbs); 4054 atomic_set(&cmd->t_task_cdbs_ex_left, task_cdbs);
4131 atomic_set(&cmd->t_task.t_task_cdbs_timeout_left, task_cdbs); 4055 atomic_set(&cmd->t_task_cdbs_timeout_left, task_cdbs);
4132 return 0; 4056 return 0;
4133} 4057}
4134 4058
4135static int 4059static int
4136transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size) 4060transport_generic_get_mem(struct se_cmd *cmd, u32 length)
4137{ 4061{
4138 unsigned char *buf;
4139 struct se_mem *se_mem; 4062 struct se_mem *se_mem;
4140 4063
4141 /* 4064 /*
@@ -4152,24 +4075,16 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size)
4152 } 4075 }
4153 4076
4154/* #warning FIXME Allocate contiguous pages for struct se_mem elements */ 4077/* #warning FIXME Allocate contiguous pages for struct se_mem elements */
4155 se_mem->se_page = alloc_pages(GFP_KERNEL, 0); 4078 se_mem->se_page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
4156 if (!(se_mem->se_page)) { 4079 if (!(se_mem->se_page)) {
4157 printk(KERN_ERR "alloc_pages() failed\n"); 4080 printk(KERN_ERR "alloc_pages() failed\n");
4158 goto out; 4081 goto out;
4159 } 4082 }
4160 4083
4161 buf = kmap_atomic(se_mem->se_page, KM_IRQ0);
4162 if (!(buf)) {
4163 printk(KERN_ERR "kmap_atomic() failed\n");
4164 goto out;
4165 }
4166 INIT_LIST_HEAD(&se_mem->se_list); 4084 INIT_LIST_HEAD(&se_mem->se_list);
4167 se_mem->se_len = (length > dma_size) ? dma_size : length; 4085 se_mem->se_len = min_t(u32, length, PAGE_SIZE);
4168 memset(buf, 0, se_mem->se_len); 4086 list_add_tail(&se_mem->se_list, &cmd->t_mem_list);
4169 kunmap_atomic(buf, KM_IRQ0); 4087 cmd->t_tasks_se_num++;
4170
4171 list_add_tail(&se_mem->se_list, &cmd->t_task.t_mem_list);
4172 cmd->t_task.t_tasks_se_num++;
4173 4088
4174 DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)" 4089 DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)"
4175 " Offset(%u)\n", se_mem->se_page, se_mem->se_len, 4090 " Offset(%u)\n", se_mem->se_page, se_mem->se_len,
@@ -4179,7 +4094,7 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size)
4179 } 4094 }
4180 4095
4181 DEBUG_MEM("Allocated total struct se_mem elements(%u)\n", 4096 DEBUG_MEM("Allocated total struct se_mem elements(%u)\n",
4182 cmd->t_task.t_tasks_se_num); 4097 cmd->t_tasks_se_num);
4183 4098
4184 return 0; 4099 return 0;
4185out: 4100out:
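
With __GFP_ZERO the page allocator returns pages that are already zeroed, which is what lets the kmap_atomic()/memset()/kunmap_atomic() sequence above disappear; each chunk's length is simply min(remaining, PAGE_SIZE). A rough user-space analogue, with calloc() standing in for alloc_pages(GFP_KERNEL | __GFP_ZERO, 0):

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096u

    /* Model of the loop above: cover `length` bytes with zeroed,
     * page-sized chunks. The length value is invented. */
    int main(void)
    {
        unsigned int length = 10000, off = 0, nchunks = 0;

        while (off < length) {
            unsigned int len = length - off;

            if (len > PAGE_SIZE)
                len = PAGE_SIZE;    /* se_len = min_t(u32, length, PAGE_SIZE) */
            void *page = calloc(1, PAGE_SIZE);  /* pre-zeroed allocation */
            if (!page)
                return 1;
            nchunks++;
            off += len;
            free(page);             /* model only; the driver keeps them */
        }
        printf("allocated %u zeroed chunks\n", nchunks); /* 3 for 10000 B */
        return 0;
    }
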
@@ -4211,7 +4126,7 @@ int transport_init_task_sg(
4211 sg_length = se_mem->se_len; 4126 sg_length = se_mem->se_len;
4212 4127
4213 if (!(list_is_last(&se_mem->se_list, 4128 if (!(list_is_last(&se_mem->se_list,
4214 &se_cmd->t_task.t_mem_list))) 4129 &se_cmd->t_mem_list)))
4215 se_mem = list_entry(se_mem->se_list.next, 4130 se_mem = list_entry(se_mem->se_list.next,
4216 struct se_mem, se_list); 4131 struct se_mem, se_list);
4217 } else { 4132 } else {
@@ -4231,7 +4146,7 @@ int transport_init_task_sg(
4231 sg_length = (se_mem->se_len - task_offset); 4146 sg_length = (se_mem->se_len - task_offset);
4232 4147
4233 if (!(list_is_last(&se_mem->se_list, 4148 if (!(list_is_last(&se_mem->se_list,
4234 &se_cmd->t_task.t_mem_list))) 4149 &se_cmd->t_mem_list)))
4235 se_mem = list_entry(se_mem->se_list.next, 4150 se_mem = list_entry(se_mem->se_list.next,
4236 struct se_mem, se_list); 4151 struct se_mem, se_list);
4237 } 4152 }
@@ -4272,7 +4187,7 @@ next:
4272 * Setup task->task_sg_bidi for SCSI READ payload for 4187 * Setup task->task_sg_bidi for SCSI READ payload for
4273 * TCM/pSCSI passthrough if present for BIDI-COMMAND 4188 * TCM/pSCSI passthrough if present for BIDI-COMMAND
4274 */ 4189 */
4275 if (!list_empty(&se_cmd->t_task.t_mem_bidi_list) && 4190 if (!list_empty(&se_cmd->t_mem_bidi_list) &&
4276 (se_dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) { 4191 (se_dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) {
4277 task->task_sg_bidi = kzalloc(task_sg_num_padded * 4192 task->task_sg_bidi = kzalloc(task_sg_num_padded *
4278 sizeof(struct scatterlist), GFP_KERNEL); 4193 sizeof(struct scatterlist), GFP_KERNEL);
@@ -4308,59 +4223,19 @@ next:
4308 return task->task_sg_num; 4223 return task->task_sg_num;
4309} 4224}
4310 4225
4311static inline int transport_set_tasks_sectors_disk( 4226/* Reduce sectors if they are too long for the device */
4312 struct se_task *task, 4227static inline sector_t transport_limit_task_sectors(
4313 struct se_device *dev, 4228 struct se_device *dev,
4314 unsigned long long lba, 4229 unsigned long long lba,
4315 u32 sectors, 4230 sector_t sectors)
4316 int *max_sectors_set)
4317{ 4231{
4318 if ((lba + sectors) > transport_dev_end_lba(dev)) { 4232 sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
4319 task->task_sectors = ((transport_dev_end_lba(dev) - lba) + 1);
4320 4233
4321 if (task->task_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) { 4234 if (dev->transport->get_device_type(dev) == TYPE_DISK)
4322 task->task_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors; 4235 if ((lba + sectors) > transport_dev_end_lba(dev))
4323 *max_sectors_set = 1; 4236 sectors = ((transport_dev_end_lba(dev) - lba) + 1);
4324 }
4325 } else {
4326 if (sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) {
4327 task->task_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
4328 *max_sectors_set = 1;
4329 } else
4330 task->task_sectors = sectors;
4331 }
4332 4237
4333 return 0; 4238 return sectors;
4334}
4335
4336static inline int transport_set_tasks_sectors_non_disk(
4337 struct se_task *task,
4338 struct se_device *dev,
4339 unsigned long long lba,
4340 u32 sectors,
4341 int *max_sectors_set)
4342{
4343 if (sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) {
4344 task->task_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
4345 *max_sectors_set = 1;
4346 } else
4347 task->task_sectors = sectors;
4348
4349 return 0;
4350}
4351
4352static inline int transport_set_tasks_sectors(
4353 struct se_task *task,
4354 struct se_device *dev,
4355 unsigned long long lba,
4356 u32 sectors,
4357 int *max_sectors_set)
4358{
4359 return (dev->transport->get_device_type(dev) == TYPE_DISK) ?
4360 transport_set_tasks_sectors_disk(task, dev, lba, sectors,
4361 max_sectors_set) :
4362 transport_set_tasks_sectors_non_disk(task, dev, lba, sectors,
4363 max_sectors_set);
4364} 4239}
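
transport_limit_task_sectors() collapses the three *_set_tasks_sectors_* helpers into one clamp: first against the device's max_sectors attribute, then (for TYPE_DISK) against the space left before the end of the device. The clamp modeled in isolation (illustrative numbers):

    #include <stdio.h>

    typedef unsigned long long sector_t;

    /* Model of the helper above: clamp to max_sectors, then, for disks,
     * to whatever remains before the device end. */
    static sector_t limit_sectors(sector_t lba, sector_t sectors,
                                  sector_t max_sectors, sector_t end_lba,
                                  int is_disk)
    {
        if (sectors > max_sectors)
            sectors = max_sectors;
        if (is_disk && lba + sectors > end_lba)
            sectors = end_lba - lba + 1;
        return sectors;
    }

    int main(void)
    {
        /* 300 sectors requested, device caps each task at 128 -> 128 */
        printf("%llu\n", limit_sectors(0, 300, 128, 1000000, 1));
        return 0;
    }
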
4365 4240
4366/* 4241/*
@@ -4369,11 +4244,11 @@ static inline int transport_set_tasks_sectors(
4369static int transport_map_sg_to_mem( 4244static int transport_map_sg_to_mem(
4370 struct se_cmd *cmd, 4245 struct se_cmd *cmd,
4371 struct list_head *se_mem_list, 4246 struct list_head *se_mem_list,
4372 struct scatterlist *sg, 4247 struct scatterlist *sg)
4373 u32 *sg_count)
4374{ 4248{
4375 struct se_mem *se_mem; 4249 struct se_mem *se_mem;
4376 u32 cmd_size = cmd->data_length; 4250 u32 cmd_size = cmd->data_length;
4251 int sg_count = 0;
4377 4252
4378 WARN_ON(!sg); 4253 WARN_ON(!sg);
4379 4254
@@ -4403,7 +4278,7 @@ static int transport_map_sg_to_mem(
4403 se_mem->se_len = cmd_size; 4278 se_mem->se_len = cmd_size;
4404 4279
4405 cmd_size -= se_mem->se_len; 4280 cmd_size -= se_mem->se_len;
4406 (*sg_count)++; 4281 sg_count++;
4407 4282
4408 DEBUG_MEM("sg_to_mem: sg_count: %u cmd_size: %u\n", 4283 DEBUG_MEM("sg_to_mem: sg_count: %u cmd_size: %u\n",
4409 sg_count, cmd_size); 4284 sg_count, cmd_size);
@@ -4415,7 +4290,7 @@ static int transport_map_sg_to_mem(
4415 4290
4416 DEBUG_MEM("task[0] - Mapped(%u) struct scatterlist segments\n", sg_count); 4291 DEBUG_MEM("task[0] - Mapped(%u) struct scatterlist segments\n", sg_count);
4417 4292
4418 return 0; 4293 return sg_count;
4419} 4294}
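
The mapping loop consumes cmd->data_length across the source segments, clipping the final segment so no more than the command's payload is mapped, and the segment count travels back in the return value. A standalone model of that walk (segment lengths invented):

    #include <stdio.h>

    int main(void)
    {
        unsigned int seg_len[] = { 4096, 4096, 4096 };
        unsigned int cmd_size = 9000;   /* command payload in bytes */
        int sg_count = 0;

        for (int i = 0; i < 3 && cmd_size; i++) {
            unsigned int len = seg_len[i];

            if (len > cmd_size)
                len = cmd_size;         /* se_mem->se_len = cmd_size */
            cmd_size -= len;
            sg_count++;
        }
        printf("mapped %d segments\n", sg_count); /* 3: 4096+4096+808 */
        return 0;
    }
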
4420 4295
4421/* transport_map_mem_to_sg(): 4296/* transport_map_mem_to_sg():
@@ -4425,7 +4300,7 @@ static int transport_map_sg_to_mem(
4425int transport_map_mem_to_sg( 4300int transport_map_mem_to_sg(
4426 struct se_task *task, 4301 struct se_task *task,
4427 struct list_head *se_mem_list, 4302 struct list_head *se_mem_list,
4428 void *in_mem, 4303 struct scatterlist *sg,
4429 struct se_mem *in_se_mem, 4304 struct se_mem *in_se_mem,
4430 struct se_mem **out_se_mem, 4305 struct se_mem **out_se_mem,
4431 u32 *se_mem_cnt, 4306 u32 *se_mem_cnt,
@@ -4433,7 +4308,6 @@ int transport_map_mem_to_sg(
4433{ 4308{
4434 struct se_cmd *se_cmd = task->task_se_cmd; 4309 struct se_cmd *se_cmd = task->task_se_cmd;
4435 struct se_mem *se_mem = in_se_mem; 4310 struct se_mem *se_mem = in_se_mem;
4436 struct scatterlist *sg = (struct scatterlist *)in_mem;
4437 u32 task_size = task->task_size, sg_no = 0; 4311 u32 task_size = task->task_size, sg_no = 0;
4438 4312
4439 if (!sg) { 4313 if (!sg) {
@@ -4444,7 +4318,7 @@ int transport_map_mem_to_sg(
4444 4318
4445 while (task_size != 0) { 4319 while (task_size != 0) {
4446 /* 4320 /*
4447 * Setup the contigious array of scatterlists for 4321 * Setup the contiguous array of scatterlists for
4448 * this struct se_task. 4322 * this struct se_task.
4449 */ 4323 */
4450 sg_assign_page(sg, se_mem->se_page); 4324 sg_assign_page(sg, se_mem->se_page);
@@ -4456,7 +4330,7 @@ int transport_map_mem_to_sg(
4456 sg->length = se_mem->se_len; 4330 sg->length = se_mem->se_len;
4457 4331
4458 if (!(list_is_last(&se_mem->se_list, 4332 if (!(list_is_last(&se_mem->se_list,
4459 &se_cmd->t_task.t_mem_list))) { 4333 &se_cmd->t_mem_list))) {
4460 se_mem = list_entry(se_mem->se_list.next, 4334 se_mem = list_entry(se_mem->se_list.next,
4461 struct se_mem, se_list); 4335 struct se_mem, se_list);
4462 (*se_mem_cnt)++; 4336 (*se_mem_cnt)++;
@@ -4492,7 +4366,7 @@ int transport_map_mem_to_sg(
4492 sg->length = (se_mem->se_len - *task_offset); 4366 sg->length = (se_mem->se_len - *task_offset);
4493 4367
4494 if (!(list_is_last(&se_mem->se_list, 4368 if (!(list_is_last(&se_mem->se_list,
4495 &se_cmd->t_task.t_mem_list))) { 4369 &se_cmd->t_mem_list))) {
4496 se_mem = list_entry(se_mem->se_list.next, 4370 se_mem = list_entry(se_mem->se_list.next,
4497 struct se_mem, se_list); 4371 struct se_mem, se_list);
4498 (*se_mem_cnt)++; 4372 (*se_mem_cnt)++;
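
transport_map_mem_to_sg() now takes a typed struct scatterlist pointer instead of a void pointer, dropping the cast, but the fill logic is unchanged: each task consumes task_size bytes from the shared se_mem list, and because one se_mem may straddle two tasks, *task_offset carries the partial-consumption point into the next call. A self-contained model of that carry-over (chunk sizes invented):

    #include <stdio.h>

    struct chunk { unsigned int len; };

    /* Model of the fill loop above: *off mirrors *task_offset in
     * transport_map_mem_to_sg(). */
    static unsigned int fill_task(const struct chunk *c, int nchunks,
                                  unsigned int task_size,
                                  int *idx, unsigned int *off)
    {
        unsigned int sg_no = 0;

        while (task_size && *idx < nchunks) {
            unsigned int avail = c[*idx].len - *off;
            unsigned int take = avail < task_size ? avail : task_size;

            task_size -= take;
            sg_no++;
            if (take == avail) {        /* chunk fully consumed */
                (*idx)++;
                *off = 0;
            } else {
                *off += take;           /* partial: carry the offset */
            }
        }
        return sg_no;
    }

    int main(void)
    {
        struct chunk c[] = { {4096}, {4096}, {4096} };
        int idx = 0;
        unsigned int off = 0;

        printf("task0 sg entries: %u\n", fill_task(c, 3, 6000, &idx, &off));
        printf("task1 sg entries: %u\n", fill_task(c, 3, 6000, &idx, &off));
        return 0;   /* 2 and 2: 4096+1904, then 2192+3808 */
    }
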
@@ -4548,9 +4422,9 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
4548 } 4422 }
4549 /* 4423 /*
4550 * Walk the struct se_task list and setup scatterlist chains 4424 * Walk the struct se_task list and setup scatterlist chains
4551 * for each contiguosly allocated struct se_task->task_sg[]. 4425 * for each contiguously allocated struct se_task->task_sg[].
4552 */ 4426 */
4553 list_for_each_entry(task, &cmd->t_task.t_task_list, t_list) { 4427 list_for_each_entry(task, &cmd->t_task_list, t_list) {
4554 if (!(task->task_sg) || !(task->task_padded_sg)) 4428 if (!(task->task_sg) || !(task->task_padded_sg))
4555 continue; 4429 continue;
4556 4430
@@ -4561,7 +4435,7 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
4561 * Either add chain or mark end of scatterlist 4435 * Either add chain or mark end of scatterlist
4562 */ 4436 */
4563 if (!(list_is_last(&task->t_list, 4437 if (!(list_is_last(&task->t_list,
4564 &cmd->t_task.t_task_list))) { 4438 &cmd->t_task_list))) {
4565 /* 4439 /*
4566 * Clear existing SGL termination bit set in 4440 * Clear existing SGL termination bit set in
4567 * transport_init_task_sg(), see sg_mark_end() 4441 * transport_init_task_sg(), see sg_mark_end()
@@ -4587,7 +4461,7 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
4587 /* 4461 /*
4588 * Check for single task.. 4462 * Check for single task..
4589 */ 4463 */
4590 if (!(list_is_last(&task->t_list, &cmd->t_task.t_task_list))) { 4464 if (!(list_is_last(&task->t_list, &cmd->t_task_list))) {
4591 /* 4465 /*
4592 * Clear existing SGL termination bit set in 4466 * Clear existing SGL termination bit set in
4593 * transport_init_task_sg(), see sg_mark_end() 4467 * transport_init_task_sg(), see sg_mark_end()
@@ -4605,15 +4479,15 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
4605 * Setup the starting pointer and total t_tasks_sg_linked_no including 4479 * Setup the starting pointer and total t_tasks_sg_linked_no including
4606 * padding SGs for linking and to mark the end. 4480 * padding SGs for linking and to mark the end.
4607 */ 4481 */
4608 cmd->t_task.t_tasks_sg_chained = sg_first; 4482 cmd->t_tasks_sg_chained = sg_first;
4609 cmd->t_task.t_tasks_sg_chained_no = sg_count; 4483 cmd->t_tasks_sg_chained_no = sg_count;
4610 4484
4611 DEBUG_CMD_M("Setup cmd: %p cmd->t_task.t_tasks_sg_chained: %p and" 4485 DEBUG_CMD_M("Setup cmd: %p cmd->t_tasks_sg_chained: %p and"
4612 " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_task.t_tasks_sg_chained, 4486 " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained,
4613 cmd->t_task.t_tasks_sg_chained_no); 4487 cmd->t_tasks_sg_chained_no);
4614 4488
4615 for_each_sg(cmd->t_task.t_tasks_sg_chained, sg, 4489 for_each_sg(cmd->t_tasks_sg_chained, sg,
4616 cmd->t_task.t_tasks_sg_chained_no, i) { 4490 cmd->t_tasks_sg_chained_no, i) {
4617 4491
4618 DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d\n", 4492 DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d\n",
4619 i, sg, sg_page(sg), sg->length, sg->offset); 4493 i, sg, sg_page(sg), sg->length, sg->offset);
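
transport_do_task_sg_chain() stitches the per-task scatterlist arrays into one logical list: the padding entry at the end of every array but the last becomes a chain link, and the final payload entry gets the end mark. A toy model of walking such a chained list (the real sg_chain()/sg_mark_end() encode link and end in the low bits of the page pointer; the flags here are a simplification):

    #include <stdio.h>
    #include <stddef.h>

    struct sg { int is_link, is_end; struct sg *next_array; };

    int main(void)
    {
        struct sg a[3] = {0}, b[3] = {0};

        a[2].is_link = 1;       /* padding entry: chain a -> b */
        a[2].next_array = b;
        b[1].is_end = 1;        /* last payload entry terminates the list */

        int count = 0;
        for (struct sg *s = a; s; ) {
            if (s->is_link) {   /* hop to the next task's array */
                s = s->next_array;
                continue;
            }
            count++;
            s = s->is_end ? NULL : s + 1;
        }
        printf("walked %d payload entries\n", count);   /* 4 */
        return 0;
    }
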
@@ -4646,7 +4520,7 @@ static int transport_do_se_mem_map(
4646 in_mem, in_se_mem, out_se_mem, se_mem_cnt, 4520 in_mem, in_se_mem, out_se_mem, se_mem_cnt,
4647 task_offset_in); 4521 task_offset_in);
4648 if (ret == 0) 4522 if (ret == 0)
4649 task->task_se_cmd->t_task.t_tasks_se_num += *se_mem_cnt; 4523 task->task_se_cmd->t_tasks_se_num += *se_mem_cnt;
4650 4524
4651 return ret; 4525 return ret;
4652 } 4526 }
@@ -4684,7 +4558,10 @@ static int transport_do_se_mem_map(
4684 task_offset_in); 4558 task_offset_in);
4685} 4559}
4686 4560
4687static u32 transport_generic_get_cdb_count( 4561/*
4562 * Break up cmd into chunks transport can handle
4563 */
4564static u32 transport_allocate_tasks(
4688 struct se_cmd *cmd, 4565 struct se_cmd *cmd,
4689 unsigned long long lba, 4566 unsigned long long lba,
4690 u32 sectors, 4567 u32 sectors,
@@ -4694,17 +4571,18 @@ static u32 transport_generic_get_cdb_count(
4694{ 4571{
4695 unsigned char *cdb = NULL; 4572 unsigned char *cdb = NULL;
4696 struct se_task *task; 4573 struct se_task *task;
4697 struct se_mem *se_mem = NULL, *se_mem_lout = NULL; 4574 struct se_mem *se_mem = NULL;
4698 struct se_mem *se_mem_bidi = NULL, *se_mem_bidi_lout = NULL; 4575 struct se_mem *se_mem_lout = NULL;
4576 struct se_mem *se_mem_bidi = NULL;
4577 struct se_mem *se_mem_bidi_lout = NULL;
4699 struct se_device *dev = cmd->se_dev; 4578 struct se_device *dev = cmd->se_dev;
4700 int max_sectors_set = 0, ret; 4579 int ret;
4701 u32 task_offset_in = 0, se_mem_cnt = 0, se_mem_bidi_cnt = 0, task_cdbs = 0; 4580 u32 task_offset_in = 0;
4581 u32 se_mem_cnt = 0;
4582 u32 se_mem_bidi_cnt = 0;
4583 u32 task_cdbs = 0;
4702 4584
4703 if (!mem_list) { 4585 BUG_ON(!mem_list);
4704 printk(KERN_ERR "mem_list is NULL in transport_generic_get"
4705 "_cdb_count()\n");
4706 return 0;
4707 }
4708 /* 4586 /*
4709 * Using RAMDISK_DR backstores is the only case where 4587 * Using RAMDISK_DR backstores is the only case where
4710 * mem_list will ever be empty at this point. 4588 * mem_list will ever be empty at this point.
@@ -4715,40 +4593,47 @@ static u32 transport_generic_get_cdb_count(
4715 * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to 4593 * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to
4716 * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation 4594 * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation
4717 */ 4595 */
4718 if (!list_empty(&cmd->t_task.t_mem_bidi_list) && 4596 if (!list_empty(&cmd->t_mem_bidi_list) &&
4719 (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) 4597 (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV))
4720 se_mem_bidi = list_first_entry(&cmd->t_task.t_mem_bidi_list, 4598 se_mem_bidi = list_first_entry(&cmd->t_mem_bidi_list,
4721 struct se_mem, se_list); 4599 struct se_mem, se_list);
4722 4600
4723 while (sectors) { 4601 while (sectors) {
4602 sector_t limited_sectors;
4603
4724 DEBUG_VOL("ITT[0x%08x] LBA(%llu) SectorsLeft(%u) EOBJ(%llu)\n", 4604 DEBUG_VOL("ITT[0x%08x] LBA(%llu) SectorsLeft(%u) EOBJ(%llu)\n",
4725 cmd->se_tfo->get_task_tag(cmd), lba, sectors, 4605 cmd->se_tfo->get_task_tag(cmd), lba, sectors,
4726 transport_dev_end_lba(dev)); 4606 transport_dev_end_lba(dev));
4727 4607
4608 limited_sectors = transport_limit_task_sectors(dev, lba, sectors);
4609 if (!limited_sectors)
4610 break;
4611
4728 task = transport_generic_get_task(cmd, data_direction); 4612 task = transport_generic_get_task(cmd, data_direction);
4729 if (!(task)) 4613 if (!task)
4730 goto out; 4614 goto out;
4731 4615
4732 transport_set_tasks_sectors(task, dev, lba, sectors,
4733 &max_sectors_set);
4734
4735 task->task_lba = lba; 4616 task->task_lba = lba;
4617 task->task_sectors = limited_sectors;
4736 lba += task->task_sectors; 4618 lba += task->task_sectors;
4737 sectors -= task->task_sectors; 4619 sectors -= task->task_sectors;
4738 task->task_size = (task->task_sectors * 4620 task->task_size = (task->task_sectors *
4739 dev->se_sub_dev->se_dev_attrib.block_size); 4621 dev->se_sub_dev->se_dev_attrib.block_size);
4740 4622
4741 cdb = dev->transport->get_cdb(task); 4623 cdb = dev->transport->get_cdb(task);
4742 if ((cdb)) { 4624 /* Should be part of task, can't fail */
4743 memcpy(cdb, cmd->t_task.t_task_cdb, 4625 BUG_ON(!cdb);
4744 scsi_command_size(cmd->t_task.t_task_cdb)); 4626
4745 cmd->transport_split_cdb(task->task_lba, 4627 memcpy(cdb, cmd->t_task_cdb,
4746 &task->task_sectors, cdb); 4628 scsi_command_size(cmd->t_task_cdb));
4747 } 4629
4630 /* Update new cdb with updated lba/sectors */
4631 cmd->transport_split_cdb(task->task_lba,
4632 &task->task_sectors, cdb);
4748 4633
4749 /* 4634 /*
4750 * Perform the SE OBJ plugin and/or Transport plugin specific 4635 * Perform the SE OBJ plugin and/or Transport plugin specific
4751 * mapping for cmd->t_task.t_mem_list. And setup the 4636 * mapping for cmd->t_mem_list. And setup the
4752 * task->task_sg and if necessary task->task_sg_bidi 4637 * task->task_sg and if necessary task->task_sg_bidi
4753 */ 4638 */
4754 ret = transport_do_se_mem_map(dev, task, mem_list, 4639 ret = transport_do_se_mem_map(dev, task, mem_list,
@@ -4759,7 +4644,7 @@ static u32 transport_generic_get_cdb_count(
4759 4644
4760 se_mem = se_mem_lout; 4645 se_mem = se_mem_lout;
4761 /* 4646 /*
4762 * Setup the cmd->t_task.t_mem_bidi_list -> task->task_sg_bidi 4647 * Setup the cmd->t_mem_bidi_list -> task->task_sg_bidi
4763 * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI 4648 * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI
4764 * 4649 *
4765 * Note that the first call to transport_do_se_mem_map() above will 4650 * Note that the first call to transport_do_se_mem_map() above will
@@ -4769,7 +4654,7 @@ static u32 transport_generic_get_cdb_count(
4769 */ 4654 */
4770 if (task->task_sg_bidi != NULL) { 4655 if (task->task_sg_bidi != NULL) {
4771 ret = transport_do_se_mem_map(dev, task, 4656 ret = transport_do_se_mem_map(dev, task,
4772 &cmd->t_task.t_mem_bidi_list, NULL, 4657 &cmd->t_mem_bidi_list, NULL,
4773 se_mem_bidi, &se_mem_bidi_lout, &se_mem_bidi_cnt, 4658 se_mem_bidi, &se_mem_bidi_lout, &se_mem_bidi_cnt,
4774 &task_offset_in); 4659 &task_offset_in);
4775 if (ret < 0) 4660 if (ret < 0)
@@ -4781,19 +4666,11 @@ static u32 transport_generic_get_cdb_count(
4781 4666
4782 DEBUG_VOL("Incremented task_cdbs(%u) task->task_sg_num(%u)\n", 4667 DEBUG_VOL("Incremented task_cdbs(%u) task->task_sg_num(%u)\n",
4783 task_cdbs, task->task_sg_num); 4668 task_cdbs, task->task_sg_num);
4784
4785 if (max_sectors_set) {
4786 max_sectors_set = 0;
4787 continue;
4788 }
4789
4790 if (!sectors)
4791 break;
4792 } 4669 }
4793 4670
4794 if (set_counts) { 4671 if (set_counts) {
4795 atomic_inc(&cmd->t_task.t_fe_count); 4672 atomic_inc(&cmd->t_fe_count);
4796 atomic_inc(&cmd->t_task.t_se_count); 4673 atomic_inc(&cmd->t_se_count);
4797 } 4674 }
4798 4675
4799 DEBUG_VOL("ITT[0x%08x] total %s cdbs(%u)\n", 4676 DEBUG_VOL("ITT[0x%08x] total %s cdbs(%u)\n",
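
transport_allocate_tasks() is the renamed transport_generic_get_cdb_count(): split (lba, sectors) into tasks, each clipped by transport_limit_task_sectors(), and loop until the range is exhausted, which also retires the old max_sectors_set/!sectors tail logic. The splitting loop in isolation (limits invented):

    #include <stdio.h>

    typedef unsigned long long u64;

    int main(void)
    {
        u64 lba = 0;
        unsigned int sectors = 300, max_per_task = 128, task_cdbs = 0;

        while (sectors) {
            unsigned int chunk = sectors < max_per_task ?
                                 sectors : max_per_task;

            /* per task: task->task_lba = lba, task->task_sectors = chunk */
            printf("task %u: lba %llu, %u sectors\n",
                   task_cdbs, lba, chunk);
            lba += chunk;
            sectors -= chunk;
            task_cdbs++;
        }
        printf("total tasks: %u\n", task_cdbs); /* 3: 128 + 128 + 44 */
        return 0;
    }
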
@@ -4818,27 +4695,27 @@ transport_map_control_cmd_to_task(struct se_cmd *cmd)
4818 return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; 4695 return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
4819 4696
4820 cdb = dev->transport->get_cdb(task); 4697 cdb = dev->transport->get_cdb(task);
4821 if (cdb) 4698 BUG_ON(!cdb);
4822 memcpy(cdb, cmd->t_task.t_task_cdb, 4699 memcpy(cdb, cmd->t_task_cdb,
4823 scsi_command_size(cmd->t_task.t_task_cdb)); 4700 scsi_command_size(cmd->t_task_cdb));
4824 4701
4825 task->task_size = cmd->data_length; 4702 task->task_size = cmd->data_length;
4826 task->task_sg_num = 4703 task->task_sg_num =
4827 (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) ? 1 : 0; 4704 (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) ? 1 : 0;
4828 4705
4829 atomic_inc(&cmd->t_task.t_fe_count); 4706 atomic_inc(&cmd->t_fe_count);
4830 atomic_inc(&cmd->t_task.t_se_count); 4707 atomic_inc(&cmd->t_se_count);
4831 4708
4832 if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) { 4709 if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) {
4833 struct se_mem *se_mem = NULL, *se_mem_lout = NULL; 4710 struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
4834 u32 se_mem_cnt = 0, task_offset = 0; 4711 u32 se_mem_cnt = 0, task_offset = 0;
4835 4712
4836 if (!list_empty(&cmd->t_task.t_mem_list)) 4713 if (!list_empty(&cmd->t_mem_list))
4837 se_mem = list_first_entry(&cmd->t_task.t_mem_list, 4714 se_mem = list_first_entry(&cmd->t_mem_list,
4838 struct se_mem, se_list); 4715 struct se_mem, se_list);
4839 4716
4840 ret = transport_do_se_mem_map(dev, task, 4717 ret = transport_do_se_mem_map(dev, task,
4841 &cmd->t_task.t_mem_list, NULL, se_mem, 4718 &cmd->t_mem_list, NULL, se_mem,
4842 &se_mem_lout, &se_mem_cnt, &task_offset); 4719 &se_mem_lout, &se_mem_cnt, &task_offset);
4843 if (ret < 0) 4720 if (ret < 0)
4844 return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; 4721 return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
@@ -4869,9 +4746,8 @@ transport_map_control_cmd_to_task(struct se_cmd *cmd)
4869 /* 4746 /*
4870 * Generate struct se_task(s) and/or their payloads for this CDB. 4747 * Generate struct se_task(s) and/or their payloads for this CDB.
4871 */ 4748 */
4872static int transport_generic_new_cmd(struct se_cmd *cmd) 4749int transport_generic_new_cmd(struct se_cmd *cmd)
4873{ 4750{
4874 struct se_portal_group *se_tpg;
4875 struct se_task *task; 4751 struct se_task *task;
4876 struct se_device *dev = cmd->se_dev; 4752 struct se_device *dev = cmd->se_dev;
4877 int ret = 0; 4753 int ret = 0;
@@ -4880,7 +4756,7 @@ static int transport_generic_new_cmd(struct se_cmd *cmd)
4880 * Determine if the TCM fabric module has already allocated physical 4756 * Determine if the TCM fabric module has already allocated physical
4881 * memory, and is directly calling transport_generic_map_mem_to_cmd() 4757 * memory, and is directly calling transport_generic_map_mem_to_cmd()
4882 * to setup beforehand the linked list of physical memory at 4758 * to setup beforehand the linked list of physical memory at
4883 * cmd->t_task.t_mem_list of struct se_mem->se_page 4759 * cmd->t_mem_list of struct se_mem->se_page
4884 */ 4760 */
4885 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) { 4761 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) {
4886 ret = transport_allocate_resources(cmd); 4762 ret = transport_allocate_resources(cmd);
@@ -4888,28 +4764,12 @@ static int transport_generic_new_cmd(struct se_cmd *cmd)
4888 return ret; 4764 return ret;
4889 } 4765 }
4890 4766
4891 ret = transport_get_sectors(cmd);
4892 if (ret < 0)
4893 return ret;
4894
4895 ret = transport_new_cmd_obj(cmd); 4767 ret = transport_new_cmd_obj(cmd);
4896 if (ret < 0) 4768 if (ret < 0)
4897 return ret; 4769 return ret;
4898 4770
4899 /*
4900 * Determine if the calling TCM fabric module is talking to
4901 * Linux/NET via kernel sockets and needs to allocate a
4902 * struct iovec array to complete the struct se_cmd
4903 */
4904 se_tpg = cmd->se_lun->lun_sep->sep_tpg;
4905 if (se_tpg->se_tpg_tfo->alloc_cmd_iovecs != NULL) {
4906 ret = se_tpg->se_tpg_tfo->alloc_cmd_iovecs(cmd);
4907 if (ret < 0)
4908 return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
4909 }
4910
4911 if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { 4771 if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
4912 list_for_each_entry(task, &cmd->t_task.t_task_list, t_list) { 4772 list_for_each_entry(task, &cmd->t_task_list, t_list) {
4913 if (atomic_read(&task->task_sent)) 4773 if (atomic_read(&task->task_sent))
4914 continue; 4774 continue;
4915 if (!dev->transport->map_task_SG) 4775 if (!dev->transport->map_task_SG)
@@ -4926,7 +4786,7 @@ static int transport_generic_new_cmd(struct se_cmd *cmd)
4926 } 4786 }
4927 4787
4928 /* 4788 /*
4929 * For WRITEs, let the iSCSI Target RX Thread know its buffer is ready.. 4789 * For WRITEs, let the fabric know its buffer is ready..
4930 * This WRITE struct se_cmd (and all of its associated struct se_task's) 4790 * This WRITE struct se_cmd (and all of its associated struct se_task's)
4931 * will be added to the struct se_device execution queue after its WRITE 4791 * will be added to the struct se_device execution queue after its WRITE
4932 * data has arrived. (ie: It gets handled by the transport processing 4792 * data has arrived. (ie: It gets handled by the transport processing
@@ -4943,6 +4803,7 @@ static int transport_generic_new_cmd(struct se_cmd *cmd)
4943 transport_execute_tasks(cmd); 4803 transport_execute_tasks(cmd);
4944 return 0; 4804 return 0;
4945} 4805}
4806EXPORT_SYMBOL(transport_generic_new_cmd);
4946 4807
4947/* transport_generic_process_write(): 4808/* transport_generic_process_write():
4948 * 4809 *
@@ -4956,9 +4817,9 @@ void transport_generic_process_write(struct se_cmd *cmd)
4956 * original EDTL 4817 * original EDTL
4957 */ 4818 */
4958 if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { 4819 if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
4959 if (!cmd->t_task.t_tasks_se_num) { 4820 if (!cmd->t_tasks_se_num) {
4960 unsigned char *dst, *buf = 4821 unsigned char *dst, *buf =
4961 (unsigned char *)cmd->t_task.t_task_buf; 4822 (unsigned char *)cmd->t_task_buf;
4962 4823
4963 dst = kzalloc(cmd->cmd_spdtl, GFP_KERNEL); 4824 dst = kzalloc(cmd->cmd_spdtl, GFP_KERNEL);
4964 if (!(dst)) { 4825 if (!(dst)) {
@@ -4970,15 +4831,15 @@ void transport_generic_process_write(struct se_cmd *cmd)
4970 } 4831 }
4971 memcpy(dst, buf, cmd->cmd_spdtl); 4832 memcpy(dst, buf, cmd->cmd_spdtl);
4972 4833
4973 kfree(cmd->t_task.t_task_buf); 4834 kfree(cmd->t_task_buf);
4974 cmd->t_task.t_task_buf = dst; 4835 cmd->t_task_buf = dst;
4975 } else { 4836 } else {
4976 struct scatterlist *sg = 4837 struct scatterlist *sg =
4977 (struct scatterlist *)cmd->t_task.t_task_buf; 4838 (struct scatterlist *)cmd->t_task_buf;
4978 struct scatterlist *orig_sg; 4839 struct scatterlist *orig_sg;
4979 4840
4980 orig_sg = kzalloc(sizeof(struct scatterlist) * 4841 orig_sg = kzalloc(sizeof(struct scatterlist) *
4981 cmd->t_task.t_tasks_se_num, 4842 cmd->t_tasks_se_num,
4982 GFP_KERNEL); 4843 GFP_KERNEL);
4983 if (!(orig_sg)) { 4844 if (!(orig_sg)) {
4984 printk(KERN_ERR "Unable to allocate memory" 4845 printk(KERN_ERR "Unable to allocate memory"
@@ -4988,9 +4849,9 @@ void transport_generic_process_write(struct se_cmd *cmd)
4988 return; 4849 return;
4989 } 4850 }
4990 4851
4991 memcpy(orig_sg, cmd->t_task.t_task_buf, 4852 memcpy(orig_sg, cmd->t_task_buf,
4992 sizeof(struct scatterlist) * 4853 sizeof(struct scatterlist) *
4993 cmd->t_task.t_tasks_se_num); 4854 cmd->t_tasks_se_num);
4994 4855
4995 cmd->data_length = cmd->cmd_spdtl; 4856 cmd->data_length = cmd->cmd_spdtl;
4996 /* 4857 /*
@@ -5021,22 +4882,23 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
5021 unsigned long flags; 4882 unsigned long flags;
5022 int ret; 4883 int ret;
5023 4884
5024 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); 4885 spin_lock_irqsave(&cmd->t_state_lock, flags);
5025 cmd->t_state = TRANSPORT_WRITE_PENDING; 4886 cmd->t_state = TRANSPORT_WRITE_PENDING;
5026 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); 4887 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
5027 /* 4888 /*
5028 * For the TCM control CDBs using a contiguous buffer, do the memcpy 4889 * For the TCM control CDBs using a contiguous buffer, do the memcpy
5029 * from the passed Linux/SCSI struct scatterlist located at 4890 * from the passed Linux/SCSI struct scatterlist located at
5030 * se_cmd->t_task.t_task_pt_buf to the contiguous buffer at 4891 * se_cmd->t_task_pt_sgl to the contiguous buffer at
5031 * se_cmd->t_task.t_task_buf. 4892 * se_cmd->t_task_buf.
5032 */ 4893 */
5033 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG) 4894 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
5034 transport_memcpy_read_contig(cmd, 4895 sg_copy_to_buffer(cmd->t_task_pt_sgl,
5035 cmd->t_task.t_task_buf, 4896 cmd->t_task_pt_sgl_num,
5036 cmd->t_task.t_task_pt_sgl); 4897 cmd->t_task_buf,
4898 cmd->data_length);
5037 /* 4899 /*
5038 * Clear the se_cmd for WRITE_PENDING status in order to set 4900 * Clear the se_cmd for WRITE_PENDING status in order to set
5039 * cmd->t_task.t_transport_active=0 so that transport_generic_handle_data 4901 * cmd->t_transport_active=0 so that transport_generic_handle_data
5040 * can be called from HW target mode interrupt code. This is safe 4902 * can be called from HW target mode interrupt code. This is safe
5041 * to be called with transport_off=1 before the cmd->se_tfo->write_pending 4903 * to be called with transport_off=1 before the cmd->se_tfo->write_pending
5042 * because the se_cmd->se_lun pointer is not being cleared. 4904 * because the se_cmd->se_lun pointer is not being cleared.
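
sg_copy_to_buffer() replaces the driver-private transport_memcpy_read_contig(): gather the scattered segments into one contiguous buffer, never writing more than buflen. A user-space model of that gather (segment contents invented):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *seg[] = { "scat", "tered", " data" };
        size_t seg_len[] = { 4, 5, 5 };
        char buf[16] = "";
        size_t copied = 0, buflen = sizeof(buf) - 1;

        for (int i = 0; i < 3 && copied < buflen; i++) {
            size_t n = seg_len[i];

            if (n > buflen - copied)
                n = buflen - copied;    /* never exceed buflen */
            memcpy(buf + copied, seg[i], n);
            copied += n;
        }
        printf("%zu bytes: \"%s\"\n", copied, buf); /* 14 bytes */
        return 0;
    }
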
@@ -5123,28 +4985,28 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
5123 * If the frontend has already requested this struct se_cmd to 4985 * If the frontend has already requested this struct se_cmd to
5124 * be stopped, we can safely ignore this struct se_cmd. 4986 * be stopped, we can safely ignore this struct se_cmd.
5125 */ 4987 */
5126 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); 4988 spin_lock_irqsave(&cmd->t_state_lock, flags);
5127 if (atomic_read(&cmd->t_task.t_transport_stop)) { 4989 if (atomic_read(&cmd->t_transport_stop)) {
5128 atomic_set(&cmd->t_task.transport_lun_stop, 0); 4990 atomic_set(&cmd->transport_lun_stop, 0);
5129 DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop ==" 4991 DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop =="
5130 " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd)); 4992 " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd));
5131 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); 4993 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
5132 transport_cmd_check_stop(cmd, 1, 0); 4994 transport_cmd_check_stop(cmd, 1, 0);
5133 return -EPERM; 4995 return -EPERM;
5134 } 4996 }
5135 atomic_set(&cmd->t_task.transport_lun_fe_stop, 1); 4997 atomic_set(&cmd->transport_lun_fe_stop, 1);
5136 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); 4998 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
5137 4999
5138 wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq); 5000 wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
5139 5001
5140 ret = transport_stop_tasks_for_cmd(cmd); 5002 ret = transport_stop_tasks_for_cmd(cmd);
5141 5003
5142 DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:" 5004 DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:"
5143 " %d\n", cmd, cmd->t_task.t_task_cdbs, ret); 5005 " %d\n", cmd, cmd->t_task_cdbs, ret);
5144 if (!ret) { 5006 if (!ret) {
5145 DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n", 5007 DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
5146 cmd->se_tfo->get_task_tag(cmd)); 5008 cmd->se_tfo->get_task_tag(cmd));
5147 wait_for_completion(&cmd->t_task.transport_lun_stop_comp); 5009 wait_for_completion(&cmd->transport_lun_stop_comp);
5148 DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n", 5010 DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
5149 cmd->se_tfo->get_task_tag(cmd)); 5011 cmd->se_tfo->get_task_tag(cmd));
5150 } 5012 }
@@ -5174,19 +5036,19 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
5174 struct se_cmd, se_lun_node); 5036 struct se_cmd, se_lun_node);
5175 list_del(&cmd->se_lun_node); 5037 list_del(&cmd->se_lun_node);
5176 5038
5177 atomic_set(&cmd->t_task.transport_lun_active, 0); 5039 atomic_set(&cmd->transport_lun_active, 0);
5178 /* 5040 /*
5179 * This will notify iscsi_target_transport.c: 5041 * This will notify iscsi_target_transport.c:
5180 * transport_cmd_check_stop() that a LUN shutdown is in 5042 * transport_cmd_check_stop() that a LUN shutdown is in
5181 * progress for the iscsi_cmd_t. 5043 * progress for the iscsi_cmd_t.
5182 */ 5044 */
5183 spin_lock(&cmd->t_task.t_state_lock); 5045 spin_lock(&cmd->t_state_lock);
5184 DEBUG_CLEAR_L("SE_LUN[%d] - Setting cmd->t_task.transport" 5046 DEBUG_CLEAR_L("SE_LUN[%d] - Setting cmd->transport"
5185 "_lun_stop for ITT: 0x%08x\n", 5047 "_lun_stop for ITT: 0x%08x\n",
5186 cmd->se_lun->unpacked_lun, 5048 cmd->se_lun->unpacked_lun,
5187 cmd->se_tfo->get_task_tag(cmd)); 5049 cmd->se_tfo->get_task_tag(cmd));
5188 atomic_set(&cmd->t_task.transport_lun_stop, 1); 5050 atomic_set(&cmd->transport_lun_stop, 1);
5189 spin_unlock(&cmd->t_task.t_state_lock); 5051 spin_unlock(&cmd->t_state_lock);
5190 5052
5191 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); 5053 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
5192 5054
@@ -5214,14 +5076,14 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
5214 cmd->se_lun->unpacked_lun, 5076 cmd->se_lun->unpacked_lun,
5215 cmd->se_tfo->get_task_tag(cmd)); 5077 cmd->se_tfo->get_task_tag(cmd));
5216 5078
5217 spin_lock_irqsave(&cmd->t_task.t_state_lock, cmd_flags); 5079 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
5218 if (!(atomic_read(&cmd->t_task.transport_dev_active))) { 5080 if (!(atomic_read(&cmd->transport_dev_active))) {
5219 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, cmd_flags); 5081 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
5220 goto check_cond; 5082 goto check_cond;
5221 } 5083 }
5222 atomic_set(&cmd->t_task.transport_dev_active, 0); 5084 atomic_set(&cmd->transport_dev_active, 0);
5223 transport_all_task_dev_remove_state(cmd); 5085 transport_all_task_dev_remove_state(cmd);
5224 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, cmd_flags); 5086 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
5225 5087
5226 transport_free_dev_tasks(cmd); 5088 transport_free_dev_tasks(cmd);
5227 /* 5089 /*
@@ -5238,24 +5100,24 @@ check_cond:
5238 * be released, notify the waiting thread now that LU has 5100 * be released, notify the waiting thread now that LU has
5239 * finished accessing it. 5101 * finished accessing it.
5240 */ 5102 */
5241 spin_lock_irqsave(&cmd->t_task.t_state_lock, cmd_flags); 5103 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
5242 if (atomic_read(&cmd->t_task.transport_lun_fe_stop)) { 5104 if (atomic_read(&cmd->transport_lun_fe_stop)) {
5243 DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for" 5105 DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for"
5244 " struct se_cmd: %p ITT: 0x%08x\n", 5106 " struct se_cmd: %p ITT: 0x%08x\n",
5245 lun->unpacked_lun, 5107 lun->unpacked_lun,
5246 cmd, cmd->se_tfo->get_task_tag(cmd)); 5108 cmd, cmd->se_tfo->get_task_tag(cmd));
5247 5109
5248 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, 5110 spin_unlock_irqrestore(&cmd->t_state_lock,
5249 cmd_flags); 5111 cmd_flags);
5250 transport_cmd_check_stop(cmd, 1, 0); 5112 transport_cmd_check_stop(cmd, 1, 0);
5251 complete(&cmd->t_task.transport_lun_fe_stop_comp); 5113 complete(&cmd->transport_lun_fe_stop_comp);
5252 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); 5114 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
5253 continue; 5115 continue;
5254 } 5116 }
5255 DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n", 5117 DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
5256 lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd)); 5118 lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));
5257 5119
5258 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, cmd_flags); 5120 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
5259 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); 5121 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
5260 } 5122 }
5261 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); 5123 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
@@ -5301,15 +5163,15 @@ static void transport_generic_wait_for_tasks(
5301 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) 5163 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req))
5302 return; 5164 return;
5303 5165
5304 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); 5166 spin_lock_irqsave(&cmd->t_state_lock, flags);
5305 /* 5167 /*
5306 * If we are already stopped due to an external event (ie: LUN shutdown) 5168 * If we are already stopped due to an external event (ie: LUN shutdown)
5307 * sleep until the connection can have the passed struct se_cmd back. 5169 * sleep until the connection can have the passed struct se_cmd back.
5308 * The cmd->t_task.transport_lun_stopped_sem will be upped by 5170 * The cmd->transport_lun_stopped_sem will be upped by
5309 * transport_clear_lun_from_sessions() once the ConfigFS context caller 5171 * transport_clear_lun_from_sessions() once the ConfigFS context caller
5310 * has completed its operation on the struct se_cmd. 5172 * has completed its operation on the struct se_cmd.
5311 */ 5173 */
5312 if (atomic_read(&cmd->t_task.transport_lun_stop)) { 5174 if (atomic_read(&cmd->transport_lun_stop)) {
5313 5175
5314 DEBUG_TRANSPORT_S("wait_for_tasks: Stopping" 5176 DEBUG_TRANSPORT_S("wait_for_tasks: Stopping"
5315 " wait_for_completion(&cmd->t_tasktransport_lun_fe" 5177 " wait_for_completion(&cmd->transport_lun_fe"
@@ -5322,10 +5184,10 @@ static void transport_generic_wait_for_tasks(
5322 * We go ahead and up transport_lun_stop_comp just to be sure 5184 * We go ahead and up transport_lun_stop_comp just to be sure
5323 * here. 5185 * here.
5324 */ 5186 */
5325 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); 5187 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
5326 complete(&cmd->t_task.transport_lun_stop_comp); 5188 complete(&cmd->transport_lun_stop_comp);
5327 wait_for_completion(&cmd->t_task.transport_lun_fe_stop_comp); 5189 wait_for_completion(&cmd->transport_lun_fe_stop_comp);
5328 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); 5190 spin_lock_irqsave(&cmd->t_state_lock, flags);
5329 5191
5330 transport_all_task_dev_remove_state(cmd); 5192 transport_all_task_dev_remove_state(cmd);
5331 /* 5193 /*
@@ -5338,13 +5200,13 @@ static void transport_generic_wait_for_tasks(
5338 "stop_comp); for ITT: 0x%08x\n", 5200 "stop_comp); for ITT: 0x%08x\n",
5339 cmd->se_tfo->get_task_tag(cmd)); 5201 cmd->se_tfo->get_task_tag(cmd));
5340 5202
5341 atomic_set(&cmd->t_task.transport_lun_stop, 0); 5203 atomic_set(&cmd->transport_lun_stop, 0);
5342 } 5204 }
5343 if (!atomic_read(&cmd->t_task.t_transport_active) || 5205 if (!atomic_read(&cmd->t_transport_active) ||
5344 atomic_read(&cmd->t_task.t_transport_aborted)) 5206 atomic_read(&cmd->t_transport_aborted))
5345 goto remove; 5207 goto remove;
5346 5208
5347 atomic_set(&cmd->t_task.t_transport_stop, 1); 5209 atomic_set(&cmd->t_transport_stop, 1);
5348 5210
5349 DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x" 5211 DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x"
5350 " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop" 5212 " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop"
@@ -5352,21 +5214,21 @@ static void transport_generic_wait_for_tasks(
5352 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, 5214 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
5353 cmd->deferred_t_state); 5215 cmd->deferred_t_state);
5354 5216
5355 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); 5217 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
5356 5218
5357 wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq); 5219 wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
5358 5220
5359 wait_for_completion(&cmd->t_task.t_transport_stop_comp); 5221 wait_for_completion(&cmd->t_transport_stop_comp);
5360 5222
5361 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); 5223 spin_lock_irqsave(&cmd->t_state_lock, flags);
5362 atomic_set(&cmd->t_task.t_transport_active, 0); 5224 atomic_set(&cmd->t_transport_active, 0);
5363 atomic_set(&cmd->t_task.t_transport_stop, 0); 5225 atomic_set(&cmd->t_transport_stop, 0);
5364 5226
5365 DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_completion(" 5227 DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_completion("
5366 "&cmd->t_task.t_transport_stop_comp) for ITT: 0x%08x\n", 5228 "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
5367 cmd->se_tfo->get_task_tag(cmd)); 5229 cmd->se_tfo->get_task_tag(cmd));
5368remove: 5230remove:
5369 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); 5231 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
5370 if (!remove_cmd) 5232 if (!remove_cmd)
5371 return; 5233 return;
5372 5234
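
The stop/wait protocol above is a completion handshake: raise t_transport_stop under t_state_lock, drop the lock, block on t_transport_stop_comp, then clear the flags once woken. A user-space sketch of the same shape, with a mutex and condition variable standing in for the spinlock and struct completion:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t comp = PTHREAD_COND_INITIALIZER;
    static int stop, done;

    static void *worker(void *arg)
    {
        pthread_mutex_lock(&lock);
        if (stop) {                 /* observed the stop request */
            done = 1;               /* complete(&...stop_comp) */
            pthread_cond_signal(&comp);
        }
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_mutex_lock(&lock);
        stop = 1;                   /* atomic_set(&t_transport_stop, 1) */
        pthread_mutex_unlock(&lock);

        pthread_create(&t, NULL, worker, NULL);

        pthread_mutex_lock(&lock);
        while (!done)               /* wait_for_completion() */
            pthread_cond_wait(&comp, &lock);
        stop = 0;                   /* flags cleared after the wait */
        pthread_mutex_unlock(&lock);

        pthread_join(t, NULL);
        printf("stopped cleanly\n");
        return 0;
    }
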
@@ -5405,13 +5267,13 @@ int transport_send_check_condition_and_sense(
5405 int offset; 5267 int offset;
5406 u8 asc = 0, ascq = 0; 5268 u8 asc = 0, ascq = 0;
5407 5269
5408 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); 5270 spin_lock_irqsave(&cmd->t_state_lock, flags);
5409 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { 5271 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
5410 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); 5272 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
5411 return 0; 5273 return 0;
5412 } 5274 }
5413 cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; 5275 cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
5414 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); 5276 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
5415 5277
5416 if (!reason && from_transport) 5278 if (!reason && from_transport)
5417 goto after_reason; 5279 goto after_reason;
@@ -5570,14 +5432,14 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
5570{ 5432{
5571 int ret = 0; 5433 int ret = 0;
5572 5434
5573 if (atomic_read(&cmd->t_task.t_transport_aborted) != 0) { 5435 if (atomic_read(&cmd->t_transport_aborted) != 0) {
5574 if (!(send_status) || 5436 if (!(send_status) ||
5575 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) 5437 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
5576 return 1; 5438 return 1;
5577#if 0 5439#if 0
5578 printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED" 5440 printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED"
5579 " status for CDB: 0x%02x ITT: 0x%08x\n", 5441 " status for CDB: 0x%02x ITT: 0x%08x\n",
5580 cmd->t_task.t_task_cdb[0], 5442 cmd->t_task_cdb[0],
5581 cmd->se_tfo->get_task_tag(cmd)); 5443 cmd->se_tfo->get_task_tag(cmd));
5582#endif 5444#endif
5583 cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS; 5445 cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
@@ -5598,7 +5460,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
5598 */ 5460 */
5599 if (cmd->data_direction == DMA_TO_DEVICE) { 5461 if (cmd->data_direction == DMA_TO_DEVICE) {
5600 if (cmd->se_tfo->write_pending_status(cmd) != 0) { 5462 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
5601 atomic_inc(&cmd->t_task.t_transport_aborted); 5463 atomic_inc(&cmd->t_transport_aborted);
5602 smp_mb__after_atomic_inc(); 5464 smp_mb__after_atomic_inc();
5603 cmd->scsi_status = SAM_STAT_TASK_ABORTED; 5465 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
5604 transport_new_cmd_failure(cmd); 5466 transport_new_cmd_failure(cmd);
@@ -5608,7 +5470,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
5608 cmd->scsi_status = SAM_STAT_TASK_ABORTED; 5470 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
5609#if 0 5471#if 0
5610 printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x," 5472 printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
5611 " ITT: 0x%08x\n", cmd->t_task.t_task_cdb[0], 5473 " ITT: 0x%08x\n", cmd->t_task_cdb[0],
5612 cmd->se_tfo->get_task_tag(cmd)); 5474 cmd->se_tfo->get_task_tag(cmd));
5613#endif 5475#endif
5614 cmd->se_tfo->queue_status(cmd); 5476 cmd->se_tfo->queue_status(cmd);
@@ -5620,14 +5482,12 @@ void transport_send_task_abort(struct se_cmd *cmd)
5620 */ 5482 */
5621int transport_generic_do_tmr(struct se_cmd *cmd) 5483int transport_generic_do_tmr(struct se_cmd *cmd)
5622{ 5484{
5623 struct se_cmd *ref_cmd;
5624 struct se_device *dev = cmd->se_dev; 5485 struct se_device *dev = cmd->se_dev;
5625 struct se_tmr_req *tmr = cmd->se_tmr_req; 5486 struct se_tmr_req *tmr = cmd->se_tmr_req;
5626 int ret; 5487 int ret;
5627 5488
5628 switch (tmr->function) { 5489 switch (tmr->function) {
5629 case TMR_ABORT_TASK: 5490 case TMR_ABORT_TASK:
5630 ref_cmd = tmr->ref_cmd;
5631 tmr->response = TMR_FUNCTION_REJECTED; 5491 tmr->response = TMR_FUNCTION_REJECTED;
5632 break; 5492 break;
5633 case TMR_ABORT_TASK_SET: 5493 case TMR_ABORT_TASK_SET:
@@ -5699,7 +5559,7 @@ static void transport_processing_shutdown(struct se_device *dev)
5699 5559
5700 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 5560 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
5701 5561
5702 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); 5562 spin_lock_irqsave(&cmd->t_state_lock, flags);
5703 5563
5704 DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x," 5564 DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x,"
5705 " i_state/def_i_state: %d/%d, t_state/def_t_state:" 5565 " i_state/def_i_state: %d/%d, t_state/def_t_state:"
@@ -5707,22 +5567,22 @@ static void transport_processing_shutdown(struct se_device *dev)
5707 cmd->se_tfo->get_task_tag(cmd), cmd->cmd_sn, 5567 cmd->se_tfo->get_task_tag(cmd), cmd->cmd_sn,
5708 cmd->se_tfo->get_cmd_state(cmd), cmd->deferred_i_state, 5568 cmd->se_tfo->get_cmd_state(cmd), cmd->deferred_i_state,
5709 cmd->t_state, cmd->deferred_t_state, 5569 cmd->t_state, cmd->deferred_t_state,
5710 cmd->t_task.t_task_cdb[0]); 5570 cmd->t_task_cdb[0]);
5711 DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:" 5571 DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:"
5712 " %d t_task_cdbs_sent: %d -- t_transport_active: %d" 5572 " %d t_task_cdbs_sent: %d -- t_transport_active: %d"
5713 " t_transport_stop: %d t_transport_sent: %d\n", 5573 " t_transport_stop: %d t_transport_sent: %d\n",
5714 cmd->se_tfo->get_task_tag(cmd), 5574 cmd->se_tfo->get_task_tag(cmd),
5715 cmd->t_task.t_task_cdbs, 5575 cmd->t_task_cdbs,
5716 atomic_read(&cmd->t_task.t_task_cdbs_left), 5576 atomic_read(&cmd->t_task_cdbs_left),
5717 atomic_read(&cmd->t_task.t_task_cdbs_sent), 5577 atomic_read(&cmd->t_task_cdbs_sent),
5718 atomic_read(&cmd->t_task.t_transport_active), 5578 atomic_read(&cmd->t_transport_active),
5719 atomic_read(&cmd->t_task.t_transport_stop), 5579 atomic_read(&cmd->t_transport_stop),
5720 atomic_read(&cmd->t_task.t_transport_sent)); 5580 atomic_read(&cmd->t_transport_sent));
5721 5581
5722 if (atomic_read(&task->task_active)) { 5582 if (atomic_read(&task->task_active)) {
5723 atomic_set(&task->task_stop, 1); 5583 atomic_set(&task->task_stop, 1);
5724 spin_unlock_irqrestore( 5584 spin_unlock_irqrestore(
5725 &cmd->t_task.t_state_lock, flags); 5585 &cmd->t_state_lock, flags);
5726 5586
5727 DEBUG_DO("Waiting for task: %p to shutdown for dev:" 5587 DEBUG_DO("Waiting for task: %p to shutdown for dev:"
5728 " %p\n", task, dev); 5588 " %p\n", task, dev);
@@ -5730,8 +5590,8 @@ static void transport_processing_shutdown(struct se_device *dev)
5730 DEBUG_DO("Completed task: %p shutdown for dev: %p\n", 5590 DEBUG_DO("Completed task: %p shutdown for dev: %p\n",
5731 task, dev); 5591 task, dev);
5732 5592
5733 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); 5593 spin_lock_irqsave(&cmd->t_state_lock, flags);
5734 atomic_dec(&cmd->t_task.t_task_cdbs_left); 5594 atomic_dec(&cmd->t_task_cdbs_left);
5735 5595
5736 atomic_set(&task->task_active, 0); 5596 atomic_set(&task->task_active, 0);
5737 atomic_set(&task->task_stop, 0); 5597 atomic_set(&task->task_stop, 0);
@@ -5741,25 +5601,25 @@ static void transport_processing_shutdown(struct se_device *dev)
5741 } 5601 }
5742 __transport_stop_task_timer(task, &flags); 5602 __transport_stop_task_timer(task, &flags);
5743 5603
5744 if (!(atomic_dec_and_test(&cmd->t_task.t_task_cdbs_ex_left))) { 5604 if (!(atomic_dec_and_test(&cmd->t_task_cdbs_ex_left))) {
5745 spin_unlock_irqrestore( 5605 spin_unlock_irqrestore(
5746 &cmd->t_task.t_state_lock, flags); 5606 &cmd->t_state_lock, flags);
5747 5607
5748 DEBUG_DO("Skipping task: %p, dev: %p for" 5608 DEBUG_DO("Skipping task: %p, dev: %p for"
5749 " t_task_cdbs_ex_left: %d\n", task, dev, 5609 " t_task_cdbs_ex_left: %d\n", task, dev,
5750 atomic_read(&cmd->t_task.t_task_cdbs_ex_left)); 5610 atomic_read(&cmd->t_task_cdbs_ex_left));
5751 5611
5752 spin_lock_irqsave(&dev->execute_task_lock, flags); 5612 spin_lock_irqsave(&dev->execute_task_lock, flags);
5753 continue; 5613 continue;
5754 } 5614 }
5755 5615
5756 if (atomic_read(&cmd->t_task.t_transport_active)) { 5616 if (atomic_read(&cmd->t_transport_active)) {
5757 DEBUG_DO("got t_transport_active = 1 for task: %p, dev:" 5617 DEBUG_DO("got t_transport_active = 1 for task: %p, dev:"
5758 " %p\n", task, dev); 5618 " %p\n", task, dev);
5759 5619
5760 if (atomic_read(&cmd->t_task.t_fe_count)) { 5620 if (atomic_read(&cmd->t_fe_count)) {
5761 spin_unlock_irqrestore( 5621 spin_unlock_irqrestore(
5762 &cmd->t_task.t_state_lock, flags); 5622 &cmd->t_state_lock, flags);
5763 transport_send_check_condition_and_sense( 5623 transport_send_check_condition_and_sense(
5764 cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 5624 cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE,
5765 0); 5625 0);
@@ -5770,7 +5630,7 @@ static void transport_processing_shutdown(struct se_device *dev)
5770 transport_cmd_check_stop(cmd, 1, 0); 5630 transport_cmd_check_stop(cmd, 1, 0);
5771 } else { 5631 } else {
5772 spin_unlock_irqrestore( 5632 spin_unlock_irqrestore(
5773 &cmd->t_task.t_state_lock, flags); 5633 &cmd->t_state_lock, flags);
5774 5634
5775 transport_remove_cmd_from_queue(cmd, 5635 transport_remove_cmd_from_queue(cmd,
5776 &cmd->se_dev->dev_queue_obj); 5636 &cmd->se_dev->dev_queue_obj);
@@ -5787,9 +5647,9 @@ static void transport_processing_shutdown(struct se_device *dev)
5787 DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n", 5647 DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n",
5788 task, dev); 5648 task, dev);
5789 5649
5790 if (atomic_read(&cmd->t_task.t_fe_count)) { 5650 if (atomic_read(&cmd->t_fe_count)) {
5791 spin_unlock_irqrestore( 5651 spin_unlock_irqrestore(
5792 &cmd->t_task.t_state_lock, flags); 5652 &cmd->t_state_lock, flags);
5793 transport_send_check_condition_and_sense(cmd, 5653 transport_send_check_condition_and_sense(cmd,
5794 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); 5654 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
5795 transport_remove_cmd_from_queue(cmd, 5655 transport_remove_cmd_from_queue(cmd,
@@ -5799,7 +5659,7 @@ static void transport_processing_shutdown(struct se_device *dev)
5799 transport_cmd_check_stop(cmd, 1, 0); 5659 transport_cmd_check_stop(cmd, 1, 0);
5800 } else { 5660 } else {
5801 spin_unlock_irqrestore( 5661 spin_unlock_irqrestore(
5802 &cmd->t_task.t_state_lock, flags); 5662 &cmd->t_state_lock, flags);
5803 5663
5804 transport_remove_cmd_from_queue(cmd, 5664 transport_remove_cmd_from_queue(cmd,
5805 &cmd->se_dev->dev_queue_obj); 5665 &cmd->se_dev->dev_queue_obj);
@@ -5820,7 +5680,7 @@ static void transport_processing_shutdown(struct se_device *dev)
5820 DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n", 5680 DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n",
5821 cmd, cmd->t_state); 5681 cmd, cmd->t_state);
5822 5682
5823 if (atomic_read(&cmd->t_task.t_fe_count)) { 5683 if (atomic_read(&cmd->t_fe_count)) {
5824 transport_send_check_condition_and_sense(cmd, 5684 transport_send_check_condition_and_sense(cmd,
5825 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); 5685 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
5826 5686
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index 3b8b02cf4b41..d28e9c4a1c99 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -270,7 +270,7 @@ void core_scsi3_ua_for_check_condition(
270 nacl->se_tpg->se_tpg_tfo->get_fabric_name(), 270 nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
271 (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" : 271 (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
272 "Releasing", dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl, 272 "Releasing", dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl,
273 cmd->orig_fe_lun, cmd->t_task.t_task_cdb[0], *asc, *ascq); 273 cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq);
274} 274}
275 275
276int core_scsi3_ua_clear_for_request_sense( 276int core_scsi3_ua_clear_for_request_sense(
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 6d9553bbba30..910306ce48de 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -60,7 +60,6 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
60 struct fc_seq *sp; 60 struct fc_seq *sp;
61 struct se_cmd *se_cmd; 61 struct se_cmd *se_cmd;
62 struct se_mem *mem; 62 struct se_mem *mem;
63 struct se_transport_task *task;
64 63
65 if (!(ft_debug_logging & FT_DEBUG_IO)) 64 if (!(ft_debug_logging & FT_DEBUG_IO))
66 return; 65 return;
@@ -72,12 +71,11 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
72 caller, cmd, cmd->cdb); 71 caller, cmd, cmd->cdb);
73 printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun); 72 printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun);
74 73
75 task = &se_cmd->t_task; 74 printk(KERN_INFO "%s: cmd %p se_num %u buf %p len %u se_cmd_flags <0x%x>\n",
76 printk(KERN_INFO "%s: cmd %p task %p se_num %u buf %p len %u se_cmd_flags <0x%x>\n", 75 caller, cmd, se_cmd->t_tasks_se_num,
77 caller, cmd, task, task->t_tasks_se_num, 76 se_cmd->t_task_buf, se_cmd->data_length, se_cmd->se_cmd_flags);
78 task->t_task_buf, se_cmd->data_length, se_cmd->se_cmd_flags);
79 77
80 list_for_each_entry(mem, &task->t_mem_list, se_list) 78 list_for_each_entry(mem, &se_cmd->t_mem_list, se_list)
81 printk(KERN_INFO "%s: cmd %p mem %p page %p " 79 printk(KERN_INFO "%s: cmd %p mem %p page %p "
82 "len 0x%x off 0x%x\n", 80 "len 0x%x off 0x%x\n",
83 caller, cmd, mem, 81 caller, cmd, mem,
@@ -262,9 +260,9 @@ int ft_write_pending(struct se_cmd *se_cmd)
262 * TCM/LIO target 260 * TCM/LIO target
263 */ 261 */
264 transport_do_task_sg_chain(se_cmd); 262 transport_do_task_sg_chain(se_cmd);
265 cmd->sg = se_cmd->t_task.t_tasks_sg_chained; 263 cmd->sg = se_cmd->t_tasks_sg_chained;
266 cmd->sg_cnt = 264 cmd->sg_cnt =
267 se_cmd->t_task.t_tasks_sg_chained_no; 265 se_cmd->t_tasks_sg_chained_no;
268 } 266 }
269 if (cmd->sg && lport->tt.ddp_setup(lport, ep->xid, 267 if (cmd->sg && lport->tt.ddp_setup(lport, ep->xid,
270 cmd->sg, cmd->sg_cnt)) 268 cmd->sg, cmd->sg_cnt))
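After the conversion, ft_dump_cmd no longer needs a local struct se_transport_task pointer: the se_mem list hangs directly off the command. A condensed sketch of the resulting walk; se_len and se_off are taken from the tfc_io.c hunks below, while se_page is assumed from the struct se_mem layout of this era:

	struct se_mem *mem;

	/* t_mem_list is now embedded in struct se_cmd itself */
	list_for_each_entry(mem, &se_cmd->t_mem_list, se_list)
		printk(KERN_INFO "mem %p page %p len 0x%x off 0x%x\n",
			mem, mem->se_page, mem->se_len, mem->se_off);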
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index f18af6e99b83..8560182f0dad 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -65,7 +65,6 @@
65int ft_queue_data_in(struct se_cmd *se_cmd) 65int ft_queue_data_in(struct se_cmd *se_cmd)
66{ 66{
67 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); 67 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
68 struct se_transport_task *task;
69 struct fc_frame *fp = NULL; 68 struct fc_frame *fp = NULL;
70 struct fc_exch *ep; 69 struct fc_exch *ep;
71 struct fc_lport *lport; 70 struct fc_lport *lport;
@@ -90,14 +89,13 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
90 lport = ep->lp; 89 lport = ep->lp;
91 cmd->seq = lport->tt.seq_start_next(cmd->seq); 90 cmd->seq = lport->tt.seq_start_next(cmd->seq);
92 91
93 task = &se_cmd->t_task;
94 remaining = se_cmd->data_length; 92 remaining = se_cmd->data_length;
95 93
96 /* 94 /*
97 * Setup to use first mem list entry if any. 95 * Setup to use first mem list entry if any.
98 */ 96 */
99 if (task->t_tasks_se_num) { 97 if (se_cmd->t_tasks_se_num) {
100 mem = list_first_entry(&task->t_mem_list, 98 mem = list_first_entry(&se_cmd->t_mem_list,
101 struct se_mem, se_list); 99 struct se_mem, se_list);
102 mem_len = mem->se_len; 100 mem_len = mem->se_len;
103 mem_off = mem->se_off; 101 mem_off = mem->se_off;
@@ -148,8 +146,8 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
148 146
149 if (use_sg) { 147 if (use_sg) {
150 if (!mem) { 148 if (!mem) {
151 BUG_ON(!task->t_task_buf); 149 BUG_ON(!se_cmd->t_task_buf);
152 page_addr = task->t_task_buf + mem_off; 150 page_addr = se_cmd->t_task_buf + mem_off;
153 /* 151 /*
154 * In this case, offset is 'offset_in_page' of 152 * In this case, offset is 'offset_in_page' of
155 * (t_task_buf + mem_off) instead of 'mem_off'. 153 * (t_task_buf + mem_off) instead of 'mem_off'.
@@ -180,7 +178,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
180 kunmap_atomic(page_addr, KM_SOFTIRQ0); 178 kunmap_atomic(page_addr, KM_SOFTIRQ0);
181 to += tlen; 179 to += tlen;
182 } else { 180 } else {
183 from = task->t_task_buf + mem_off; 181 from = se_cmd->t_task_buf + mem_off;
184 memcpy(to, from, tlen); 182 memcpy(to, from, tlen);
185 to += tlen; 183 to += tlen;
186 } 184 }
@@ -220,7 +218,6 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
220 struct fc_seq *seq = cmd->seq; 218 struct fc_seq *seq = cmd->seq;
221 struct fc_exch *ep; 219 struct fc_exch *ep;
222 struct fc_lport *lport; 220 struct fc_lport *lport;
223 struct se_transport_task *task;
224 struct fc_frame_header *fh; 221 struct fc_frame_header *fh;
225 struct se_mem *mem; 222 struct se_mem *mem;
226 u32 mem_off; 223 u32 mem_off;
@@ -235,8 +232,6 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
235 u32 f_ctl; 232 u32 f_ctl;
236 void *buf; 233 void *buf;
237 234
238 task = &se_cmd->t_task;
239
240 fh = fc_frame_header_get(fp); 235 fh = fc_frame_header_get(fp);
241 if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF)) 236 if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF))
242 goto drop; 237 goto drop;
@@ -312,8 +307,8 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
312 /* 307 /*
313 * Setup to use first mem list entry if any. 308 * Setup to use first mem list entry if any.
314 */ 309 */
315 if (task->t_tasks_se_num) { 310 if (se_cmd->t_tasks_se_num) {
316 mem = list_first_entry(&task->t_mem_list, 311 mem = list_first_entry(&se_cmd->t_mem_list,
317 struct se_mem, se_list); 312 struct se_mem, se_list);
318 mem_len = mem->se_len; 313 mem_len = mem->se_len;
319 mem_off = mem->se_off; 314 mem_off = mem->se_off;
@@ -355,7 +350,7 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
355 memcpy(to, from, tlen); 350 memcpy(to, from, tlen);
356 kunmap_atomic(page_addr, KM_SOFTIRQ0); 351 kunmap_atomic(page_addr, KM_SOFTIRQ0);
357 } else { 352 } else {
358 to = task->t_task_buf + mem_off; 353 to = se_cmd->t_task_buf + mem_off;
359 memcpy(to, from, tlen); 354 memcpy(to, from, tlen);
360 } 355 }
361 from += tlen; 356 from += tlen;
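Both the data-in and write-data paths above share the same setup step: if the command carries an se_mem list, start from its first entry; otherwise fall back to the contiguous t_task_buf. A condensed sketch of that idiom as it reads after the rename (variable declarations added for self-containment):

	u32 mem_len = 0, mem_off = 0;
	struct se_mem *mem = NULL;

	if (se_cmd->t_tasks_se_num) {
		/* Scatter/gather case: walk the per-command se_mem list */
		mem = list_first_entry(&se_cmd->t_mem_list,
				       struct se_mem, se_list);
		mem_len = mem->se_len;
		mem_off = mem->se_off;
	} else {
		/* Linear case: data lives in the contiguous bounce buffer */
		BUG_ON(!se_cmd->t_task_buf);
	}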
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 94c838dcfc3c..71c96ce9287e 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -403,64 +403,10 @@ struct se_queue_obj {
403 wait_queue_head_t thread_wq; 403 wait_queue_head_t thread_wq;
404} ____cacheline_aligned; 404} ____cacheline_aligned;
405 405
406/*
407 * Used one per struct se_cmd to hold all extra struct se_task
408 * metadata. This structure is setup and allocated in
409 * drivers/target/target_core_transport.c:__transport_alloc_se_cmd()
410 */
411struct se_transport_task {
412 unsigned char *t_task_cdb;
413 unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
414 unsigned long long t_task_lba;
415 int t_tasks_failed;
416 int t_tasks_fua;
417 bool t_tasks_bidi;
418 u32 t_task_cdbs;
419 u32 t_tasks_check;
420 u32 t_tasks_no;
421 u32 t_tasks_sectors;
422 u32 t_tasks_se_num;
423 u32 t_tasks_se_bidi_num;
424 u32 t_tasks_sg_chained_no;
425 atomic_t t_fe_count;
426 atomic_t t_se_count;
427 atomic_t t_task_cdbs_left;
428 atomic_t t_task_cdbs_ex_left;
429 atomic_t t_task_cdbs_timeout_left;
430 atomic_t t_task_cdbs_sent;
431 atomic_t t_transport_aborted;
432 atomic_t t_transport_active;
433 atomic_t t_transport_complete;
434 atomic_t t_transport_queue_active;
435 atomic_t t_transport_sent;
436 atomic_t t_transport_stop;
437 atomic_t t_transport_timeout;
438 atomic_t transport_dev_active;
439 atomic_t transport_lun_active;
440 atomic_t transport_lun_fe_stop;
441 atomic_t transport_lun_stop;
442 spinlock_t t_state_lock;
443 struct completion t_transport_stop_comp;
444 struct completion transport_lun_fe_stop_comp;
445 struct completion transport_lun_stop_comp;
446 struct scatterlist *t_tasks_sg_chained;
447 struct scatterlist t_tasks_sg_bounce;
448 void *t_task_buf;
449 /*
450 * Used for pre-registered fabric SGL passthrough WRITE and READ
451 * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop
452 * and other HW target mode fabric modules.
453 */
454 struct scatterlist *t_task_pt_sgl;
455 struct list_head t_mem_list;
456 /* Used for BIDI READ */
457 struct list_head t_mem_bidi_list;
458 struct list_head t_task_list;
459} ____cacheline_aligned;
460
461struct se_task { 406struct se_task {
462 unsigned char task_sense; 407 unsigned char task_sense;
463 struct scatterlist *task_sg; 408 struct scatterlist *task_sg;
409 u32 task_sg_num;
464 struct scatterlist *task_sg_bidi; 410 struct scatterlist *task_sg_bidi;
465 u8 task_scsi_status; 411 u8 task_scsi_status;
466 u8 task_flags; 412 u8 task_flags;
@@ -471,8 +417,6 @@ struct se_task {
471 u32 task_no; 417 u32 task_no;
472 u32 task_sectors; 418 u32 task_sectors;
473 u32 task_size; 419 u32 task_size;
474 u32 task_sg_num;
475 u32 task_sg_offset;
476 enum dma_data_direction task_data_direction; 420 enum dma_data_direction task_data_direction;
477 struct se_cmd *task_se_cmd; 421 struct se_cmd *task_se_cmd;
478 struct se_device *se_dev; 422 struct se_device *se_dev;
@@ -534,13 +478,58 @@ struct se_cmd {
534 /* Only used for internal passthrough and legacy TCM fabric modules */ 478 /* Only used for internal passthrough and legacy TCM fabric modules */
535 struct se_session *se_sess; 479 struct se_session *se_sess;
536 struct se_tmr_req *se_tmr_req; 480 struct se_tmr_req *se_tmr_req;
537 struct se_transport_task t_task;
538 struct list_head se_queue_node; 481 struct list_head se_queue_node;
539 struct target_core_fabric_ops *se_tfo; 482 struct target_core_fabric_ops *se_tfo;
540 int (*transport_emulate_cdb)(struct se_cmd *); 483 int (*transport_emulate_cdb)(struct se_cmd *);
541 void (*transport_split_cdb)(unsigned long long, u32 *, unsigned char *); 484 void (*transport_split_cdb)(unsigned long long, u32 *, unsigned char *);
542 void (*transport_wait_for_tasks)(struct se_cmd *, int, int); 485 void (*transport_wait_for_tasks)(struct se_cmd *, int, int);
543 void (*transport_complete_callback)(struct se_cmd *); 486 void (*transport_complete_callback)(struct se_cmd *);
487 unsigned char *t_task_cdb;
488 unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
489 unsigned long long t_task_lba;
490 int t_tasks_failed;
491 int t_tasks_fua;
492 bool t_tasks_bidi;
493 u32 t_tasks_se_num;
494 u32 t_tasks_se_bidi_num;
495 u32 t_tasks_sg_chained_no;
496 atomic_t t_fe_count;
497 atomic_t t_se_count;
498 atomic_t t_task_cdbs_left;
499 atomic_t t_task_cdbs_ex_left;
500 atomic_t t_task_cdbs_timeout_left;
501 atomic_t t_task_cdbs_sent;
502 atomic_t t_transport_aborted;
503 atomic_t t_transport_active;
504 atomic_t t_transport_complete;
505 atomic_t t_transport_queue_active;
506 atomic_t t_transport_sent;
507 atomic_t t_transport_stop;
508 atomic_t t_transport_timeout;
509 atomic_t transport_dev_active;
510 atomic_t transport_lun_active;
511 atomic_t transport_lun_fe_stop;
512 atomic_t transport_lun_stop;
513 spinlock_t t_state_lock;
514 struct completion t_transport_stop_comp;
515 struct completion transport_lun_fe_stop_comp;
516 struct completion transport_lun_stop_comp;
517 struct scatterlist *t_tasks_sg_chained;
518 struct scatterlist t_tasks_sg_bounce;
519 void *t_task_buf;
520 /*
521 * Used for pre-registered fabric SGL passthrough WRITE and READ
522 * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop
523 * and other HW target mode fabric modules.
524 */
525 struct scatterlist *t_task_pt_sgl;
526 u32 t_task_pt_sgl_num;
527 struct list_head t_mem_list;
528 /* Used for BIDI READ */
529 struct list_head t_mem_bidi_list;
530 struct list_head t_task_list;
531 u32 t_task_list_num;
532
544} ____cacheline_aligned; 533} ____cacheline_aligned;
545 534
546struct se_tmr_req { 535struct se_tmr_req {
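One practical consequence of folding these fields into struct se_cmd is that command-state setup now touches a single structure instead of a separately managed sub-object. A hypothetical helper, not part of this patch, sketching the initialization the embedded locks, lists, and completions require (all field names are from the new struct se_cmd above):

	/* Hypothetical helper; the patch performs the equivalent
	 * initialization inline in the transport setup path. */
	static void se_cmd_init_task_state(struct se_cmd *cmd)
	{
		INIT_LIST_HEAD(&cmd->t_mem_list);
		INIT_LIST_HEAD(&cmd->t_mem_bidi_list);
		INIT_LIST_HEAD(&cmd->t_task_list);
		init_completion(&cmd->t_transport_stop_comp);
		init_completion(&cmd->transport_lun_fe_stop_comp);
		init_completion(&cmd->transport_lun_stop_comp);
		spin_lock_init(&cmd->t_state_lock);
	}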
diff --git a/include/target/target_core_fabric_ops.h b/include/target/target_core_fabric_ops.h
index 747e1404dca0..1752ed3f77fa 100644
--- a/include/target/target_core_fabric_ops.h
+++ b/include/target/target_core_fabric_ops.h
@@ -39,11 +39,6 @@ struct target_core_fabric_ops {
39 */ 39 */
40 int (*new_cmd_map)(struct se_cmd *); 40 int (*new_cmd_map)(struct se_cmd *);
41 /* 41 /*
42 * Optional function pointer for TCM fabric modules that use
43 * Linux/NET sockets to allocate struct iovec array to struct se_cmd
44 */
45 int (*alloc_cmd_iovecs)(struct se_cmd *);
46 /*
47 * Optional to release struct se_cmd and fabric dependent allocated 42 * Optional to release struct se_cmd and fabric dependent allocated
48 * I/O descriptor in transport_cmd_check_stop() 43 * I/O descriptor in transport_cmd_check_stop()
49 */ 44 */
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
index acd591491767..c9846d521945 100644
--- a/include/target/target_core_transport.h
+++ b/include/target/target_core_transport.h
@@ -184,10 +184,11 @@ extern void transport_generic_free_cmd(struct se_cmd *, int, int, int);
184extern void transport_generic_wait_for_cmds(struct se_cmd *, int); 184extern void transport_generic_wait_for_cmds(struct se_cmd *, int);
185extern int transport_init_task_sg(struct se_task *, struct se_mem *, u32); 185extern int transport_init_task_sg(struct se_task *, struct se_mem *, u32);
186extern int transport_map_mem_to_sg(struct se_task *, struct list_head *, 186extern int transport_map_mem_to_sg(struct se_task *, struct list_head *,
187 void *, struct se_mem *, 187 struct scatterlist *, struct se_mem *,
188 struct se_mem **, u32 *, u32 *); 188 struct se_mem **, u32 *, u32 *);
189extern void transport_do_task_sg_chain(struct se_cmd *); 189extern void transport_do_task_sg_chain(struct se_cmd *);
190extern void transport_generic_process_write(struct se_cmd *); 190extern void transport_generic_process_write(struct se_cmd *);
191extern int transport_generic_new_cmd(struct se_cmd *);
191extern int transport_generic_do_tmr(struct se_cmd *); 192extern int transport_generic_do_tmr(struct se_cmd *);
192/* From target_core_alua.c */ 193/* From target_core_alua.c */
193extern int core_alua_check_nonop_delay(struct se_cmd *); 194extern int core_alua_check_nonop_delay(struct se_cmd *);
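The prototype changes close out two changelog items: transport_map_mem_to_sg now takes a typed struct scatterlist * instead of void *, and transport_generic_new_cmd() is exported for fabric modules such as iscsi-target. An illustrative fabric-side caller under the new export; every name here except transport_generic_new_cmd() and transport_send_check_condition_and_sense() is hypothetical:

	/* Sketch of a fabric submit path using the newly exported symbol */
	static int example_fabric_submit(struct se_cmd *se_cmd)
	{
		int ret;

		/* Allocate tasks and memory for the command's data phase */
		ret = transport_generic_new_cmd(se_cmd);
		if (ret < 0) {
			transport_send_check_condition_and_sense(se_cmd,
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
			return ret;
		}
		return 0;
	}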