author     Andy Grover <agrover@redhat.com>          2011-07-19 06:26:37 -0400
committer  Nicholas Bellinger <nab@linux-iscsi.org>  2011-07-22 05:37:43 -0400
commit     5951146dea1ac8ff2f177477c907084d63913cad (patch)
tree       699cb7c498ca1799ae3e349cb4360171d9fa63e0 /drivers/target
parent     f22c119683e73498d8126581a1be75e1b7a339a3 (diff)
target: More core cleanups from AGrover (round 2)
This patch contains the squashed version of the second round of target core
cleanups and simplifications from Andy and Co. It also contains a handful of
fixes to address bugs in the original series and other minor cleanups.

Here is the condensed shortlog:

  target: Remove unneeded casts to void*
  target: Rename get_lun_for_{cmd,tmr} to lookup_{cmd,tmr}_lun
  target: Make t_task a member of se_cmd, not a pointer
  target: Handle functions returning "-2"
  target: Use cmd->se_dev over cmd->se_lun->lun_se_dev
  target: Embed qr in struct se_cmd
  target: Replace embedded struct se_queue_req with a list_head
  target: Rename list_heads that are nodes in struct se_cmd to "*_node"
  target: Fold transport_device_setup_cmd() into lookup_{tmr,cmd}_lun()
  target: Make t_mem_list and t_mem_list_bidi members of t_task
  target: Add comment & cleanup transport_map_sg_to_mem()
  target: Remove unneeded checks in transport_free_pages()

(Roland: Fix se_queue_req removal leftovers OOPs)
(nab: Fix transport_lookup_tmr_lun failure case)
(nab: Fix list_empty(&cmd->t_task.t_mem_bidi_list) inversion bugs)

Signed-off-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
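Two of the cleanups in the shortlog recur throughout the diff below and are
worth seeing in isolation. The following is a minimal standalone C sketch,
with simplified stand-in types rather than the real se_cmd/t_task
definitions, of embedding t_task as a member instead of a pointer and of
returning standard errno values instead of the old magic "-2":

#include <errno.h>
#include <stdio.h>

struct t_task {
	int t_tasks_bidi;
};

struct se_cmd {
	/*
	 * Embedded member ("Make t_task a member of se_cmd, not a
	 * pointer"): allocated and freed with its parent, so there is
	 * no separate allocation to fail and no NULL state to check.
	 */
	struct t_task t_task;
	int data_length;
};

/*
 * Returning errno values ("Handle functions returning -2") lets
 * callers test for -EINVAL or -ENOMEM instead of magic -1/-2.
 */
static int allocate_tasks(struct se_cmd *cmd)
{
	if (cmd->data_length < 0)
		return -EINVAL;	/* was the magic "-2" */
	return 0;
}

int main(void)
{
	struct se_cmd cmd = { .data_length = 512 };

	cmd.t_task.t_tasks_bidi = 1;	/* '.' access replaces '->' */
	printf("allocate_tasks: %d\n", allocate_tasks(&cmd));
	return 0;
}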
Diffstat (limited to 'drivers/target')
-rw-r--r--  drivers/target/loopback/tcm_loop.c       |   56
-rw-r--r--  drivers/target/target_core_alua.c        |    8
-rw-r--r--  drivers/target/target_core_cdb.c         |   62
-rw-r--r--  drivers/target/target_core_configfs.c    |    4
-rw-r--r--  drivers/target/target_core_device.c      |  167
-rw-r--r--  drivers/target/target_core_file.c        |   12
-rw-r--r--  drivers/target/target_core_iblock.c      |   14
-rw-r--r--  drivers/target/target_core_pr.c          |   86
-rw-r--r--  drivers/target/target_core_pscsi.c       |   20
-rw-r--r--  drivers/target/target_core_rd.c          |   14
-rw-r--r--  drivers/target/target_core_tmr.c         |   89
-rw-r--r--  drivers/target/target_core_transport.c   | 1106
-rw-r--r--  drivers/target/target_core_ua.c          |    4
-rw-r--r--  drivers/target/tcm_fc/tfc_cmd.c          |   26
-rw-r--r--  drivers/target/tcm_fc/tfc_conf.c         |    2
-rw-r--r--  drivers/target/tcm_fc/tfc_io.c           |   10
16 files changed, 751 insertions, 929 deletions
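Most hunks below also repeat the "Use cmd->se_dev over
cmd->se_lun->lun_se_dev" conversion: the LUN lookup caches the backing
device pointer in the command once, so later code follows one pointer
instead of two. A minimal sketch of the idea, again with simplified
stand-in types rather than the real target-core definitions:

#include <stdio.h>

struct se_device { int id; };
struct se_lun { struct se_device *lun_se_dev; };

struct se_cmd {
	struct se_lun *se_lun;
	struct se_device *se_dev;	/* cached at LUN lookup time */
};

static void lookup_cmd_lun(struct se_cmd *cmd, struct se_lun *lun)
{
	cmd->se_lun = lun;
	/* Directly associate cmd with se_dev */
	cmd->se_dev = lun->lun_se_dev;
}

int main(void)
{
	struct se_device dev = { .id = 7 };
	struct se_lun lun = { .lun_se_dev = &dev };
	struct se_cmd cmd;

	lookup_cmd_lun(&cmd, &lun);
	/* One dereference, where cmd.se_lun->lun_se_dev->id took two */
	printf("device id: %d\n", cmd.se_dev->id);
	return 0;
}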
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 2f19e1926493..eeb7ee7ab9f7 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -118,17 +118,16 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
	 * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
	 */
	if (scsi_bidi_cmnd(sc))
-		se_cmd->t_task->t_tasks_bidi = 1;
+		se_cmd->t_task.t_tasks_bidi = 1;
	/*
	 * Locate the struct se_lun pointer and attach it to struct se_cmd
	 */
-	if (transport_get_lun_for_cmd(se_cmd, tl_cmd->sc->device->lun) < 0) {
+	if (transport_lookup_cmd_lun(se_cmd, tl_cmd->sc->device->lun) < 0) {
		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
		set_host_byte(sc, DID_NO_CONNECT);
		return NULL;
	}

-	transport_device_setup_cmd(se_cmd);
	return se_cmd;
}

@@ -143,17 +142,17 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;
-	void *mem_ptr, *mem_bidi_ptr = NULL;
-	u32 sg_no_bidi = 0;
+	struct scatterlist *sgl_bidi = NULL;
+	u32 sgl_bidi_count = 0;
	int ret;
	/*
	 * Allocate the necessary tasks to complete the received CDB+data
	 */
-	ret = transport_generic_allocate_tasks(se_cmd, tl_cmd->sc->cmnd);
-	if (ret == -1) {
+	ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
+	if (ret == -ENOMEM) {
		/* Out of Resources */
		return PYX_TRANSPORT_LU_COMM_FAILURE;
-	} else if (ret == -2) {
+	} else if (ret == -EINVAL) {
		/*
		 * Handle case for SAM_STAT_RESERVATION_CONFLICT
		 */
@@ -165,35 +164,24 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
	 */
		return PYX_TRANSPORT_USE_SENSE_REASON;
	}
+
	/*
-	 * Setup the struct scatterlist memory from the received
-	 * struct scsi_cmnd.
+	 * For BIDI commands, pass in the extra READ buffer
+	 * to transport_generic_map_mem_to_cmd() below..
	 */
-	if (scsi_sg_count(sc)) {
-		se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM;
-		mem_ptr = (void *)scsi_sglist(sc);
-		/*
-		 * For BIDI commands, pass in the extra READ buffer
-		 * to transport_generic_map_mem_to_cmd() below..
-		 */
-		if (se_cmd->t_task->t_tasks_bidi) {
-			struct scsi_data_buffer *sdb = scsi_in(sc);
+	if (se_cmd->t_task.t_tasks_bidi) {
+		struct scsi_data_buffer *sdb = scsi_in(sc);

-			mem_bidi_ptr = (void *)sdb->table.sgl;
-			sg_no_bidi = sdb->table.nents;
-		}
-	} else {
-		/*
-		 * Used for DMA_NONE
-		 */
-		mem_ptr = NULL;
+		sgl_bidi = sdb->table.sgl;
+		sgl_bidi_count = sdb->table.nents;
	}
+
	/*
	 * Map the SG memory into struct se_mem->page linked list using the same
	 * physical memory at sg->page_link.
	 */
-	ret = transport_generic_map_mem_to_cmd(se_cmd, mem_ptr,
-			scsi_sg_count(sc), mem_bidi_ptr, sg_no_bidi);
+	ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
+			scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
	if (ret < 0)
		return PYX_TRANSPORT_LU_COMM_FAILURE;

@@ -384,14 +372,14 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
	/*
	 * Allocate the LUN_RESET TMR
	 */
-	se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, (void *)tl_tmr,
+	se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, tl_tmr,
				TMR_LUN_RESET);
	if (IS_ERR(se_cmd->se_tmr_req))
		goto release;
	/*
	 * Locate the underlying TCM struct se_lun from sc->device->lun
	 */
-	if (transport_get_lun_for_tmr(se_cmd, sc->device->lun) < 0)
+	if (transport_lookup_tmr_lun(se_cmd, sc->device->lun) < 0)
		goto release;
	/*
	 * Queue the TMR to TCM Core and sleep waiting for tcm_loop_queue_tm_rsp()
@@ -904,7 +892,7 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd)
	    ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	     (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {

-		memcpy((void *)sc->sense_buffer, (void *)se_cmd->sense_buffer,
+		memcpy(sc->sense_buffer, se_cmd->sense_buffer,
				SCSI_SENSE_BUFFERSIZE);
		sc->result = SAM_STAT_CHECK_CONDITION;
		set_driver_byte(sc, DRIVER_SENSE);
@@ -1054,7 +1042,7 @@ static int tcm_loop_make_nexus(
	 * transport_register_session()
	 */
	__transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
-			tl_nexus->se_sess, (void *)tl_nexus);
+			tl_nexus->se_sess, tl_nexus);
	tl_tpg->tl_hba->tl_nexus = tl_nexus;
	printk(KERN_INFO "TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
@@ -1242,7 +1230,7 @@ struct se_portal_group *tcm_loop_make_naa_tpg(
	 * Register the tl_tpg as a emulated SAS TCM Target Endpoint
	 */
	ret = core_tpg_register(&tcm_loop_fabric_configfs->tf_ops,
-			wwn, &tl_tpg->tl_se_tpg, (void *)tl_tpg,
+			wwn, &tl_tpg->tl_se_tpg, tl_tpg,
			TRANSPORT_TPG_TYPE_NORMAL);
	if (ret < 0)
		return ERR_PTR(-ENOMEM);
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index bfc42adea510..76abd86b6a73 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -61,11 +61,11 @@ struct t10_alua_lu_gp *default_lu_gp;
 */
int core_emulate_report_target_port_groups(struct se_cmd *cmd)
{
-	struct se_subsystem_dev *su_dev = cmd->se_lun->lun_se_dev->se_sub_dev;
+	struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
	struct se_port *port;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
-	unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
	u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
				    Target port group descriptor */

@@ -151,13 +151,13 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
 */
int core_emulate_set_target_port_groups(struct se_cmd *cmd)
{
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
	struct se_port *port, *l_port = cmd->se_lun->lun_sep;
	struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
-	unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
	unsigned char *ptr = &buf[4]; /* Skip over RESERVED area in header */
	u32 len = 4; /* Skip over RESERVED area in header */
	int alua_access_state, primary = 0, rc;
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 7d9ccf3aa9c3..95195d718101 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -65,8 +65,8 @@ static int
target_emulate_inquiry_std(struct se_cmd *cmd)
{
	struct se_lun *lun = cmd->se_lun;
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
-	unsigned char *buf = cmd->t_task->t_task_buf;
+	struct se_device *dev = cmd->se_dev;
+	unsigned char *buf = cmd->t_task.t_task_buf;

	/*
	 * Make sure we at least have 6 bytes of INQUIRY response
@@ -128,7 +128,7 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
	 * Registered Extended LUN WWN has been set via ConfigFS
	 * during device creation/restart.
	 */
-	if (cmd->se_lun->lun_se_dev->se_sub_dev->su_dev_flags &
+	if (cmd->se_dev->se_sub_dev->su_dev_flags &
			SDF_EMULATED_VPD_UNIT_SERIAL) {
		buf[3] = 3;
		buf[5] = 0x80;
@@ -143,7 +143,7 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
static int
target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
{
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
	u16 len = 0;

	buf[1] = 0x80;
@@ -176,7 +176,7 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
static int
target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
{
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
	struct se_lun *lun = cmd->se_lun;
	struct se_port *port = NULL;
	struct se_portal_group *tpg = NULL;
@@ -477,7 +477,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
	buf[5] = 0x07;

	/* If WriteCache emulation is enabled, set V_SUP */
-	if (cmd->se_lun->lun_se_dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
+	if (cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
		buf[6] = 0x01;
	return 0;
}
@@ -486,7 +486,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
static int
target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
{
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
	int have_tp = 0;

	/*
@@ -568,7 +568,7 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
static int
target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
{
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;

	/*
	 * From sbc3r22 section 6.5.4 Thin Provisioning VPD page:
@@ -620,9 +620,9 @@ target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
static int
target_emulate_inquiry(struct se_cmd *cmd)
{
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
-	unsigned char *buf = cmd->t_task->t_task_buf;
-	unsigned char *cdb = cmd->t_task->t_task_cdb;
+	struct se_device *dev = cmd->se_dev;
+	unsigned char *buf = cmd->t_task.t_task_buf;
+	unsigned char *cdb = cmd->t_task.t_task_cdb;

	if (!(cdb[1] & 0x1))
		return target_emulate_inquiry_std(cmd);
@@ -665,8 +665,8 @@ target_emulate_inquiry(struct se_cmd *cmd)
static int
target_emulate_readcapacity(struct se_cmd *cmd)
{
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
-	unsigned char *buf = cmd->t_task->t_task_buf;
+	struct se_device *dev = cmd->se_dev;
+	unsigned char *buf = cmd->t_task.t_task_buf;
	unsigned long long blocks_long = dev->transport->get_blocks(dev);
	u32 blocks;

@@ -695,8 +695,8 @@ target_emulate_readcapacity(struct se_cmd *cmd)
static int
target_emulate_readcapacity_16(struct se_cmd *cmd)
{
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
-	unsigned char *buf = cmd->t_task->t_task_buf;
+	struct se_device *dev = cmd->se_dev;
+	unsigned char *buf = cmd->t_task.t_task_buf;
	unsigned long long blocks = dev->transport->get_blocks(dev);

	buf[0] = (blocks >> 56) & 0xff;
@@ -830,9 +830,9 @@ target_modesense_dpofua(unsigned char *buf, int type)
static int
target_emulate_modesense(struct se_cmd *cmd, int ten)
{
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
-	char *cdb = cmd->t_task->t_task_cdb;
-	unsigned char *rbuf = cmd->t_task->t_task_buf;
+	struct se_device *dev = cmd->se_dev;
+	char *cdb = cmd->t_task.t_task_cdb;
+	unsigned char *rbuf = cmd->t_task.t_task_buf;
	int type = dev->transport->get_device_type(dev);
	int offset = (ten) ? 8 : 4;
	int length = 0;
@@ -903,8 +903,8 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
static int
target_emulate_request_sense(struct se_cmd *cmd)
{
-	unsigned char *cdb = cmd->t_task->t_task_cdb;
-	unsigned char *buf = cmd->t_task->t_task_buf;
+	unsigned char *cdb = cmd->t_task.t_task_cdb;
+	unsigned char *buf = cmd->t_task.t_task_buf;
	u8 ua_asc = 0, ua_ascq = 0;

	if (cdb[1] & 0x01) {
@@ -964,9 +964,9 @@ static int
target_emulate_unmap(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
-	unsigned char *buf = cmd->t_task->t_task_buf, *ptr = NULL;
-	unsigned char *cdb = &cmd->t_task->t_task_cdb[0];
+	struct se_device *dev = cmd->se_dev;
+	unsigned char *buf = cmd->t_task.t_task_buf, *ptr = NULL;
+	unsigned char *cdb = &cmd->t_task.t_task_cdb[0];
	sector_t lba;
	unsigned int size = cmd->data_length, range;
	int ret, offset;
@@ -1011,8 +1011,8 @@ static int
target_emulate_write_same(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
-	sector_t lba = cmd->t_task->t_task_lba;
+	struct se_device *dev = cmd->se_dev;
+	sector_t lba = cmd->t_task.t_task_lba;
	unsigned int range;
	int ret;

@@ -1036,11 +1036,11 @@ int
transport_emulate_control_cdb(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
	unsigned short service_action;
	int ret = 0;

-	switch (cmd->t_task->t_task_cdb[0]) {
+	switch (cmd->t_task.t_task_cdb[0]) {
	case INQUIRY:
		ret = target_emulate_inquiry(cmd);
		break;
@@ -1054,13 +1054,13 @@ transport_emulate_control_cdb(struct se_task *task)
		ret = target_emulate_modesense(cmd, 1);
		break;
	case SERVICE_ACTION_IN:
-		switch (cmd->t_task->t_task_cdb[1] & 0x1f) {
+		switch (cmd->t_task.t_task_cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			ret = target_emulate_readcapacity_16(cmd);
			break;
		default:
			printk(KERN_ERR "Unsupported SA: 0x%02x\n",
-				cmd->t_task->t_task_cdb[1] & 0x1f);
+				cmd->t_task.t_task_cdb[1] & 0x1f);
			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
		}
		break;
@@ -1085,7 +1085,7 @@ transport_emulate_control_cdb(struct se_task *task)
		break;
	case VARIABLE_LENGTH_CMD:
		service_action =
-			get_unaligned_be16(&cmd->t_task->t_task_cdb[8]);
+			get_unaligned_be16(&cmd->t_task.t_task_cdb[8]);
		switch (service_action) {
		case WRITE_SAME_32:
			if (!dev->transport->do_discard) {
@@ -1124,7 +1124,7 @@ transport_emulate_control_cdb(struct se_task *task)
		break;
	default:
		printk(KERN_ERR "Unsupported SCSI Opcode: 0x%02x for %s\n",
-			cmd->t_task->t_task_cdb[0], dev->transport->name);
+			cmd->t_task.t_task_cdb[0], dev->transport->name);
		return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
	}

diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 64418efa671b..ac7f7655570e 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -2037,7 +2037,7 @@ static ssize_t target_core_dev_show(struct config_item *item,
	if (!(tc_attr->show))
		return -EINVAL;

-	return tc_attr->show((void *)se_dev, page);
+	return tc_attr->show(se_dev, page);
}

static ssize_t target_core_dev_store(struct config_item *item,
@@ -2053,7 +2053,7 @@ static ssize_t target_core_dev_store(struct config_item *item,
	if (!(tc_attr->store))
		return -EINVAL;

-	return tc_attr->store((void *)se_dev, page, count);
+	return tc_attr->store(se_dev, page, count);
}

static struct configfs_item_operations target_core_dev_item_ops = {
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index fd923854505c..ea92f75d215e 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -59,15 +59,12 @@ static struct se_subsystem_dev *lun0_su_dev;
 /* not static, needed by tpg.c */
 struct se_device *g_lun0_dev;

-int transport_get_lun_for_cmd(
-	struct se_cmd *se_cmd,
-	u32 unpacked_lun)
+int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
-	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
+	struct se_device *dev;
	unsigned long flags;
-	int read_only = 0;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
@@ -76,91 +73,87 @@ int transport_get_lun_for_cmd(
	}

	spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
-	deve = se_cmd->se_deve =
-		&se_sess->se_node_acl->device_list[unpacked_lun];
-	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
-		if (se_cmd) {
-			deve->total_cmds++;
-			deve->total_bytes += se_cmd->data_length;
-
-			if (se_cmd->data_direction == DMA_TO_DEVICE) {
-				if (deve->lun_flags &
-						TRANSPORT_LUNFLAGS_READ_ONLY) {
-					read_only = 1;
-					goto out;
-				}
-				deve->write_bytes += se_cmd->data_length;
-			} else if (se_cmd->data_direction ==
-					DMA_FROM_DEVICE) {
-				deve->read_bytes += se_cmd->data_length;
-			}
-		}
+	se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
+	if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
+		struct se_dev_entry *deve = se_cmd->se_deve;
+
+		deve->total_cmds++;
+		deve->total_bytes += se_cmd->data_length;
+
+		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
+		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
+			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+			printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
+				" Access for 0x%08x\n",
+				se_cmd->se_tfo->get_fabric_name(),
+				unpacked_lun);
+			spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);
+			return -EACCES;
+		}
+
+		if (se_cmd->data_direction == DMA_TO_DEVICE)
+			deve->write_bytes += se_cmd->data_length;
+		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
+			deve->read_bytes += se_cmd->data_length;
+
		deve->deve_cmds++;

-		se_lun = se_cmd->se_lun = deve->se_lun;
+		se_lun = deve->se_lun;
+		se_cmd->se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
	}
-out:
	spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);

	if (!se_lun) {
-		if (read_only) {
-			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+		/*
+		 * Use the se_portal_group->tpg_virt_lun0 to allow for
+		 * REPORT_LUNS, et al to be returned when no active
+		 * MappedLUN=0 exists for this Initiator Port.
+		 */
+		if (unpacked_lun != 0) {
+			se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-			printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
+			printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
+			return -ENODEV;
+		}
+		/*
+		 * Force WRITE PROTECT for virtual LUN 0
+		 */
+		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
+		    (se_cmd->data_direction != DMA_NONE)) {
+			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			return -EACCES;
-		} else {
-			/*
-			 * Use the se_portal_group->tpg_virt_lun0 to allow for
-			 * REPORT_LUNS, et al to be returned when no active
-			 * MappedLUN=0 exists for this Initiator Port.
-			 */
-			if (unpacked_lun != 0) {
-				se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
-				se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-				printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
-					" Access for 0x%08x\n",
-					se_cmd->se_tfo->get_fabric_name(),
-					unpacked_lun);
-				return -ENODEV;
-			}
-			/*
-			 * Force WRITE PROTECT for virtual LUN 0
-			 */
-			if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
-			    (se_cmd->data_direction != DMA_NONE)) {
-				se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
-				se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-				return -EACCES;
-			}
-#if 0
-			printk("TARGET_CORE[%s]: Using virtual LUN0! :-)\n",
-				se_cmd->se_tfo->get_fabric_name());
-#endif
-			se_lun = se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
-			se_cmd->orig_fe_lun = 0;
-			se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
-			se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		}
+
+		se_lun = &se_sess->se_tpg->tpg_virt_lun0;
+		se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
+		se_cmd->orig_fe_lun = 0;
+		se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
+		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
	}
	/*
	 * Determine if the struct se_lun is online.
+	 * FIXME: Check for LUN_RESET + UNIT Attention
	 */
-/* #warning FIXME: Check for LUN_RESET + UNIT Attention */
	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

-	{
-	struct se_device *dev = se_lun->lun_se_dev;
+	/* Directly associate cmd with se_dev */
+	se_cmd->se_dev = se_lun->lun_se_dev;
+
+	/* TODO: get rid of this and use atomics for stats */
+	dev = se_lun->lun_se_dev;
	spin_lock_irq(&dev->stats_lock);
	dev->num_cmds++;
	if (se_cmd->data_direction == DMA_TO_DEVICE)
@@ -168,30 +161,22 @@ out:
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		dev->read_bytes += se_cmd->data_length;
	spin_unlock_irq(&dev->stats_lock);
-	}

	/*
	 * Add the iscsi_cmd_t to the struct se_lun's cmd list. This list is used
	 * for tracking state of struct se_cmds during LUN shutdown events.
	 */
	spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
-	list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list);
-	atomic_set(&se_cmd->t_task->transport_lun_active, 1);
-#if 0
-	printk(KERN_INFO "Adding ITT: 0x%08x to LUN LIST[%d]\n",
-		se_cmd->se_tfo->get_task_tag(se_cmd), se_lun->unpacked_lun);
-#endif
+	list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
+	atomic_set(&se_cmd->t_task.transport_lun_active, 1);
	spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);

	return 0;
}
-EXPORT_SYMBOL(transport_get_lun_for_cmd);
+EXPORT_SYMBOL(transport_lookup_cmd_lun);

-int transport_get_lun_for_tmr(
-	struct se_cmd *se_cmd,
-	u32 unpacked_lun)
+int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
-	struct se_device *dev = NULL;
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
@@ -204,15 +189,16 @@ int transport_get_lun_for_tmr(
	}

	spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
-	deve = se_cmd->se_deve =
-		&se_sess->se_node_acl->device_list[unpacked_lun];
+	se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
+	deve = se_cmd->se_deve;
+
	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
-		se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun;
-		dev = se_lun->lun_se_dev;
+		se_tmr->tmr_lun = deve->se_lun;
+		se_cmd->se_lun = deve->se_lun;
+		se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
-		se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
-/*		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; */
+		se_cmd->se_orig_obj_ptr = se_cmd->se_dev;
	}
	spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);

@@ -226,21 +212,24 @@ int transport_get_lun_for_tmr(
	}
	/*
	 * Determine if the struct se_lun is online.
+	 * FIXME: Check for LUN_RESET + UNIT Attention
	 */
-/* #warning FIXME: Check for LUN_RESET + UNIT Attention */
	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}
-	se_tmr->tmr_dev = dev;

-	spin_lock(&dev->se_tmr_lock);
-	list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list);
-	spin_unlock(&dev->se_tmr_lock);
+	/* Directly associate cmd with se_dev */
+	se_cmd->se_dev = se_lun->lun_se_dev;
+	se_tmr->tmr_dev = se_lun->lun_se_dev;
+
+	spin_lock(&se_tmr->tmr_dev->se_tmr_lock);
+	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
+	spin_unlock(&se_tmr->tmr_dev->se_tmr_lock);

	return 0;
}
-EXPORT_SYMBOL(transport_get_lun_for_tmr);
+EXPORT_SYMBOL(transport_lookup_tmr_lun);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
@@ -667,10 +656,10 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
	struct se_lun *se_lun;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_task *se_task;
-	unsigned char *buf = se_cmd->t_task->t_task_buf;
+	unsigned char *buf = se_cmd->t_task.t_task_buf;
	u32 cdb_offset = 0, lun_count = 0, offset = 8, i;

-	list_for_each_entry(se_task, &se_cmd->t_task->t_task_list, t_list)
+	list_for_each_entry(se_task, &se_cmd->t_task.t_task_list, t_list)
		break;

	if (!(se_task)) {
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 0c44bc051484..2e7ea7457501 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -223,7 +223,7 @@ static struct se_device *fd_create_virtdevice(
	dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH;

	dev = transport_add_device_to_core_hba(hba, &fileio_template,
-				se_dev, dev_flags, (void *)fd_dev,
+				se_dev, dev_flags, fd_dev,
				&dev_limits, "FILEIO", FD_VERSION);
	if (!(dev))
		goto fail;
@@ -279,7 +279,7 @@ fd_alloc_task(struct se_cmd *cmd)
		return NULL;
	}

-	fd_req->fd_dev = cmd->se_lun->lun_se_dev->dev_ptr;
+	fd_req->fd_dev = cmd->se_dev->dev_ptr;

	return &fd_req->fd_task;
}
@@ -377,7 +377,7 @@ static void fd_emulate_sync_cache(struct se_task *task)
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = cmd->se_dev;
	struct fd_dev *fd_dev = dev->dev_ptr;
-	int immed = (cmd->t_task->t_task_cdb[1] & 0x2);
+	int immed = (cmd->t_task.t_task_cdb[1] & 0x2);
	loff_t start, end;
	int ret;

@@ -391,11 +391,11 @@ static void fd_emulate_sync_cache(struct se_task *task)
	/*
	 * Determine if we will be flushing the entire device.
	 */
-	if (cmd->t_task->t_task_lba == 0 && cmd->data_length == 0) {
+	if (cmd->t_task.t_task_lba == 0 && cmd->data_length == 0) {
		start = 0;
		end = LLONG_MAX;
	} else {
-		start = cmd->t_task->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
+		start = cmd->t_task.t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
		if (cmd->data_length)
			end = start + cmd->data_length;
		else
@@ -475,7 +475,7 @@ static int fd_do_task(struct se_task *task)
	if (ret > 0 &&
	    dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
	    dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
-	    cmd->t_task->t_tasks_fua) {
+	    cmd->t_task.t_tasks_fua) {
		/*
		 * We might need to be a bit smarter here
		 * and return some sense data to let the initiator
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index fb159876fffc..c73baefeab8e 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -74,7 +74,7 @@ static int iblock_attach_hba(struct se_hba *hba, u32 host_id)

	ib_host->iblock_host_id = host_id;

-	hba->hba_ptr = (void *) ib_host;
+	hba->hba_ptr = ib_host;

	printk(KERN_INFO "CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
@@ -172,7 +172,7 @@ static struct se_device *iblock_create_virtdevice(
	ib_dev->ibd_bd = bd;

	dev = transport_add_device_to_core_hba(hba,
-			&iblock_template, se_dev, dev_flags, (void *)ib_dev,
+			&iblock_template, se_dev, dev_flags, ib_dev,
			&dev_limits, "IBLOCK", IBLOCK_VERSION);
	if (!(dev))
		goto failed;
@@ -240,7 +240,7 @@ iblock_alloc_task(struct se_cmd *cmd)
		return NULL;
	}

-	ib_req->ib_dev = cmd->se_lun->lun_se_dev->dev_ptr;
+	ib_req->ib_dev = cmd->se_dev->dev_ptr;
	atomic_set(&ib_req->ib_bio_cnt, 0);
	return &ib_req->ib_task;
}
@@ -331,7 +331,7 @@ static void iblock_emulate_sync_cache(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
-	int immed = (cmd->t_task->t_task_cdb[1] & 0x2);
+	int immed = (cmd->t_task.t_task_cdb[1] & 0x2);
	sector_t error_sector;
	int ret;

@@ -400,7 +400,7 @@ static int iblock_do_task(struct se_task *task)
	 */
	if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
	    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
-	     task->task_se_cmd->t_task->t_tasks_fua))
+	     task->task_se_cmd->t_task.t_tasks_fua))
		rw = WRITE_FUA;
	else
		rw = WRITE;
@@ -593,7 +593,7 @@ static struct bio *iblock_get_bio(
	DEBUG_IBLOCK("Allocated bio: %p task_size: %u\n", bio, task->task_size);

	bio->bi_bdev = ib_dev->ibd_bd;
-	bio->bi_private = (void *) task;
+	bio->bi_private = task;
	bio->bi_destructor = iblock_bio_destructor;
	bio->bi_end_io = &iblock_bio_done;
	bio->bi_sector = lba;
@@ -608,7 +608,7 @@ static struct bio *iblock_get_bio(
static int iblock_map_task_SG(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
	struct iblock_dev *ib_dev = task->se_dev->dev_ptr;
	struct iblock_req *ib_req = IBLOCK_REQ(task);
	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 27a7525971b9..19406a3474c3 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -157,8 +157,8 @@ static int core_scsi2_reservation_reserve(struct se_cmd *cmd)
	struct se_session *sess = cmd->se_sess;
	struct se_portal_group *tpg = sess->se_tpg;

-	if ((cmd->t_task->t_task_cdb[1] & 0x01) &&
-	    (cmd->t_task->t_task_cdb[1] & 0x02)) {
+	if ((cmd->t_task.t_task_cdb[1] & 0x01) &&
+	    (cmd->t_task.t_task_cdb[1] & 0x02)) {
		printk(KERN_ERR "LongIO and Obselete Bits set, returning"
				" ILLEGAL_REQUEST\n");
		return PYX_TRANSPORT_ILLEGAL_REQUEST;
@@ -216,7 +216,7 @@ int core_scsi2_emulate_crh(struct se_cmd *cmd)
	struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
	struct t10_pr_registration *pr_reg;
	struct t10_reservation *pr_tmpl = &su_dev->t10_pr;
-	unsigned char *cdb = &cmd->t_task->t_task_cdb[0];
+	unsigned char *cdb = &cmd->t_task.t_task_cdb[0];
	int crh = (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS);
	int conflict = 0;

@@ -1471,7 +1471,7 @@ static int core_scsi3_decode_spec_i_port(
	int all_tg_pt,
	int aptpl)
{
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
	struct se_port *tmp_port;
	struct se_portal_group *dest_tpg = NULL, *tmp_tpg;
	struct se_session *se_sess = cmd->se_sess;
@@ -1482,7 +1482,7 @@ static int core_scsi3_decode_spec_i_port(
	struct list_head tid_dest_list;
	struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp;
	struct target_core_fabric_ops *tmp_tf_ops;
-	unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
	unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident;
	char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
	u32 tpdl, tid_len = 0;
@@ -1509,7 +1509,7 @@ static int core_scsi3_decode_spec_i_port(
	tidh_new->dest_node_acl = se_sess->se_node_acl;
	tidh_new->dest_se_deve = local_se_deve;

-	local_pr_reg = __core_scsi3_alloc_registration(cmd->se_lun->lun_se_dev,
+	local_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev,
				se_sess->se_node_acl, local_se_deve, l_isid,
				sa_res_key, all_tg_pt, aptpl);
	if (!(local_pr_reg)) {
@@ -1741,7 +1741,7 @@ static int core_scsi3_decode_spec_i_port(
	 * and then call __core_scsi3_add_registration() in the
	 * 2nd loop which will never fail.
	 */
-	dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_lun->lun_se_dev,
+	dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev,
				dest_node_acl, dest_se_deve, iport_ptr,
				sa_res_key, all_tg_pt, aptpl);
	if (!(dest_pr_reg)) {
@@ -1787,7 +1787,7 @@ static int core_scsi3_decode_spec_i_port(
	prf_isid = core_pr_dump_initiator_port(dest_pr_reg, &i_buf[0],
				PR_REG_ISID_ID_LEN);

-	__core_scsi3_add_registration(cmd->se_lun->lun_se_dev, dest_node_acl,
+	__core_scsi3_add_registration(cmd->se_dev, dest_node_acl,
				dest_pr_reg, 0, 0);

	printk(KERN_INFO "SPC-3 PR [%s] SPEC_I_PT: Successfully"
@@ -2071,7 +2071,7 @@ static int core_scsi3_emulate_pro_register(
	int ignore_key)
{
	struct se_session *se_sess = cmd->se_sess;
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
	struct se_dev_entry *se_deve;
	struct se_lun *se_lun = cmd->se_lun;
	struct se_portal_group *se_tpg;
@@ -2117,7 +2117,7 @@ static int core_scsi3_emulate_pro_register(
	 * Port Endpoint that the PRO was received from on the
	 * Logical Unit of the SCSI device server.
	 */
-	ret = core_scsi3_alloc_registration(cmd->se_lun->lun_se_dev,
+	ret = core_scsi3_alloc_registration(cmd->se_dev,
			se_sess->se_node_acl, se_deve, isid_ptr,
			sa_res_key, all_tg_pt, aptpl,
			ignore_key, 0);
@@ -2145,7 +2145,7 @@ static int core_scsi3_emulate_pro_register(
	 */
	if (!(aptpl)) {
		pr_tmpl->pr_aptpl_active = 0;
-		core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, NULL, 0);
+		core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0);
		printk("SPC-3 PR: Set APTPL Bit Deactivated for"
				" REGISTER\n");
		return 0;
@@ -2155,10 +2155,10 @@ static int core_scsi3_emulate_pro_register(
	 * update the APTPL metadata information using its
	 * preallocated *pr_reg->pr_aptpl_buf.
	 */
-	pr_reg = core_scsi3_locate_pr_reg(cmd->se_lun->lun_se_dev,
+	pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev,
			se_sess->se_node_acl, se_sess);

-	ret = core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev,
+	ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
			&pr_reg->pr_aptpl_buf[0],
			pr_tmpl->pr_aptpl_buf_len);
	if (!(ret)) {
@@ -2223,7 +2223,7 @@ static int core_scsi3_emulate_pro_register(
	 */
	if (!(sa_res_key)) {
		pr_holder = core_scsi3_check_implict_release(
-				cmd->se_lun->lun_se_dev, pr_reg);
+				cmd->se_dev, pr_reg);
		if (pr_holder < 0) {
			kfree(pr_aptpl_buf);
			core_scsi3_put_pr_reg(pr_reg);
@@ -2260,7 +2260,7 @@ static int core_scsi3_emulate_pro_register(
	/*
	 * Release the calling I_T Nexus registration now..
	 */
-	__core_scsi3_free_registration(cmd->se_lun->lun_se_dev, pr_reg,
+	__core_scsi3_free_registration(cmd->se_dev, pr_reg,
			NULL, 1);
	/*
	 * From spc4r17, section 5.7.11.3 Unregistering
@@ -2315,7 +2315,7 @@ static int core_scsi3_emulate_pro_register(
	 * READ_KEYS service action.
	 */
	pr_reg->pr_res_generation = core_scsi3_pr_generation(
-			cmd->se_lun->lun_se_dev);
+			cmd->se_dev);
	pr_reg->pr_res_key = sa_res_key;
	printk("SPC-3 PR [%s] REGISTER%s: Changed Reservation"
		" Key for %s to: 0x%016Lx PRgeneration:"
@@ -2398,7 +2398,7 @@ static int core_scsi3_pro_reserve(
	/*
	 * Locate the existing *pr_reg via struct se_node_acl pointers
	 */
-	pr_reg = core_scsi3_locate_pr_reg(cmd->se_lun->lun_se_dev, se_sess->se_node_acl,
+	pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
				se_sess);
	if (!(pr_reg)) {
		printk(KERN_ERR "SPC-3 PR: Unable to locate"
@@ -2527,7 +2527,7 @@ static int core_scsi3_pro_reserve(
	spin_unlock(&dev->dev_reservation_lock);

	if (pr_tmpl->pr_aptpl_active) {
-		ret = core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev,
+		ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
				&pr_reg->pr_aptpl_buf[0],
				pr_tmpl->pr_aptpl_buf_len);
		if (!(ret))
@@ -2758,7 +2758,7 @@ static int core_scsi3_emulate_pro_release(

write_aptpl:
	if (pr_tmpl->pr_aptpl_active) {
-		ret = core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev,
+		ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
				&pr_reg->pr_aptpl_buf[0],
				pr_tmpl->pr_aptpl_buf_len);
		if (!(ret))
@@ -2783,7 +2783,7 @@ static int core_scsi3_emulate_pro_clear(
	/*
	 * Locate the existing *pr_reg via struct se_node_acl pointers
	 */
-	pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_lun->lun_se_dev,
+	pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev,
			se_sess->se_node_acl, se_sess);
	if (!(pr_reg_n)) {
		printk(KERN_ERR "SPC-3 PR: Unable to locate"
@@ -2849,7 +2849,7 @@ static int core_scsi3_emulate_pro_clear(
		cmd->se_tfo->get_fabric_name());

	if (pr_tmpl->pr_aptpl_active) {
-		core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, NULL, 0);
+		core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0);
		printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata"
				" for CLEAR\n");
	}
@@ -2954,7 +2954,7 @@ static int core_scsi3_pro_preempt(
	u64 sa_res_key,
	int abort)
{
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
	struct se_dev_entry *se_deve;
	struct se_node_acl *pr_reg_nacl;
	struct se_session *se_sess = cmd->se_sess;
@@ -2969,7 +2969,7 @@ static int core_scsi3_pro_preempt(
		return PYX_TRANSPORT_LU_COMM_FAILURE;

	se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
-	pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_lun->lun_se_dev, se_sess->se_node_acl,
+	pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
				se_sess);
	if (!(pr_reg_n)) {
		printk(KERN_ERR "SPC-3 PR: Unable to locate"
@@ -3111,7 +3111,7 @@ static int core_scsi3_pro_preempt(
	spin_unlock(&dev->dev_reservation_lock);

	if (pr_tmpl->pr_aptpl_active) {
-		ret = core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev,
+		ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
				&pr_reg_n->pr_aptpl_buf[0],
				pr_tmpl->pr_aptpl_buf_len);
		if (!(ret))
@@ -3121,7 +3121,7 @@ static int core_scsi3_pro_preempt(
		}

		core_scsi3_put_pr_reg(pr_reg_n);
-		core_scsi3_pr_generation(cmd->se_lun->lun_se_dev);
+		core_scsi3_pr_generation(cmd->se_dev);
		return 0;
	}
	/*
@@ -3247,7 +3247,7 @@ static int core_scsi3_pro_preempt(
	}

	if (pr_tmpl->pr_aptpl_active) {
-		ret = core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev,
+		ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
				&pr_reg_n->pr_aptpl_buf[0],
				pr_tmpl->pr_aptpl_buf_len);
		if (!(ret))
@@ -3256,7 +3256,7 @@ static int core_scsi3_pro_preempt(
	}

	core_scsi3_put_pr_reg(pr_reg_n);
-	core_scsi3_pr_generation(cmd->se_lun->lun_se_dev);
+	core_scsi3_pr_generation(cmd->se_dev);
	return 0;
}

@@ -3298,7 +3298,7 @@ static int core_scsi3_emulate_pro_register_and_move(
	int unreg)
{
	struct se_session *se_sess = cmd->se_sess;
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
	struct se_dev_entry *se_deve, *dest_se_deve = NULL;
	struct se_lun *se_lun = cmd->se_lun;
	struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL;
@@ -3307,7 +3307,7 @@ static int core_scsi3_emulate_pro_register_and_move(
3307 struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops; 3307 struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops;
3308 struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg; 3308 struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg;
3309 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 3309 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
3310 unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; 3310 unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
3311 unsigned char *initiator_str; 3311 unsigned char *initiator_str;
3312 char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; 3312 char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
3313 u32 tid_len, tmp_tid_len; 3313 u32 tid_len, tmp_tid_len;
@@ -3330,7 +3330,7 @@ static int core_scsi3_emulate_pro_register_and_move(
3330 * 3330 *
3331 * Locate the existing *pr_reg via struct se_node_acl pointers 3331 * Locate the existing *pr_reg via struct se_node_acl pointers
3332 */ 3332 */
3333 pr_reg = core_scsi3_locate_pr_reg(cmd->se_lun->lun_se_dev, se_sess->se_node_acl, 3333 pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
3334 se_sess); 3334 se_sess);
3335 if (!(pr_reg)) { 3335 if (!(pr_reg)) {
3336 printk(KERN_ERR "SPC-3 PR: Unable to locate PR_REGISTERED" 3336 printk(KERN_ERR "SPC-3 PR: Unable to locate PR_REGISTERED"
@@ -3612,7 +3612,7 @@ after_iport_check:
3612 dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl, 3612 dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
3613 iport_ptr); 3613 iport_ptr);
3614 if (!(dest_pr_reg)) { 3614 if (!(dest_pr_reg)) {
3615 ret = core_scsi3_alloc_registration(cmd->se_lun->lun_se_dev, 3615 ret = core_scsi3_alloc_registration(cmd->se_dev,
3616 dest_node_acl, dest_se_deve, iport_ptr, 3616 dest_node_acl, dest_se_deve, iport_ptr,
3617 sa_res_key, 0, aptpl, 2, 1); 3617 sa_res_key, 0, aptpl, 2, 1);
3618 if (ret != 0) { 3618 if (ret != 0) {
@@ -3683,12 +3683,12 @@ after_iport_check:
3683 */ 3683 */
3684 if (!(aptpl)) { 3684 if (!(aptpl)) {
3685 pr_tmpl->pr_aptpl_active = 0; 3685 pr_tmpl->pr_aptpl_active = 0;
3686 core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, NULL, 0); 3686 core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0);
3687 printk("SPC-3 PR: Set APTPL Bit Deactivated for" 3687 printk("SPC-3 PR: Set APTPL Bit Deactivated for"
3688 " REGISTER_AND_MOVE\n"); 3688 " REGISTER_AND_MOVE\n");
3689 } else { 3689 } else {
3690 pr_tmpl->pr_aptpl_active = 1; 3690 pr_tmpl->pr_aptpl_active = 1;
3691 ret = core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, 3691 ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
3692 &dest_pr_reg->pr_aptpl_buf[0], 3692 &dest_pr_reg->pr_aptpl_buf[0],
3693 pr_tmpl->pr_aptpl_buf_len); 3693 pr_tmpl->pr_aptpl_buf_len);
3694 if (!(ret)) 3694 if (!(ret))
@@ -3723,7 +3723,7 @@ static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb)
3723 */ 3723 */
3724static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) 3724static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
3725{ 3725{
3726 unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; 3726 unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
3727 u64 res_key, sa_res_key; 3727 u64 res_key, sa_res_key;
3728 int sa, scope, type, aptpl; 3728 int sa, scope, type, aptpl;
3729 int spec_i_pt = 0, all_tg_pt = 0, unreg = 0; 3729 int spec_i_pt = 0, all_tg_pt = 0, unreg = 0;
@@ -3827,10 +3827,10 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
3827 */ 3827 */
3828static int core_scsi3_pri_read_keys(struct se_cmd *cmd) 3828static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
3829{ 3829{
3830 struct se_device *se_dev = cmd->se_lun->lun_se_dev; 3830 struct se_device *se_dev = cmd->se_dev;
3831 struct se_subsystem_dev *su_dev = se_dev->se_sub_dev; 3831 struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
3832 struct t10_pr_registration *pr_reg; 3832 struct t10_pr_registration *pr_reg;
3833 unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; 3833 unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
3834 u32 add_len = 0, off = 8; 3834 u32 add_len = 0, off = 8;
3835 3835
3836 if (cmd->data_length < 8) { 3836 if (cmd->data_length < 8) {
@@ -3882,10 +3882,10 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
3882 */ 3882 */
3883static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) 3883static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
3884{ 3884{
3885 struct se_device *se_dev = cmd->se_lun->lun_se_dev; 3885 struct se_device *se_dev = cmd->se_dev;
3886 struct se_subsystem_dev *su_dev = se_dev->se_sub_dev; 3886 struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
3887 struct t10_pr_registration *pr_reg; 3887 struct t10_pr_registration *pr_reg;
3888 unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; 3888 unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
3889 u64 pr_res_key; 3889 u64 pr_res_key;
3890 u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */ 3890 u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */
3891 3891
@@ -3963,9 +3963,9 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
3963 */ 3963 */
3964static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd) 3964static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
3965{ 3965{
3966 struct se_device *dev = cmd->se_lun->lun_se_dev; 3966 struct se_device *dev = cmd->se_dev;
3967 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 3967 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
3968 unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; 3968 unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
3969 u16 add_len = 8; /* Hardcoded to 8. */ 3969 u16 add_len = 8; /* Hardcoded to 8. */
3970 3970
3971 if (cmd->data_length < 6) { 3971 if (cmd->data_length < 6) {
@@ -4014,13 +4014,13 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
4014 */ 4014 */
4015static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) 4015static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
4016{ 4016{
4017 struct se_device *se_dev = cmd->se_lun->lun_se_dev; 4017 struct se_device *se_dev = cmd->se_dev;
4018 struct se_node_acl *se_nacl; 4018 struct se_node_acl *se_nacl;
4019 struct se_subsystem_dev *su_dev = se_dev->se_sub_dev; 4019 struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
4020 struct se_portal_group *se_tpg; 4020 struct se_portal_group *se_tpg;
4021 struct t10_pr_registration *pr_reg, *pr_reg_tmp; 4021 struct t10_pr_registration *pr_reg, *pr_reg_tmp;
4022 struct t10_reservation *pr_tmpl = &se_dev->se_sub_dev->t10_pr; 4022 struct t10_reservation *pr_tmpl = &se_dev->se_sub_dev->t10_pr;
4023 unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; 4023 unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
4024 u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len; 4024 u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len;
4025 u32 off = 8; /* off into first Full Status descriptor */ 4025 u32 off = 8; /* off into first Full Status descriptor */
4026 int format_code = 0; 4026 int format_code = 0;
@@ -4174,7 +4174,7 @@ static int core_scsi3_emulate_pr_in(struct se_cmd *cmd, unsigned char *cdb)
4174 4174
4175int core_scsi3_emulate_pr(struct se_cmd *cmd) 4175int core_scsi3_emulate_pr(struct se_cmd *cmd)
4176{ 4176{
4177 unsigned char *cdb = &cmd->t_task->t_task_cdb[0]; 4177 unsigned char *cdb = &cmd->t_task.t_task_cdb[0];
4178 struct se_device *dev = cmd->se_dev; 4178 struct se_device *dev = cmd->se_dev;
4179 /* 4179 /*
4180 * Following spc2r20 5.5.1 Reservations overview: 4180 * Following spc2r20 5.5.1 Reservations overview:
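
Editor's note: two mechanical rewrites account for nearly every target_core_pr.c hunk above. First, the cmd->se_lun->lun_se_dev pointer chase is replaced by the se_dev pointer now cached on the command during LUN lookup. Second, t_task changes from a pointer (aimed at a t_task_backstore field) to a plain embedded member, so "->" becomes "." and the NULL-pointer case disappears. A minimal sketch of the second shape change, with hypothetical names, not code from this commit:

struct task_state {
	int t_state;
};

struct cmd_old {
	struct task_state *t_task;           /* points at t_task_backstore */
	struct task_state t_task_backstore;  /* extra field, extra setup step */
};

struct cmd_new {
	struct task_state t_task;            /* embedded: always valid, one allocation */
};

static int get_state(struct cmd_new *cmd)
{
	return cmd->t_task.t_state;          /* was: cmd->t_task->t_state */
}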
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 44a79a5c6d32..ecfe889cb0ce 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -72,7 +72,7 @@ static int pscsi_attach_hba(struct se_hba *hba, u32 host_id)
72 phv->phv_host_id = host_id; 72 phv->phv_host_id = host_id;
73 phv->phv_mode = PHV_VIRUTAL_HOST_ID; 73 phv->phv_mode = PHV_VIRUTAL_HOST_ID;
74 74
75 hba->hba_ptr = (void *)phv; 75 hba->hba_ptr = phv;
76 76
77 printk(KERN_INFO "CORE_HBA[%d] - TCM SCSI HBA Driver %s on" 77 printk(KERN_INFO "CORE_HBA[%d] - TCM SCSI HBA Driver %s on"
78 " Generic Target Core Stack %s\n", hba->hba_id, 78 " Generic Target Core Stack %s\n", hba->hba_id,
@@ -355,7 +355,7 @@ static struct se_device *pscsi_add_device_to_list(
355 pdv->pdv_sd = sd; 355 pdv->pdv_sd = sd;
356 356
357 dev = transport_add_device_to_core_hba(hba, &pscsi_template, 357 dev = transport_add_device_to_core_hba(hba, &pscsi_template,
358 se_dev, dev_flags, (void *)pdv, 358 se_dev, dev_flags, pdv,
359 &dev_limits, NULL, NULL); 359 &dev_limits, NULL, NULL);
360 if (!(dev)) { 360 if (!(dev)) {
361 pdv->pdv_sd = NULL; 361 pdv->pdv_sd = NULL;
@@ -394,7 +394,7 @@ static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name)
394 pdv->pdv_se_hba = hba; 394 pdv->pdv_se_hba = hba;
395 395
396 printk(KERN_INFO "PSCSI: Allocated pdv: %p for %s\n", pdv, name); 396 printk(KERN_INFO "PSCSI: Allocated pdv: %p for %s\n", pdv, name);
397 return (void *)pdv; 397 return pdv;
398} 398}
399 399
400/* 400/*
@@ -697,7 +697,7 @@ static int pscsi_transport_complete(struct se_task *task)
697 697
698 if (task->task_se_cmd->se_deve->lun_flags & 698 if (task->task_se_cmd->se_deve->lun_flags &
699 TRANSPORT_LUNFLAGS_READ_ONLY) { 699 TRANSPORT_LUNFLAGS_READ_ONLY) {
700 unsigned char *buf = task->task_se_cmd->t_task->t_task_buf; 700 unsigned char *buf = task->task_se_cmd->t_task.t_task_buf;
701 701
702 if (cdb[0] == MODE_SENSE_10) { 702 if (cdb[0] == MODE_SENSE_10) {
703 if (!(buf[3] & 0x80)) 703 if (!(buf[3] & 0x80))
@@ -763,7 +763,7 @@ static struct se_task *
763pscsi_alloc_task(struct se_cmd *cmd) 763pscsi_alloc_task(struct se_cmd *cmd)
764{ 764{
765 struct pscsi_plugin_task *pt; 765 struct pscsi_plugin_task *pt;
766 unsigned char *cdb = cmd->t_task->t_task_cdb; 766 unsigned char *cdb = cmd->t_task.t_task_cdb;
767 767
768 pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL); 768 pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL);
769 if (!pt) { 769 if (!pt) {
@@ -776,7 +776,7 @@ pscsi_alloc_task(struct se_cmd *cmd)
776 * allocate the extended CDB buffer for per struct se_task context 776 * allocate the extended CDB buffer for per struct se_task context
777 * pt->pscsi_cdb now. 777 * pt->pscsi_cdb now.
778 */ 778 */
779 if (cmd->t_task->t_task_cdb != cmd->t_task->__t_task_cdb) { 779 if (cmd->t_task.t_task_cdb != cmd->t_task.__t_task_cdb) {
780 780
781 pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL); 781 pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL);
782 if (!(pt->pscsi_cdb)) { 782 if (!(pt->pscsi_cdb)) {
@@ -812,7 +812,7 @@ static inline void pscsi_blk_init_request(
812 * also set the end_io_data pointer to struct se_task. 812 * also set the end_io_data pointer to struct se_task.
813 */ 813 */
814 req->end_io = pscsi_req_done; 814 req->end_io = pscsi_req_done;
815 req->end_io_data = (void *)task; 815 req->end_io_data = task;
816 /* 816 /*
817 * Load the referenced struct se_task's SCSI CDB into 817 * Load the referenced struct se_task's SCSI CDB into
818 * include/linux/blkdev.h:struct request->cmd 818 * include/linux/blkdev.h:struct request->cmd
@@ -822,7 +822,7 @@ static inline void pscsi_blk_init_request(
822 /* 822 /*
823 * Setup pointer for outgoing sense data. 823 * Setup pointer for outgoing sense data.
824 */ 824 */
825 req->sense = (void *)&pt->pscsi_sense[0]; 825 req->sense = &pt->pscsi_sense[0];
826 req->sense_len = 0; 826 req->sense_len = 0;
827} 827}
828 828
@@ -889,7 +889,7 @@ static void pscsi_free_task(struct se_task *task)
889 * Release the extended CDB allocation from pscsi_alloc_task() 889 * Release the extended CDB allocation from pscsi_alloc_task()
890 * if one exists. 890 * if one exists.
891 */ 891 */
892 if (cmd->t_task->t_task_cdb != cmd->t_task->__t_task_cdb) 892 if (cmd->t_task.t_task_cdb != cmd->t_task.__t_task_cdb)
893 kfree(pt->pscsi_cdb); 893 kfree(pt->pscsi_cdb);
894 /* 894 /*
895 * We do not release the bio(s) here associated with this task, as 895 * We do not release the bio(s) here associated with this task, as
@@ -1266,7 +1266,7 @@ static int pscsi_map_task_non_SG(struct se_task *task)
1266 return 0; 1266 return 0;
1267 1267
1268 ret = blk_rq_map_kern(pdv->pdv_sd->request_queue, 1268 ret = blk_rq_map_kern(pdv->pdv_sd->request_queue,
1269 pt->pscsi_req, cmd->t_task->t_task_buf, 1269 pt->pscsi_req, cmd->t_task.t_task_buf,
1270 task->task_size, GFP_KERNEL); 1270 task->task_size, GFP_KERNEL);
1271 if (ret < 0) { 1271 if (ret < 0) {
1272 printk(KERN_ERR "PSCSI: blk_rq_map_kern() failed: %d\n", ret); 1272 printk(KERN_ERR "PSCSI: blk_rq_map_kern() failed: %d\n", ret);
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index fbf06c3994fd..384a8e2083e3 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -66,7 +66,7 @@ static int rd_attach_hba(struct se_hba *hba, u32 host_id)
66 66
67 rd_host->rd_host_id = host_id; 67 rd_host->rd_host_id = host_id;
68 68
69 hba->hba_ptr = (void *) rd_host; 69 hba->hba_ptr = rd_host;
70 70
71 printk(KERN_INFO "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on" 71 printk(KERN_INFO "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
72 " Generic Target Core Stack %s\n", hba->hba_id, 72 " Generic Target Core Stack %s\n", hba->hba_id,
@@ -271,7 +271,7 @@ static struct se_device *rd_create_virtdevice(
271 271
272 dev = transport_add_device_to_core_hba(hba, 272 dev = transport_add_device_to_core_hba(hba,
273 (rd_dev->rd_direct) ? &rd_dr_template : 273 (rd_dev->rd_direct) ? &rd_dr_template :
274 &rd_mcp_template, se_dev, dev_flags, (void *)rd_dev, 274 &rd_mcp_template, se_dev, dev_flags, rd_dev,
275 &dev_limits, prod, rev); 275 &dev_limits, prod, rev);
276 if (!(dev)) 276 if (!(dev))
277 goto fail; 277 goto fail;
@@ -336,7 +336,7 @@ rd_alloc_task(struct se_cmd *cmd)
336 printk(KERN_ERR "Unable to allocate struct rd_request\n"); 336 printk(KERN_ERR "Unable to allocate struct rd_request\n");
337 return NULL; 337 return NULL;
338 } 338 }
339 rd_req->rd_dev = cmd->se_lun->lun_se_dev->dev_ptr; 339 rd_req->rd_dev = cmd->se_dev->dev_ptr;
340 340
341 return &rd_req->rd_task; 341 return &rd_req->rd_task;
342} 342}
@@ -737,7 +737,7 @@ check_eot:
737 } 737 }
738 738
739out: 739out:
740 task->task_se_cmd->t_task->t_tasks_se_num += *se_mem_cnt; 740 task->task_se_cmd->t_task.t_tasks_se_num += *se_mem_cnt;
741#ifdef DEBUG_RAMDISK_DR 741#ifdef DEBUG_RAMDISK_DR
742 printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n", 742 printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
743 *se_mem_cnt); 743 *se_mem_cnt);
@@ -819,7 +819,7 @@ static int rd_DIRECT_without_offset(
819 } 819 }
820 820
821out: 821out:
822 task->task_se_cmd->t_task->t_tasks_se_num += *se_mem_cnt; 822 task->task_se_cmd->t_task.t_tasks_se_num += *se_mem_cnt;
823#ifdef DEBUG_RAMDISK_DR 823#ifdef DEBUG_RAMDISK_DR
824 printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n", 824 printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
825 *se_mem_cnt); 825 *se_mem_cnt);
@@ -880,14 +880,14 @@ static int rd_DIRECT_do_se_mem_map(
880 * across multiple struct se_task->task_sg[]. 880 * across multiple struct se_task->task_sg[].
881 */ 881 */
882 ret = transport_init_task_sg(task, 882 ret = transport_init_task_sg(task,
883 list_entry(cmd->t_task->t_mem_list->next, 883 list_first_entry(&cmd->t_task.t_mem_list,
884 struct se_mem, se_list), 884 struct se_mem, se_list),
885 task_offset); 885 task_offset);
886 if (ret <= 0) 886 if (ret <= 0)
887 return ret; 887 return ret;
888 888
889 return transport_map_mem_to_sg(task, se_mem_list, task->task_sg, 889 return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
890 list_entry(cmd->t_task->t_mem_list->next, 890 list_first_entry(&cmd->t_task.t_mem_list,
891 struct se_mem, se_list), 891 struct se_mem, se_list),
892 out_se_mem, se_mem_cnt, task_offset_in); 892 out_se_mem, se_mem_cnt, task_offset_in);
893} 893}
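
Editor's note: the rd_DIRECT_do_se_mem_map() hunk above swaps list_entry(head->next, ...) for list_first_entry(&head, ...). Both expand to the same pointer arithmetic; the newer macro simply states the intent (first element of a non-empty list) directly. A stripped-down userspace rendering of the kernel macros, for illustration only:

#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

/* container_of: recover the enclosing struct from a member pointer */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_entry(ptr, type, member) container_of(ptr, type, member)

/* first entry == entry wrapping head->next; the list must be non-empty */
#define list_first_entry(head, type, member) \
	list_entry((head)->next, type, member)

struct mem_seg {                   /* stand-in for struct se_mem */
	int page;
	struct list_head se_list;
};

static struct mem_seg *first_seg(struct list_head *head)
{
	return list_first_entry(head, struct mem_seg, se_list);
}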
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 2f73749b8151..e1f99f75ac35 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -113,15 +113,14 @@ int core_tmr_lun_reset(
113 struct list_head *preempt_and_abort_list, 113 struct list_head *preempt_and_abort_list,
114 struct se_cmd *prout_cmd) 114 struct se_cmd *prout_cmd)
115{ 115{
116 struct se_cmd *cmd; 116 struct se_cmd *cmd, *tcmd;
117 struct se_queue_req *qr, *qr_tmp;
118 struct se_node_acl *tmr_nacl = NULL; 117 struct se_node_acl *tmr_nacl = NULL;
119 struct se_portal_group *tmr_tpg = NULL; 118 struct se_portal_group *tmr_tpg = NULL;
120 struct se_queue_obj *qobj = &dev->dev_queue_obj; 119 struct se_queue_obj *qobj = &dev->dev_queue_obj;
121 struct se_tmr_req *tmr_p, *tmr_pp; 120 struct se_tmr_req *tmr_p, *tmr_pp;
122 struct se_task *task, *task_tmp; 121 struct se_task *task, *task_tmp;
123 unsigned long flags; 122 unsigned long flags;
124 int fe_count, state, tas; 123 int fe_count, tas;
125 /* 124 /*
126 * TASK_ABORTED status bit, this is configurable via ConfigFS 125 * TASK_ABORTED status bit, this is configurable via ConfigFS
127 * struct se_device attributes. spc4r17 section 7.4.6 Control mode page 126 * struct se_device attributes. spc4r17 section 7.4.6 Control mode page
@@ -179,14 +178,14 @@ int core_tmr_lun_reset(
179 continue; 178 continue;
180 spin_unlock(&dev->se_tmr_lock); 179 spin_unlock(&dev->se_tmr_lock);
181 180
182 spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); 181 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
183 if (!(atomic_read(&cmd->t_task->t_transport_active))) { 182 if (!(atomic_read(&cmd->t_task.t_transport_active))) {
184 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 183 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
185 spin_lock(&dev->se_tmr_lock); 184 spin_lock(&dev->se_tmr_lock);
186 continue; 185 continue;
187 } 186 }
188 if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) { 187 if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
189 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 188 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
190 spin_lock(&dev->se_tmr_lock); 189 spin_lock(&dev->se_tmr_lock);
191 continue; 190 continue;
192 } 191 }
@@ -194,7 +193,7 @@ int core_tmr_lun_reset(
194 " Response: 0x%02x, t_state: %d\n", 193 " Response: 0x%02x, t_state: %d\n",
195 (preempt_and_abort_list) ? "Preempt" : "", tmr_p, 194 (preempt_and_abort_list) ? "Preempt" : "", tmr_p,
196 tmr_p->function, tmr_p->response, cmd->t_state); 195 tmr_p->function, tmr_p->response, cmd->t_state);
197 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 196 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
198 197
199 transport_cmd_finish_abort_tmr(cmd); 198 transport_cmd_finish_abort_tmr(cmd);
200 spin_lock(&dev->se_tmr_lock); 199 spin_lock(&dev->se_tmr_lock);
@@ -230,12 +229,6 @@ int core_tmr_lun_reset(
230 } 229 }
231 cmd = task->task_se_cmd; 230 cmd = task->task_se_cmd;
232 231
233 if (!cmd->t_task) {
234 printk(KERN_ERR "cmd->t_task is NULL for task: %p cmd:"
235 " %p ITT: 0x%08x\n", task, cmd,
236 cmd->se_tfo->get_task_tag(cmd));
237 continue;
238 }
239 /* 232 /*
240 * For PREEMPT_AND_ABORT usage, only process commands 233 * For PREEMPT_AND_ABORT usage, only process commands
241 * with a matching reservation key. 234 * with a matching reservation key.
@@ -254,38 +247,38 @@ int core_tmr_lun_reset(
254 atomic_set(&task->task_state_active, 0); 247 atomic_set(&task->task_state_active, 0);
255 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 248 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
256 249
257 spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); 250 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
258 DEBUG_LR("LUN_RESET: %s cmd: %p task: %p" 251 DEBUG_LR("LUN_RESET: %s cmd: %p task: %p"
259 " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/" 252 " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/"
260 "def_t_state: %d/%d cdb: 0x%02x\n", 253 "def_t_state: %d/%d cdb: 0x%02x\n",
261 (preempt_and_abort_list) ? "Preempt" : "", cmd, task, 254 (preempt_and_abort_list) ? "Preempt" : "", cmd, task,
262 cmd->se_tfo->get_task_tag(cmd), 0, 255 cmd->se_tfo->get_task_tag(cmd), 0,
263 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, 256 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
264 cmd->deferred_t_state, cmd->t_task->t_task_cdb[0]); 257 cmd->deferred_t_state, cmd->t_task.t_task_cdb[0]);
265 DEBUG_LR("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx" 258 DEBUG_LR("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
266 " t_task_cdbs: %d t_task_cdbs_left: %d" 259 " t_task_cdbs: %d t_task_cdbs_left: %d"
267 " t_task_cdbs_sent: %d -- t_transport_active: %d" 260 " t_task_cdbs_sent: %d -- t_transport_active: %d"
268 " t_transport_stop: %d t_transport_sent: %d\n", 261 " t_transport_stop: %d t_transport_sent: %d\n",
269 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key, 262 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
270 cmd->t_task->t_task_cdbs, 263 cmd->t_task.t_task_cdbs,
271 atomic_read(&cmd->t_task->t_task_cdbs_left), 264 atomic_read(&cmd->t_task.t_task_cdbs_left),
272 atomic_read(&cmd->t_task->t_task_cdbs_sent), 265 atomic_read(&cmd->t_task.t_task_cdbs_sent),
273 atomic_read(&cmd->t_task->t_transport_active), 266 atomic_read(&cmd->t_task.t_transport_active),
274 atomic_read(&cmd->t_task->t_transport_stop), 267 atomic_read(&cmd->t_task.t_transport_stop),
275 atomic_read(&cmd->t_task->t_transport_sent)); 268 atomic_read(&cmd->t_task.t_transport_sent));
276 269
277 if (atomic_read(&task->task_active)) { 270 if (atomic_read(&task->task_active)) {
278 atomic_set(&task->task_stop, 1); 271 atomic_set(&task->task_stop, 1);
279 spin_unlock_irqrestore( 272 spin_unlock_irqrestore(
280 &cmd->t_task->t_state_lock, flags); 273 &cmd->t_task.t_state_lock, flags);
281 274
282 DEBUG_LR("LUN_RESET: Waiting for task: %p to shutdown" 275 DEBUG_LR("LUN_RESET: Waiting for task: %p to shutdown"
283 " for dev: %p\n", task, dev); 276 " for dev: %p\n", task, dev);
284 wait_for_completion(&task->task_stop_comp); 277 wait_for_completion(&task->task_stop_comp);
285 DEBUG_LR("LUN_RESET Completed task: %p shutdown for" 278 DEBUG_LR("LUN_RESET Completed task: %p shutdown for"
286 " dev: %p\n", task, dev); 279 " dev: %p\n", task, dev);
287 spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); 280 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
288 atomic_dec(&cmd->t_task->t_task_cdbs_left); 281 atomic_dec(&cmd->t_task.t_task_cdbs_left);
289 282
290 atomic_set(&task->task_active, 0); 283 atomic_set(&task->task_active, 0);
291 atomic_set(&task->task_stop, 0); 284 atomic_set(&task->task_stop, 0);
@@ -295,24 +288,24 @@ int core_tmr_lun_reset(
295 } 288 }
296 __transport_stop_task_timer(task, &flags); 289 __transport_stop_task_timer(task, &flags);
297 290
298 if (!(atomic_dec_and_test(&cmd->t_task->t_task_cdbs_ex_left))) { 291 if (!(atomic_dec_and_test(&cmd->t_task.t_task_cdbs_ex_left))) {
299 spin_unlock_irqrestore( 292 spin_unlock_irqrestore(
300 &cmd->t_task->t_state_lock, flags); 293 &cmd->t_task.t_state_lock, flags);
301 DEBUG_LR("LUN_RESET: Skipping task: %p, dev: %p for" 294 DEBUG_LR("LUN_RESET: Skipping task: %p, dev: %p for"
302 " t_task_cdbs_ex_left: %d\n", task, dev, 295 " t_task_cdbs_ex_left: %d\n", task, dev,
303 atomic_read(&cmd->t_task->t_task_cdbs_ex_left)); 296 atomic_read(&cmd->t_task.t_task_cdbs_ex_left));
304 297
305 spin_lock_irqsave(&dev->execute_task_lock, flags); 298 spin_lock_irqsave(&dev->execute_task_lock, flags);
306 continue; 299 continue;
307 } 300 }
308 fe_count = atomic_read(&cmd->t_task->t_fe_count); 301 fe_count = atomic_read(&cmd->t_task.t_fe_count);
309 302
310 if (atomic_read(&cmd->t_task->t_transport_active)) { 303 if (atomic_read(&cmd->t_task.t_transport_active)) {
311 DEBUG_LR("LUN_RESET: got t_transport_active = 1 for" 304 DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
312 " task: %p, t_fe_count: %d dev: %p\n", task, 305 " task: %p, t_fe_count: %d dev: %p\n", task,
313 fe_count, dev); 306 fe_count, dev);
314 atomic_set(&cmd->t_task->t_transport_aborted, 1); 307 atomic_set(&cmd->t_task.t_transport_aborted, 1);
315 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, 308 spin_unlock_irqrestore(&cmd->t_task.t_state_lock,
316 flags); 309 flags);
317 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); 310 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
318 311
@@ -321,8 +314,8 @@ int core_tmr_lun_reset(
321 } 314 }
322 DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p," 315 DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
323 " t_fe_count: %d dev: %p\n", task, fe_count, dev); 316 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
324 atomic_set(&cmd->t_task->t_transport_aborted, 1); 317 atomic_set(&cmd->t_task.t_transport_aborted, 1);
325 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 318 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
326 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); 319 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
327 320
328 spin_lock_irqsave(&dev->execute_task_lock, flags); 321 spin_lock_irqsave(&dev->execute_task_lock, flags);
@@ -337,20 +330,7 @@ int core_tmr_lun_reset(
337 * reference, otherwise the struct se_cmd is released. 330 * reference, otherwise the struct se_cmd is released.
338 */ 331 */
339 spin_lock_irqsave(&qobj->cmd_queue_lock, flags); 332 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
340 list_for_each_entry_safe(qr, qr_tmp, &qobj->qobj_list, qr_list) { 333 list_for_each_entry_safe(cmd, tcmd, &qobj->qobj_list, se_queue_node) {
341 cmd = (struct se_cmd *)qr->cmd;
342 if (!(cmd)) {
343 /*
344 * Skip these for non PREEMPT_AND_ABORT usage..
345 */
346 if (preempt_and_abort_list != NULL)
347 continue;
348
349 atomic_dec(&qobj->queue_cnt);
350 list_del(&qr->qr_list);
351 kfree(qr);
352 continue;
353 }
354 /* 334 /*
355 * For PREEMPT_AND_ABORT usage, only process commands 335 * For PREEMPT_AND_ABORT usage, only process commands
356 * with a matching reservation key. 336 * with a matching reservation key.
@@ -365,18 +345,15 @@ int core_tmr_lun_reset(
365 if (prout_cmd == cmd) 345 if (prout_cmd == cmd)
366 continue; 346 continue;
367 347
368 atomic_dec(&cmd->t_task->t_transport_queue_active); 348 atomic_dec(&cmd->t_task.t_transport_queue_active);
369 atomic_dec(&qobj->queue_cnt); 349 atomic_dec(&qobj->queue_cnt);
370 list_del(&qr->qr_list); 350 list_del(&cmd->se_queue_node);
371 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); 351 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
372 352
373 state = qr->state;
374 kfree(qr);
375
376 DEBUG_LR("LUN_RESET: %s from Device Queue: cmd: %p t_state:" 353 DEBUG_LR("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
377 " %d t_fe_count: %d\n", (preempt_and_abort_list) ? 354 " %d t_fe_count: %d\n", (preempt_and_abort_list) ?
378 "Preempt" : "", cmd, state, 355 "Preempt" : "", cmd, cmd->t_state,
379 atomic_read(&cmd->t_task->t_fe_count)); 356 atomic_read(&cmd->t_task.t_fe_count));
380 /* 357 /*
381 * Signal that the command has failed via cmd->se_cmd_flags, 358 * Signal that the command has failed via cmd->se_cmd_flags,
382 * and call TFO->new_cmd_failure() to wakeup any fabric 359 * and call TFO->new_cmd_failure() to wakeup any fabric
@@ -388,7 +365,7 @@ int core_tmr_lun_reset(
388 transport_new_cmd_failure(cmd); 365 transport_new_cmd_failure(cmd);
389 366
390 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, 367 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
391 atomic_read(&cmd->t_task->t_fe_count)); 368 atomic_read(&cmd->t_task.t_fe_count));
392 spin_lock_irqsave(&qobj->cmd_queue_lock, flags); 369 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
393 } 370 }
394 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); 371 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
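
Editor's note: the core of the tmr.c change above (and of the transport.c diff that follows) is removing the heap-allocated struct se_queue_req wrapper. With a se_queue_node list_head embedded in struct se_cmd, enqueueing needs no GFP_ATOMIC allocation and so cannot fail, the dequeue path loses a kfree(), and the NULL qr->cmd special case vanishes. A before/after sketch with hypothetical names, reusing list_head and list_first_entry from the sketch after the ramdisk diff:

/* Minimal doubly-linked list helpers, same behavior as the kernel's. */
static void list_add_tail(struct list_head *n, struct list_head *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

struct cmd {                        /* stand-in for struct se_cmd */
	int t_state;
	struct list_head queue_node;    /* embedded: replaces the kzalloc'd wrapper */
};

/* Enqueue can no longer fail -- hence the new void return type above. */
static void enqueue(struct list_head *q, struct cmd *c)
{
	list_add_tail(&c->queue_node, q);
}

static struct cmd *dequeue(struct list_head *q)
{
	struct cmd *c;

	if (list_empty(q))
		return NULL;
	c = list_first_entry(q, struct cmd, queue_node);
	list_del(&c->queue_node);       /* old code also had to kfree() a wrapper */
	return c;
}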
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 6f5d4dfa14e8..d0cd6016d3b5 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -201,7 +201,7 @@ struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
201typedef int (*map_func_t)(struct se_task *, u32); 201typedef int (*map_func_t)(struct se_task *, u32);
202 202
203static int transport_generic_write_pending(struct se_cmd *); 203static int transport_generic_write_pending(struct se_cmd *);
204static int transport_processing_thread(void *); 204static int transport_processing_thread(void *param);
205static int __transport_execute_tasks(struct se_device *dev); 205static int __transport_execute_tasks(struct se_device *dev);
206static void transport_complete_task_attr(struct se_cmd *cmd); 206static void transport_complete_task_attr(struct se_cmd *cmd);
207static void transport_direct_request_timeout(struct se_cmd *cmd); 207static void transport_direct_request_timeout(struct se_cmd *cmd);
@@ -215,9 +215,8 @@ static int transport_generic_get_mem(struct se_cmd *cmd, u32 length,
215static int transport_generic_remove(struct se_cmd *cmd, 215static int transport_generic_remove(struct se_cmd *cmd,
216 int release_to_pool, int session_reinstatement); 216 int release_to_pool, int session_reinstatement);
217static int transport_get_sectors(struct se_cmd *cmd); 217static int transport_get_sectors(struct se_cmd *cmd);
218static struct list_head *transport_init_se_mem_list(void);
219static int transport_map_sg_to_mem(struct se_cmd *cmd, 218static int transport_map_sg_to_mem(struct se_cmd *cmd,
220 struct list_head *se_mem_list, void *in_mem, 219 struct list_head *se_mem_list, struct scatterlist *sgl,
221 u32 *se_mem_cnt); 220 u32 *se_mem_cnt);
222static void transport_memcpy_se_mem_read_contig(struct se_cmd *cmd, 221static void transport_memcpy_se_mem_read_contig(struct se_cmd *cmd,
223 unsigned char *dst, struct list_head *se_mem_list); 222 unsigned char *dst, struct list_head *se_mem_list);
@@ -574,7 +573,7 @@ void transport_deregister_session(struct se_session *se_sess)
574EXPORT_SYMBOL(transport_deregister_session); 573EXPORT_SYMBOL(transport_deregister_session);
575 574
576/* 575/*
577 * Called with cmd->t_task->t_state_lock held. 576 * Called with cmd->t_task.t_state_lock held.
578 */ 577 */
579static void transport_all_task_dev_remove_state(struct se_cmd *cmd) 578static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
580{ 579{
@@ -582,10 +581,7 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
582 struct se_task *task; 581 struct se_task *task;
583 unsigned long flags; 582 unsigned long flags;
584 583
585 if (!cmd->t_task) 584 list_for_each_entry(task, &cmd->t_task.t_task_list, t_list) {
586 return;
587
588 list_for_each_entry(task, &cmd->t_task->t_task_list, t_list) {
589 dev = task->se_dev; 585 dev = task->se_dev;
590 if (!(dev)) 586 if (!(dev))
591 continue; 587 continue;
@@ -603,7 +599,7 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
603 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 599 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
604 600
605 atomic_set(&task->task_state_active, 0); 601 atomic_set(&task->task_state_active, 0);
606 atomic_dec(&cmd->t_task->t_task_cdbs_ex_left); 602 atomic_dec(&cmd->t_task.t_task_cdbs_ex_left);
607 } 603 }
608} 604}
609 605
@@ -622,32 +618,32 @@ static int transport_cmd_check_stop(
622{ 618{
623 unsigned long flags; 619 unsigned long flags;
624 620
625 spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); 621 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
626 /* 622 /*
627 * Determine if IOCTL context caller is requesting the stopping of this 623 * Determine if IOCTL context caller is requesting the stopping of this
628 * command for LUN shutdown purposes. 624 * command for LUN shutdown purposes.
629 */ 625 */
630 if (atomic_read(&cmd->t_task->transport_lun_stop)) { 626 if (atomic_read(&cmd->t_task.transport_lun_stop)) {
631 DEBUG_CS("%s:%d atomic_read(&cmd->t_task->transport_lun_stop)" 627 DEBUG_CS("%s:%d atomic_read(&cmd->t_task.transport_lun_stop)"
632 " == TRUE for ITT: 0x%08x\n", __func__, __LINE__, 628 " == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
633 cmd->se_tfo->get_task_tag(cmd)); 629 cmd->se_tfo->get_task_tag(cmd));
634 630
635 cmd->deferred_t_state = cmd->t_state; 631 cmd->deferred_t_state = cmd->t_state;
636 cmd->t_state = TRANSPORT_DEFERRED_CMD; 632 cmd->t_state = TRANSPORT_DEFERRED_CMD;
637 atomic_set(&cmd->t_task->t_transport_active, 0); 633 atomic_set(&cmd->t_task.t_transport_active, 0);
638 if (transport_off == 2) 634 if (transport_off == 2)
639 transport_all_task_dev_remove_state(cmd); 635 transport_all_task_dev_remove_state(cmd);
640 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 636 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
641 637
642 complete(&cmd->t_task->transport_lun_stop_comp); 638 complete(&cmd->t_task.transport_lun_stop_comp);
643 return 1; 639 return 1;
644 } 640 }
645 /* 641 /*
646 * Determine if frontend context caller is requesting the stopping of 642 * Determine if frontend context caller is requesting the stopping of
647 * this command for frontend exceptions. 643 * this command for frontend exceptions.
648 */ 644 */
649 if (atomic_read(&cmd->t_task->t_transport_stop)) { 645 if (atomic_read(&cmd->t_task.t_transport_stop)) {
650 DEBUG_CS("%s:%d atomic_read(&cmd->t_task->t_transport_stop) ==" 646 DEBUG_CS("%s:%d atomic_read(&cmd->t_task.t_transport_stop) =="
651 " TRUE for ITT: 0x%08x\n", __func__, __LINE__, 647 " TRUE for ITT: 0x%08x\n", __func__, __LINE__,
652 cmd->se_tfo->get_task_tag(cmd)); 648 cmd->se_tfo->get_task_tag(cmd));
653 649
@@ -662,13 +658,13 @@ static int transport_cmd_check_stop(
662 */ 658 */
663 if (transport_off == 2) 659 if (transport_off == 2)
664 cmd->se_lun = NULL; 660 cmd->se_lun = NULL;
665 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 661 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
666 662
667 complete(&cmd->t_task->t_transport_stop_comp); 663 complete(&cmd->t_task.t_transport_stop_comp);
668 return 1; 664 return 1;
669 } 665 }
670 if (transport_off) { 666 if (transport_off) {
671 atomic_set(&cmd->t_task->t_transport_active, 0); 667 atomic_set(&cmd->t_task.t_transport_active, 0);
672 if (transport_off == 2) { 668 if (transport_off == 2) {
673 transport_all_task_dev_remove_state(cmd); 669 transport_all_task_dev_remove_state(cmd);
674 /* 670 /*
@@ -683,18 +679,18 @@ static int transport_cmd_check_stop(
683 */ 679 */
684 if (cmd->se_tfo->check_stop_free != NULL) { 680 if (cmd->se_tfo->check_stop_free != NULL) {
685 spin_unlock_irqrestore( 681 spin_unlock_irqrestore(
686 &cmd->t_task->t_state_lock, flags); 682 &cmd->t_task.t_state_lock, flags);
687 683
688 cmd->se_tfo->check_stop_free(cmd); 684 cmd->se_tfo->check_stop_free(cmd);
689 return 1; 685 return 1;
690 } 686 }
691 } 687 }
692 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 688 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
693 689
694 return 0; 690 return 0;
695 } else if (t_state) 691 } else if (t_state)
696 cmd->t_state = t_state; 692 cmd->t_state = t_state;
697 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 693 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
698 694
699 return 0; 695 return 0;
700} 696}
@@ -712,21 +708,21 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
712 if (!lun) 708 if (!lun)
713 return; 709 return;
714 710
715 spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); 711 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
716 if (!(atomic_read(&cmd->t_task->transport_dev_active))) { 712 if (!(atomic_read(&cmd->t_task.transport_dev_active))) {
717 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 713 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
718 goto check_lun; 714 goto check_lun;
719 } 715 }
720 atomic_set(&cmd->t_task->transport_dev_active, 0); 716 atomic_set(&cmd->t_task.transport_dev_active, 0);
721 transport_all_task_dev_remove_state(cmd); 717 transport_all_task_dev_remove_state(cmd);
722 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 718 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
723 719
724 720
725check_lun: 721check_lun:
726 spin_lock_irqsave(&lun->lun_cmd_lock, flags); 722 spin_lock_irqsave(&lun->lun_cmd_lock, flags);
727 if (atomic_read(&cmd->t_task->transport_lun_active)) { 723 if (atomic_read(&cmd->t_task.transport_lun_active)) {
728 list_del(&cmd->se_lun_list); 724 list_del(&cmd->se_lun_node);
729 atomic_set(&cmd->t_task->transport_lun_active, 0); 725 atomic_set(&cmd->t_task.transport_lun_active, 0);
730#if 0 726#if 0
731 printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n" 727 printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n"
732 cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun); 728 cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun);
@@ -737,7 +733,7 @@ check_lun:
737 733
738void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) 734void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
739{ 735{
740 transport_remove_cmd_from_queue(cmd, &cmd->se_lun->lun_se_dev->dev_queue_obj); 736 transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
741 transport_lun_remove_cmd(cmd); 737 transport_lun_remove_cmd(cmd);
742 738
743 if (transport_cmd_check_stop_to_fabric(cmd)) 739 if (transport_cmd_check_stop_to_fabric(cmd))
@@ -748,7 +744,7 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
748 744
749void transport_cmd_finish_abort_tmr(struct se_cmd *cmd) 745void transport_cmd_finish_abort_tmr(struct se_cmd *cmd)
750{ 746{
751 transport_remove_cmd_from_queue(cmd, &cmd->se_lun->lun_se_dev->dev_queue_obj); 747 transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
752 748
753 if (transport_cmd_check_stop_to_fabric(cmd)) 749 if (transport_cmd_check_stop_to_fabric(cmd))
754 return; 750 return;
@@ -756,50 +752,36 @@ void transport_cmd_finish_abort_tmr(struct se_cmd *cmd)
756 transport_generic_remove(cmd, 0, 0); 752 transport_generic_remove(cmd, 0, 0);
757} 753}
758 754
759static int transport_add_cmd_to_queue( 755static void transport_add_cmd_to_queue(
760 struct se_cmd *cmd, 756 struct se_cmd *cmd,
761 int t_state) 757 int t_state)
762{ 758{
763 struct se_device *dev = cmd->se_dev; 759 struct se_device *dev = cmd->se_dev;
764 struct se_queue_obj *qobj = &dev->dev_queue_obj; 760 struct se_queue_obj *qobj = &dev->dev_queue_obj;
765 struct se_queue_req *qr;
766 unsigned long flags; 761 unsigned long flags;
767 762
768 qr = kzalloc(sizeof(struct se_queue_req), GFP_ATOMIC); 763 INIT_LIST_HEAD(&cmd->se_queue_node);
769 if (!(qr)) {
770 printk(KERN_ERR "Unable to allocate memory for"
771 " struct se_queue_req\n");
772 return -ENOMEM;
773 }
774 INIT_LIST_HEAD(&qr->qr_list);
775
776 qr->cmd = cmd;
777 qr->state = t_state;
778 764
779 if (t_state) { 765 if (t_state) {
780 spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); 766 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
781 cmd->t_state = t_state; 767 cmd->t_state = t_state;
782 atomic_set(&cmd->t_task->t_transport_active, 1); 768 atomic_set(&cmd->t_task.t_transport_active, 1);
783 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 769 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
784 } 770 }
785 771
786 spin_lock_irqsave(&qobj->cmd_queue_lock, flags); 772 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
787 list_add_tail(&qr->qr_list, &qobj->qobj_list); 773 list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
788 atomic_inc(&cmd->t_task->t_transport_queue_active); 774 atomic_inc(&cmd->t_task.t_transport_queue_active);
789 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); 775 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
790 776
791 atomic_inc(&qobj->queue_cnt); 777 atomic_inc(&qobj->queue_cnt);
792 wake_up_interruptible(&qobj->thread_wq); 778 wake_up_interruptible(&qobj->thread_wq);
793 return 0;
794} 779}
795 780
796/* 781static struct se_cmd *
797 * Called with struct se_queue_obj->cmd_queue_lock held. 782transport_get_cmd_from_queue(struct se_queue_obj *qobj)
798 */
799static struct se_queue_req *
800transport_get_qr_from_queue(struct se_queue_obj *qobj)
801{ 783{
802 struct se_queue_req *qr; 784 struct se_cmd *cmd;
803 unsigned long flags; 785 unsigned long flags;
804 786
805 spin_lock_irqsave(&qobj->cmd_queue_lock, flags); 787 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
@@ -807,47 +789,42 @@ transport_get_qr_from_queue(struct se_queue_obj *qobj)
807 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); 789 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
808 return NULL; 790 return NULL;
809 } 791 }
792 cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);
810 793
811 list_for_each_entry(qr, &qobj->qobj_list, qr_list) 794 atomic_dec(&cmd->t_task.t_transport_queue_active);
812 break;
813 795
814 if (qr->cmd) 796 list_del(&cmd->se_queue_node);
815 atomic_dec(&qr->cmd->t_task->t_transport_queue_active);
816
817 list_del(&qr->qr_list);
818 atomic_dec(&qobj->queue_cnt); 797 atomic_dec(&qobj->queue_cnt);
819 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); 798 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
820 799
821 return qr; 800 return cmd;
822} 801}
823 802
824static void transport_remove_cmd_from_queue(struct se_cmd *cmd, 803static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
825 struct se_queue_obj *qobj) 804 struct se_queue_obj *qobj)
826{ 805{
827 struct se_queue_req *qr = NULL, *qr_p = NULL; 806 struct se_cmd *t;
828 unsigned long flags; 807 unsigned long flags;
829 808
830 spin_lock_irqsave(&qobj->cmd_queue_lock, flags); 809 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
831 if (!(atomic_read(&cmd->t_task->t_transport_queue_active))) { 810 if (!(atomic_read(&cmd->t_task.t_transport_queue_active))) {
832 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); 811 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
833 return; 812 return;
834 } 813 }
835 814
836 list_for_each_entry_safe(qr, qr_p, &qobj->qobj_list, qr_list) { 815 list_for_each_entry(t, &qobj->qobj_list, se_queue_node)
837 if (qr->cmd != cmd) 816 if (t == cmd) {
838 continue; 817 atomic_dec(&cmd->t_task.t_transport_queue_active);
839 818 atomic_dec(&qobj->queue_cnt);
840 atomic_dec(&qr->cmd->t_task->t_transport_queue_active); 819 list_del(&cmd->se_queue_node);
841 atomic_dec(&qobj->queue_cnt); 820 break;
842 list_del(&qr->qr_list); 821 }
843 kfree(qr);
844 }
845 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); 822 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
846 823
847 if (atomic_read(&cmd->t_task->t_transport_queue_active)) { 824 if (atomic_read(&cmd->t_task.t_transport_queue_active)) {
848 printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n", 825 printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n",
849 cmd->se_tfo->get_task_tag(cmd), 826 cmd->se_tfo->get_task_tag(cmd),
850 atomic_read(&cmd->t_task->t_transport_queue_active)); 827 atomic_read(&cmd->t_task.t_transport_queue_active));
851 } 828 }
852} 829}
853 830
@@ -857,7 +834,7 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
857 */ 834 */
858void transport_complete_sync_cache(struct se_cmd *cmd, int good) 835void transport_complete_sync_cache(struct se_cmd *cmd, int good)
859{ 836{
860 struct se_task *task = list_entry(cmd->t_task->t_task_list.next, 837 struct se_task *task = list_entry(cmd->t_task.t_task_list.next,
861 struct se_task, t_list); 838 struct se_task, t_list);
862 839
863 if (good) { 840 if (good) {
@@ -887,12 +864,12 @@ void transport_complete_task(struct se_task *task, int success)
887 unsigned long flags; 864 unsigned long flags;
888#if 0 865#if 0
889 printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task, 866 printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task,
890 cmd->t_task->t_task_cdb[0], dev); 867 cmd->t_task.t_task_cdb[0], dev);
891#endif 868#endif
892 if (dev) 869 if (dev)
893 atomic_inc(&dev->depth_left); 870 atomic_inc(&dev->depth_left);
894 871
895 spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); 872 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
896 atomic_set(&task->task_active, 0); 873 atomic_set(&task->task_active, 0);
897 874
898 /* 875 /*
@@ -914,14 +891,14 @@ void transport_complete_task(struct se_task *task, int success)
914 */ 891 */
915 if (atomic_read(&task->task_stop)) { 892 if (atomic_read(&task->task_stop)) {
916 /* 893 /*
917 * Decrement cmd->t_task->t_se_count if this task had 894 * Decrement cmd->t_task.t_se_count if this task had
918 * previously thrown its timeout exception handler. 895 * previously thrown its timeout exception handler.
919 */ 896 */
920 if (atomic_read(&task->task_timeout)) { 897 if (atomic_read(&task->task_timeout)) {
921 atomic_dec(&cmd->t_task->t_se_count); 898 atomic_dec(&cmd->t_task.t_se_count);
922 atomic_set(&task->task_timeout, 0); 899 atomic_set(&task->task_timeout, 0);
923 } 900 }
924 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 901 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
925 902
926 complete(&task->task_stop_comp); 903 complete(&task->task_stop_comp);
927 return; 904 return;
@@ -933,33 +910,33 @@ void transport_complete_task(struct se_task *task, int success)
933 */ 910 */
934 if (atomic_read(&task->task_timeout)) { 911 if (atomic_read(&task->task_timeout)) {
935 if (!(atomic_dec_and_test( 912 if (!(atomic_dec_and_test(
936 &cmd->t_task->t_task_cdbs_timeout_left))) { 913 &cmd->t_task.t_task_cdbs_timeout_left))) {
937 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, 914 spin_unlock_irqrestore(&cmd->t_task.t_state_lock,
938 flags); 915 flags);
939 return; 916 return;
940 } 917 }
941 t_state = TRANSPORT_COMPLETE_TIMEOUT; 918 t_state = TRANSPORT_COMPLETE_TIMEOUT;
942 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 919 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
943 920
944 transport_add_cmd_to_queue(cmd, t_state); 921 transport_add_cmd_to_queue(cmd, t_state);
945 return; 922 return;
946 } 923 }
947 atomic_dec(&cmd->t_task->t_task_cdbs_timeout_left); 924 atomic_dec(&cmd->t_task.t_task_cdbs_timeout_left);
948 925
949 /* 926 /*
950 * Decrement the outstanding t_task_cdbs_left count. The last 927 * Decrement the outstanding t_task_cdbs_left count. The last
951 * struct se_task from struct se_cmd will complete itself into the 928 * struct se_task from struct se_cmd will complete itself into the
952 * device queue depending upon int success. 929 * device queue depending upon int success.
953 */ 930 */
954 if (!(atomic_dec_and_test(&cmd->t_task->t_task_cdbs_left))) { 931 if (!(atomic_dec_and_test(&cmd->t_task.t_task_cdbs_left))) {
955 if (!success) 932 if (!success)
956 cmd->t_task->t_tasks_failed = 1; 933 cmd->t_task.t_tasks_failed = 1;
957 934
958 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 935 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
959 return; 936 return;
960 } 937 }
961 938
962 if (!success || cmd->t_task->t_tasks_failed) { 939 if (!success || cmd->t_task.t_tasks_failed) {
963 t_state = TRANSPORT_COMPLETE_FAILURE; 940 t_state = TRANSPORT_COMPLETE_FAILURE;
964 if (!task->task_error_status) { 941 if (!task->task_error_status) {
965 task->task_error_status = 942 task->task_error_status =
@@ -968,10 +945,10 @@ void transport_complete_task(struct se_task *task, int success)
968 PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 945 PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
969 } 946 }
970 } else { 947 } else {
971 atomic_set(&cmd->t_task->t_transport_complete, 1); 948 atomic_set(&cmd->t_task.t_transport_complete, 1);
972 t_state = TRANSPORT_COMPLETE_OK; 949 t_state = TRANSPORT_COMPLETE_OK;
973 } 950 }
974 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 951 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
975 952
976 transport_add_cmd_to_queue(cmd, t_state); 953 transport_add_cmd_to_queue(cmd, t_state);
977} 954}
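
Editor's note: transport_complete_task() above leans on a dec-and-test idiom: every per-task completion decrements t_task_cdbs_left, and only the caller whose decrement reaches zero queues the parent command, with any failure recorded first so the final task observes it. A compact C11 rendering of the idiom, with invented names:

#include <stdatomic.h>
#include <stdbool.h>

struct parent_cmd {
	atomic_int tasks_left;          /* set to the task count at dispatch */
	atomic_bool failed;
};

/* Returns true only for the final completion, which owns the next step. */
static bool task_complete(struct parent_cmd *p, bool success)
{
	if (!success)
		atomic_store(&p->failed, true); /* published before our decrement */
	/* fetch_sub returns the prior value: 1 means we were the last task */
	return atomic_fetch_sub(&p->tasks_left, 1) == 1;
}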
@@ -1064,8 +1041,8 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
1064 struct se_task *task; 1041 struct se_task *task;
1065 unsigned long flags; 1042 unsigned long flags;
1066 1043
1067 spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); 1044 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
1068 list_for_each_entry(task, &cmd->t_task->t_task_list, t_list) { 1045 list_for_each_entry(task, &cmd->t_task.t_task_list, t_list) {
1069 dev = task->se_dev; 1046 dev = task->se_dev;
1070 1047
1071 if (atomic_read(&task->task_state_active)) 1048 if (atomic_read(&task->task_state_active))
@@ -1081,17 +1058,17 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
1081 1058
1082 spin_unlock(&dev->execute_task_lock); 1059 spin_unlock(&dev->execute_task_lock);
1083 } 1060 }
1084 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 1061 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
1085} 1062}
1086 1063
1087static void transport_add_tasks_from_cmd(struct se_cmd *cmd) 1064static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
1088{ 1065{
1089 struct se_device *dev = cmd->se_lun->lun_se_dev; 1066 struct se_device *dev = cmd->se_dev;
1090 struct se_task *task, *task_prev = NULL; 1067 struct se_task *task, *task_prev = NULL;
1091 unsigned long flags; 1068 unsigned long flags;
1092 1069
1093 spin_lock_irqsave(&dev->execute_task_lock, flags); 1070 spin_lock_irqsave(&dev->execute_task_lock, flags);
1094 list_for_each_entry(task, &cmd->t_task->t_task_list, t_list) { 1071 list_for_each_entry(task, &cmd->t_task.t_task_list, t_list) {
1095 if (atomic_read(&task->task_execute_queue)) 1072 if (atomic_read(&task->task_execute_queue))
1096 continue; 1073 continue;
1097 /* 1074 /*
@@ -1184,19 +1161,15 @@ void transport_dump_dev_state(
1184 */ 1161 */
1185static void transport_release_all_cmds(struct se_device *dev) 1162static void transport_release_all_cmds(struct se_device *dev)
1186{ 1163{
1187 struct se_cmd *cmd = NULL; 1164 struct se_cmd *cmd, *tcmd;
1188 struct se_queue_req *qr = NULL, *qr_p = NULL;
1189 int bug_out = 0, t_state; 1165 int bug_out = 0, t_state;
1190 unsigned long flags; 1166 unsigned long flags;
1191 1167
1192 spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags); 1168 spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags);
1193 list_for_each_entry_safe(qr, qr_p, &dev->dev_queue_obj.qobj_list, 1169 list_for_each_entry_safe(cmd, tcmd, &dev->dev_queue_obj.qobj_list,
1194 qr_list) { 1170 se_queue_node) {
1195 1171 t_state = cmd->t_state;
1196 cmd = qr->cmd; 1172 list_del(&cmd->se_queue_node);
1197 t_state = qr->state;
1198 list_del(&qr->qr_list);
1199 kfree(qr);
1200 spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, 1173 spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock,
1201 flags); 1174 flags);
1202 1175
@@ -1548,7 +1521,7 @@ struct se_device *transport_add_device_to_core_hba(
1548 transport_init_queue_obj(&dev->dev_queue_obj); 1521 transport_init_queue_obj(&dev->dev_queue_obj);
1549 dev->dev_flags = device_flags; 1522 dev->dev_flags = device_flags;
1550 dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; 1523 dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
1551 dev->dev_ptr = (void *) transport_dev; 1524 dev->dev_ptr = transport_dev;
1552 dev->se_hba = hba; 1525 dev->se_hba = hba;
1553 dev->se_sub_dev = se_dev; 1526 dev->se_sub_dev = se_dev;
1554 dev->transport = transport; 1527 dev->transport = transport;
@@ -1684,7 +1657,7 @@ transport_generic_get_task(struct se_cmd *cmd,
1684 enum dma_data_direction data_direction) 1657 enum dma_data_direction data_direction)
1685{ 1658{
1686 struct se_task *task; 1659 struct se_task *task;
1687 struct se_device *dev = cmd->se_lun->lun_se_dev; 1660 struct se_device *dev = cmd->se_dev;
1688 unsigned long flags; 1661 unsigned long flags;
1689 1662
1690 task = dev->transport->alloc_task(cmd); 1663 task = dev->transport->alloc_task(cmd);
@@ -1697,26 +1670,20 @@ transport_generic_get_task(struct se_cmd *cmd,
1697 INIT_LIST_HEAD(&task->t_execute_list); 1670 INIT_LIST_HEAD(&task->t_execute_list);
1698 INIT_LIST_HEAD(&task->t_state_list); 1671 INIT_LIST_HEAD(&task->t_state_list);
1699 init_completion(&task->task_stop_comp); 1672 init_completion(&task->task_stop_comp);
1700 task->task_no = cmd->t_task->t_tasks_no++; 1673 task->task_no = cmd->t_task.t_tasks_no++;
1701 task->task_se_cmd = cmd; 1674 task->task_se_cmd = cmd;
1702 task->se_dev = dev; 1675 task->se_dev = dev;
1703 task->task_data_direction = data_direction; 1676 task->task_data_direction = data_direction;
1704 1677
1705 spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); 1678 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
1706 list_add_tail(&task->t_list, &cmd->t_task->t_task_list); 1679 list_add_tail(&task->t_list, &cmd->t_task.t_task_list);
1707 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 1680 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
1708 1681
1709 return task; 1682 return task;
1710} 1683}
1711 1684
1712static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *); 1685static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);
1713 1686
1714void transport_device_setup_cmd(struct se_cmd *cmd)
1715{
1716 cmd->se_dev = cmd->se_lun->lun_se_dev;
1717}
1718EXPORT_SYMBOL(transport_device_setup_cmd);
1719
1720/* 1687/*
1721 * Used by fabric modules containing a local struct se_cmd within their 1688 * Used by fabric modules containing a local struct se_cmd within their
1722 * fabric dependent per I/O descriptor. 1689 * fabric dependent per I/O descriptor.
@@ -1730,20 +1697,18 @@ void transport_init_se_cmd(
1730 int task_attr, 1697 int task_attr,
1731 unsigned char *sense_buffer) 1698 unsigned char *sense_buffer)
1732{ 1699{
1733 INIT_LIST_HEAD(&cmd->se_lun_list); 1700 INIT_LIST_HEAD(&cmd->se_lun_node);
1734 INIT_LIST_HEAD(&cmd->se_delayed_list); 1701 INIT_LIST_HEAD(&cmd->se_delayed_node);
1735 INIT_LIST_HEAD(&cmd->se_ordered_list); 1702 INIT_LIST_HEAD(&cmd->se_ordered_node);
1736 /*
1737 * Setup t_task pointer to t_task_backstore
1738 */
1739 cmd->t_task = &cmd->t_task_backstore;
1740 1703
1741 INIT_LIST_HEAD(&cmd->t_task->t_task_list); 1704 INIT_LIST_HEAD(&cmd->t_task.t_mem_list);
1742 init_completion(&cmd->t_task->transport_lun_fe_stop_comp); 1705 INIT_LIST_HEAD(&cmd->t_task.t_mem_bidi_list);
1743 init_completion(&cmd->t_task->transport_lun_stop_comp); 1706 INIT_LIST_HEAD(&cmd->t_task.t_task_list);
1744 init_completion(&cmd->t_task->t_transport_stop_comp); 1707 init_completion(&cmd->t_task.transport_lun_fe_stop_comp);
1745 spin_lock_init(&cmd->t_task->t_state_lock); 1708 init_completion(&cmd->t_task.transport_lun_stop_comp);
1746 atomic_set(&cmd->t_task->transport_dev_active, 1); 1709 init_completion(&cmd->t_task.t_transport_stop_comp);
1710 spin_lock_init(&cmd->t_task.t_state_lock);
1711 atomic_set(&cmd->t_task.transport_dev_active, 1);
1747 1712
1748 cmd->se_tfo = tfo; 1713 cmd->se_tfo = tfo;
1749 cmd->se_sess = se_sess; 1714 cmd->se_sess = se_sess;
@@ -1760,7 +1725,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
1760 * Check if SAM Task Attribute emulation is enabled for this 1725 * Check if SAM Task Attribute emulation is enabled for this
1761 * struct se_device storage object 1726 * struct se_device storage object
1762 */ 1727 */
1763 if (cmd->se_lun->lun_se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) 1728 if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
1764 return 0; 1729 return 0;
1765 1730
1766 if (cmd->sam_task_attr == MSG_ACA_TAG) { 1731 if (cmd->sam_task_attr == MSG_ACA_TAG) {
@@ -1772,7 +1737,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
1772 * Used to determine when ORDERED commands should go from 1737 * Used to determine when ORDERED commands should go from
1773 * Dormant to Active status. 1738 * Dormant to Active status.
1774 */ 1739 */
1775 cmd->se_ordered_id = atomic_inc_return(&cmd->se_lun->lun_se_dev->dev_ordered_id); 1740 cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
1776 smp_mb__after_atomic_inc(); 1741 smp_mb__after_atomic_inc();
1777 DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", 1742 DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
1778 cmd->se_ordered_id, cmd->sam_task_attr, 1743 cmd->se_ordered_id, cmd->sam_task_attr,
@@ -1788,8 +1753,8 @@ void transport_free_se_cmd(
1788 /* 1753 /*
1789 * Check and free any extended CDB buffer that was allocated 1754 * Check and free any extended CDB buffer that was allocated
1790 */ 1755 */
1791 if (se_cmd->t_task->t_task_cdb != se_cmd->t_task->__t_task_cdb) 1756 if (se_cmd->t_task.t_task_cdb != se_cmd->t_task.__t_task_cdb)
1792 kfree(se_cmd->t_task->t_task_cdb); 1757 kfree(se_cmd->t_task.t_task_cdb);
1793} 1758}
1794EXPORT_SYMBOL(transport_free_se_cmd); 1759EXPORT_SYMBOL(transport_free_se_cmd);
1795 1760
@@ -1812,7 +1777,6 @@ int transport_generic_allocate_tasks(
1812 */ 1777 */
1813 cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; 1778 cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
1814 1779
1815 transport_device_setup_cmd(cmd);
1816 /* 1780 /*
1817 * Ensure that the received CDB is less than the max (252 + 8) bytes 1781 * Ensure that the received CDB is less than the max (252 + 8) bytes
1818 * for VARIABLE_LENGTH_CMD 1782 * for VARIABLE_LENGTH_CMD
@@ -1828,26 +1792,26 @@ int transport_generic_allocate_tasks(
1828 * allocate the additional extended CDB buffer now. Otherwise 1792 * allocate the additional extended CDB buffer now. Otherwise
1829 * setup the pointer from __t_task_cdb to t_task_cdb. 1793 * setup the pointer from __t_task_cdb to t_task_cdb.
1830 */ 1794 */
1831 if (scsi_command_size(cdb) > sizeof(cmd->t_task->__t_task_cdb)) { 1795 if (scsi_command_size(cdb) > sizeof(cmd->t_task.__t_task_cdb)) {
1832 cmd->t_task->t_task_cdb = kzalloc(scsi_command_size(cdb), 1796 cmd->t_task.t_task_cdb = kzalloc(scsi_command_size(cdb),
1833 GFP_KERNEL); 1797 GFP_KERNEL);
1834 if (!(cmd->t_task->t_task_cdb)) { 1798 if (!(cmd->t_task.t_task_cdb)) {
1835 printk(KERN_ERR "Unable to allocate cmd->t_task->t_task_cdb" 1799 printk(KERN_ERR "Unable to allocate cmd->t_task.t_task_cdb"
1836 " %u > sizeof(cmd->t_task->__t_task_cdb): %lu ops\n", 1800 " %u > sizeof(cmd->t_task.__t_task_cdb): %lu ops\n",
1837 scsi_command_size(cdb), 1801 scsi_command_size(cdb),
1838 (unsigned long)sizeof(cmd->t_task->__t_task_cdb)); 1802 (unsigned long)sizeof(cmd->t_task.__t_task_cdb));
1839 return -ENOMEM; 1803 return -ENOMEM;
1840 } 1804 }
1841 } else 1805 } else
1842 cmd->t_task->t_task_cdb = &cmd->t_task->__t_task_cdb[0]; 1806 cmd->t_task.t_task_cdb = &cmd->t_task.__t_task_cdb[0];
1843 /* 1807 /*
1844 * Copy the original CDB into cmd->t_task. 1808 * Copy the original CDB into cmd->t_task.
1845 */ 1809 */
1846 memcpy(cmd->t_task->t_task_cdb, cdb, scsi_command_size(cdb)); 1810 memcpy(cmd->t_task.t_task_cdb, cdb, scsi_command_size(cdb));
1847 /* 1811 /*
1848 * Setup the received CDB based on SCSI defined opcodes and 1812 * Setup the received CDB based on SCSI defined opcodes and
1849 * perform unit attention, persistent reservations and ALUA 1813 * perform unit attention, persistent reservations and ALUA
1850 * checks for virtual device backends. The cmd->t_task->t_task_cdb 1814 * checks for virtual device backends. The cmd->t_task.t_task_cdb
1851 * pointer is expected to be setup before we reach this point. 1815 * pointer is expected to be setup before we reach this point.
1852 */ 1816 */
1853 ret = transport_generic_cmd_sequencer(cmd, cdb); 1817 ret = transport_generic_cmd_sequencer(cmd, cdb);
@@ -1859,7 +1823,7 @@ int transport_generic_allocate_tasks(
1859 if (transport_check_alloc_task_attr(cmd) < 0) { 1823 if (transport_check_alloc_task_attr(cmd) < 0) {
1860 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 1824 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1861 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; 1825 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1862 return -2; 1826 return -EINVAL;
1863 } 1827 }
1864 spin_lock(&cmd->se_lun->lun_sep_lock); 1828 spin_lock(&cmd->se_lun->lun_sep_lock);
1865 if (cmd->se_lun->lun_sep) 1829 if (cmd->se_lun->lun_sep)
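
The CDB setup above is a small-buffer-with-heap-fallback idiom: the common fixed-length CDB lands in the inline __t_task_cdb array, and only a VARIABLE_LENGTH_CMD beyond that size (up to the 252 + 8 bytes noted earlier) pays for a kzalloc(). A self-contained sketch of the pattern, assuming a 32-byte inline capacity for illustration:

	#include <linux/slab.h>
	#include <linux/string.h>
	#include <linux/errno.h>

	#define EX_INLINE_CDB	32			/* assumed inline size */

	struct ex_cdb {
		unsigned char *cdb;			/* inline or heap      */
		unsigned char __cdb[EX_INLINE_CDB];	/* common fast path    */
	};

	static int ex_setup_cdb(struct ex_cdb *c, const unsigned char *src, int len)
	{
		if (len > sizeof(c->__cdb)) {
			c->cdb = kzalloc(len, GFP_KERNEL);	/* rare long CDB */
			if (!c->cdb)
				return -ENOMEM;
		} else {
			c->cdb = &c->__cdb[0];
		}
		memcpy(c->cdb, src, len);
		return 0;
	}

	static void ex_free_cdb(struct ex_cdb *c)
	{
		if (c->cdb != c->__cdb)		/* only the heap fallback is freed */
			kfree(c->cdb);
	}

The pointer comparison in the free path is the same test transport_free_se_cmd() performs a few hunks earlier.
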
@@ -1947,7 +1911,6 @@ int transport_generic_handle_tmr(
1947 * This is needed for early exceptions. 1911 * This is needed for early exceptions.
1948 */ 1912 */
1949 cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; 1913 cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
1950 transport_device_setup_cmd(cmd);
1951 1914
1952 transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR); 1915 transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR);
1953 return 0; 1916 return 0;
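
Both call sites lose transport_device_setup_cmd() because the fold into the LUN lookup means cmd->se_dev is already cached by the time a command or TMR reaches this point. Eliding the ACL and access checks the real lookup performs, the folded routine reduces to roughly:

	int ex_lookup_cmd_lun(struct se_cmd *cmd, u32 unpacked_lun)
	{
		/* ... resolve cmd->se_lun from unpacked_lun ... */
		cmd->se_dev = cmd->se_lun->lun_se_dev;	/* was transport_device_setup_cmd() */
		return 0;
	}
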
@@ -1973,9 +1936,9 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
1973 /* 1936 /*
1974 * No tasks remain in the execution queue 1937 * No tasks remain in the execution queue
1975 */ 1938 */
1976 spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); 1939 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
1977 list_for_each_entry_safe(task, task_tmp, 1940 list_for_each_entry_safe(task, task_tmp,
1978 &cmd->t_task->t_task_list, t_list) { 1941 &cmd->t_task.t_task_list, t_list) {
1979 DEBUG_TS("task_no[%d] - Processing task %p\n", 1942 DEBUG_TS("task_no[%d] - Processing task %p\n",
1980 task->task_no, task); 1943 task->task_no, task);
1981 /* 1944 /*
@@ -1984,14 +1947,14 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
1984 */ 1947 */
1985 if (!atomic_read(&task->task_sent) && 1948 if (!atomic_read(&task->task_sent) &&
1986 !atomic_read(&task->task_active)) { 1949 !atomic_read(&task->task_active)) {
1987 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, 1950 spin_unlock_irqrestore(&cmd->t_task.t_state_lock,
1988 flags); 1951 flags);
1989 transport_remove_task_from_execute_queue(task, 1952 transport_remove_task_from_execute_queue(task,
1990 task->se_dev); 1953 task->se_dev);
1991 1954
1992 DEBUG_TS("task_no[%d] - Removed from execute queue\n", 1955 DEBUG_TS("task_no[%d] - Removed from execute queue\n",
1993 task->task_no); 1956 task->task_no);
1994 spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); 1957 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
1995 continue; 1958 continue;
1996 } 1959 }
1997 1960
@@ -2001,7 +1964,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
2001 */ 1964 */
2002 if (atomic_read(&task->task_active)) { 1965 if (atomic_read(&task->task_active)) {
2003 atomic_set(&task->task_stop, 1); 1966 atomic_set(&task->task_stop, 1);
2004 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, 1967 spin_unlock_irqrestore(&cmd->t_task.t_state_lock,
2005 flags); 1968 flags);
2006 1969
2007 DEBUG_TS("task_no[%d] - Waiting to complete\n", 1970 DEBUG_TS("task_no[%d] - Waiting to complete\n",
@@ -2010,8 +1973,8 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
2010 DEBUG_TS("task_no[%d] - Stopped successfully\n", 1973 DEBUG_TS("task_no[%d] - Stopped successfully\n",
2011 task->task_no); 1974 task->task_no);
2012 1975
2013 spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); 1976 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
2014 atomic_dec(&cmd->t_task->t_task_cdbs_left); 1977 atomic_dec(&cmd->t_task.t_task_cdbs_left);
2015 1978
2016 atomic_set(&task->task_active, 0); 1979 atomic_set(&task->task_active, 0);
2017 atomic_set(&task->task_stop, 0); 1980 atomic_set(&task->task_stop, 0);
@@ -2022,7 +1985,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
2022 1985
2023 __transport_stop_task_timer(task, &flags); 1986 __transport_stop_task_timer(task, &flags);
2024 } 1987 }
2025 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 1988 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
2026 1989
2027 return ret; 1990 return ret;
2028} 1991}
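
transport_stop_tasks_for_cmd() shows the standard drop-the-lock-around-a-sleep shape: t_state_lock protects the task list and counters, but the stop handshake must sleep, so the lock is released before waiting and retaken before the walk continues. The wait itself falls between the two debug messages and is elided by the hunk; assuming it is a wait_for_completion() on task_stop_comp (consistent with the complete() call in the timeout handler further down), the loop condenses to:

	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
	list_for_each_entry_safe(task, task_tmp,
				 &cmd->t_task.t_task_list, t_list) {
		if (atomic_read(&task->task_active)) {
			atomic_set(&task->task_stop, 1);
			spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);

			wait_for_completion(&task->task_stop_comp);	/* may sleep */

			spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
			atomic_dec(&cmd->t_task.t_task_cdbs_left);
			atomic_set(&task->task_active, 0);
			atomic_set(&task->task_stop, 0);
		}
	}
	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
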
@@ -2038,7 +2001,7 @@ static void transport_generic_request_failure(
2038{ 2001{
2039 DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" 2002 DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
2040 " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), 2003 " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
2041 cmd->t_task->t_task_cdb[0]); 2004 cmd->t_task.t_task_cdb[0]);
2042 DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:" 2005 DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:"
2043 " %d/%d transport_error_status: %d\n", 2006 " %d/%d transport_error_status: %d\n",
2044 cmd->se_tfo->get_cmd_state(cmd), 2007 cmd->se_tfo->get_cmd_state(cmd),
@@ -2047,13 +2010,13 @@ static void transport_generic_request_failure(
2047 DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d" 2010 DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d"
2048 " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" 2011 " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
2049 " t_transport_active: %d t_transport_stop: %d" 2012 " t_transport_active: %d t_transport_stop: %d"
2050 " t_transport_sent: %d\n", cmd->t_task->t_task_cdbs, 2013 " t_transport_sent: %d\n", cmd->t_task.t_task_cdbs,
2051 atomic_read(&cmd->t_task->t_task_cdbs_left), 2014 atomic_read(&cmd->t_task.t_task_cdbs_left),
2052 atomic_read(&cmd->t_task->t_task_cdbs_sent), 2015 atomic_read(&cmd->t_task.t_task_cdbs_sent),
2053 atomic_read(&cmd->t_task->t_task_cdbs_ex_left), 2016 atomic_read(&cmd->t_task.t_task_cdbs_ex_left),
2054 atomic_read(&cmd->t_task->t_transport_active), 2017 atomic_read(&cmd->t_task.t_transport_active),
2055 atomic_read(&cmd->t_task->t_transport_stop), 2018 atomic_read(&cmd->t_task.t_transport_stop),
2056 atomic_read(&cmd->t_task->t_transport_sent)); 2019 atomic_read(&cmd->t_task.t_transport_sent));
2057 2020
2058 transport_stop_all_task_timers(cmd); 2021 transport_stop_all_task_timers(cmd);
2059 2022
@@ -2135,7 +2098,7 @@ static void transport_generic_request_failure(
2135 break; 2098 break;
2136 default: 2099 default:
2137 printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n", 2100 printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n",
2138 cmd->t_task->t_task_cdb[0], 2101 cmd->t_task.t_task_cdb[0],
2139 cmd->transport_error_status); 2102 cmd->transport_error_status);
2140 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 2103 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
2141 break; 2104 break;
@@ -2156,19 +2119,19 @@ static void transport_direct_request_timeout(struct se_cmd *cmd)
2156{ 2119{
2157 unsigned long flags; 2120 unsigned long flags;
2158 2121
2159 spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); 2122 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
2160 if (!(atomic_read(&cmd->t_task->t_transport_timeout))) { 2123 if (!(atomic_read(&cmd->t_task.t_transport_timeout))) {
2161 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 2124 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
2162 return; 2125 return;
2163 } 2126 }
2164 if (atomic_read(&cmd->t_task->t_task_cdbs_timeout_left)) { 2127 if (atomic_read(&cmd->t_task.t_task_cdbs_timeout_left)) {
2165 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 2128 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
2166 return; 2129 return;
2167 } 2130 }
2168 2131
2169 atomic_sub(atomic_read(&cmd->t_task->t_transport_timeout), 2132 atomic_sub(atomic_read(&cmd->t_task.t_transport_timeout),
2170 &cmd->t_task->t_se_count); 2133 &cmd->t_task.t_se_count);
2171 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 2134 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
2172} 2135}
2173 2136
2174static void transport_generic_request_timeout(struct se_cmd *cmd) 2137static void transport_generic_request_timeout(struct se_cmd *cmd)
@@ -2176,16 +2139,16 @@ static void transport_generic_request_timeout(struct se_cmd *cmd)
2176 unsigned long flags; 2139 unsigned long flags;
2177 2140
2178 /* 2141 /*
2179 * Reset cmd->t_task->t_se_count to allow transport_generic_remove() 2142 * Reset cmd->t_task.t_se_count to allow transport_generic_remove()
2180 * to allow last call to free memory resources. 2143 * to allow last call to free memory resources.
2181 */ 2144 */
2182 spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); 2145 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
2183 if (atomic_read(&cmd->t_task->t_transport_timeout) > 1) { 2146 if (atomic_read(&cmd->t_task.t_transport_timeout) > 1) {
2184 int tmp = (atomic_read(&cmd->t_task->t_transport_timeout) - 1); 2147 int tmp = (atomic_read(&cmd->t_task.t_transport_timeout) - 1);
2185 2148
2186 atomic_sub(tmp, &cmd->t_task->t_se_count); 2149 atomic_sub(tmp, &cmd->t_task.t_se_count);
2187 } 2150 }
2188 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 2151 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
2189 2152
2190 transport_generic_remove(cmd, 0, 0); 2153 transport_generic_remove(cmd, 0, 0);
2191} 2154}
@@ -2201,8 +2164,8 @@ transport_generic_allocate_buf(struct se_cmd *cmd, u32 data_length)
2201 return -ENOMEM; 2164 return -ENOMEM;
2202 } 2165 }
2203 2166
2204 cmd->t_task->t_tasks_se_num = 0; 2167 cmd->t_task.t_tasks_se_num = 0;
2205 cmd->t_task->t_task_buf = buf; 2168 cmd->t_task.t_task_buf = buf;
2206 2169
2207 return 0; 2170 return 0;
2208} 2171}
@@ -2244,9 +2207,9 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
2244{ 2207{
2245 unsigned long flags; 2208 unsigned long flags;
2246 2209
2247 spin_lock_irqsave(&se_cmd->t_task->t_state_lock, flags); 2210 spin_lock_irqsave(&se_cmd->t_task.t_state_lock, flags);
2248 se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; 2211 se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
2249 spin_unlock_irqrestore(&se_cmd->t_task->t_state_lock, flags); 2212 spin_unlock_irqrestore(&se_cmd->t_task.t_state_lock, flags);
2250} 2213}
2251 2214
2252/* 2215/*
@@ -2260,9 +2223,9 @@ static void transport_task_timeout_handler(unsigned long data)
2260 2223
2261 DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd); 2224 DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd);
2262 2225
2263 spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); 2226 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
2264 if (task->task_flags & TF_STOP) { 2227 if (task->task_flags & TF_STOP) {
2265 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 2228 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
2266 return; 2229 return;
2267 } 2230 }
2268 task->task_flags &= ~TF_RUNNING; 2231 task->task_flags &= ~TF_RUNNING;
@@ -2273,13 +2236,13 @@ static void transport_task_timeout_handler(unsigned long data)
2273 if (!(atomic_read(&task->task_active))) { 2236 if (!(atomic_read(&task->task_active))) {
2274 DEBUG_TT("transport task: %p cmd: %p timeout task_active" 2237 DEBUG_TT("transport task: %p cmd: %p timeout task_active"
2275 " == 0\n", task, cmd); 2238 " == 0\n", task, cmd);
2276 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 2239 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
2277 return; 2240 return;
2278 } 2241 }
2279 2242
2280 atomic_inc(&cmd->t_task->t_se_count); 2243 atomic_inc(&cmd->t_task.t_se_count);
2281 atomic_inc(&cmd->t_task->t_transport_timeout); 2244 atomic_inc(&cmd->t_task.t_transport_timeout);
2282 cmd->t_task->t_tasks_failed = 1; 2245 cmd->t_task.t_tasks_failed = 1;
2283 2246
2284 atomic_set(&task->task_timeout, 1); 2247 atomic_set(&task->task_timeout, 1);
2285 task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT; 2248 task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT;
@@ -2288,28 +2251,28 @@ static void transport_task_timeout_handler(unsigned long data)
2288 if (atomic_read(&task->task_stop)) { 2251 if (atomic_read(&task->task_stop)) {
2289 DEBUG_TT("transport task: %p cmd: %p timeout task_stop" 2252 DEBUG_TT("transport task: %p cmd: %p timeout task_stop"
2290 " == 1\n", task, cmd); 2253 " == 1\n", task, cmd);
2291 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 2254 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
2292 complete(&task->task_stop_comp); 2255 complete(&task->task_stop_comp);
2293 return; 2256 return;
2294 } 2257 }
2295 2258
2296 if (!(atomic_dec_and_test(&cmd->t_task->t_task_cdbs_left))) { 2259 if (!(atomic_dec_and_test(&cmd->t_task.t_task_cdbs_left))) {
2297 DEBUG_TT("transport task: %p cmd: %p timeout non zero" 2260 DEBUG_TT("transport task: %p cmd: %p timeout non zero"
2298 " t_task_cdbs_left\n", task, cmd); 2261 " t_task_cdbs_left\n", task, cmd);
2299 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 2262 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
2300 return; 2263 return;
2301 } 2264 }
2302 DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n", 2265 DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
2303 task, cmd); 2266 task, cmd);
2304 2267
2305 cmd->t_state = TRANSPORT_COMPLETE_FAILURE; 2268 cmd->t_state = TRANSPORT_COMPLETE_FAILURE;
2306 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 2269 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
2307 2270
2308 transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE); 2271 transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE);
2309} 2272}
2310 2273
2311/* 2274/*
2312 * Called with cmd->t_task->t_state_lock held. 2275 * Called with cmd->t_task.t_state_lock held.
2313 */ 2276 */
2314static void transport_start_task_timer(struct se_task *task) 2277static void transport_start_task_timer(struct se_task *task)
2315{ 2278{
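
The timeout handler resolves the race with a concurrent stopper in both directions: if task_stop is already set, the timer completes task_stop_comp to unblock the stopper and backs off; otherwise it accounts the failure, and only the invocation that retires the last outstanding task escalates the whole command. The tail of the hunk condenses to:

	if (atomic_read(&task->task_stop)) {
		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
		complete(&task->task_stop_comp);	/* unblock the stopper */
		return;
	}
	if (!atomic_dec_and_test(&cmd->t_task.t_task_cdbs_left)) {
		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
		return;					/* siblings still pending */
	}
	cmd->t_state = TRANSPORT_COMPLETE_FAILURE;
	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
	transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE);

This is a condensed restatement of the code above, not new behavior.
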
@@ -2339,7 +2302,7 @@ static void transport_start_task_timer(struct se_task *task)
2339} 2302}
2340 2303
2341/* 2304/*
2342 * Called with spin_lock_irq(&cmd->t_task->t_state_lock) held. 2305 * Called with spin_lock_irq(&cmd->t_task.t_state_lock) held.
2343 */ 2306 */
2344void __transport_stop_task_timer(struct se_task *task, unsigned long *flags) 2307void __transport_stop_task_timer(struct se_task *task, unsigned long *flags)
2345{ 2308{
@@ -2349,11 +2312,11 @@ void __transport_stop_task_timer(struct se_task *task, unsigned long *flags)
2349 return; 2312 return;
2350 2313
2351 task->task_flags |= TF_STOP; 2314 task->task_flags |= TF_STOP;
2352 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, *flags); 2315 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, *flags);
2353 2316
2354 del_timer_sync(&task->task_timer); 2317 del_timer_sync(&task->task_timer);
2355 2318
2356 spin_lock_irqsave(&cmd->t_task->t_state_lock, *flags); 2319 spin_lock_irqsave(&cmd->t_task.t_state_lock, *flags);
2357 task->task_flags &= ~TF_RUNNING; 2320 task->task_flags &= ~TF_RUNNING;
2358 task->task_flags &= ~TF_STOP; 2321 task->task_flags &= ~TF_STOP;
2359} 2322}
@@ -2363,11 +2326,11 @@ static void transport_stop_all_task_timers(struct se_cmd *cmd)
2363 struct se_task *task = NULL, *task_tmp; 2326 struct se_task *task = NULL, *task_tmp;
2364 unsigned long flags; 2327 unsigned long flags;
2365 2328
2366 spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); 2329 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
2367 list_for_each_entry_safe(task, task_tmp, 2330 list_for_each_entry_safe(task, task_tmp,
2368 &cmd->t_task->t_task_list, t_list) 2331 &cmd->t_task.t_task_list, t_list)
2369 __transport_stop_task_timer(task, &flags); 2332 __transport_stop_task_timer(task, &flags);
2370 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 2333 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
2371} 2334}
2372 2335
2373static inline int transport_tcq_window_closed(struct se_device *dev) 2336static inline int transport_tcq_window_closed(struct se_device *dev)
@@ -2391,14 +2354,14 @@ static inline int transport_tcq_window_closed(struct se_device *dev)
2391 */ 2354 */
2392static inline int transport_execute_task_attr(struct se_cmd *cmd) 2355static inline int transport_execute_task_attr(struct se_cmd *cmd)
2393{ 2356{
2394 if (cmd->se_lun->lun_se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) 2357 if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
2395 return 1; 2358 return 1;
2396 /* 2359 /*
2397 * Check for the existence of HEAD_OF_QUEUE, and if true return 1 2360 * Check for the existence of HEAD_OF_QUEUE, and if true return 1
2398 * to allow the tasks of the passed struct se_cmd to be added to the front of the list. 2361 * to allow the tasks of the passed struct se_cmd to be added to the front of the list.
2399 */ 2362 */
2400 if (cmd->sam_task_attr == MSG_HEAD_TAG) { 2363 if (cmd->sam_task_attr == MSG_HEAD_TAG) {
2401 atomic_inc(&cmd->se_lun->lun_se_dev->dev_hoq_count); 2364 atomic_inc(&cmd->se_dev->dev_hoq_count);
2402 smp_mb__after_atomic_inc(); 2365 smp_mb__after_atomic_inc();
2403 DEBUG_STA("Added HEAD_OF_QUEUE for CDB:" 2366 DEBUG_STA("Added HEAD_OF_QUEUE for CDB:"
2404 " 0x%02x, se_ordered_id: %u\n", 2367 " 0x%02x, se_ordered_id: %u\n",
@@ -2406,30 +2369,30 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
2406 cmd->se_ordered_id); 2369 cmd->se_ordered_id);
2407 return 1; 2370 return 1;
2408 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { 2371 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
2409 spin_lock(&cmd->se_lun->lun_se_dev->ordered_cmd_lock); 2372 spin_lock(&cmd->se_dev->ordered_cmd_lock);
2410 list_add_tail(&cmd->se_ordered_list, 2373 list_add_tail(&cmd->se_ordered_node,
2411 &cmd->se_lun->lun_se_dev->ordered_cmd_list); 2374 &cmd->se_dev->ordered_cmd_list);
2412 spin_unlock(&cmd->se_lun->lun_se_dev->ordered_cmd_lock); 2375 spin_unlock(&cmd->se_dev->ordered_cmd_lock);
2413 2376
2414 atomic_inc(&cmd->se_lun->lun_se_dev->dev_ordered_sync); 2377 atomic_inc(&cmd->se_dev->dev_ordered_sync);
2415 smp_mb__after_atomic_inc(); 2378 smp_mb__after_atomic_inc();
2416 2379
2417 DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered" 2380 DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered"
2418 " list, se_ordered_id: %u\n", 2381 " list, se_ordered_id: %u\n",
2419 cmd->t_task->t_task_cdb[0], 2382 cmd->t_task.t_task_cdb[0],
2420 cmd->se_ordered_id); 2383 cmd->se_ordered_id);
2421 /* 2384 /*
2422 * Add ORDERED command to tail of execution queue if 2385 * Add ORDERED command to tail of execution queue if
2423 * no other older commands exist that need to be 2386 * no other older commands exist that need to be
2424 * completed first. 2387 * completed first.
2425 */ 2388 */
2426 if (!(atomic_read(&cmd->se_lun->lun_se_dev->simple_cmds))) 2389 if (!(atomic_read(&cmd->se_dev->simple_cmds)))
2427 return 1; 2390 return 1;
2428 } else { 2391 } else {
2429 /* 2392 /*
2430 * For SIMPLE and UNTAGGED Task Attribute commands 2393 * For SIMPLE and UNTAGGED Task Attribute commands
2431 */ 2394 */
2432 atomic_inc(&cmd->se_lun->lun_se_dev->simple_cmds); 2395 atomic_inc(&cmd->se_dev->simple_cmds);
2433 smp_mb__after_atomic_inc(); 2396 smp_mb__after_atomic_inc();
2434 } 2397 }
2435 /* 2398 /*
@@ -2437,20 +2400,20 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
2437 * add the dormant task(s) built for the passed struct se_cmd to the 2400 * add the dormant task(s) built for the passed struct se_cmd to the
2438 * execution queue and become Active for this struct se_device. 2401 * execution queue and become Active for this struct se_device.
2439 */ 2402 */
2440 if (atomic_read(&cmd->se_lun->lun_se_dev->dev_ordered_sync) != 0) { 2403 if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) {
2441 /* 2404 /*
2442 * Otherwise, add cmd w/ tasks to delayed cmd queue that 2405 * Otherwise, add cmd w/ tasks to delayed cmd queue that
2443 * will be drained upon completion of the HEAD_OF_QUEUE task. 2406 * will be drained upon completion of the HEAD_OF_QUEUE task.
2444 */ 2407 */
2445 spin_lock(&cmd->se_lun->lun_se_dev->delayed_cmd_lock); 2408 spin_lock(&cmd->se_dev->delayed_cmd_lock);
2446 cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR; 2409 cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
2447 list_add_tail(&cmd->se_delayed_list, 2410 list_add_tail(&cmd->se_delayed_node,
2448 &cmd->se_lun->lun_se_dev->delayed_cmd_list); 2411 &cmd->se_dev->delayed_cmd_list);
2449 spin_unlock(&cmd->se_lun->lun_se_dev->delayed_cmd_lock); 2412 spin_unlock(&cmd->se_dev->delayed_cmd_lock);
2450 2413
2451 DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to" 2414 DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to"
2452 " delayed CMD list, se_ordered_id: %u\n", 2415 " delayed CMD list, se_ordered_id: %u\n",
2453 cmd->t_task->t_task_cdb[0], cmd->sam_task_attr, 2416 cmd->t_task.t_task_cdb[0], cmd->sam_task_attr,
2454 cmd->se_ordered_id); 2417 cmd->se_ordered_id);
2455 /* 2418 /*
2456 * Return zero to let transport_execute_tasks() know 2419 * Return zero to let transport_execute_tasks() know
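
Taken together, the two hunks above implement the SAM ordering rules: HEAD_OF_QUEUE runs immediately, ORDERED runs immediately only when no SIMPLE commands are in flight, and anything issued while an ORDERED barrier is outstanding (dev_ordered_sync != 0) is parked on the delayed list. A hedged condensation of the decision, with the list insertion, counters, and debug output stripped (return 1 to execute now, 0 to defer):

	static int ex_task_attr_gate(struct se_cmd *cmd, struct se_device *dev)
	{
		if (cmd->sam_task_attr == MSG_HEAD_TAG)
			return 1;			/* jumps the queue */

		if (cmd->sam_task_attr == MSG_ORDERED_TAG)
			return !atomic_read(&dev->simple_cmds);	/* wait for older I/O */

		if (atomic_read(&dev->dev_ordered_sync))
			return 0;			/* ORDERED barrier pending */

		return 1;				/* SIMPLE / UNTAGGED */
	}
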
@@ -2505,7 +2468,7 @@ static int transport_execute_tasks(struct se_cmd *cmd)
2505 * storage object. 2468 * storage object.
2506 */ 2469 */
2507execute_tasks: 2470execute_tasks:
2508 __transport_execute_tasks(cmd->se_lun->lun_se_dev); 2471 __transport_execute_tasks(cmd->se_dev);
2509 return 0; 2472 return 0;
2510} 2473}
2511 2474
@@ -2548,17 +2511,17 @@ check_depth:
2548 2511
2549 cmd = task->task_se_cmd; 2512 cmd = task->task_se_cmd;
2550 2513
2551 spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); 2514 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
2552 atomic_set(&task->task_active, 1); 2515 atomic_set(&task->task_active, 1);
2553 atomic_set(&task->task_sent, 1); 2516 atomic_set(&task->task_sent, 1);
2554 atomic_inc(&cmd->t_task->t_task_cdbs_sent); 2517 atomic_inc(&cmd->t_task.t_task_cdbs_sent);
2555 2518
2556 if (atomic_read(&cmd->t_task->t_task_cdbs_sent) == 2519 if (atomic_read(&cmd->t_task.t_task_cdbs_sent) ==
2557 cmd->t_task->t_task_cdbs) 2520 cmd->t_task.t_task_cdbs)
2558 atomic_set(&cmd->transport_sent, 1); 2521 atomic_set(&cmd->transport_sent, 1);
2559 2522
2560 transport_start_task_timer(task); 2523 transport_start_task_timer(task);
2561 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 2524 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
2562 /* 2525 /*
2563 * The struct se_cmd->transport_emulate_cdb() function pointer is used 2526 * The struct se_cmd->transport_emulate_cdb() function pointer is used
2564 * to grab REPORT_LUNS and other CDBs we want to handle before they hit the 2527 * to grab REPORT_LUNS and other CDBs we want to handle before they hit the
@@ -2623,10 +2586,10 @@ void transport_new_cmd_failure(struct se_cmd *se_cmd)
2623 * Any unsolicited data will get dumped for a failed command inside of 2586 * Any unsolicited data will get dumped for a failed command inside of
2624 * the fabric plugin 2587 * the fabric plugin
2625 */ 2588 */
2626 spin_lock_irqsave(&se_cmd->t_task->t_state_lock, flags); 2589 spin_lock_irqsave(&se_cmd->t_task.t_state_lock, flags);
2627 se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED; 2590 se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
2628 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 2591 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2629 spin_unlock_irqrestore(&se_cmd->t_task->t_state_lock, flags); 2592 spin_unlock_irqrestore(&se_cmd->t_task.t_state_lock, flags);
2630 2593
2631 se_cmd->se_tfo->new_cmd_failure(se_cmd); 2594 se_cmd->se_tfo->new_cmd_failure(se_cmd);
2632} 2595}
@@ -2638,7 +2601,7 @@ static inline u32 transport_get_sectors_6(
2638 struct se_cmd *cmd, 2601 struct se_cmd *cmd,
2639 int *ret) 2602 int *ret)
2640{ 2603{
2641 struct se_device *dev = cmd->se_lun->lun_se_dev; 2604 struct se_device *dev = cmd->se_dev;
2642 2605
2643 /* 2606 /*
2644 * Assume TYPE_DISK for non struct se_device objects. 2607 * Assume TYPE_DISK for non struct se_device objects.
@@ -2666,7 +2629,7 @@ static inline u32 transport_get_sectors_10(
2666 struct se_cmd *cmd, 2629 struct se_cmd *cmd,
2667 int *ret) 2630 int *ret)
2668{ 2631{
2669 struct se_device *dev = cmd->se_lun->lun_se_dev; 2632 struct se_device *dev = cmd->se_dev;
2670 2633
2671 /* 2634 /*
2672 * Assume TYPE_DISK for non struct se_device objects. 2635 * Assume TYPE_DISK for non struct se_device objects.
@@ -2696,7 +2659,7 @@ static inline u32 transport_get_sectors_12(
2696 struct se_cmd *cmd, 2659 struct se_cmd *cmd,
2697 int *ret) 2660 int *ret)
2698{ 2661{
2699 struct se_device *dev = cmd->se_lun->lun_se_dev; 2662 struct se_device *dev = cmd->se_dev;
2700 2663
2701 /* 2664 /*
2702 * Assume TYPE_DISK for non struct se_device objects. 2665 * Assume TYPE_DISK for non struct se_device objects.
@@ -2726,7 +2689,7 @@ static inline u32 transport_get_sectors_16(
2726 struct se_cmd *cmd, 2689 struct se_cmd *cmd,
2727 int *ret) 2690 int *ret)
2728{ 2691{
2729 struct se_device *dev = cmd->se_lun->lun_se_dev; 2692 struct se_device *dev = cmd->se_dev;
2730 2693
2731 /* 2694 /*
2732 * Assume TYPE_DISK for non struct se_device objects. 2695 * Assume TYPE_DISK for non struct se_device objects.
@@ -2768,7 +2731,7 @@ static inline u32 transport_get_size(
2768 unsigned char *cdb, 2731 unsigned char *cdb,
2769 struct se_cmd *cmd) 2732 struct se_cmd *cmd)
2770{ 2733{
2771 struct se_device *dev = cmd->se_lun->lun_se_dev; 2734 struct se_device *dev = cmd->se_dev;
2772 2735
2773 if (dev->transport->get_device_type(dev) == TYPE_TAPE) { 2736 if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
2774 if (cdb[1] & 1) { /* sectors */ 2737 if (cdb[1] & 1) { /* sectors */
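
transport_get_size() turns the sector count decoded from the CDB into an expected transfer length in bytes, with one tape quirk visible at the hunk boundary: when the FIXED bit (cdb[1] bit 0) is clear, the "sectors" field of a tape command is already a byte count. A condensed sketch under that reading:

	static u32 ex_transfer_size(u32 sectors, unsigned char *cdb,
				    u32 block_size, int is_tape)
	{
		if (is_tape && !(cdb[1] & 1))
			return sectors;			/* variable-length: bytes */
		return sectors * block_size;		/* fixed blocks           */
	}
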
@@ -2836,17 +2799,17 @@ static void transport_xor_callback(struct se_cmd *cmd)
2836 return; 2799 return;
2837 } 2800 }
2838 /* 2801 /*
2839 * Copy the scatterlist WRITE buffer located at cmd->t_task->t_mem_list 2802 * Copy the scatterlist WRITE buffer located at cmd->t_task.t_mem_list
2840 * into the locally allocated *buf 2803 * into the locally allocated *buf
2841 */ 2804 */
2842 transport_memcpy_se_mem_read_contig(cmd, buf, cmd->t_task->t_mem_list); 2805 transport_memcpy_se_mem_read_contig(cmd, buf, &cmd->t_task.t_mem_list);
2843 /* 2806 /*
2844 * Now perform the XOR against the BIDI read memory located at 2807 * Now perform the XOR against the BIDI read memory located at
2845 * cmd->t_task->t_mem_bidi_list 2808 * cmd->t_task.t_mem_bidi_list
2846 */ 2809 */
2847 2810
2848 offset = 0; 2811 offset = 0;
2849 list_for_each_entry(se_mem, cmd->t_task->t_mem_bidi_list, se_list) { 2812 list_for_each_entry(se_mem, &cmd->t_task.t_mem_bidi_list, se_list) {
2850 addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0); 2813 addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0);
2851 if (!(addr)) 2814 if (!(addr))
2852 goto out; 2815 goto out;
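
transport_xor_callback() runs once an XDWRITEREAD completes: the WRITE payload is first flattened into a contiguous bounce buffer, then XORed into each page of the BIDI read list. Assuming se_mem carries the usual page/length pair, the inner walk (minus the mapping-failure check shown above) looks roughly like:

	offset = 0;
	list_for_each_entry(se_mem, &cmd->t_task.t_mem_bidi_list, se_list) {
		unsigned char *addr = kmap_atomic(se_mem->se_page, KM_USER0);
		u32 i;

		for (i = 0; i < se_mem->se_len; i++)
			addr[i] ^= buf[offset + i];	/* buf: contiguous WRITE copy */

		offset += se_mem->se_len;
		kunmap_atomic(addr, KM_USER0);
	}

kmap_atomic()/kunmap_atomic() pin each page only for the duration of the XOR pass, which keeps the walk safe in atomic context.
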
@@ -2874,14 +2837,14 @@ static int transport_get_sense_data(struct se_cmd *cmd)
2874 2837
2875 WARN_ON(!cmd->se_lun); 2838 WARN_ON(!cmd->se_lun);
2876 2839
2877 spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); 2840 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
2878 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { 2841 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
2879 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 2842 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
2880 return 0; 2843 return 0;
2881 } 2844 }
2882 2845
2883 list_for_each_entry_safe(task, task_tmp, 2846 list_for_each_entry_safe(task, task_tmp,
2884 &cmd->t_task->t_task_list, t_list) { 2847 &cmd->t_task.t_task_list, t_list) {
2885 2848
2886 if (!task->task_sense) 2849 if (!task->task_sense)
2887 continue; 2850 continue;
@@ -2903,12 +2866,12 @@ static int transport_get_sense_data(struct se_cmd *cmd)
2903 cmd->se_tfo->get_task_tag(cmd), task->task_no); 2866 cmd->se_tfo->get_task_tag(cmd), task->task_no);
2904 continue; 2867 continue;
2905 } 2868 }
2906 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 2869 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
2907 2870
2908 offset = cmd->se_tfo->set_fabric_sense_len(cmd, 2871 offset = cmd->se_tfo->set_fabric_sense_len(cmd,
2909 TRANSPORT_SENSE_BUFFER); 2872 TRANSPORT_SENSE_BUFFER);
2910 2873
2911 memcpy((void *)&buffer[offset], (void *)sense_buffer, 2874 memcpy(&buffer[offset], sense_buffer,
2912 TRANSPORT_SENSE_BUFFER); 2875 TRANSPORT_SENSE_BUFFER);
2913 cmd->scsi_status = task->task_scsi_status; 2876 cmd->scsi_status = task->task_scsi_status;
2914 /* Automatically padded */ 2877 /* Automatically padded */
@@ -2921,7 +2884,7 @@ static int transport_get_sense_data(struct se_cmd *cmd)
2921 cmd->scsi_status); 2884 cmd->scsi_status);
2922 return 0; 2885 return 0;
2923 } 2886 }
2924 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 2887 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
2925 2888
2926 return -1; 2889 return -1;
2927} 2890}
@@ -2958,7 +2921,7 @@ transport_handle_reservation_conflict(struct se_cmd *cmd)
2958 core_scsi3_ua_allocate(cmd->se_sess->se_node_acl, 2921 core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
2959 cmd->orig_fe_lun, 0x2C, 2922 cmd->orig_fe_lun, 0x2C,
2960 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); 2923 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
2961 return -2; 2924 return -EINVAL;
2962} 2925}
2963 2926
2964/* transport_generic_cmd_sequencer(): 2927/* transport_generic_cmd_sequencer():
@@ -2975,7 +2938,7 @@ static int transport_generic_cmd_sequencer(
2975 struct se_cmd *cmd, 2938 struct se_cmd *cmd,
2976 unsigned char *cdb) 2939 unsigned char *cdb)
2977{ 2940{
2978 struct se_device *dev = cmd->se_lun->lun_se_dev; 2941 struct se_device *dev = cmd->se_dev;
2979 struct se_subsystem_dev *su_dev = dev->se_sub_dev; 2942 struct se_subsystem_dev *su_dev = dev->se_sub_dev;
2980 int ret = 0, sector_ret = 0, passthrough; 2943 int ret = 0, sector_ret = 0, passthrough;
2981 u32 sectors = 0, size = 0, pr_reg_type = 0; 2944 u32 sectors = 0, size = 0, pr_reg_type = 0;
@@ -2989,7 +2952,7 @@ static int transport_generic_cmd_sequencer(
2989 &transport_nop_wait_for_tasks; 2952 &transport_nop_wait_for_tasks;
2990 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 2953 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2991 cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION; 2954 cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
2992 return -2; 2955 return -EINVAL;
2993 } 2956 }
2994 /* 2957 /*
2995 * Check status of Asymmetric Logical Unit Assignment port 2958 * Check status of Asymmetric Logical Unit Assignment port
@@ -3011,7 +2974,7 @@ static int transport_generic_cmd_sequencer(
3011 transport_set_sense_codes(cmd, 0x04, alua_ascq); 2974 transport_set_sense_codes(cmd, 0x04, alua_ascq);
3012 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 2975 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3013 cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY; 2976 cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
3014 return -2; 2977 return -EINVAL;
3015 } 2978 }
3016 goto out_invalid_cdb_field; 2979 goto out_invalid_cdb_field;
3017 } 2980 }
@@ -3036,7 +2999,7 @@ static int transport_generic_cmd_sequencer(
3036 goto out_unsupported_cdb; 2999 goto out_unsupported_cdb;
3037 size = transport_get_size(sectors, cdb, cmd); 3000 size = transport_get_size(sectors, cdb, cmd);
3038 cmd->transport_split_cdb = &split_cdb_XX_6; 3001 cmd->transport_split_cdb = &split_cdb_XX_6;
3039 cmd->t_task->t_task_lba = transport_lba_21(cdb); 3002 cmd->t_task.t_task_lba = transport_lba_21(cdb);
3040 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 3003 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3041 break; 3004 break;
3042 case READ_10: 3005 case READ_10:
@@ -3045,7 +3008,7 @@ static int transport_generic_cmd_sequencer(
3045 goto out_unsupported_cdb; 3008 goto out_unsupported_cdb;
3046 size = transport_get_size(sectors, cdb, cmd); 3009 size = transport_get_size(sectors, cdb, cmd);
3047 cmd->transport_split_cdb = &split_cdb_XX_10; 3010 cmd->transport_split_cdb = &split_cdb_XX_10;
3048 cmd->t_task->t_task_lba = transport_lba_32(cdb); 3011 cmd->t_task.t_task_lba = transport_lba_32(cdb);
3049 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 3012 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3050 break; 3013 break;
3051 case READ_12: 3014 case READ_12:
@@ -3054,7 +3017,7 @@ static int transport_generic_cmd_sequencer(
3054 goto out_unsupported_cdb; 3017 goto out_unsupported_cdb;
3055 size = transport_get_size(sectors, cdb, cmd); 3018 size = transport_get_size(sectors, cdb, cmd);
3056 cmd->transport_split_cdb = &split_cdb_XX_12; 3019 cmd->transport_split_cdb = &split_cdb_XX_12;
3057 cmd->t_task->t_task_lba = transport_lba_32(cdb); 3020 cmd->t_task.t_task_lba = transport_lba_32(cdb);
3058 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 3021 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3059 break; 3022 break;
3060 case READ_16: 3023 case READ_16:
@@ -3063,7 +3026,7 @@ static int transport_generic_cmd_sequencer(
3063 goto out_unsupported_cdb; 3026 goto out_unsupported_cdb;
3064 size = transport_get_size(sectors, cdb, cmd); 3027 size = transport_get_size(sectors, cdb, cmd);
3065 cmd->transport_split_cdb = &split_cdb_XX_16; 3028 cmd->transport_split_cdb = &split_cdb_XX_16;
3066 cmd->t_task->t_task_lba = transport_lba_64(cdb); 3029 cmd->t_task.t_task_lba = transport_lba_64(cdb);
3067 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 3030 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3068 break; 3031 break;
3069 case WRITE_6: 3032 case WRITE_6:
@@ -3072,7 +3035,7 @@ static int transport_generic_cmd_sequencer(
3072 goto out_unsupported_cdb; 3035 goto out_unsupported_cdb;
3073 size = transport_get_size(sectors, cdb, cmd); 3036 size = transport_get_size(sectors, cdb, cmd);
3074 cmd->transport_split_cdb = &split_cdb_XX_6; 3037 cmd->transport_split_cdb = &split_cdb_XX_6;
3075 cmd->t_task->t_task_lba = transport_lba_21(cdb); 3038 cmd->t_task.t_task_lba = transport_lba_21(cdb);
3076 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 3039 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3077 break; 3040 break;
3078 case WRITE_10: 3041 case WRITE_10:
@@ -3081,8 +3044,8 @@ static int transport_generic_cmd_sequencer(
3081 goto out_unsupported_cdb; 3044 goto out_unsupported_cdb;
3082 size = transport_get_size(sectors, cdb, cmd); 3045 size = transport_get_size(sectors, cdb, cmd);
3083 cmd->transport_split_cdb = &split_cdb_XX_10; 3046 cmd->transport_split_cdb = &split_cdb_XX_10;
3084 cmd->t_task->t_task_lba = transport_lba_32(cdb); 3047 cmd->t_task.t_task_lba = transport_lba_32(cdb);
3085 cmd->t_task->t_tasks_fua = (cdb[1] & 0x8); 3048 cmd->t_task.t_tasks_fua = (cdb[1] & 0x8);
3086 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 3049 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3087 break; 3050 break;
3088 case WRITE_12: 3051 case WRITE_12:
@@ -3091,8 +3054,8 @@ static int transport_generic_cmd_sequencer(
3091 goto out_unsupported_cdb; 3054 goto out_unsupported_cdb;
3092 size = transport_get_size(sectors, cdb, cmd); 3055 size = transport_get_size(sectors, cdb, cmd);
3093 cmd->transport_split_cdb = &split_cdb_XX_12; 3056 cmd->transport_split_cdb = &split_cdb_XX_12;
3094 cmd->t_task->t_task_lba = transport_lba_32(cdb); 3057 cmd->t_task.t_task_lba = transport_lba_32(cdb);
3095 cmd->t_task->t_tasks_fua = (cdb[1] & 0x8); 3058 cmd->t_task.t_tasks_fua = (cdb[1] & 0x8);
3096 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 3059 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3097 break; 3060 break;
3098 case WRITE_16: 3061 case WRITE_16:
@@ -3101,20 +3064,20 @@ static int transport_generic_cmd_sequencer(
3101 goto out_unsupported_cdb; 3064 goto out_unsupported_cdb;
3102 size = transport_get_size(sectors, cdb, cmd); 3065 size = transport_get_size(sectors, cdb, cmd);
3103 cmd->transport_split_cdb = &split_cdb_XX_16; 3066 cmd->transport_split_cdb = &split_cdb_XX_16;
3104 cmd->t_task->t_task_lba = transport_lba_64(cdb); 3067 cmd->t_task.t_task_lba = transport_lba_64(cdb);
3105 cmd->t_task->t_tasks_fua = (cdb[1] & 0x8); 3068 cmd->t_task.t_tasks_fua = (cdb[1] & 0x8);
3106 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 3069 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3107 break; 3070 break;
3108 case XDWRITEREAD_10: 3071 case XDWRITEREAD_10:
3109 if ((cmd->data_direction != DMA_TO_DEVICE) || 3072 if ((cmd->data_direction != DMA_TO_DEVICE) ||
3110 !(cmd->t_task->t_tasks_bidi)) 3073 !(cmd->t_task.t_tasks_bidi))
3111 goto out_invalid_cdb_field; 3074 goto out_invalid_cdb_field;
3112 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); 3075 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
3113 if (sector_ret) 3076 if (sector_ret)
3114 goto out_unsupported_cdb; 3077 goto out_unsupported_cdb;
3115 size = transport_get_size(sectors, cdb, cmd); 3078 size = transport_get_size(sectors, cdb, cmd);
3116 cmd->transport_split_cdb = &split_cdb_XX_10; 3079 cmd->transport_split_cdb = &split_cdb_XX_10;
3117 cmd->t_task->t_task_lba = transport_lba_32(cdb); 3080 cmd->t_task.t_task_lba = transport_lba_32(cdb);
3118 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 3081 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3119 passthrough = (dev->transport->transport_type == 3082 passthrough = (dev->transport->transport_type ==
3120 TRANSPORT_PLUGIN_PHBA_PDEV); 3083 TRANSPORT_PLUGIN_PHBA_PDEV);
@@ -3127,7 +3090,7 @@ static int transport_generic_cmd_sequencer(
3127 * Setup BIDI XOR callback to be run during transport_generic_complete_ok() 3090 * Setup BIDI XOR callback to be run during transport_generic_complete_ok()
3128 */ 3091 */
3129 cmd->transport_complete_callback = &transport_xor_callback; 3092 cmd->transport_complete_callback = &transport_xor_callback;
3130 cmd->t_task->t_tasks_fua = (cdb[1] & 0x8); 3093 cmd->t_task.t_tasks_fua = (cdb[1] & 0x8);
3131 break; 3094 break;
3132 case VARIABLE_LENGTH_CMD: 3095 case VARIABLE_LENGTH_CMD:
3133 service_action = get_unaligned_be16(&cdb[8]); 3096 service_action = get_unaligned_be16(&cdb[8]);
@@ -3149,7 +3112,7 @@ static int transport_generic_cmd_sequencer(
3149 * XDWRITE_READ_32 logic. 3112 * XDWRITE_READ_32 logic.
3150 */ 3113 */
3151 cmd->transport_split_cdb = &split_cdb_XX_32; 3114 cmd->transport_split_cdb = &split_cdb_XX_32;
3152 cmd->t_task->t_task_lba = transport_lba_64_ext(cdb); 3115 cmd->t_task.t_task_lba = transport_lba_64_ext(cdb);
3153 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 3116 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3154 3117
3155 /* 3118 /*
@@ -3163,14 +3126,14 @@ static int transport_generic_cmd_sequencer(
3163 * transport_generic_complete_ok() 3126 * transport_generic_complete_ok()
3164 */ 3127 */
3165 cmd->transport_complete_callback = &transport_xor_callback; 3128 cmd->transport_complete_callback = &transport_xor_callback;
3166 cmd->t_task->t_tasks_fua = (cdb[10] & 0x8); 3129 cmd->t_task.t_tasks_fua = (cdb[10] & 0x8);
3167 break; 3130 break;
3168 case WRITE_SAME_32: 3131 case WRITE_SAME_32:
3169 sectors = transport_get_sectors_32(cdb, cmd, &sector_ret); 3132 sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
3170 if (sector_ret) 3133 if (sector_ret)
3171 goto out_unsupported_cdb; 3134 goto out_unsupported_cdb;
3172 size = transport_get_size(sectors, cdb, cmd); 3135 size = transport_get_size(sectors, cdb, cmd);
3173 cmd->t_task->t_task_lba = get_unaligned_be64(&cdb[12]); 3136 cmd->t_task.t_task_lba = get_unaligned_be64(&cdb[12]);
3174 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 3137 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3175 3138
3176 /* 3139 /*
@@ -3299,7 +3262,7 @@ static int transport_generic_cmd_sequencer(
3299 * Do implicit HEAD_OF_QUEUE processing for INQUIRY. 3262 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
3300 * See spc4r17 section 5.3 3263 * See spc4r17 section 5.3
3301 */ 3264 */
3302 if (cmd->se_lun->lun_se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) 3265 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3303 cmd->sam_task_attr = MSG_HEAD_TAG; 3266 cmd->sam_task_attr = MSG_HEAD_TAG;
3304 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; 3267 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3305 break; 3268 break;
@@ -3405,10 +3368,10 @@ static int transport_generic_cmd_sequencer(
3405 */ 3368 */
3406 if (cdb[0] == SYNCHRONIZE_CACHE) { 3369 if (cdb[0] == SYNCHRONIZE_CACHE) {
3407 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); 3370 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
3408 cmd->t_task->t_task_lba = transport_lba_32(cdb); 3371 cmd->t_task.t_task_lba = transport_lba_32(cdb);
3409 } else { 3372 } else {
3410 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); 3373 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
3411 cmd->t_task->t_task_lba = transport_lba_64(cdb); 3374 cmd->t_task.t_task_lba = transport_lba_64(cdb);
3412 } 3375 }
3413 if (sector_ret) 3376 if (sector_ret)
3414 goto out_unsupported_cdb; 3377 goto out_unsupported_cdb;
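
SYNCHRONIZE_CACHE branches on the opcode to pick the LBA width: the 10-byte form carries a 32-bit LBA and the 16-byte form a 64-bit LBA, both big-endian starting at cdb[2]. The transport_lba_32()/transport_lba_64() helpers are not shown in this diff; bodies consistent with that layout would be:

	#include <asm/unaligned.h>

	static inline u64 ex_lba_32(unsigned char *cdb)
	{
		return get_unaligned_be32(&cdb[2]);	/* 10-byte CDB family */
	}

	static inline u64 ex_lba_64(unsigned char *cdb)
	{
		return get_unaligned_be64(&cdb[2]);	/* 16-byte CDB family */
	}
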
@@ -3454,7 +3417,7 @@ static int transport_generic_cmd_sequencer(
3454 if (sector_ret) 3417 if (sector_ret)
3455 goto out_unsupported_cdb; 3418 goto out_unsupported_cdb;
3456 size = transport_get_size(sectors, cdb, cmd); 3419 size = transport_get_size(sectors, cdb, cmd);
3457 cmd->t_task->t_task_lba = get_unaligned_be16(&cdb[2]); 3420 cmd->t_task.t_task_lba = get_unaligned_be16(&cdb[2]);
3458 passthrough = (dev->transport->transport_type == 3421 passthrough = (dev->transport->transport_type ==
3459 TRANSPORT_PLUGIN_PHBA_PDEV); 3422 TRANSPORT_PLUGIN_PHBA_PDEV);
3460 /* 3423 /*
@@ -3507,7 +3470,7 @@ static int transport_generic_cmd_sequencer(
3507 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS 3470 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
3508 * See spc4r17 section 5.3 3471 * See spc4r17 section 5.3
3509 */ 3472 */
3510 if (cmd->se_lun->lun_se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) 3473 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3511 cmd->sam_task_attr = MSG_HEAD_TAG; 3474 cmd->sam_task_attr = MSG_HEAD_TAG;
3512 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; 3475 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3513 break; 3476 break;
@@ -3560,11 +3523,11 @@ static int transport_generic_cmd_sequencer(
3560out_unsupported_cdb: 3523out_unsupported_cdb:
3561 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 3524 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3562 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 3525 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
3563 return -2; 3526 return -EINVAL;
3564out_invalid_cdb_field: 3527out_invalid_cdb_field:
3565 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 3528 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3566 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; 3529 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3567 return -2; 3530 return -EINVAL;
3568} 3531}
3569 3532
3570static inline void transport_release_tasks(struct se_cmd *); 3533static inline void transport_release_tasks(struct se_cmd *);
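
Every error exit in the sequencer now returns -EINVAL where it previously returned the magic -2, so callers can stop pattern-matching a private sentinel and simply propagate the value as an ordinary errno:

	ret = transport_generic_cmd_sequencer(cmd, cdb);
	if (ret < 0)
		return ret;	/* a real errno now, safe to hand upward */

No information is lost by the uniform error code: the distinguishing detail (TCM_UNSUPPORTED_SCSI_OPCODE versus TCM_INVALID_CDB_FIELD) is still recorded in cmd->scsi_sense_reason before the return.
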
@@ -3662,7 +3625,7 @@ static void transport_memcpy_se_mem_read_contig(
3662 */ 3625 */
3663static void transport_complete_task_attr(struct se_cmd *cmd) 3626static void transport_complete_task_attr(struct se_cmd *cmd)
3664{ 3627{
3665 struct se_device *dev = cmd->se_lun->lun_se_dev; 3628 struct se_device *dev = cmd->se_dev;
3666 struct se_cmd *cmd_p, *cmd_tmp; 3629 struct se_cmd *cmd_p, *cmd_tmp;
3667 int new_active_tasks = 0; 3630 int new_active_tasks = 0;
3668 3631
@@ -3682,7 +3645,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
3682 cmd->se_ordered_id); 3645 cmd->se_ordered_id);
3683 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { 3646 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
3684 spin_lock(&dev->ordered_cmd_lock); 3647 spin_lock(&dev->ordered_cmd_lock);
3685 list_del(&cmd->se_ordered_list); 3648 list_del(&cmd->se_ordered_node);
3686 atomic_dec(&dev->dev_ordered_sync); 3649 atomic_dec(&dev->dev_ordered_sync);
3687 smp_mb__after_atomic_dec(); 3650 smp_mb__after_atomic_dec();
3688 spin_unlock(&dev->ordered_cmd_lock); 3651 spin_unlock(&dev->ordered_cmd_lock);
@@ -3698,9 +3661,9 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
3698 */ 3661 */
3699 spin_lock(&dev->delayed_cmd_lock); 3662 spin_lock(&dev->delayed_cmd_lock);
3700 list_for_each_entry_safe(cmd_p, cmd_tmp, 3663 list_for_each_entry_safe(cmd_p, cmd_tmp,
3701 &dev->delayed_cmd_list, se_delayed_list) { 3664 &dev->delayed_cmd_list, se_delayed_node) {
3702 3665
3703 list_del(&cmd_p->se_delayed_list); 3666 list_del(&cmd_p->se_delayed_node);
3704 spin_unlock(&dev->delayed_cmd_lock); 3667 spin_unlock(&dev->delayed_cmd_lock);
3705 3668
3706 DEBUG_STA("Calling add_tasks() for" 3669 DEBUG_STA("Calling add_tasks() for"
@@ -3733,7 +3696,7 @@ static void transport_generic_complete_ok(struct se_cmd *cmd)
3733 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task 3696 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
3734 * Attribute. 3697 * Attribute.
3735 */ 3698 */
3736 if (cmd->se_lun->lun_se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) 3699 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3737 transport_complete_task_attr(cmd); 3700 transport_complete_task_attr(cmd);
3738 /* 3701 /*
3739 * Check if we need to retrieve a sense buffer from 3702 * Check if we need to retrieve a sense buffer from
@@ -3777,8 +3740,8 @@ static void transport_generic_complete_ok(struct se_cmd *cmd)
3777 */ 3740 */
3778 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG) 3741 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
3779 transport_memcpy_write_contig(cmd, 3742 transport_memcpy_write_contig(cmd,
3780 cmd->t_task->t_task_pt_sgl, 3743 cmd->t_task.t_task_pt_sgl,
3781 cmd->t_task->t_task_buf); 3744 cmd->t_task.t_task_buf);
3782 3745
3783 cmd->se_tfo->queue_data_in(cmd); 3746 cmd->se_tfo->queue_data_in(cmd);
3784 break; 3747 break;
@@ -3792,7 +3755,7 @@ static void transport_generic_complete_ok(struct se_cmd *cmd)
3792 /* 3755 /*
3793 * Check if we need to send READ payload for BIDI-COMMAND 3756 * Check if we need to send READ payload for BIDI-COMMAND
3794 */ 3757 */
3795 if (cmd->t_task->t_mem_bidi_list != NULL) { 3758 if (!list_empty(&cmd->t_task.t_mem_bidi_list)) {
3796 spin_lock(&cmd->se_lun->lun_sep_lock); 3759 spin_lock(&cmd->se_lun->lun_sep_lock);
3797 if (cmd->se_lun->lun_sep) { 3760 if (cmd->se_lun->lun_sep) {
3798 cmd->se_lun->lun_sep->sep_stats.tx_data_octets += 3761 cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
@@ -3819,9 +3782,9 @@ static void transport_free_dev_tasks(struct se_cmd *cmd)
3819 struct se_task *task, *task_tmp; 3782 struct se_task *task, *task_tmp;
3820 unsigned long flags; 3783 unsigned long flags;
3821 3784
3822 spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); 3785 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
3823 list_for_each_entry_safe(task, task_tmp, 3786 list_for_each_entry_safe(task, task_tmp,
3824 &cmd->t_task->t_task_list, t_list) { 3787 &cmd->t_task.t_task_list, t_list) {
3825 if (atomic_read(&task->task_active)) 3788 if (atomic_read(&task->task_active))
3826 continue; 3789 continue;
3827 3790
@@ -3830,15 +3793,15 @@ static void transport_free_dev_tasks(struct se_cmd *cmd)
3830 3793
3831 list_del(&task->t_list); 3794 list_del(&task->t_list);
3832 3795
3833 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 3796 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
3834 if (task->se_dev) 3797 if (task->se_dev)
3835 task->se_dev->transport->free_task(task); 3798 task->se_dev->transport->free_task(task);
3836 else 3799 else
3837 printk(KERN_ERR "task[%u] - task->se_dev is NULL\n", 3800 printk(KERN_ERR "task[%u] - task->se_dev is NULL\n",
3838 task->task_no); 3801 task->task_no);
3839 spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); 3802 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
3840 } 3803 }
3841 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 3804 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
3842} 3805}
3843 3806
3844static inline void transport_free_pages(struct se_cmd *cmd) 3807static inline void transport_free_pages(struct se_cmd *cmd)
@@ -3851,9 +3814,9 @@ static inline void transport_free_pages(struct se_cmd *cmd)
3851 if (cmd->se_dev->transport->do_se_mem_map) 3814 if (cmd->se_dev->transport->do_se_mem_map)
3852 free_page = 0; 3815 free_page = 0;
3853 3816
3854 if (cmd->t_task->t_task_buf) { 3817 if (cmd->t_task.t_task_buf) {
3855 kfree(cmd->t_task->t_task_buf); 3818 kfree(cmd->t_task.t_task_buf);
3856 cmd->t_task->t_task_buf = NULL; 3819 cmd->t_task.t_task_buf = NULL;
3857 return; 3820 return;
3858 } 3821 }
3859 3822
@@ -3863,11 +3826,8 @@ static inline void transport_free_pages(struct se_cmd *cmd)
3863 if (cmd->se_cmd_flags & SCF_CMD_PASSTHROUGH_NOALLOC) 3826 if (cmd->se_cmd_flags & SCF_CMD_PASSTHROUGH_NOALLOC)
3864 return; 3827 return;
3865 3828
3866 if (!(cmd->t_task->t_tasks_se_num))
3867 return;
3868
3869 list_for_each_entry_safe(se_mem, se_mem_tmp, 3829 list_for_each_entry_safe(se_mem, se_mem_tmp,
3870 cmd->t_task->t_mem_list, se_list) { 3830 &cmd->t_task.t_mem_list, se_list) {
3871 /* 3831 /*
3872 * We only call __free_page(struct se_mem->se_page) when 3832 * We only call __free_page(struct se_mem->se_page) when
3873 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use, 3833 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use,
@@ -3878,27 +3838,21 @@ static inline void transport_free_pages(struct se_cmd *cmd)
3878 list_del(&se_mem->se_list); 3838 list_del(&se_mem->se_list);
3879 kmem_cache_free(se_mem_cache, se_mem); 3839 kmem_cache_free(se_mem_cache, se_mem);
3880 } 3840 }
3841 cmd->t_task.t_tasks_se_num = 0;
3881 3842
3882 if (cmd->t_task->t_mem_bidi_list && cmd->t_task->t_tasks_se_bidi_num) { 3843 list_for_each_entry_safe(se_mem, se_mem_tmp,
3883 list_for_each_entry_safe(se_mem, se_mem_tmp, 3844 &cmd->t_task.t_mem_bidi_list, se_list) {
3884 cmd->t_task->t_mem_bidi_list, se_list) { 3845 /*
3885 /* 3846 * We only call __free_page(struct se_mem->se_page) when
3886 * We only call __free_page(struct se_mem->se_page) when 3847 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use,
3887 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use, 3848 */
3888 */ 3849 if (free_page)
3889 if (free_page) 3850 __free_page(se_mem->se_page);
3890 __free_page(se_mem->se_page);
3891 3851
3892 list_del(&se_mem->se_list); 3852 list_del(&se_mem->se_list);
3893 kmem_cache_free(se_mem_cache, se_mem); 3853 kmem_cache_free(se_mem_cache, se_mem);
3894 }
3895 } 3854 }
3896 3855 cmd->t_task.t_tasks_se_bidi_num = 0;
3897 kfree(cmd->t_task->t_mem_bidi_list);
3898 cmd->t_task->t_mem_bidi_list = NULL;
3899 kfree(cmd->t_task->t_mem_list);
3900 cmd->t_task->t_mem_list = NULL;
3901 cmd->t_task->t_tasks_se_num = 0;
3902} 3856}
3903 3857
3904static inline void transport_release_tasks(struct se_cmd *cmd) 3858static inline void transport_release_tasks(struct se_cmd *cmd)
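
With t_mem_list and t_mem_bidi_list embedded in t_task and always initialized, transport_free_pages() can walk both lists unconditionally: an empty list makes the loop a no-op, so the old t_tasks_se_num guard, the bidi NULL test, and the trailing kfree() of the list heads all disappear. The resulting shape, per list:

	list_for_each_entry_safe(se_mem, se_mem_tmp,
				 &cmd->t_task.t_mem_bidi_list, se_list) {
		if (free_page)
			__free_page(se_mem->se_page);	/* skipped for NOALLOC maps */
		list_del(&se_mem->se_list);
		kmem_cache_free(se_mem_cache, se_mem);
	}
	cmd->t_task.t_tasks_se_bidi_num = 0;

The same reasoning shows up below wherever a "list pointer != NULL" test becomes list_empty().
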
@@ -3910,23 +3864,23 @@ static inline int transport_dec_and_check(struct se_cmd *cmd)
3910{ 3864{
3911 unsigned long flags; 3865 unsigned long flags;
3912 3866
3913 spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); 3867 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
3914 if (atomic_read(&cmd->t_task->t_fe_count)) { 3868 if (atomic_read(&cmd->t_task.t_fe_count)) {
3915 if (!(atomic_dec_and_test(&cmd->t_task->t_fe_count))) { 3869 if (!(atomic_dec_and_test(&cmd->t_task.t_fe_count))) {
3916 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, 3870 spin_unlock_irqrestore(&cmd->t_task.t_state_lock,
3917 flags); 3871 flags);
3918 return 1; 3872 return 1;
3919 } 3873 }
3920 } 3874 }
3921 3875
3922 if (atomic_read(&cmd->t_task->t_se_count)) { 3876 if (atomic_read(&cmd->t_task.t_se_count)) {
3923 if (!(atomic_dec_and_test(&cmd->t_task->t_se_count))) { 3877 if (!(atomic_dec_and_test(&cmd->t_task.t_se_count))) {
3924 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, 3878 spin_unlock_irqrestore(&cmd->t_task.t_state_lock,
3925 flags); 3879 flags);
3926 return 1; 3880 return 1;
3927 } 3881 }
3928 } 3882 }
3929 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 3883 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
3930 3884
3931 return 0; 3885 return 0;
3932} 3886}
@@ -3938,14 +3892,14 @@ static void transport_release_fe_cmd(struct se_cmd *cmd)
3938 if (transport_dec_and_check(cmd)) 3892 if (transport_dec_and_check(cmd))
3939 return; 3893 return;
3940 3894
3941 spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); 3895 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
3942 if (!(atomic_read(&cmd->t_task->transport_dev_active))) { 3896 if (!(atomic_read(&cmd->t_task.transport_dev_active))) {
3943 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 3897 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
3944 goto free_pages; 3898 goto free_pages;
3945 } 3899 }
3946 atomic_set(&cmd->t_task->transport_dev_active, 0); 3900 atomic_set(&cmd->t_task.transport_dev_active, 0);
3947 transport_all_task_dev_remove_state(cmd); 3901 transport_all_task_dev_remove_state(cmd);
3948 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 3902 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
3949 3903
3950 transport_release_tasks(cmd); 3904 transport_release_tasks(cmd);
3951free_pages: 3905free_pages:
@@ -3961,33 +3915,30 @@ static int transport_generic_remove(
3961{ 3915{
3962 unsigned long flags; 3916 unsigned long flags;
3963 3917
3964 if (!(cmd->t_task))
3965 goto release_cmd;
3966
3967 if (transport_dec_and_check(cmd)) { 3918 if (transport_dec_and_check(cmd)) {
3968 if (session_reinstatement) { 3919 if (session_reinstatement) {
3969 spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); 3920 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
3970 transport_all_task_dev_remove_state(cmd); 3921 transport_all_task_dev_remove_state(cmd);
3971 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, 3922 spin_unlock_irqrestore(&cmd->t_task.t_state_lock,
3972 flags); 3923 flags);
3973 } 3924 }
3974 return 1; 3925 return 1;
3975 } 3926 }
3976 3927
3977 spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); 3928 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
3978 if (!(atomic_read(&cmd->t_task->transport_dev_active))) { 3929 if (!(atomic_read(&cmd->t_task.transport_dev_active))) {
3979 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 3930 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
3980 goto free_pages; 3931 goto free_pages;
3981 } 3932 }
3982 atomic_set(&cmd->t_task->transport_dev_active, 0); 3933 atomic_set(&cmd->t_task.transport_dev_active, 0);
3983 transport_all_task_dev_remove_state(cmd); 3934 transport_all_task_dev_remove_state(cmd);
3984 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 3935 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
3985 3936
3986 transport_release_tasks(cmd); 3937 transport_release_tasks(cmd);
3938
3987free_pages: 3939free_pages:
3988 transport_free_pages(cmd); 3940 transport_free_pages(cmd);
3989 3941
3990release_cmd:
3991 if (release_to_pool) { 3942 if (release_to_pool) {
3992 transport_release_cmd_to_pool(cmd); 3943 transport_release_cmd_to_pool(cmd);
3993 } else { 3944 } else {
@@ -4011,35 +3962,19 @@ release_cmd:
4011 */ 3962 */
4012int transport_generic_map_mem_to_cmd( 3963int transport_generic_map_mem_to_cmd(
4013 struct se_cmd *cmd, 3964 struct se_cmd *cmd,
4014 struct scatterlist *mem, 3965 struct scatterlist *sgl,
4015 u32 sg_mem_num, 3966 u32 sgl_count,
4016 struct scatterlist *mem_bidi_in, 3967 struct scatterlist *sgl_bidi,
4017 u32 sg_mem_bidi_num) 3968 u32 sgl_bidi_count)
4018{ 3969{
4019 u32 se_mem_cnt_out = 0; 3970 u32 mapped_sg_count = 0;
4020 int ret; 3971 int ret;
4021 3972
4022 if (!(mem) || !(sg_mem_num)) 3973 if (!sgl || !sgl_count)
4023 return 0; 3974 return 0;
4024 /*
4025 * Passed *mem will contain a list_head containing preformatted
4026 * struct se_mem elements...
4027 */
4028 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM)) {
4029 if ((mem_bidi_in) || (sg_mem_bidi_num)) {
4030 printk(KERN_ERR "SCF_CMD_PASSTHROUGH_NOALLOC not supported"
4031 " with BIDI-COMMAND\n");
4032 return -ENOSYS;
4033 }
4034 3975
4035 cmd->t_task->t_mem_list = (struct list_head *)mem;
4036 cmd->t_task->t_tasks_se_num = sg_mem_num;
4037 cmd->se_cmd_flags |= SCF_CMD_PASSTHROUGH_NOALLOC;
4038 return 0;
4039 }
4040 /* 3976 /*
4041 * Otherwise, assume the caller is passing a struct scatterlist 3977 * Convert sgls (sgl, sgl_bidi) to list of se_mems
4042 * array from include/linux/scatterlist.h
4043 */ 3978 */
4044 if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || 3979 if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
4045 (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) { 3980 (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
@@ -4048,41 +3983,29 @@ int transport_generic_map_mem_to_cmd(
4048 * processed into a TCM struct se_subsystem_dev, we do the mapping 3983 * processed into a TCM struct se_subsystem_dev, we do the mapping
4049 * from the passed physical memory to struct se_mem->se_page here. 3984 * from the passed physical memory to struct se_mem->se_page here.
4050 */ 3985 */
4051 cmd->t_task->t_mem_list = transport_init_se_mem_list();
4052 if (!(cmd->t_task->t_mem_list))
4053 return -ENOMEM;
4054
4055 ret = transport_map_sg_to_mem(cmd, 3986 ret = transport_map_sg_to_mem(cmd,
4056 cmd->t_task->t_mem_list, mem, &se_mem_cnt_out); 3987 &cmd->t_task.t_mem_list, sgl, &mapped_sg_count);
4057 if (ret < 0) 3988 if (ret < 0)
4058 return -ENOMEM; 3989 return -ENOMEM;
4059 3990
4060 cmd->t_task->t_tasks_se_num = se_mem_cnt_out; 3991 cmd->t_task.t_tasks_se_num = mapped_sg_count;
4061 /* 3992 /*
4062 * Setup BIDI READ list of struct se_mem elements 3993 * Setup BIDI READ list of struct se_mem elements
4063 */ 3994 */
4064 if ((mem_bidi_in) && (sg_mem_bidi_num)) { 3995 if (sgl_bidi && sgl_bidi_count) {
4065 cmd->t_task->t_mem_bidi_list = transport_init_se_mem_list(); 3996 mapped_sg_count = 0;
4066 if (!(cmd->t_task->t_mem_bidi_list)) {
4067 kfree(cmd->t_task->t_mem_list);
4068 return -ENOMEM;
4069 }
4070 se_mem_cnt_out = 0;
4071
4072 ret = transport_map_sg_to_mem(cmd, 3997 ret = transport_map_sg_to_mem(cmd,
4073 cmd->t_task->t_mem_bidi_list, mem_bidi_in, 3998 &cmd->t_task.t_mem_bidi_list, sgl_bidi,
4074 &se_mem_cnt_out); 3999 &mapped_sg_count);
4075 if (ret < 0) { 4000 if (ret < 0)
4076 kfree(cmd->t_task->t_mem_list);
4077 return -ENOMEM; 4001 return -ENOMEM;
4078 }
4079 4002
4080 cmd->t_task->t_tasks_se_bidi_num = se_mem_cnt_out; 4003 cmd->t_task.t_tasks_se_bidi_num = mapped_sg_count;
4081 } 4004 }
4082 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; 4005 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
4083 4006
4084 } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) { 4007 } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) {
4085 if (mem_bidi_in || sg_mem_bidi_num) { 4008 if (sgl_bidi || sgl_bidi_count) {
4086 printk(KERN_ERR "BIDI-Commands not supported using " 4009 printk(KERN_ERR "BIDI-Commands not supported using "
4087 "SCF_SCSI_CONTROL_NONSG_IO_CDB\n"); 4010 "SCF_SCSI_CONTROL_NONSG_IO_CDB\n");
4088 return -ENOSYS; 4011 return -ENOSYS;
@@ -4097,7 +4020,8 @@ int transport_generic_map_mem_to_cmd(
4097 * struct scatterlist format. 4020 * struct scatterlist format.
4098 */ 4021 */
4099 cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG; 4022 cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG;
4100 cmd->t_task->t_task_pt_sgl = mem; 4023 cmd->t_task.t_task_pt_sgl = sgl;
4024 /* don't need sgl count? We assume it contains cmd->data_length data */
4101 } 4025 }
4102 4026
4103 return 0; 4027 return 0;
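With the se_mem list heads embedded, the function now takes bare scatterlists and counts directly. A sketch of how a SCSI-LLD-style fabric might call it, along the lines of tcm_loop; the sc and se_cmd variables are assumed to exist at the call site:

    #include <scsi/scsi_cmnd.h>

    /* Map the mid-layer's SGLs, including the BIDI READ table when present. */
    ret = transport_generic_map_mem_to_cmd(se_cmd,
                    scsi_sglist(sc), scsi_sg_count(sc),
                    scsi_bidi_cmnd(sc) ? scsi_in(sc)->table.sgl : NULL,
                    scsi_bidi_cmnd(sc) ? scsi_in(sc)->table.nents : 0);
    if (ret < 0)
            return ret;     /* se_mem lists are torn down via transport_free_pages() */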
@@ -4112,21 +4036,21 @@ static inline long long transport_dev_end_lba(struct se_device *dev)
4112 4036
4113static int transport_get_sectors(struct se_cmd *cmd) 4037static int transport_get_sectors(struct se_cmd *cmd)
4114{ 4038{
4115 struct se_device *dev = cmd->se_lun->lun_se_dev; 4039 struct se_device *dev = cmd->se_dev;
4116 4040
4117 cmd->t_task->t_tasks_sectors = 4041 cmd->t_task.t_tasks_sectors =
4118 (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size); 4042 (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);
4119 if (!(cmd->t_task->t_tasks_sectors)) 4043 if (!(cmd->t_task.t_tasks_sectors))
4120 cmd->t_task->t_tasks_sectors = 1; 4044 cmd->t_task.t_tasks_sectors = 1;
4121 4045
4122 if (dev->transport->get_device_type(dev) != TYPE_DISK) 4046 if (dev->transport->get_device_type(dev) != TYPE_DISK)
4123 return 0; 4047 return 0;
4124 4048
4125 if ((cmd->t_task->t_task_lba + cmd->t_task->t_tasks_sectors) > 4049 if ((cmd->t_task.t_task_lba + cmd->t_task.t_tasks_sectors) >
4126 transport_dev_end_lba(dev)) { 4050 transport_dev_end_lba(dev)) {
4127 printk(KERN_ERR "LBA: %llu Sectors: %u exceeds" 4051 printk(KERN_ERR "LBA: %llu Sectors: %u exceeds"
4128 " transport_dev_end_lba(): %llu\n", 4052 " transport_dev_end_lba(): %llu\n",
4129 cmd->t_task->t_task_lba, cmd->t_task->t_tasks_sectors, 4053 cmd->t_task.t_task_lba, cmd->t_task.t_tasks_sectors,
4130 transport_dev_end_lba(dev)); 4054 transport_dev_end_lba(dev));
4131 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 4055 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
4132 cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY; 4056 cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
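The division above floors, so any transfer smaller than one block still yields a single sector. For illustration only, assuming a 512-byte block size:

    #include <linux/types.h>

    /* Illustration: assumed 512-byte blocks. */
    static u32 sectors_for(u32 data_length)
    {
            u32 sectors = data_length / 512;  /* 8192 -> 16; 200 -> 0 */

            return sectors ? sectors : 1;     /* never less than one  */
    }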
@@ -4138,26 +4062,26 @@ static int transport_get_sectors(struct se_cmd *cmd)
4138 4062
4139static int transport_new_cmd_obj(struct se_cmd *cmd) 4063static int transport_new_cmd_obj(struct se_cmd *cmd)
4140{ 4064{
4141 struct se_device *dev = cmd->se_lun->lun_se_dev; 4065 struct se_device *dev = cmd->se_dev;
4142 u32 task_cdbs = 0, rc; 4066 u32 task_cdbs = 0, rc;
4143 4067
4144 if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) { 4068 if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
4145 task_cdbs++; 4069 task_cdbs++;
4146 cmd->t_task->t_task_cdbs++; 4070 cmd->t_task.t_task_cdbs++;
4147 } else { 4071 } else {
4148 int set_counts = 1; 4072 int set_counts = 1;
4149 4073
4150 /* 4074 /*
4151 * Setup any BIDI READ tasks and memory from 4075 * Setup any BIDI READ tasks and memory from
4152 * cmd->t_task->t_mem_bidi_list so the READ struct se_tasks 4076 * cmd->t_task.t_mem_bidi_list so the READ struct se_tasks
4153 * are queued first for the non pSCSI passthrough case. 4077 * are queued first for the non pSCSI passthrough case.
4154 */ 4078 */
4155 if ((cmd->t_task->t_mem_bidi_list != NULL) && 4079 if (!list_empty(&cmd->t_task.t_mem_bidi_list) &&
4156 (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) { 4080 (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) {
4157 rc = transport_generic_get_cdb_count(cmd, 4081 rc = transport_generic_get_cdb_count(cmd,
4158 cmd->t_task->t_task_lba, 4082 cmd->t_task.t_task_lba,
4159 cmd->t_task->t_tasks_sectors, 4083 cmd->t_task.t_tasks_sectors,
4160 DMA_FROM_DEVICE, cmd->t_task->t_mem_bidi_list, 4084 DMA_FROM_DEVICE, &cmd->t_task.t_mem_bidi_list,
4161 set_counts); 4085 set_counts);
4162 if (!(rc)) { 4086 if (!(rc)) {
4163 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 4087 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
@@ -4168,13 +4092,13 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
4168 set_counts = 0; 4092 set_counts = 0;
4169 } 4093 }
4170 /* 4094 /*
4171 * Setup the tasks and memory from cmd->t_task->t_mem_list 4095 * Setup the tasks and memory from cmd->t_task.t_mem_list
4172 * Note for BIDI transfers this will contain the WRITE payload 4096 * Note for BIDI transfers this will contain the WRITE payload
4173 */ 4097 */
4174 task_cdbs = transport_generic_get_cdb_count(cmd, 4098 task_cdbs = transport_generic_get_cdb_count(cmd,
4175 cmd->t_task->t_task_lba, 4099 cmd->t_task.t_task_lba,
4176 cmd->t_task->t_tasks_sectors, 4100 cmd->t_task.t_tasks_sectors,
4177 cmd->data_direction, cmd->t_task->t_mem_list, 4101 cmd->data_direction, &cmd->t_task.t_mem_list,
4178 set_counts); 4102 set_counts);
4179 if (!(task_cdbs)) { 4103 if (!(task_cdbs)) {
4180 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 4104 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
@@ -4182,63 +4106,34 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
4182 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 4106 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
4183 return PYX_TRANSPORT_LU_COMM_FAILURE; 4107 return PYX_TRANSPORT_LU_COMM_FAILURE;
4184 } 4108 }
4185 cmd->t_task->t_task_cdbs += task_cdbs; 4109 cmd->t_task.t_task_cdbs += task_cdbs;
4186 4110
4187#if 0 4111#if 0
4188 printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:" 4112 printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:"
4189 " %u, t_task_cdbs: %u\n", obj_ptr, cmd->data_length, 4113 " %u, t_task_cdbs: %u\n", obj_ptr, cmd->data_length,
4190 cmd->t_task->t_task_lba, cmd->t_task->t_tasks_sectors, 4114 cmd->t_task.t_task_lba, cmd->t_task.t_tasks_sectors,
4191 cmd->t_task->t_task_cdbs); 4115 cmd->t_task.t_task_cdbs);
4192#endif 4116#endif
4193 } 4117 }
4194 4118
4195 atomic_set(&cmd->t_task->t_task_cdbs_left, task_cdbs); 4119 atomic_set(&cmd->t_task.t_task_cdbs_left, task_cdbs);
4196 atomic_set(&cmd->t_task->t_task_cdbs_ex_left, task_cdbs); 4120 atomic_set(&cmd->t_task.t_task_cdbs_ex_left, task_cdbs);
4197 atomic_set(&cmd->t_task->t_task_cdbs_timeout_left, task_cdbs); 4121 atomic_set(&cmd->t_task.t_task_cdbs_timeout_left, task_cdbs);
4198 return 0; 4122 return 0;
4199} 4123}
4200 4124
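For a feel of the counts initialized just above: the number of task CDBs is the transfer split by the device's per-task sector limit. An illustrative calculation, where the 512-byte block size and 128-sector cap are assumptions, not values from this code:

    #include <linux/kernel.h>       /* DIV_ROUND_UP */

    /* Illustration: split a 1 MiB I/O into per-task CDBs (limits assumed). */
    static u32 tasks_for_1mib(void)
    {
            u32 sectors  = (1024 * 1024) / 512;     /* 2048 sectors        */
            u32 max_secs = 128;                     /* per-task device cap */

            return DIV_ROUND_UP(sectors, max_secs); /* = 16 task CDBs      */
    }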
4201static struct list_head *transport_init_se_mem_list(void)
4202{
4203 struct list_head *se_mem_list;
4204
4205 se_mem_list = kzalloc(sizeof(struct list_head), GFP_KERNEL);
4206 if (!(se_mem_list)) {
4207 printk(KERN_ERR "Unable to allocate memory for se_mem_list\n");
4208 return NULL;
4209 }
4210 INIT_LIST_HEAD(se_mem_list);
4211
4212 return se_mem_list;
4213}
4214
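With t_mem_list and t_mem_bidi_list now embedded in t_task, the kzalloc'd list_head above disappears entirely; setup reduces to initializing the embedded heads once when the command is constructed. A sketch of the pattern, wherever the init actually lands:

    #include <linux/list.h>

    /* Embedded heads: no allocation, no NULL checks, no kfree() on error. */
    INIT_LIST_HEAD(&cmd->t_task.t_mem_list);
    INIT_LIST_HEAD(&cmd->t_task.t_mem_bidi_list);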
4215static int 4125static int
4216transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size) 4126transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size)
4217{ 4127{
4218 unsigned char *buf; 4128 unsigned char *buf;
4219 struct se_mem *se_mem; 4129 struct se_mem *se_mem;
4220 4130
4221 cmd->t_task->t_mem_list = transport_init_se_mem_list();
4222 if (!(cmd->t_task->t_mem_list))
4223 return -ENOMEM;
4224
4225 /* 4131 /*
4226 * If the device uses memory mapping this is enough. 4132 * If the device uses memory mapping this is enough.
4227 */ 4133 */
4228 if (cmd->se_dev->transport->do_se_mem_map) 4134 if (cmd->se_dev->transport->do_se_mem_map)
4229 return 0; 4135 return 0;
4230 4136
4231 /*
4232 * Setup BIDI-COMMAND READ list of struct se_mem elements
4233 */
4234 if (cmd->t_task->t_tasks_bidi) {
4235 cmd->t_task->t_mem_bidi_list = transport_init_se_mem_list();
4236 if (!(cmd->t_task->t_mem_bidi_list)) {
4237 kfree(cmd->t_task->t_mem_list);
4238 return -ENOMEM;
4239 }
4240 }
4241
4242 while (length) { 4137 while (length) {
4243 se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); 4138 se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
4244 if (!(se_mem)) { 4139 if (!(se_mem)) {
@@ -4263,8 +4158,8 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size)
4263 memset(buf, 0, se_mem->se_len); 4158 memset(buf, 0, se_mem->se_len);
4264 kunmap_atomic(buf, KM_IRQ0); 4159 kunmap_atomic(buf, KM_IRQ0);
4265 4160
4266 list_add_tail(&se_mem->se_list, cmd->t_task->t_mem_list); 4161 list_add_tail(&se_mem->se_list, &cmd->t_task.t_mem_list);
4267 cmd->t_task->t_tasks_se_num++; 4162 cmd->t_task.t_tasks_se_num++;
4268 4163
4269 DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)" 4164 DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)"
4270 " Offset(%u)\n", se_mem->se_page, se_mem->se_len, 4165 " Offset(%u)\n", se_mem->se_page, se_mem->se_len,
@@ -4274,7 +4169,7 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size)
4274 } 4169 }
4275 4170
4276 DEBUG_MEM("Allocated total struct se_mem elements(%u)\n", 4171 DEBUG_MEM("Allocated total struct se_mem elements(%u)\n",
4277 cmd->t_task->t_tasks_se_num); 4172 cmd->t_task.t_tasks_se_num);
4278 4173
4279 return 0; 4174 return 0;
4280out: 4175out:
@@ -4290,7 +4185,7 @@ int transport_init_task_sg(
4290 u32 task_offset) 4185 u32 task_offset)
4291{ 4186{
4292 struct se_cmd *se_cmd = task->task_se_cmd; 4187 struct se_cmd *se_cmd = task->task_se_cmd;
4293 struct se_device *se_dev = se_cmd->se_lun->lun_se_dev; 4188 struct se_device *se_dev = se_cmd->se_dev;
4294 struct se_mem *se_mem = in_se_mem; 4189 struct se_mem *se_mem = in_se_mem;
4295 struct target_core_fabric_ops *tfo = se_cmd->se_tfo; 4190 struct target_core_fabric_ops *tfo = se_cmd->se_tfo;
4296 u32 sg_length, task_size = task->task_size, task_sg_num_padded; 4191 u32 sg_length, task_size = task->task_size, task_sg_num_padded;
@@ -4306,7 +4201,7 @@ int transport_init_task_sg(
4306 sg_length = se_mem->se_len; 4201 sg_length = se_mem->se_len;
4307 4202
4308 if (!(list_is_last(&se_mem->se_list, 4203 if (!(list_is_last(&se_mem->se_list,
4309 se_cmd->t_task->t_mem_list))) 4204 &se_cmd->t_task.t_mem_list)))
4310 se_mem = list_entry(se_mem->se_list.next, 4205 se_mem = list_entry(se_mem->se_list.next,
4311 struct se_mem, se_list); 4206 struct se_mem, se_list);
4312 } else { 4207 } else {
@@ -4326,7 +4221,7 @@ int transport_init_task_sg(
4326 sg_length = (se_mem->se_len - task_offset); 4221 sg_length = (se_mem->se_len - task_offset);
4327 4222
4328 if (!(list_is_last(&se_mem->se_list, 4223 if (!(list_is_last(&se_mem->se_list,
4329 se_cmd->t_task->t_mem_list))) 4224 &se_cmd->t_task.t_mem_list)))
4330 se_mem = list_entry(se_mem->se_list.next, 4225 se_mem = list_entry(se_mem->se_list.next,
4331 struct se_mem, se_list); 4226 struct se_mem, se_list);
4332 } 4227 }
@@ -4367,7 +4262,7 @@ next:
4367 * Setup task->task_sg_bidi for SCSI READ payload for 4262 * Setup task->task_sg_bidi for SCSI READ payload for
4368 * TCM/pSCSI passthrough if present for BIDI-COMMAND 4263 * TCM/pSCSI passthrough if present for BIDI-COMMAND
4369 */ 4264 */
4370 if ((se_cmd->t_task->t_mem_bidi_list != NULL) && 4265 if (!list_empty(&se_cmd->t_task.t_mem_bidi_list) &&
4371 (se_dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) { 4266 (se_dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) {
4372 task->task_sg_bidi = kzalloc(task_sg_num_padded * 4267 task->task_sg_bidi = kzalloc(task_sg_num_padded *
4373 sizeof(struct scatterlist), GFP_KERNEL); 4268 sizeof(struct scatterlist), GFP_KERNEL);
@@ -4458,21 +4353,26 @@ static inline int transport_set_tasks_sectors(
4458 max_sectors_set); 4353 max_sectors_set);
4459} 4354}
4460 4355
4356/*
4357 * Convert a sgl into a linked list of se_mems.
4358 */
4461static int transport_map_sg_to_mem( 4359static int transport_map_sg_to_mem(
4462 struct se_cmd *cmd, 4360 struct se_cmd *cmd,
4463 struct list_head *se_mem_list, 4361 struct list_head *se_mem_list,
4464 void *in_mem, 4362 struct scatterlist *sg,
4465 u32 *se_mem_cnt) 4363 u32 *sg_count)
4466{ 4364{
4467 struct se_mem *se_mem; 4365 struct se_mem *se_mem;
4468 struct scatterlist *sg; 4366 u32 cmd_size = cmd->data_length;
4469 u32 sg_count = 1, cmd_size = cmd->data_length;
4470 4367
4471 WARN_ON(!in_mem); 4368 WARN_ON(!sg);
4472
4473 sg = (struct scatterlist *)in_mem;
4474 4369
4475 while (cmd_size) { 4370 while (cmd_size) {
4371 /*
4372 * NOTE: it is safe to return -ENOMEM at any time in creating this
4373 * list because transport_free_pages() will eventually be called, and is
4374 * smart enough to deallocate all list items for sg and sg_bidi lists.
4375 */
4476 se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); 4376 se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
4477 if (!(se_mem)) { 4377 if (!(se_mem)) {
4478 printk(KERN_ERR "Unable to allocate struct se_mem\n"); 4378 printk(KERN_ERR "Unable to allocate struct se_mem\n");
@@ -4489,26 +4389,21 @@ static int transport_map_sg_to_mem(
4489 if (cmd_size > sg->length) { 4389 if (cmd_size > sg->length) {
4490 se_mem->se_len = sg->length; 4390 se_mem->se_len = sg->length;
4491 sg = sg_next(sg); 4391 sg = sg_next(sg);
4492 sg_count++;
4493 } else 4392 } else
4494 se_mem->se_len = cmd_size; 4393 se_mem->se_len = cmd_size;
4495 4394
4496 cmd_size -= se_mem->se_len; 4395 cmd_size -= se_mem->se_len;
4396 (*sg_count)++;
4497 4397
4498 DEBUG_MEM("sg_to_mem: *se_mem_cnt: %u cmd_size: %u\n", 4398 DEBUG_MEM("sg_to_mem: sg_count: %u cmd_size: %u\n",
4499 *se_mem_cnt, cmd_size); 4399 *sg_count, cmd_size);
4500 DEBUG_MEM("sg_to_mem: Final se_page: %p se_off: %d se_len: %d\n", 4400 DEBUG_MEM("sg_to_mem: Final se_page: %p se_off: %d se_len: %d\n",
4501 se_mem->se_page, se_mem->se_off, se_mem->se_len); 4401 se_mem->se_page, se_mem->se_off, se_mem->se_len);
4502 4402
4503 list_add_tail(&se_mem->se_list, se_mem_list); 4403 list_add_tail(&se_mem->se_list, se_mem_list);
4504 (*se_mem_cnt)++;
4505 } 4404 }
4506 4405
4507 DEBUG_MEM("task[0] - Mapped(%u) struct scatterlist segments to(%u)" 4406 DEBUG_MEM("task[0] - Mapped(%u) struct scatterlist segments\n", sg_count);
4508 " struct se_mem\n", sg_count, *se_mem_cnt);
4509
4510 if (sg_count != *se_mem_cnt)
4511 BUG();
4512 4407
4513 return 0; 4408 return 0;
4514} 4409}
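The walk above consumes cmd->data_length across the chain, clamping the final entry to whatever remains. The same shape in isolation, where record_chunk() is a hypothetical consumer and cmd_size/sg come from the enclosing scope:

    #include <linux/scatterlist.h>

    while (cmd_size) {
            u32 len = min(cmd_size, sg->length);    /* clamp the last chunk */

            record_chunk(sg_page(sg), sg->offset, len);     /* hypothetical */
            cmd_size -= len;
            if (cmd_size)
                    sg = sg_next(sg);       /* chain-aware advance */
    }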
@@ -4551,7 +4446,7 @@ int transport_map_mem_to_sg(
4551 sg->length = se_mem->se_len; 4446 sg->length = se_mem->se_len;
4552 4447
4553 if (!(list_is_last(&se_mem->se_list, 4448 if (!(list_is_last(&se_mem->se_list,
4554 se_cmd->t_task->t_mem_list))) { 4449 &se_cmd->t_task.t_mem_list))) {
4555 se_mem = list_entry(se_mem->se_list.next, 4450 se_mem = list_entry(se_mem->se_list.next,
4556 struct se_mem, se_list); 4451 struct se_mem, se_list);
4557 (*se_mem_cnt)++; 4452 (*se_mem_cnt)++;
@@ -4587,7 +4482,7 @@ int transport_map_mem_to_sg(
4587 sg->length = (se_mem->se_len - *task_offset); 4482 sg->length = (se_mem->se_len - *task_offset);
4588 4483
4589 if (!(list_is_last(&se_mem->se_list, 4484 if (!(list_is_last(&se_mem->se_list,
4590 se_cmd->t_task->t_mem_list))) { 4485 &se_cmd->t_task.t_mem_list))) {
4591 se_mem = list_entry(se_mem->se_list.next, 4486 se_mem = list_entry(se_mem->se_list.next,
4592 struct se_mem, se_list); 4487 struct se_mem, se_list);
4593 (*se_mem_cnt)++; 4488 (*se_mem_cnt)++;
@@ -4645,7 +4540,7 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
4645 * Walk the struct se_task list and setup scatterlist chains 4540 * Walk the struct se_task list and setup scatterlist chains
4646 * for each contiguously allocated struct se_task->task_sg[]. 4541
4647 */ 4542 */
4648 list_for_each_entry(task, &cmd->t_task->t_task_list, t_list) { 4543 list_for_each_entry(task, &cmd->t_task.t_task_list, t_list) {
4649 if (!(task->task_sg) || !(task->task_padded_sg)) 4544 if (!(task->task_sg) || !(task->task_padded_sg))
4650 continue; 4545 continue;
4651 4546
@@ -4656,7 +4551,7 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
4656 * Either add chain or mark end of scatterlist 4551 * Either add chain or mark end of scatterlist
4657 */ 4552 */
4658 if (!(list_is_last(&task->t_list, 4553 if (!(list_is_last(&task->t_list,
4659 &cmd->t_task->t_task_list))) { 4554 &cmd->t_task.t_task_list))) {
4660 /* 4555 /*
4661 * Clear existing SGL termination bit set in 4556 * Clear existing SGL termination bit set in
4662 * transport_init_task_sg(), see sg_mark_end() 4557 * transport_init_task_sg(), see sg_mark_end()
@@ -4682,7 +4577,7 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
4682 /* 4577 /*
4683 * Check for single task.. 4578 * Check for single task..
4684 */ 4579 */
4685 if (!(list_is_last(&task->t_list, &cmd->t_task->t_task_list))) { 4580 if (!(list_is_last(&task->t_list, &cmd->t_task.t_task_list))) {
4686 /* 4581 /*
4687 * Clear existing SGL termination bit set in 4582 * Clear existing SGL termination bit set in
4688 * transport_init_task_sg(), see sg_mark_end() 4583 * transport_init_task_sg(), see sg_mark_end()
@@ -4700,18 +4595,18 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
4700 * Setup the starting pointer and total t_tasks_sg_linked_no including 4595 * Setup the starting pointer and total t_tasks_sg_linked_no including
4701 * padding SGs for linking and to mark the end. 4596 * padding SGs for linking and to mark the end.
4702 */ 4597 */
4703 cmd->t_task->t_tasks_sg_chained = sg_first; 4598 cmd->t_task.t_tasks_sg_chained = sg_first;
4704 cmd->t_task->t_tasks_sg_chained_no = sg_count; 4599 cmd->t_task.t_tasks_sg_chained_no = sg_count;
4705 4600
4706 DEBUG_CMD_M("Setup cmd: %p cmd->t_task->t_tasks_sg_chained: %p and" 4601 DEBUG_CMD_M("Setup cmd: %p cmd->t_task.t_tasks_sg_chained: %p and"
4707 " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_task->t_tasks_sg_chained, 4602 " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_task.t_tasks_sg_chained,
4708 cmd->t_task->t_tasks_sg_chained_no); 4603 cmd->t_task.t_tasks_sg_chained_no);
4709 4604
4710 for_each_sg(cmd->t_task->t_tasks_sg_chained, sg, 4605 for_each_sg(cmd->t_task.t_tasks_sg_chained, sg,
4711 cmd->t_task->t_tasks_sg_chained_no, i) { 4606 cmd->t_task.t_tasks_sg_chained_no, i) {
4712 4607
4713 DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d, magic: 0x%08x\n", 4608 DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d\n",
4714 i, sg, sg_page(sg), sg->length, sg->offset, sg->sg_magic); 4609 i, sg, sg_page(sg), sg->length, sg->offset);
4715 if (sg_is_chain(sg)) 4610 if (sg_is_chain(sg))
4716 DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg); 4611 DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg);
4717 if (sg_is_last(sg)) 4612 if (sg_is_last(sg))
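What the loop above builds, in miniature: each per-task SG table has its termination bit cleared and its trailing entry turned into a chain link, and only the final table keeps a real end marker. A standalone sketch of the scatterlist API involved:

    #include <linux/scatterlist.h>

    static void chain_two_tables(void)
    {
            struct scatterlist a[4], b[4];

            sg_init_table(a, 4);    /* sets a temporary end mark at a[3]       */
            sg_init_table(b, 4);    /* end mark at b[3] terminates everything  */
            sg_chain(a, 4, b);      /* a[3] becomes a link to b, end bit gone  */
    }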
@@ -4741,7 +4636,7 @@ static int transport_do_se_mem_map(
4741 in_mem, in_se_mem, out_se_mem, se_mem_cnt, 4636 in_mem, in_se_mem, out_se_mem, se_mem_cnt,
4742 task_offset_in); 4637 task_offset_in);
4743 if (ret == 0) 4638 if (ret == 0)
4744 task->task_se_cmd->t_task->t_tasks_se_num += *se_mem_cnt; 4639 task->task_se_cmd->t_task.t_tasks_se_num += *se_mem_cnt;
4745 4640
4746 return ret; 4641 return ret;
4747 } 4642 }
@@ -4791,7 +4686,7 @@ static u32 transport_generic_get_cdb_count(
4791 struct se_task *task; 4686 struct se_task *task;
4792 struct se_mem *se_mem = NULL, *se_mem_lout = NULL; 4687 struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
4793 struct se_mem *se_mem_bidi = NULL, *se_mem_bidi_lout = NULL; 4688 struct se_mem *se_mem_bidi = NULL, *se_mem_bidi_lout = NULL;
4794 struct se_device *dev = cmd->se_lun->lun_se_dev; 4689 struct se_device *dev = cmd->se_dev;
4795 int max_sectors_set = 0, ret; 4690 int max_sectors_set = 0, ret;
4796 u32 task_offset_in = 0, se_mem_cnt = 0, se_mem_bidi_cnt = 0, task_cdbs = 0; 4691 u32 task_offset_in = 0, se_mem_cnt = 0, se_mem_bidi_cnt = 0, task_cdbs = 0;
4797 4692
@@ -4805,15 +4700,14 @@ static u32 transport_generic_get_cdb_count(
4805 * mem_list will ever be empty at this point. 4700 * mem_list will ever be empty at this point.
4806 */ 4701 */
4807 if (!(list_empty(mem_list))) 4702 if (!(list_empty(mem_list)))
4808 se_mem = list_entry(mem_list->next, struct se_mem, se_list); 4703 se_mem = list_first_entry(mem_list, struct se_mem, se_list);
4809 /* 4704 /*
4810 * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to 4705 * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to
4811 * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation 4706 * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation
4812 */ 4707 */
4813 if ((cmd->t_task->t_mem_bidi_list != NULL) && 4708 if (!list_empty(&cmd->t_task.t_mem_bidi_list) &&
4814 !(list_empty(cmd->t_task->t_mem_bidi_list)) &&
4815 (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) 4709 (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV))
4816 se_mem_bidi = list_entry(cmd->t_task->t_mem_bidi_list->next, 4710 se_mem_bidi = list_first_entry(&cmd->t_task.t_mem_bidi_list,
4817 struct se_mem, se_list); 4711 struct se_mem, se_list);
4818 4712
4819 while (sectors) { 4713 while (sectors) {
@@ -4836,15 +4730,15 @@ static u32 transport_generic_get_cdb_count(
4836 4730
4837 cdb = dev->transport->get_cdb(task); 4731 cdb = dev->transport->get_cdb(task);
4838 if ((cdb)) { 4732 if ((cdb)) {
4839 memcpy(cdb, cmd->t_task->t_task_cdb, 4733 memcpy(cdb, cmd->t_task.t_task_cdb,
4840 scsi_command_size(cmd->t_task->t_task_cdb)); 4734 scsi_command_size(cmd->t_task.t_task_cdb));
4841 cmd->transport_split_cdb(task->task_lba, 4735 cmd->transport_split_cdb(task->task_lba,
4842 &task->task_sectors, cdb); 4736 &task->task_sectors, cdb);
4843 } 4737 }
4844 4738
4845 /* 4739 /*
4846 * Perform the SE OBJ plugin and/or Transport plugin specific 4740 * Perform the SE OBJ plugin and/or Transport plugin specific
4847 * mapping for cmd->t_task->t_mem_list. And setup the 4741 * mapping for cmd->t_task.t_mem_list. And setup the
4848 * task->task_sg and if necessary task->task_sg_bidi 4742 * task->task_sg and if necessary task->task_sg_bidi
4849 */ 4743 */
4850 ret = transport_do_se_mem_map(dev, task, mem_list, 4744 ret = transport_do_se_mem_map(dev, task, mem_list,
@@ -4855,7 +4749,7 @@ static u32 transport_generic_get_cdb_count(
4855 4749
4856 se_mem = se_mem_lout; 4750 se_mem = se_mem_lout;
4857 /* 4751 /*
4858 * Setup the cmd->t_task->t_mem_bidi_list -> task->task_sg_bidi 4752 * Setup the cmd->t_task.t_mem_bidi_list -> task->task_sg_bidi
4859 * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI 4753 * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI
4860 * 4754 *
4861 * Note that the first call to transport_do_se_mem_map() above will 4755 * Note that the first call to transport_do_se_mem_map() above will
@@ -4865,7 +4759,7 @@ static u32 transport_generic_get_cdb_count(
4865 */ 4759 */
4866 if (task->task_sg_bidi != NULL) { 4760 if (task->task_sg_bidi != NULL) {
4867 ret = transport_do_se_mem_map(dev, task, 4761 ret = transport_do_se_mem_map(dev, task,
4868 cmd->t_task->t_mem_bidi_list, NULL, 4762 &cmd->t_task.t_mem_bidi_list, NULL,
4869 se_mem_bidi, &se_mem_bidi_lout, &se_mem_bidi_cnt, 4763 se_mem_bidi, &se_mem_bidi_lout, &se_mem_bidi_cnt,
4870 &task_offset_in); 4764 &task_offset_in);
4871 if (ret < 0) 4765 if (ret < 0)
@@ -4888,8 +4782,8 @@ static u32 transport_generic_get_cdb_count(
4888 } 4782 }
4889 4783
4890 if (set_counts) { 4784 if (set_counts) {
4891 atomic_inc(&cmd->t_task->t_fe_count); 4785 atomic_inc(&cmd->t_task.t_fe_count);
4892 atomic_inc(&cmd->t_task->t_se_count); 4786 atomic_inc(&cmd->t_task.t_se_count);
4893 } 4787 }
4894 4788
4895 DEBUG_VOL("ITT[0x%08x] total %s cdbs(%u)\n", 4789 DEBUG_VOL("ITT[0x%08x] total %s cdbs(%u)\n",
@@ -4904,7 +4798,7 @@ out:
4904static int 4798static int
4905transport_map_control_cmd_to_task(struct se_cmd *cmd) 4799transport_map_control_cmd_to_task(struct se_cmd *cmd)
4906{ 4800{
4907 struct se_device *dev = cmd->se_lun->lun_se_dev; 4801 struct se_device *dev = cmd->se_dev;
4908 unsigned char *cdb; 4802 unsigned char *cdb;
4909 struct se_task *task; 4803 struct se_task *task;
4910 int ret; 4804 int ret;
@@ -4915,26 +4809,26 @@ transport_map_control_cmd_to_task(struct se_cmd *cmd)
4915 4809
4916 cdb = dev->transport->get_cdb(task); 4810 cdb = dev->transport->get_cdb(task);
4917 if (cdb) 4811 if (cdb)
4918 memcpy(cdb, cmd->t_task->t_task_cdb, 4812 memcpy(cdb, cmd->t_task.t_task_cdb,
4919 scsi_command_size(cmd->t_task->t_task_cdb)); 4813 scsi_command_size(cmd->t_task.t_task_cdb));
4920 4814
4921 task->task_size = cmd->data_length; 4815 task->task_size = cmd->data_length;
4922 task->task_sg_num = 4816 task->task_sg_num =
4923 (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) ? 1 : 0; 4817 (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) ? 1 : 0;
4924 4818
4925 atomic_inc(&cmd->t_task->t_fe_count); 4819 atomic_inc(&cmd->t_task.t_fe_count);
4926 atomic_inc(&cmd->t_task->t_se_count); 4820 atomic_inc(&cmd->t_task.t_se_count);
4927 4821
4928 if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) { 4822 if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) {
4929 struct se_mem *se_mem = NULL, *se_mem_lout = NULL; 4823 struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
4930 u32 se_mem_cnt = 0, task_offset = 0; 4824 u32 se_mem_cnt = 0, task_offset = 0;
4931 4825
4932 if (!list_empty(cmd->t_task->t_mem_list)) 4826 if (!list_empty(&cmd->t_task.t_mem_list))
4933 se_mem = list_entry(cmd->t_task->t_mem_list->next, 4827 se_mem = list_first_entry(&cmd->t_task.t_mem_list,
4934 struct se_mem, se_list); 4828 struct se_mem, se_list);
4935 4829
4936 ret = transport_do_se_mem_map(dev, task, 4830 ret = transport_do_se_mem_map(dev, task,
4937 cmd->t_task->t_mem_list, NULL, se_mem, 4831 &cmd->t_task.t_mem_list, NULL, se_mem,
4938 &se_mem_lout, &se_mem_cnt, &task_offset); 4832 &se_mem_lout, &se_mem_cnt, &task_offset);
4939 if (ret < 0) 4833 if (ret < 0)
4940 return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; 4834 return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
@@ -4969,14 +4863,14 @@ static int transport_generic_new_cmd(struct se_cmd *cmd)
4969{ 4863{
4970 struct se_portal_group *se_tpg; 4864 struct se_portal_group *se_tpg;
4971 struct se_task *task; 4865 struct se_task *task;
4972 struct se_device *dev = cmd->se_lun->lun_se_dev; 4866 struct se_device *dev = cmd->se_dev;
4973 int ret = 0; 4867 int ret = 0;
4974 4868
4975 /* 4869 /*
4976 * Determine if the TCM fabric module has already allocated 4870
4977 * memory, and is directly calling transport_generic_map_mem_to_cmd() 4871 * memory, and is directly calling transport_generic_map_mem_to_cmd()
4978 * to setup beforehand the linked list of physical memory at 4872 * to setup beforehand the linked list of physical memory at
4979 * cmd->t_task->t_mem_list of struct se_mem->se_page 4873 * cmd->t_task.t_mem_list of struct se_mem->se_page
4980 */ 4874 */
4981 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) { 4875 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) {
4982 ret = transport_allocate_resources(cmd); 4876 ret = transport_allocate_resources(cmd);
@@ -5005,7 +4899,7 @@ static int transport_generic_new_cmd(struct se_cmd *cmd)
5005 } 4899 }
5006 4900
5007 if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { 4901 if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
5008 list_for_each_entry(task, &cmd->t_task->t_task_list, t_list) { 4902 list_for_each_entry(task, &cmd->t_task.t_task_list, t_list) {
5009 if (atomic_read(&task->task_sent)) 4903 if (atomic_read(&task->task_sent))
5010 continue; 4904 continue;
5011 if (!dev->transport->map_task_SG) 4905 if (!dev->transport->map_task_SG)
@@ -5052,9 +4946,9 @@ void transport_generic_process_write(struct se_cmd *cmd)
5052 * original EDTL 4946 * original EDTL
5053 */ 4947 */
5054 if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { 4948 if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
5055 if (!cmd->t_task->t_tasks_se_num) { 4949 if (!cmd->t_task.t_tasks_se_num) {
5056 unsigned char *dst, *buf = 4950 unsigned char *dst, *buf =
5057 (unsigned char *)cmd->t_task->t_task_buf; 4951 (unsigned char *)cmd->t_task.t_task_buf;
5058 4952
5059 dst = kzalloc(cmd->cmd_spdtl, GFP_KERNEL); 4953
5060 if (!(dst)) { 4954 if (!(dst)) {
@@ -5066,15 +4960,15 @@ void transport_generic_process_write(struct se_cmd *cmd)
5066 } 4960 }
5067 memcpy(dst, buf, cmd->cmd_spdtl); 4961 memcpy(dst, buf, cmd->cmd_spdtl);
5068 4962
5069 kfree(cmd->t_task->t_task_buf); 4963 kfree(cmd->t_task.t_task_buf);
5070 cmd->t_task->t_task_buf = dst; 4964 cmd->t_task.t_task_buf = dst;
5071 } else { 4965 } else {
5072 struct scatterlist *sg = 4966 struct scatterlist *sg =
5073 (struct scatterlist *)cmd->t_task->t_task_buf; 4967 (struct scatterlist *)cmd->t_task.t_task_buf;
5074 struct scatterlist *orig_sg; 4968 struct scatterlist *orig_sg;
5075 4969
5076 orig_sg = kzalloc(sizeof(struct scatterlist) * 4970 orig_sg = kzalloc(sizeof(struct scatterlist) *
5077 cmd->t_task->t_tasks_se_num, 4971 cmd->t_task.t_tasks_se_num,
5078 GFP_KERNEL); 4972
5079 if (!(orig_sg)) { 4973 if (!(orig_sg)) {
5080 printk(KERN_ERR "Unable to allocate memory" 4974 printk(KERN_ERR "Unable to allocate memory"
@@ -5084,9 +4978,9 @@ void transport_generic_process_write(struct se_cmd *cmd)
5084 return; 4978 return;
5085 } 4979 }
5086 4980
5087 memcpy(orig_sg, cmd->t_task->t_task_buf, 4981 memcpy(orig_sg, cmd->t_task.t_task_buf,
5088 sizeof(struct scatterlist) * 4982 sizeof(struct scatterlist) *
5089 cmd->t_task->t_tasks_se_num); 4983 cmd->t_task.t_tasks_se_num);
5090 4984
5091 cmd->data_length = cmd->cmd_spdtl; 4985 cmd->data_length = cmd->cmd_spdtl;
5092 /* 4986 /*
@@ -5117,22 +5011,22 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
5117 unsigned long flags; 5011 unsigned long flags;
5118 int ret; 5012 int ret;
5119 5013
5120 spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); 5014 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
5121 cmd->t_state = TRANSPORT_WRITE_PENDING; 5015 cmd->t_state = TRANSPORT_WRITE_PENDING;
5122 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 5016 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
5123 /* 5017 /*
5124 * For the TCM control CDBs using a contiguous buffer, do the memcpy 5018 * For the TCM control CDBs using a contiguous buffer, do the memcpy
5125 * from the passed Linux/SCSI struct scatterlist located at 5019 * from the passed Linux/SCSI struct scatterlist located at
5126 * se_cmd->t_task->t_task_pt_buf to the contiguous buffer at 5020 * se_cmd->t_task.t_task_pt_buf to the contiguous buffer at
5127 * se_cmd->t_task->t_task_buf. 5021 * se_cmd->t_task.t_task_buf.
5128 */ 5022 */
5129 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG) 5023 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
5130 transport_memcpy_read_contig(cmd, 5024 transport_memcpy_read_contig(cmd,
5131 cmd->t_task->t_task_buf, 5025 cmd->t_task.t_task_buf,
5132 cmd->t_task->t_task_pt_sgl); 5026 cmd->t_task.t_task_pt_sgl);
5133 /* 5027 /*
5134 * Clear the se_cmd for WRITE_PENDING status in order to set 5028 * Clear the se_cmd for WRITE_PENDING status in order to set
5135 * cmd->t_task->t_transport_active=0 so that transport_generic_handle_data 5029 * cmd->t_task.t_transport_active=0 so that transport_generic_handle_data
5136 * can be called from HW target mode interrupt code. This is safe 5030 * can be called from HW target mode interrupt code. This is safe
5137 * to be called with transport_off=1 before the cmd->se_tfo->write_pending 5031 * to be called with transport_off=1 before the cmd->se_tfo->write_pending
5138 * because the se_cmd->se_lun pointer is not being cleared. 5032 * because the se_cmd->se_lun pointer is not being cleared.
@@ -5156,7 +5050,6 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
5156 */ 5050 */
5157void transport_release_cmd_to_pool(struct se_cmd *cmd) 5051void transport_release_cmd_to_pool(struct se_cmd *cmd)
5158{ 5052{
5159 BUG_ON(!cmd->t_task);
5160 BUG_ON(!cmd->se_tfo); 5053 BUG_ON(!cmd->se_tfo);
5161 5054
5162 transport_free_se_cmd(cmd); 5055 transport_free_se_cmd(cmd);
@@ -5174,7 +5067,7 @@ void transport_generic_free_cmd(
5174 int release_to_pool, 5067 int release_to_pool,
5175 int session_reinstatement) 5068 int session_reinstatement)
5176{ 5069{
5177 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) || !cmd->t_task) 5070 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD))
5178 transport_release_cmd_to_pool(cmd); 5071 transport_release_cmd_to_pool(cmd);
5179 else { 5072 else {
5180 core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd); 5073 core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
@@ -5220,32 +5113,32 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
5220 * If the frontend has already requested this struct se_cmd to 5113 * If the frontend has already requested this struct se_cmd to
5221 * be stopped, we can safely ignore this struct se_cmd. 5114 * be stopped, we can safely ignore this struct se_cmd.
5222 */ 5115 */
5223 spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); 5116 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
5224 if (atomic_read(&cmd->t_task->t_transport_stop)) { 5117 if (atomic_read(&cmd->t_task.t_transport_stop)) {
5225 atomic_set(&cmd->t_task->transport_lun_stop, 0); 5118 atomic_set(&cmd->t_task.transport_lun_stop, 0);
5226 DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop ==" 5119 DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop =="
5227 " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd)); 5120 " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd));
5228 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 5121 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
5229 transport_cmd_check_stop(cmd, 1, 0); 5122 transport_cmd_check_stop(cmd, 1, 0);
5230 return -EPERM; 5123 return -EPERM;
5231 } 5124 }
5232 atomic_set(&cmd->t_task->transport_lun_fe_stop, 1); 5125 atomic_set(&cmd->t_task.transport_lun_fe_stop, 1);
5233 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 5126 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
5234 5127
5235 wake_up_interruptible(&cmd->se_lun->lun_se_dev->dev_queue_obj.thread_wq); 5128 wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
5236 5129
5237 ret = transport_stop_tasks_for_cmd(cmd); 5130 ret = transport_stop_tasks_for_cmd(cmd);
5238 5131
5239 DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:" 5132 DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:"
5240 " %d\n", cmd, cmd->t_task->t_task_cdbs, ret); 5133 " %d\n", cmd, cmd->t_task.t_task_cdbs, ret);
5241 if (!ret) { 5134 if (!ret) {
5242 DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n", 5135 DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
5243 cmd->se_tfo->get_task_tag(cmd)); 5136 cmd->se_tfo->get_task_tag(cmd));
5244 wait_for_completion(&cmd->t_task->transport_lun_stop_comp); 5137 wait_for_completion(&cmd->t_task.transport_lun_stop_comp);
5245 DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n", 5138 DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
5246 cmd->se_tfo->get_task_tag(cmd)); 5139 cmd->se_tfo->get_task_tag(cmd));
5247 } 5140 }
5248 transport_remove_cmd_from_queue(cmd, &cmd->se_lun->lun_se_dev->dev_queue_obj); 5141 transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
5249 5142
5250 return 0; 5143 return 0;
5251} 5144}
@@ -5266,31 +5159,24 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
5266 * Initiator Port. 5159 * Initiator Port.
5267 */ 5160 */
5268 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); 5161 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
5269 while (!list_empty_careful(&lun->lun_cmd_list)) { 5162 while (!list_empty(&lun->lun_cmd_list)) {
5270 cmd = list_entry(lun->lun_cmd_list.next, 5163 cmd = list_first_entry(&lun->lun_cmd_list,
5271 struct se_cmd, se_lun_list); 5164 struct se_cmd, se_lun_node);
5272 list_del(&cmd->se_lun_list); 5165 list_del(&cmd->se_lun_node);
5273 5166
5274 if (!(cmd->t_task)) { 5167 atomic_set(&cmd->t_task.transport_lun_active, 0);
5275 printk(KERN_ERR "ITT: 0x%08x, cmd->t_task = NULL"
5276 "[i,t]_state: %u/%u\n",
5277 cmd->se_tfo->get_task_tag(cmd),
5278 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
5279 BUG();
5280 }
5281 atomic_set(&cmd->t_task->transport_lun_active, 0);
5282 /* 5168 /*
5283 * This will notify iscsi_target_transport.c: 5169 * This will notify iscsi_target_transport.c:
5284 * transport_cmd_check_stop() that a LUN shutdown is in 5170 * transport_cmd_check_stop() that a LUN shutdown is in
5285 * progress for the iscsi_cmd_t. 5171 * progress for the iscsi_cmd_t.
5286 */ 5172 */
5287 spin_lock(&cmd->t_task->t_state_lock); 5173 spin_lock(&cmd->t_task.t_state_lock);
5288 DEBUG_CLEAR_L("SE_LUN[%d] - Setting cmd->t_task->transport" 5174 DEBUG_CLEAR_L("SE_LUN[%d] - Setting cmd->t_task.transport"
5289 "_lun_stop for ITT: 0x%08x\n", 5175 "_lun_stop for ITT: 0x%08x\n",
5290 cmd->se_lun->unpacked_lun, 5176 cmd->se_lun->unpacked_lun,
5291 cmd->se_tfo->get_task_tag(cmd)); 5177 cmd->se_tfo->get_task_tag(cmd));
5292 atomic_set(&cmd->t_task->transport_lun_stop, 1); 5178 atomic_set(&cmd->t_task.transport_lun_stop, 1);
5293 spin_unlock(&cmd->t_task->t_state_lock); 5179 spin_unlock(&cmd->t_task.t_state_lock);
5294 5180
5295 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); 5181 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
5296 5182
@@ -5318,14 +5204,14 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
5318 cmd->se_lun->unpacked_lun, 5204 cmd->se_lun->unpacked_lun,
5319 cmd->se_tfo->get_task_tag(cmd)); 5205 cmd->se_tfo->get_task_tag(cmd));
5320 5206
5321 spin_lock_irqsave(&cmd->t_task->t_state_lock, cmd_flags); 5207 spin_lock_irqsave(&cmd->t_task.t_state_lock, cmd_flags);
5322 if (!(atomic_read(&cmd->t_task->transport_dev_active))) { 5208 if (!(atomic_read(&cmd->t_task.transport_dev_active))) {
5323 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, cmd_flags); 5209 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, cmd_flags);
5324 goto check_cond; 5210 goto check_cond;
5325 } 5211 }
5326 atomic_set(&cmd->t_task->transport_dev_active, 0); 5212 atomic_set(&cmd->t_task.transport_dev_active, 0);
5327 transport_all_task_dev_remove_state(cmd); 5213 transport_all_task_dev_remove_state(cmd);
5328 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, cmd_flags); 5214 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, cmd_flags);
5329 5215
5330 transport_free_dev_tasks(cmd); 5216 transport_free_dev_tasks(cmd);
5331 /* 5217 /*
@@ -5342,24 +5228,24 @@ check_cond:
5342 * be released, notify the waiting thread now that LU has 5228 * be released, notify the waiting thread now that LU has
5343 * finished accessing it. 5229 * finished accessing it.
5344 */ 5230 */
5345 spin_lock_irqsave(&cmd->t_task->t_state_lock, cmd_flags); 5231 spin_lock_irqsave(&cmd->t_task.t_state_lock, cmd_flags);
5346 if (atomic_read(&cmd->t_task->transport_lun_fe_stop)) { 5232 if (atomic_read(&cmd->t_task.transport_lun_fe_stop)) {
5347 DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for" 5233 DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for"
5348 " struct se_cmd: %p ITT: 0x%08x\n", 5234 " struct se_cmd: %p ITT: 0x%08x\n",
5349 lun->unpacked_lun, 5235 lun->unpacked_lun,
5350 cmd, cmd->se_tfo->get_task_tag(cmd)); 5236 cmd, cmd->se_tfo->get_task_tag(cmd));
5351 5237
5352 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, 5238 spin_unlock_irqrestore(&cmd->t_task.t_state_lock,
5353 cmd_flags); 5239 cmd_flags);
5354 transport_cmd_check_stop(cmd, 1, 0); 5240 transport_cmd_check_stop(cmd, 1, 0);
5355 complete(&cmd->t_task->transport_lun_fe_stop_comp); 5241 complete(&cmd->t_task.transport_lun_fe_stop_comp);
5356 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); 5242 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
5357 continue; 5243 continue;
5358 } 5244 }
5359 DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n", 5245 DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
5360 lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd)); 5246 lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));
5361 5247
5362 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, cmd_flags); 5248 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, cmd_flags);
5363 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); 5249 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
5364 } 5250 }
5365 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); 5251 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
@@ -5379,7 +5265,7 @@ int transport_clear_lun_from_sessions(struct se_lun *lun)
5379{ 5265{
5380 struct task_struct *kt; 5266 struct task_struct *kt;
5381 5267
5382 kt = kthread_run(transport_clear_lun_thread, (void *)lun, 5268 kt = kthread_run(transport_clear_lun_thread, lun,
5383 "tcm_cl_%u", lun->unpacked_lun); 5269 "tcm_cl_%u", lun->unpacked_lun);
5384 if (IS_ERR(kt)) { 5270 if (IS_ERR(kt)) {
5385 printk(KERN_ERR "Unable to start clear_lun thread\n"); 5271 printk(KERN_ERR "Unable to start clear_lun thread\n");
@@ -5405,15 +5291,15 @@ static void transport_generic_wait_for_tasks(
5405 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) 5291 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req))
5406 return; 5292 return;
5407 5293
5408 spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); 5294 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
5409 /* 5295 /*
5410 * If we are already stopped due to an external event (ie: LUN shutdown) 5296 * If we are already stopped due to an external event (ie: LUN shutdown)
5411 * sleep until the connection can have the passed struct se_cmd back. 5297 * sleep until the connection can have the passed struct se_cmd back.
5412 * The cmd->t_task->transport_lun_stopped_sem will be upped by 5298 * The cmd->t_task.transport_lun_stopped_sem will be upped by
5413 * transport_clear_lun_from_sessions() once the ConfigFS context caller 5299 * transport_clear_lun_from_sessions() once the ConfigFS context caller
5414 * has completed its operation on the struct se_cmd. 5300 * has completed its operation on the struct se_cmd.
5415 */ 5301 */
5416 if (atomic_read(&cmd->t_task->transport_lun_stop)) { 5302 if (atomic_read(&cmd->t_task.transport_lun_stop)) {
5417 5303
5418 DEBUG_TRANSPORT_S("wait_for_tasks: Stopping" 5304 DEBUG_TRANSPORT_S("wait_for_tasks: Stopping"
5419 " wait_for_completion(&cmd->t_tasktransport_lun_fe" 5305 " wait_for_completion(&cmd->t_tasktransport_lun_fe"
@@ -5426,10 +5312,10 @@ static void transport_generic_wait_for_tasks(
5426 * We go ahead and up transport_lun_stop_comp just to be sure 5312 * We go ahead and up transport_lun_stop_comp just to be sure
5427 * here. 5313 * here.
5428 */ 5314 */
5429 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 5315 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
5430 complete(&cmd->t_task->transport_lun_stop_comp); 5316 complete(&cmd->t_task.transport_lun_stop_comp);
5431 wait_for_completion(&cmd->t_task->transport_lun_fe_stop_comp); 5317 wait_for_completion(&cmd->t_task.transport_lun_fe_stop_comp);
5432 spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); 5318 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
5433 5319
5434 transport_all_task_dev_remove_state(cmd); 5320 transport_all_task_dev_remove_state(cmd);
5435 /* 5321 /*
@@ -5442,13 +5328,13 @@ static void transport_generic_wait_for_tasks(
5442 "stop_comp); for ITT: 0x%08x\n", 5328 "stop_comp); for ITT: 0x%08x\n",
5443 cmd->se_tfo->get_task_tag(cmd)); 5329 cmd->se_tfo->get_task_tag(cmd));
5444 5330
5445 atomic_set(&cmd->t_task->transport_lun_stop, 0); 5331 atomic_set(&cmd->t_task.transport_lun_stop, 0);
5446 } 5332 }
5447 if (!atomic_read(&cmd->t_task->t_transport_active) || 5333 if (!atomic_read(&cmd->t_task.t_transport_active) ||
5448 atomic_read(&cmd->t_task->t_transport_aborted)) 5334 atomic_read(&cmd->t_task.t_transport_aborted))
5449 goto remove; 5335 goto remove;
5450 5336
5451 atomic_set(&cmd->t_task->t_transport_stop, 1); 5337 atomic_set(&cmd->t_task.t_transport_stop, 1);
5452 5338
5453 DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x" 5339 DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x"
5454 " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop" 5340 " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop"
@@ -5456,21 +5342,21 @@ static void transport_generic_wait_for_tasks(
5456 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, 5342 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
5457 cmd->deferred_t_state); 5343 cmd->deferred_t_state);
5458 5344
5459 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 5345 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
5460 5346
5461 wake_up_interruptible(&cmd->se_lun->lun_se_dev->dev_queue_obj.thread_wq); 5347 wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
5462 5348
5463 wait_for_completion(&cmd->t_task->t_transport_stop_comp); 5349 wait_for_completion(&cmd->t_task.t_transport_stop_comp);
5464 5350
5465 spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); 5351 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
5466 atomic_set(&cmd->t_task->t_transport_active, 0); 5352 atomic_set(&cmd->t_task.t_transport_active, 0);
5467 atomic_set(&cmd->t_task->t_transport_stop, 0); 5353 atomic_set(&cmd->t_task.t_transport_stop, 0);
5468 5354
5469 DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_compltion(" 5355 DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_compltion("
5470 "&cmd->t_task->t_transport_stop_comp) for ITT: 0x%08x\n", 5356 "&cmd->t_task.t_transport_stop_comp) for ITT: 0x%08x\n",
5471 cmd->se_tfo->get_task_tag(cmd)); 5357 cmd->se_tfo->get_task_tag(cmd));
5472remove: 5358remove:
5473 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 5359 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
5474 if (!remove_cmd) 5360 if (!remove_cmd)
5475 return; 5361 return;
5476 5362
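The counterpart to this wait lives on the processing path: when it observes t_transport_stop set, it signals t_transport_stop_comp and hands the command back to the waiter. A sketch of that signalling side, condensed from the check-stop logic rather than a verbatim excerpt:

    /* Processing side: honor a stop request and wake the waiting thread. */
    if (atomic_read(&cmd->t_task.t_transport_stop)) {
            complete(&cmd->t_task.t_transport_stop_comp);
            return;         /* the waiter now owns the command's teardown */
    }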
@@ -5509,13 +5395,13 @@ int transport_send_check_condition_and_sense(
5509 int offset; 5395 int offset;
5510 u8 asc = 0, ascq = 0; 5396 u8 asc = 0, ascq = 0;
5511 5397
5512 spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); 5398 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
5513 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { 5399 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
5514 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 5400 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
5515 return 0; 5401 return 0;
5516 } 5402 }
5517 cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; 5403 cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
5518 spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); 5404 spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
5519 5405
5520 if (!reason && from_transport) 5406 if (!reason && from_transport)
5521 goto after_reason; 5407 goto after_reason;
@@ -5674,14 +5560,14 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
5674{ 5560{
5675 int ret = 0; 5561 int ret = 0;
5676 5562
5677 if (atomic_read(&cmd->t_task->t_transport_aborted) != 0) { 5563 if (atomic_read(&cmd->t_task.t_transport_aborted) != 0) {
5678 if (!(send_status) || 5564 if (!(send_status) ||
5679 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) 5565 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
5680 return 1; 5566 return 1;
5681#if 0 5567#if 0
5682 printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED" 5568 printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED"
5683 " status for CDB: 0x%02x ITT: 0x%08x\n", 5569 " status for CDB: 0x%02x ITT: 0x%08x\n",
5684 cmd->t_task->t_task_cdb[0], 5570 cmd->t_task.t_task_cdb[0],
5685 cmd->se_tfo->get_task_tag(cmd)); 5571 cmd->se_tfo->get_task_tag(cmd));
5686#endif 5572#endif
5687 cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS; 5573 cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
@@ -5702,7 +5588,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
5702 */ 5588 */
5703 if (cmd->data_direction == DMA_TO_DEVICE) { 5589 if (cmd->data_direction == DMA_TO_DEVICE) {
5704 if (cmd->se_tfo->write_pending_status(cmd) != 0) { 5590 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
5705 atomic_inc(&cmd->t_task->t_transport_aborted); 5591 atomic_inc(&cmd->t_task.t_transport_aborted);
5706 smp_mb__after_atomic_inc(); 5592 smp_mb__after_atomic_inc();
5707 cmd->scsi_status = SAM_STAT_TASK_ABORTED; 5593 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
5708 transport_new_cmd_failure(cmd); 5594 transport_new_cmd_failure(cmd);
@@ -5712,7 +5598,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
5712 cmd->scsi_status = SAM_STAT_TASK_ABORTED; 5598 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
5713#if 0 5599#if 0
5714 printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x," 5600 printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
5715 " ITT: 0x%08x\n", cmd->t_task->t_task_cdb[0], 5601 " ITT: 0x%08x\n", cmd->t_task.t_task_cdb[0],
5716 cmd->se_tfo->get_task_tag(cmd)); 5602 cmd->se_tfo->get_task_tag(cmd));
5717#endif 5603#endif
5718 cmd->se_tfo->queue_status(cmd); 5604 cmd->se_tfo->queue_status(cmd);
@@ -5725,7 +5611,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
5725int transport_generic_do_tmr(struct se_cmd *cmd) 5611int transport_generic_do_tmr(struct se_cmd *cmd)
5726{ 5612{
5727 struct se_cmd *ref_cmd; 5613 struct se_cmd *ref_cmd;
5728 struct se_device *dev = cmd->se_lun->lun_se_dev; 5614 struct se_device *dev = cmd->se_dev;
5729 struct se_tmr_req *tmr = cmd->se_tmr_req; 5615 struct se_tmr_req *tmr = cmd->se_tmr_req;
5730 int ret; 5616 int ret;
5731 5617
@@ -5788,9 +5674,7 @@ transport_get_task_from_state_list(struct se_device *dev)
5788static void transport_processing_shutdown(struct se_device *dev) 5674static void transport_processing_shutdown(struct se_device *dev)
5789{ 5675{
5790 struct se_cmd *cmd; 5676 struct se_cmd *cmd;
5791 struct se_queue_req *qr;
5792 struct se_task *task; 5677 struct se_task *task;
5793 u8 state;
5794 unsigned long flags; 5678 unsigned long flags;
5795 /* 5679 /*
5796 * Empty the struct se_device's struct se_task state list. 5680 * Empty the struct se_device's struct se_task state list.
@@ -5803,15 +5687,9 @@ static void transport_processing_shutdown(struct se_device *dev)
5803 } 5687 }
5804 cmd = task->task_se_cmd; 5688 cmd = task->task_se_cmd;
5805 5689
5806 if (!cmd->t_task) {
5807 printk(KERN_ERR "cmd->t_task is NULL for task: %p cmd:"
5808 " %p ITT: 0x%08x\n", task, cmd,
5809 cmd->se_tfo->get_task_tag(cmd));
5810 continue;
5811 }
5812 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 5690 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
5813 5691
5814 spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); 5692 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
5815 5693
5816 DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x," 5694 DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x,"
5817 " i_state/def_i_state: %d/%d, t_state/def_t_state:" 5695 " i_state/def_i_state: %d/%d, t_state/def_t_state:"
@@ -5819,22 +5697,22 @@ static void transport_processing_shutdown(struct se_device *dev)
5819 cmd->se_tfo->get_task_tag(cmd), cmd->cmd_sn, 5697 cmd->se_tfo->get_task_tag(cmd), cmd->cmd_sn,
5820 cmd->se_tfo->get_cmd_state(cmd), cmd->deferred_i_state, 5698 cmd->se_tfo->get_cmd_state(cmd), cmd->deferred_i_state,
5821 cmd->t_state, cmd->deferred_t_state, 5699 cmd->t_state, cmd->deferred_t_state,
5822 cmd->t_task->t_task_cdb[0]); 5700 cmd->t_task.t_task_cdb[0]);
5823 DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:" 5701 DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:"
5824 " %d t_task_cdbs_sent: %d -- t_transport_active: %d" 5702 " %d t_task_cdbs_sent: %d -- t_transport_active: %d"
5825 " t_transport_stop: %d t_transport_sent: %d\n", 5703 " t_transport_stop: %d t_transport_sent: %d\n",
5826 cmd->se_tfo->get_task_tag(cmd), 5704 cmd->se_tfo->get_task_tag(cmd),
5827 cmd->t_task->t_task_cdbs, 5705 cmd->t_task.t_task_cdbs,
5828 atomic_read(&cmd->t_task->t_task_cdbs_left), 5706 atomic_read(&cmd->t_task.t_task_cdbs_left),
5829 atomic_read(&cmd->t_task->t_task_cdbs_sent), 5707 atomic_read(&cmd->t_task.t_task_cdbs_sent),
5830 atomic_read(&cmd->t_task->t_transport_active), 5708 atomic_read(&cmd->t_task.t_transport_active),
5831 atomic_read(&cmd->t_task->t_transport_stop), 5709 atomic_read(&cmd->t_task.t_transport_stop),
5832 atomic_read(&cmd->t_task->t_transport_sent)); 5710 atomic_read(&cmd->t_task.t_transport_sent));
5833 5711
5834 if (atomic_read(&task->task_active)) { 5712 if (atomic_read(&task->task_active)) {
5835 atomic_set(&task->task_stop, 1); 5713 atomic_set(&task->task_stop, 1);
5836 spin_unlock_irqrestore( 5714 spin_unlock_irqrestore(
5837 &cmd->t_task->t_state_lock, flags); 5715 &cmd->t_task.t_state_lock, flags);
5838 5716
5839 DEBUG_DO("Waiting for task: %p to shutdown for dev:" 5717 DEBUG_DO("Waiting for task: %p to shutdown for dev:"
5840 " %p\n", task, dev); 5718 " %p\n", task, dev);
@@ -5842,8 +5720,8 @@ static void transport_processing_shutdown(struct se_device *dev)
5842 DEBUG_DO("Completed task: %p shutdown for dev: %p\n", 5720 DEBUG_DO("Completed task: %p shutdown for dev: %p\n",
5843 task, dev); 5721 task, dev);
5844 5722
5845 spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); 5723 spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
5846 atomic_dec(&cmd->t_task->t_task_cdbs_left); 5724 atomic_dec(&cmd->t_task.t_task_cdbs_left);
5847 5725
5848 atomic_set(&task->task_active, 0); 5726 atomic_set(&task->task_active, 0);
5849 atomic_set(&task->task_stop, 0); 5727 atomic_set(&task->task_stop, 0);
@@ -5853,39 +5731,39 @@ static void transport_processing_shutdown(struct se_device *dev)
5853 } 5731 }
5854 __transport_stop_task_timer(task, &flags); 5732 __transport_stop_task_timer(task, &flags);
5855 5733
5856 if (!(atomic_dec_and_test(&cmd->t_task->t_task_cdbs_ex_left))) { 5734 if (!(atomic_dec_and_test(&cmd->t_task.t_task_cdbs_ex_left))) {
5857 spin_unlock_irqrestore( 5735 spin_unlock_irqrestore(
5858 &cmd->t_task->t_state_lock, flags); 5736 &cmd->t_task.t_state_lock, flags);
5859 5737
5860 DEBUG_DO("Skipping task: %p, dev: %p for" 5738 DEBUG_DO("Skipping task: %p, dev: %p for"
5861 " t_task_cdbs_ex_left: %d\n", task, dev, 5739 " t_task_cdbs_ex_left: %d\n", task, dev,
5862 atomic_read(&cmd->t_task->t_task_cdbs_ex_left)); 5740 atomic_read(&cmd->t_task.t_task_cdbs_ex_left));
5863 5741
5864 spin_lock_irqsave(&dev->execute_task_lock, flags); 5742 spin_lock_irqsave(&dev->execute_task_lock, flags);
5865 continue; 5743 continue;
5866 } 5744 }
5867 5745
5868 if (atomic_read(&cmd->t_task->t_transport_active)) { 5746 if (atomic_read(&cmd->t_task.t_transport_active)) {
5869 DEBUG_DO("got t_transport_active = 1 for task: %p, dev:" 5747 DEBUG_DO("got t_transport_active = 1 for task: %p, dev:"
5870 " %p\n", task, dev); 5748 " %p\n", task, dev);
5871 5749
5872 if (atomic_read(&cmd->t_task->t_fe_count)) { 5750 if (atomic_read(&cmd->t_task.t_fe_count)) {
5873 spin_unlock_irqrestore( 5751 spin_unlock_irqrestore(
5874 &cmd->t_task->t_state_lock, flags); 5752 &cmd->t_task.t_state_lock, flags);
5875 transport_send_check_condition_and_sense( 5753 transport_send_check_condition_and_sense(
5876 cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 5754 cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE,
5877 0); 5755 0);
5878 transport_remove_cmd_from_queue(cmd, 5756 transport_remove_cmd_from_queue(cmd,
5879 &cmd->se_lun->lun_se_dev->dev_queue_obj); 5757 &cmd->se_dev->dev_queue_obj);
5880 5758
5881 transport_lun_remove_cmd(cmd); 5759 transport_lun_remove_cmd(cmd);
5882 transport_cmd_check_stop(cmd, 1, 0); 5760 transport_cmd_check_stop(cmd, 1, 0);
5883 } else { 5761 } else {
5884 spin_unlock_irqrestore( 5762 spin_unlock_irqrestore(
5885 &cmd->t_task->t_state_lock, flags); 5763 &cmd->t_task.t_state_lock, flags);
5886 5764
5887 transport_remove_cmd_from_queue(cmd, 5765 transport_remove_cmd_from_queue(cmd,
5888 &cmd->se_lun->lun_se_dev->dev_queue_obj); 5766 &cmd->se_dev->dev_queue_obj);
5889 5767
5890 transport_lun_remove_cmd(cmd); 5768 transport_lun_remove_cmd(cmd);
5891 5769
@@ -5899,22 +5777,22 @@ static void transport_processing_shutdown(struct se_device *dev)
5899 DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n", 5777 DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n",
5900 task, dev); 5778 task, dev);
5901 5779
5902 if (atomic_read(&cmd->t_task->t_fe_count)) { 5780 if (atomic_read(&cmd->t_task.t_fe_count)) {
5903 spin_unlock_irqrestore( 5781 spin_unlock_irqrestore(
5904 &cmd->t_task->t_state_lock, flags); 5782 &cmd->t_task.t_state_lock, flags);
5905 transport_send_check_condition_and_sense(cmd, 5783 transport_send_check_condition_and_sense(cmd,
5906 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); 5784 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
5907 transport_remove_cmd_from_queue(cmd, 5785 transport_remove_cmd_from_queue(cmd,
5908 &cmd->se_lun->lun_se_dev->dev_queue_obj); 5786 &cmd->se_dev->dev_queue_obj);
5909 5787
5910 transport_lun_remove_cmd(cmd); 5788 transport_lun_remove_cmd(cmd);
5911 transport_cmd_check_stop(cmd, 1, 0); 5789 transport_cmd_check_stop(cmd, 1, 0);
5912 } else { 5790 } else {
5913 spin_unlock_irqrestore( 5791 spin_unlock_irqrestore(
5914 &cmd->t_task->t_state_lock, flags); 5792 &cmd->t_task.t_state_lock, flags);
5915 5793
5916 transport_remove_cmd_from_queue(cmd, 5794 transport_remove_cmd_from_queue(cmd,
5917 &cmd->se_lun->lun_se_dev->dev_queue_obj); 5795 &cmd->se_dev->dev_queue_obj);
5918 transport_lun_remove_cmd(cmd); 5796 transport_lun_remove_cmd(cmd);
5919 5797
5920 if (transport_cmd_check_stop(cmd, 1, 0)) 5798 if (transport_cmd_check_stop(cmd, 1, 0))
@@ -5927,15 +5805,12 @@ static void transport_processing_shutdown(struct se_device *dev)
5927 /* 5805 /*
5928 * Empty the struct se_device's struct se_cmd list. 5806 * Empty the struct se_device's struct se_cmd list.
5929 */ 5807 */
5930 while ((qr = transport_get_qr_from_queue(&dev->dev_queue_obj))) { 5808 while ((cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj))) {
5931 cmd = qr->cmd;
5932 state = qr->state;
5933 kfree(qr);
5934 5809
5935 DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n", 5810 DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n",
5936 cmd, state); 5811 cmd, cmd->t_state);
5937 5812
5938 if (atomic_read(&cmd->t_task->t_fe_count)) { 5813 if (atomic_read(&cmd->t_task.t_fe_count)) {
5939 transport_send_check_condition_and_sense(cmd, 5814 transport_send_check_condition_and_sense(cmd,
5940 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); 5815 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
5941 5816
@@ -5955,10 +5830,9 @@ static void transport_processing_shutdown(struct se_device *dev)
5955 */ 5830 */
5956static int transport_processing_thread(void *param) 5831static int transport_processing_thread(void *param)
5957{ 5832{
5958 int ret, t_state; 5833 int ret;
5959 struct se_cmd *cmd; 5834 struct se_cmd *cmd;
5960 struct se_device *dev = (struct se_device *) param; 5835 struct se_device *dev = (struct se_device *) param;
5961 struct se_queue_req *qr;
5962 5836
5963 set_user_nice(current, -20); 5837 set_user_nice(current, -20);
5964 5838
@@ -5980,15 +5854,11 @@ static int transport_processing_thread(void *param)
5980get_cmd: 5854get_cmd:
5981 __transport_execute_tasks(dev); 5855 __transport_execute_tasks(dev);
5982 5856
5983 qr = transport_get_qr_from_queue(&dev->dev_queue_obj); 5857 cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
5984 if (!(qr)) 5858 if (!cmd)
5985 continue; 5859 continue;
5986 5860
5987 cmd = qr->cmd; 5861 switch (cmd->t_state) {
5988 t_state = qr->state;
5989 kfree(qr);
5990
5991 switch (t_state) {
5992 case TRANSPORT_NEW_CMD_MAP: 5862 case TRANSPORT_NEW_CMD_MAP:
5993 if (!(cmd->se_tfo->new_cmd_map)) { 5863 if (!(cmd->se_tfo->new_cmd_map)) {
5994 printk(KERN_ERR "cmd->se_tfo->new_cmd_map is" 5864 printk(KERN_ERR "cmd->se_tfo->new_cmd_map is"
@@ -6039,7 +5909,7 @@ get_cmd:
6039 default: 5909 default:
6040 printk(KERN_ERR "Unknown t_state: %d deferred_t_state:" 5910 printk(KERN_ERR "Unknown t_state: %d deferred_t_state:"
6041 " %d for ITT: 0x%08x i_state: %d on SE LUN:" 5911 " %d for ITT: 0x%08x i_state: %d on SE LUN:"
6042 " %u\n", t_state, cmd->deferred_t_state, 5912 " %u\n", cmd->t_state, cmd->deferred_t_state,
6043 cmd->se_tfo->get_task_tag(cmd), 5913 cmd->se_tfo->get_task_tag(cmd),
6044 cmd->se_tfo->get_cmd_state(cmd), 5914 cmd->se_tfo->get_cmd_state(cmd),
6045 cmd->se_lun->unpacked_lun); 5915 cmd->se_lun->unpacked_lun);
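
[editor's note] The transport_processing_shutdown() and transport_processing_thread() hunks above carry the core of the struct se_queue_req removal: the processing thread no longer dequeues a heap-allocated wrapper, copies out its state field, and kfree()s it, because struct se_cmd now embeds its own queue node and t_state travels with the command. A minimal sketch of the before/after dequeue pattern, using simplified stand-in names (queue_req_sketch, cmd_sketch, queue_get_cmd are illustrative, not the actual target-core API):

    #include <linux/list.h>

    /* Before: a separately allocated wrapper carried the command and state. */
    struct queue_req_sketch {
            struct list_head        qr_list;
            int                     state;
            void                    *cmd;
    };

    /* After: the command embeds its own node, so dequeue is pure list ops. */
    struct cmd_sketch {
            struct list_head        se_queue_node;
            int                     t_state;
    };

    static struct cmd_sketch *queue_get_cmd(struct list_head *queue)
    {
            struct cmd_sketch *cmd;

            if (list_empty(queue))
                    return NULL;
            cmd = list_first_entry(queue, struct cmd_sketch, se_queue_node);
            list_del_init(&cmd->se_queue_node);
            return cmd;
    }

Besides saving an allocation per enqueue, this removes the qr->cmd back-pointer and the possibility of the wrapper allocation failing mid-I/O.
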
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index 16f41d188e26..3b8b02cf4b41 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -208,7 +208,7 @@ void core_scsi3_ua_for_check_condition(
208 u8 *asc, 208 u8 *asc,
209 u8 *ascq) 209 u8 *ascq)
210{ 210{
211 struct se_device *dev = cmd->se_lun->lun_se_dev; 211 struct se_device *dev = cmd->se_dev;
212 struct se_dev_entry *deve; 212 struct se_dev_entry *deve;
213 struct se_session *sess = cmd->se_sess; 213 struct se_session *sess = cmd->se_sess;
214 struct se_node_acl *nacl; 214 struct se_node_acl *nacl;
@@ -270,7 +270,7 @@ void core_scsi3_ua_for_check_condition(
270 nacl->se_tpg->se_tpg_tfo->get_fabric_name(), 270 nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
271 (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" : 271 (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
272 "Releasing", dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl, 272 "Releasing", dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl,
273 cmd->orig_fe_lun, cmd->t_task->t_task_cdb[0], *asc, *ascq); 273 cmd->orig_fe_lun, cmd->t_task.t_task_cdb[0], *asc, *ascq);
274} 274}
275 275
276int core_scsi3_ua_clear_for_request_sense( 276int core_scsi3_ua_clear_for_request_sense(
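
[editor's note] The target_core_ua.c hunks are mechanical instances of two conversions that run through the whole series: cmd->se_lun->lun_se_dev collapses to the cached cmd->se_dev, and t_task accesses switch from -> to . because t_task is now a member of struct se_cmd rather than a separately allocated object. A sketch of what the embedding buys (struct and field names simplified from the kernel originals):

    #include <linux/printk.h>

    struct t_task_sketch {
            unsigned char   t_task_cdb[16];
    };

    struct se_cmd_sketch {
            struct t_task_sketch    t_task;         /* embedded, never NULL */
    };

    static void dump_cdb(struct se_cmd_sketch *cmd)
    {
            /*
             * An embedded member needs no allocation and no NULL check
             * before use; compare the removed "if (!cmd->t_task)" guard
             * in transport_processing_shutdown() above.
             */
            printk(KERN_INFO "CDB[0]: 0x%02x\n", cmd->t_task.t_task_cdb[0]);
    }
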
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 19b2b9948314..6d9553bbba30 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -72,16 +72,16 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
72 caller, cmd, cmd->cdb); 72 caller, cmd, cmd->cdb);
73 printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun); 73 printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun);
74 74
75 task = se_cmd->t_task; 75 task = &se_cmd->t_task;
76 printk(KERN_INFO "%s: cmd %p task %p se_num %u buf %p len %u se_cmd_flags <0x%x>\n", 76 printk(KERN_INFO "%s: cmd %p task %p se_num %u buf %p len %u se_cmd_flags <0x%x>\n",
77 caller, cmd, task, task->t_tasks_se_num, 77 caller, cmd, task, task->t_tasks_se_num,
78 task->t_task_buf, se_cmd->data_length, se_cmd->se_cmd_flags); 78 task->t_task_buf, se_cmd->data_length, se_cmd->se_cmd_flags);
79 if (task->t_mem_list) 79
80 list_for_each_entry(mem, task->t_mem_list, se_list) 80 list_for_each_entry(mem, &task->t_mem_list, se_list)
81 printk(KERN_INFO "%s: cmd %p mem %p page %p " 81 printk(KERN_INFO "%s: cmd %p mem %p page %p "
82 "len 0x%x off 0x%x\n", 82 "len 0x%x off 0x%x\n",
83 caller, cmd, mem, 83 caller, cmd, mem,
84 mem->se_page, mem->se_len, mem->se_off); 84 mem->se_page, mem->se_len, mem->se_off);
85 sp = cmd->seq; 85 sp = cmd->seq;
86 if (sp) { 86 if (sp) {
87 ep = fc_seq_exch(sp); 87 ep = fc_seq_exch(sp);
@@ -262,9 +262,9 @@ int ft_write_pending(struct se_cmd *se_cmd)
262 * TCM/LIO target 262 * TCM/LIO target
263 */ 263 */
264 transport_do_task_sg_chain(se_cmd); 264 transport_do_task_sg_chain(se_cmd);
265 cmd->sg = se_cmd->t_task->t_tasks_sg_chained; 265 cmd->sg = se_cmd->t_task.t_tasks_sg_chained;
266 cmd->sg_cnt = 266 cmd->sg_cnt =
267 se_cmd->t_task->t_tasks_sg_chained_no; 267 se_cmd->t_task.t_tasks_sg_chained_no;
268 } 268 }
269 if (cmd->sg && lport->tt.ddp_setup(lport, ep->xid, 269 if (cmd->sg && lport->tt.ddp_setup(lport, ep->xid,
270 cmd->sg, cmd->sg_cnt)) 270 cmd->sg, cmd->sg_cnt))
@@ -438,7 +438,7 @@ static void ft_send_tm(struct ft_cmd *cmd)
438 switch (fcp->fc_tm_flags) { 438 switch (fcp->fc_tm_flags) {
439 case FCP_TMF_LUN_RESET: 439 case FCP_TMF_LUN_RESET:
440 cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun); 440 cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
441 if (transport_get_lun_for_tmr(&cmd->se_cmd, cmd->lun) < 0) { 441 if (transport_lookup_tmr_lun(&cmd->se_cmd, cmd->lun) < 0) {
442 /* 442 /*
443 * Make sure to clean up newly allocated TMR request 443 * Make sure to clean up newly allocated TMR request
444 * since "unable to handle TMR request because failed 444 * since "unable to handle TMR request because failed
@@ -637,7 +637,7 @@ static void ft_send_cmd(struct ft_cmd *cmd)
637 fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd); 637 fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
638 638
639 cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun); 639 cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
640 ret = transport_get_lun_for_cmd(&cmd->se_cmd, cmd->lun); 640 ret = transport_lookup_cmd_lun(&cmd->se_cmd, cmd->lun);
641 if (ret < 0) { 641 if (ret < 0) {
642 ft_dump_cmd(cmd, __func__); 642 ft_dump_cmd(cmd, __func__);
643 transport_send_check_condition_and_sense(&cmd->se_cmd, 643 transport_send_check_condition_and_sense(&cmd->se_cmd,
@@ -650,13 +650,13 @@ static void ft_send_cmd(struct ft_cmd *cmd)
650 FT_IO_DBG("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret); 650 FT_IO_DBG("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret);
651 ft_dump_cmd(cmd, __func__); 651 ft_dump_cmd(cmd, __func__);
652 652
653 if (ret == -1) { 653 if (ret == -ENOMEM) {
654 transport_send_check_condition_and_sense(se_cmd, 654 transport_send_check_condition_and_sense(se_cmd,
655 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); 655 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
656 transport_generic_free_cmd(se_cmd, 0, 1, 0); 656 transport_generic_free_cmd(se_cmd, 0, 1, 0);
657 return; 657 return;
658 } 658 }
659 if (ret == -2) { 659 if (ret == -EINVAL) {
660 if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT) 660 if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
661 ft_queue_status(se_cmd); 661 ft_queue_status(se_cmd);
662 else 662 else
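
[editor's note] The ft_send_cmd() hunk is the fabric-side half of the shortlog's 'Handle functions returning "-2"' item: callees now return standard errno values, so this caller tests -ENOMEM and -EINVAL instead of the magic -1 and -2. A hedged sketch of the convention (alloc_task_sketch is a stand-in, not the real allocator):

    #include <linux/errno.h>
    #include <linux/types.h>

    static int alloc_task_sketch(bool have_memory, bool cdb_valid)
    {
            if (!have_memory)
                    return -ENOMEM;         /* was the magic -1 */
            if (!cdb_valid)
                    return -EINVAL;         /* was the magic -2 */
            return 0;
    }

Callers can then distinguish a resource failure (fail the command with a CHECK CONDITION and free it) from an invalid request (queue status or reject), which is exactly the split the two if-blocks above make.
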
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 8c5067c65720..58e4745749db 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -331,7 +331,7 @@ static struct se_portal_group *ft_add_tpg(
331 transport_init_queue_obj(&tpg->qobj); 331 transport_init_queue_obj(&tpg->qobj);
332 332
333 ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg, 333 ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg,
334 (void *)tpg, TRANSPORT_TPG_TYPE_NORMAL); 334 tpg, TRANSPORT_TPG_TYPE_NORMAL);
335 if (ret < 0) { 335 if (ret < 0) {
336 kfree(tpg); 336 kfree(tpg);
337 return NULL; 337 return NULL;
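
[editor's note] The one-line tfc_conf.c change is an instance of "Remove unneeded casts to void*": any object pointer in C converts implicitly when passed to a void * parameter, so the explicit cast is noise and can mask a wrong argument if the prototype later changes. Illustrative only (register_priv_sketch and tpg_sketch are hypothetical):

    static void register_priv_sketch(void *priv);

    struct tpg_sketch {
            int index;
    };

    static void example(struct tpg_sketch *tpg)
    {
            register_priv_sketch((void *)tpg);      /* old: redundant cast */
            register_priv_sketch(tpg);              /* new: implicit, identical */
    }
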
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 47efcfb9f4b8..f18af6e99b83 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -90,15 +90,14 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
90 lport = ep->lp; 90 lport = ep->lp;
91 cmd->seq = lport->tt.seq_start_next(cmd->seq); 91 cmd->seq = lport->tt.seq_start_next(cmd->seq);
92 92
93 task = se_cmd->t_task; 93 task = &se_cmd->t_task;
94 BUG_ON(!task);
95 remaining = se_cmd->data_length; 94 remaining = se_cmd->data_length;
96 95
97 /* 96 /*
98 * Setup to use first mem list entry if any. 97 * Setup to use first mem list entry if any.
99 */ 98 */
100 if (task->t_tasks_se_num) { 99 if (task->t_tasks_se_num) {
101 mem = list_first_entry(task->t_mem_list, 100 mem = list_first_entry(&task->t_mem_list,
102 struct se_mem, se_list); 101 struct se_mem, se_list);
103 mem_len = mem->se_len; 102 mem_len = mem->se_len;
104 mem_off = mem->se_off; 103 mem_off = mem->se_off;
@@ -236,8 +235,7 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
236 u32 f_ctl; 235 u32 f_ctl;
237 void *buf; 236 void *buf;
238 237
239 task = se_cmd->t_task; 238 task = &se_cmd->t_task;
240 BUG_ON(!task);
241 239
242 fh = fc_frame_header_get(fp); 240 fh = fc_frame_header_get(fp);
243 if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF)) 241 if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF))
@@ -315,7 +313,7 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
315 * Setup to use first mem list entry if any. 313 * Setup to use first mem list entry if any.
316 */ 314 */
317 if (task->t_tasks_se_num) { 315 if (task->t_tasks_se_num) {
318 mem = list_first_entry(task->t_mem_list, 316 mem = list_first_entry(&task->t_mem_list,
319 struct se_mem, se_list); 317 struct se_mem, se_list);
320 mem_len = mem->se_len; 318 mem_len = mem->se_len;
321 mem_off = mem->se_off; 319 mem_off = mem->se_off;
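
[editor's note] The tfc_io.c hunks follow from "Make t_mem_list and t_mem_list_bidi members of t_task": with t_mem_list an embedded struct list_head rather than a pointer, the BUG_ON(!task) guards go away (&se_cmd->t_task cannot be NULL) and every walk takes the address of the member. A sketch of initializing and walking such an embedded list, with simplified stand-in types:

    #include <linux/list.h>
    #include <linux/types.h>

    struct se_mem_sketch {
            struct list_head        se_list;
            u32                     se_len;
            u32                     se_off;
    };

    struct task_area_sketch {
            struct list_head        t_mem_list;     /* head lives in the task */
    };

    static void task_area_init(struct task_area_sketch *task)
    {
            INIT_LIST_HEAD(&task->t_mem_list);      /* init once at setup */
    }

    static u32 total_mem_len(struct task_area_sketch *task)
    {
            struct se_mem_sketch *mem;
            u32 len = 0;

            list_for_each_entry(mem, &task->t_mem_list, se_list)
                    len += mem->se_len;
            return len;
    }
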