Diffstat (limited to 'drivers/target/loopback/tcm_loop.c')
-rw-r--r--  drivers/target/loopback/tcm_loop.c  56
1 file changed, 22 insertions, 34 deletions
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 2f19e1926493..eeb7ee7ab9f7 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -118,17 +118,16 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
 	 * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
 	 */
 	if (scsi_bidi_cmnd(sc))
-		se_cmd->t_task->t_tasks_bidi = 1;
+		se_cmd->t_task.t_tasks_bidi = 1;
 	/*
 	 * Locate the struct se_lun pointer and attach it to struct se_cmd
 	 */
-	if (transport_get_lun_for_cmd(se_cmd, tl_cmd->sc->device->lun) < 0) {
+	if (transport_lookup_cmd_lun(se_cmd, tl_cmd->sc->device->lun) < 0) {
 		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
 		set_host_byte(sc, DID_NO_CONNECT);
 		return NULL;
 	}
 
-	transport_device_setup_cmd(se_cmd);
 	return se_cmd;
 }
 
@@ -143,17 +142,17 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
 	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
 				struct tcm_loop_cmd, tl_se_cmd);
 	struct scsi_cmnd *sc = tl_cmd->sc;
-	void *mem_ptr, *mem_bidi_ptr = NULL;
-	u32 sg_no_bidi = 0;
+	struct scatterlist *sgl_bidi = NULL;
+	u32 sgl_bidi_count = 0;
 	int ret;
 	/*
 	 * Allocate the necessary tasks to complete the received CDB+data
 	 */
-	ret = transport_generic_allocate_tasks(se_cmd, tl_cmd->sc->cmnd);
-	if (ret == -1) {
+	ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
+	if (ret == -ENOMEM) {
 		/* Out of Resources */
 		return PYX_TRANSPORT_LU_COMM_FAILURE;
-	} else if (ret == -2) {
+	} else if (ret == -EINVAL) {
 		/*
 		 * Handle case for SAM_STAT_RESERVATION_CONFLICT
 		 */
@@ -165,35 +164,24 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
 		 */
 		return PYX_TRANSPORT_USE_SENSE_REASON;
 	}
+
 	/*
-	 * Setup the struct scatterlist memory from the received
-	 * struct scsi_cmnd.
+	 * For BIDI commands, pass in the extra READ buffer
+	 * to transport_generic_map_mem_to_cmd() below..
 	 */
-	if (scsi_sg_count(sc)) {
-		se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM;
-		mem_ptr = (void *)scsi_sglist(sc);
-		/*
-		 * For BIDI commands, pass in the extra READ buffer
-		 * to transport_generic_map_mem_to_cmd() below..
-		 */
-		if (se_cmd->t_task->t_tasks_bidi) {
-			struct scsi_data_buffer *sdb = scsi_in(sc);
+	if (se_cmd->t_task.t_tasks_bidi) {
+		struct scsi_data_buffer *sdb = scsi_in(sc);
 
-			mem_bidi_ptr = (void *)sdb->table.sgl;
-			sg_no_bidi = sdb->table.nents;
-		}
-	} else {
-		/*
-		 * Used for DMA_NONE
-		 */
-		mem_ptr = NULL;
+		sgl_bidi = sdb->table.sgl;
+		sgl_bidi_count = sdb->table.nents;
 	}
+
 	/*
 	 * Map the SG memory into struct se_mem->page linked list using the same
 	 * physical memory at sg->page_link.
 	 */
-	ret = transport_generic_map_mem_to_cmd(se_cmd, mem_ptr,
-				scsi_sg_count(sc), mem_bidi_ptr, sg_no_bidi);
+	ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
+			scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
 	if (ret < 0)
 		return PYX_TRANSPORT_LU_COMM_FAILURE;
 
@@ -384,14 +372,14 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
 	/*
 	 * Allocate the LUN_RESET TMR
 	 */
-	se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, (void *)tl_tmr,
+	se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, tl_tmr,
 				TMR_LUN_RESET);
 	if (IS_ERR(se_cmd->se_tmr_req))
 		goto release;
 	/*
 	 * Locate the underlying TCM struct se_lun from sc->device->lun
 	 */
-	if (transport_get_lun_for_tmr(se_cmd, sc->device->lun) < 0)
+	if (transport_lookup_tmr_lun(se_cmd, sc->device->lun) < 0)
 		goto release;
 	/*
 	 * Queue the TMR to TCM Core and sleep waiting for tcm_loop_queue_tm_rsp()
@@ -904,7 +892,7 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd)
 	    ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
 	    (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
 
-		memcpy((void *)sc->sense_buffer, (void *)se_cmd->sense_buffer,
+		memcpy(sc->sense_buffer, se_cmd->sense_buffer,
 				SCSI_SENSE_BUFFERSIZE);
 		sc->result = SAM_STAT_CHECK_CONDITION;
 		set_driver_byte(sc, DRIVER_SENSE);
@@ -1054,7 +1042,7 @@ static int tcm_loop_make_nexus(
 	 * transport_register_session()
 	 */
 	__transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
-			tl_nexus->se_sess, (void *)tl_nexus);
+			tl_nexus->se_sess, tl_nexus);
 	tl_tpg->tl_hba->tl_nexus = tl_nexus;
 	printk(KERN_INFO "TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
 		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
@@ -1242,7 +1230,7 @@ struct se_portal_group *tcm_loop_make_naa_tpg(
 	 * Register the tl_tpg as a emulated SAS TCM Target Endpoint
 	 */
 	ret = core_tpg_register(&tcm_loop_fabric_configfs->tf_ops,
-			wwn, &tl_tpg->tl_se_tpg, (void *)tl_tpg,
+			wwn, &tl_tpg->tl_se_tpg, tl_tpg,
 			TRANSPORT_TPG_TYPE_NORMAL);
 	if (ret < 0)
 		return ERR_PTR(-ENOMEM);
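For context, a minimal sketch of tcm_loop_new_cmd_map() as it looks after this change, assembled from the new-side lines of the hunks above. The reservation-conflict branch and the function tail are abbreviated or assumed, so treat this as an illustrative sketch rather than the verbatim upstream function:

static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0;
	int ret;

	/* Allocate the tasks needed to execute the received CDB */
	ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
	if (ret == -ENOMEM)
		return PYX_TRANSPORT_LU_COMM_FAILURE;	/* out of resources */
	else if (ret == -EINVAL)
		return PYX_TRANSPORT_USE_SENSE_REASON;	/* reservation-conflict handling elided */

	/* For BIDI commands, hand the extra READ scatterlist to the core */
	if (se_cmd->t_task.t_tasks_bidi) {
		struct scsi_data_buffer *sdb = scsi_in(sc);

		sgl_bidi = sdb->table.sgl;
		sgl_bidi_count = sdb->table.nents;
	}

	/* Map the scsi_cmnd scatterlist directly into the se_cmd */
	ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
			scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
	if (ret < 0)
		return PYX_TRANSPORT_LU_COMM_FAILURE;

	/* remainder of the function is not shown in this diff; assumed to return 0 */
	return 0;
}

The net effect of the patch is that the loopback fabric no longer builds its own void-pointer/SCF_PASSTHROUGH_SG_TO_MEM plumbing: it passes the scsi_cmnd scatterlists straight to transport_generic_map_mem_to_cmd(), switches to the renamed transport_lookup_cmd_lun()/transport_lookup_tmr_lun() helpers, and drops the now-unneeded (void *) casts.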