path: root/drivers/target
author	Linus Torvalds <torvalds@linux-foundation.org>	2011-11-06 22:00:42 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-11-06 22:00:42 -0500
commit	6aad3738f6a79fd0ca480eaceefe064cc471f6eb (patch)
tree	08fb9ec4824bf3320af01f29fe84b75f814c0fa0 /drivers/target
parent	02ebbbd481635fd3ce7018e5bb19c18c0f1e4561 (diff)
parent	5bda90c8f20f0af93375721533f4081a40fa6f41 (diff)
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending:
  target: use ->execute_task for all CDB emulation
  target: remove SCF_EMULATE_CDB_ASYNC
  target: refactor transport_emulate_control_cdb
  target: pass the se_task to the CDB emulation callback
  target: split core_scsi3_emulate_pr
  target: split core_scsi2_emulate_crh
  target: Add generic active I/O shutdown logic
  target: add back error handling in transport_complete_task
  target/pscsi: blk_make_request() returns an ERR_PTR()
  target: Remove core TRANSPORT_FREE_CMD_INTR usage
  target: Make TFO->check_stop_free return free status
  iscsi-target: Fix non-immediate TMR handling
  iscsi-target: Add missing CMDSN_LOWER_THAN_EXP check in iscsit_handle_scsi_cmd
  target: Avoid double list_del for aborted se_tmr_req
  target: Minor cleanups to core_tmr_drain_tmr_list
  target: Fix wrong se_tmr being added to drain_tmr_list
  target: Fix incorrect se_cmd assignment in core_tmr_drain_tmr_list
  target: Check -ENOMEM to signal QUEUE_FULL from fabric callbacks
  tcm_loop: Add explicit read buffer memset for SCF_SCSI_CONTROL_SG_IO_CDB
  target: Fix compile warning w/ missing module.h include
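
The thread running through most of these commits is the emulation-callback rework: handlers now take a struct se_task, dig the owning se_cmd out of it themselves, and report completion through transport_complete_task() instead of being funneled through transport_emulate_control_cdb(). A minimal sketch of that handler shape, modeled on target_emulate_noop() in the target_core_cdb.c hunk below — only the function name example_emulate_handler is invented here, every field and helper it uses appears in this diff:

    /*
     * Sketch of the post-series emulation callback shape: receive the
     * se_task, find the owning se_cmd, emulate, and complete in place.
     */
    static int example_emulate_handler(struct se_task *task)
    {
    	struct se_cmd *cmd = task->task_se_cmd;	/* owning command */

    	/* ... emulate the CDB in cmd->t_task_cdb, filling the mapped data page ... */

    	task->task_scsi_status = GOOD;		/* report success ... */
    	transport_complete_task(task, 1);	/* ... and complete the task here */
    	return 0;	/* non-zero would take the request-failure path instead */
    }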
Diffstat (limited to 'drivers/target')
-rw-r--r--	drivers/target/iscsi/iscsi_target.c	11
-rw-r--r--	drivers/target/loopback/tcm_loop.c	23
-rw-r--r--	drivers/target/target_core_alua.c	11
-rw-r--r--	drivers/target/target_core_alua.h	4
-rw-r--r--	drivers/target/target_core_cdb.c	216
-rw-r--r--	drivers/target/target_core_cdb.h	14
-rw-r--r--	drivers/target/target_core_device.c	14
-rw-r--r--	drivers/target/target_core_pr.c	349
-rw-r--r--	drivers/target/target_core_pr.h	7
-rw-r--r--	drivers/target/target_core_pscsi.c	2
-rw-r--r--	drivers/target/target_core_tmr.c	23
-rw-r--r--	drivers/target/target_core_transport.c	392
-rw-r--r--	drivers/target/tcm_fc/tcm_fc.h	2
-rw-r--r--	drivers/target/tcm_fc/tfc_cmd.c	3
14 files changed, 604 insertions, 467 deletions
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 4d01768fcd90..1bf057ed9931 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1079,7 +1079,9 @@ attach_cmd:
1079 */ 1079 */
1080 if (!cmd->immediate_data) { 1080 if (!cmd->immediate_data) {
1081 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); 1081 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
1082 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) 1082 if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
1083 return 0;
1084 else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
1083 return iscsit_add_reject_from_cmd( 1085 return iscsit_add_reject_from_cmd(
1084 ISCSI_REASON_PROTOCOL_ERROR, 1086 ISCSI_REASON_PROTOCOL_ERROR,
1085 1, 0, buf, cmd); 1087 1, 0, buf, cmd);
@@ -1819,17 +1821,16 @@ attach:
1819 int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); 1821 int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
1820 if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) 1822 if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP)
1821 out_of_order_cmdsn = 1; 1823 out_of_order_cmdsn = 1;
1822 else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) { 1824 else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
1823 return 0; 1825 return 0;
1824 } else { /* (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) */ 1826 else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
1825 return iscsit_add_reject_from_cmd( 1827 return iscsit_add_reject_from_cmd(
1826 ISCSI_REASON_PROTOCOL_ERROR, 1828 ISCSI_REASON_PROTOCOL_ERROR,
1827 1, 0, buf, cmd); 1829 1, 0, buf, cmd);
1828 }
1829 } 1830 }
1830 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn); 1831 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
1831 1832
1832 if (out_of_order_cmdsn) 1833 if (out_of_order_cmdsn || !(hdr->opcode & ISCSI_OP_IMMEDIATE))
1833 return 0; 1834 return 0;
1834 /* 1835 /*
1835 * Found the referenced task, send to transport for processing. 1836 * Found the referenced task, send to transport for processing.
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index b15d8cbf630b..3df1c9b8ae6b 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -174,6 +174,24 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
174 sgl_bidi = sdb->table.sgl; 174 sgl_bidi = sdb->table.sgl;
175 sgl_bidi_count = sdb->table.nents; 175 sgl_bidi_count = sdb->table.nents;
176 } 176 }
177 /*
178 * Because some userspace code via scsi-generic do not memset their
179 * associated read buffers, go ahead and do that here for type
180 * SCF_SCSI_CONTROL_SG_IO_CDB. Also note that this is currently
181 * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB
182 * by target core in transport_generic_allocate_tasks() ->
183 * transport_generic_cmd_sequencer().
184 */
185 if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB &&
186 se_cmd->data_direction == DMA_FROM_DEVICE) {
187 struct scatterlist *sg = scsi_sglist(sc);
188 unsigned char *buf = kmap(sg_page(sg)) + sg->offset;
189
190 if (buf != NULL) {
191 memset(buf, 0, sg->length);
192 kunmap(sg_page(sg));
193 }
194 }
177 195
178 /* Tell the core about our preallocated memory */ 196 /* Tell the core about our preallocated memory */
179 ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc), 197 ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
@@ -187,7 +205,7 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
187/* 205/*
188 * Called from struct target_core_fabric_ops->check_stop_free() 206 * Called from struct target_core_fabric_ops->check_stop_free()
189 */ 207 */
190static void tcm_loop_check_stop_free(struct se_cmd *se_cmd) 208static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
191{ 209{
192 /* 210 /*
193 * Do not release struct se_cmd's containing a valid TMR 211 * Do not release struct se_cmd's containing a valid TMR
@@ -195,12 +213,13 @@ static void tcm_loop_check_stop_free(struct se_cmd *se_cmd)
195 * with transport_generic_free_cmd(). 213 * with transport_generic_free_cmd().
196 */ 214 */
197 if (se_cmd->se_tmr_req) 215 if (se_cmd->se_tmr_req)
198 return; 216 return 0;
199 /* 217 /*
200 * Release the struct se_cmd, which will make a callback to release 218 * Release the struct se_cmd, which will make a callback to release
201 * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd() 219 * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd()
202 */ 220 */
203 transport_generic_free_cmd(se_cmd, 0); 221 transport_generic_free_cmd(se_cmd, 0);
222 return 1;
204} 223}
205 224
206static void tcm_loop_release_cmd(struct se_cmd *se_cmd) 225static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
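
The other tcm_loop change follows the new check_stop_free() contract, spelled out in the target_core_transport.c hunk further down: the fabric callback now returns 1 when it released the se_cmd during the call and 0 when it did not. A contract sketch — only the name example_check_stop_free is invented, the body mirrors the tcm_loop code above:

    static int example_check_stop_free(struct se_cmd *se_cmd)
    {
    	if (se_cmd->se_tmr_req)			/* TMR descriptors are torn down elsewhere */
    		return 0;			/* cmd not released by this call */
    	transport_generic_free_cmd(se_cmd, 0);	/* drop the fabric's I/O reference */
    	return 1;				/* cmd released during this call */
    }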
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 8f4447749c71..2739b93983a2 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -58,8 +58,9 @@ struct t10_alua_lu_gp *default_lu_gp;
58 * 58 *
59 * See spc4r17 section 6.27 59 * See spc4r17 section 6.27
60 */ 60 */
61int core_emulate_report_target_port_groups(struct se_cmd *cmd) 61int target_emulate_report_target_port_groups(struct se_task *task)
62{ 62{
63 struct se_cmd *cmd = task->task_se_cmd;
63 struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev; 64 struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
64 struct se_port *port; 65 struct se_port *port;
65 struct t10_alua_tg_pt_gp *tg_pt_gp; 66 struct t10_alua_tg_pt_gp *tg_pt_gp;
@@ -164,6 +165,8 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
164 165
165 transport_kunmap_first_data_page(cmd); 166 transport_kunmap_first_data_page(cmd);
166 167
168 task->task_scsi_status = GOOD;
169 transport_complete_task(task, 1);
167 return 0; 170 return 0;
168} 171}
169 172
@@ -172,8 +175,9 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
172 * 175 *
173 * See spc4r17 section 6.35 176 * See spc4r17 section 6.35
174 */ 177 */
175int core_emulate_set_target_port_groups(struct se_cmd *cmd) 178int target_emulate_set_target_port_groups(struct se_task *task)
176{ 179{
180 struct se_cmd *cmd = task->task_se_cmd;
177 struct se_device *dev = cmd->se_dev; 181 struct se_device *dev = cmd->se_dev;
178 struct se_subsystem_dev *su_dev = dev->se_sub_dev; 182 struct se_subsystem_dev *su_dev = dev->se_sub_dev;
179 struct se_port *port, *l_port = cmd->se_lun->lun_sep; 183 struct se_port *port, *l_port = cmd->se_lun->lun_sep;
@@ -341,7 +345,8 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
341 345
342out: 346out:
343 transport_kunmap_first_data_page(cmd); 347 transport_kunmap_first_data_page(cmd);
344 348 task->task_scsi_status = GOOD;
349 transport_complete_task(task, 1);
345 return 0; 350 return 0;
346} 351}
347 352
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
index c86f97a081ed..c5b4ecd3e745 100644
--- a/drivers/target/target_core_alua.h
+++ b/drivers/target/target_core_alua.h
@@ -66,8 +66,8 @@ extern struct kmem_cache *t10_alua_lu_gp_mem_cache;
66extern struct kmem_cache *t10_alua_tg_pt_gp_cache; 66extern struct kmem_cache *t10_alua_tg_pt_gp_cache;
67extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; 67extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
68 68
69extern int core_emulate_report_target_port_groups(struct se_cmd *); 69extern int target_emulate_report_target_port_groups(struct se_task *);
70extern int core_emulate_set_target_port_groups(struct se_cmd *); 70extern int target_emulate_set_target_port_groups(struct se_task *);
71extern int core_alua_check_nonop_delay(struct se_cmd *); 71extern int core_alua_check_nonop_delay(struct se_cmd *);
72extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *, 72extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
73 struct se_device *, struct se_port *, 73 struct se_device *, struct se_port *,
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 38535eb13929..683ba02b8247 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -32,6 +32,7 @@
32#include <target/target_core_transport.h> 32#include <target/target_core_transport.h>
33#include <target/target_core_fabric_ops.h> 33#include <target/target_core_fabric_ops.h>
34#include "target_core_ua.h" 34#include "target_core_ua.h"
35#include "target_core_cdb.h"
35 36
36static void 37static void
37target_fill_alua_data(struct se_port *port, unsigned char *buf) 38target_fill_alua_data(struct se_port *port, unsigned char *buf)
@@ -679,16 +680,18 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
679 return 0; 680 return 0;
680} 681}
681 682
682static int 683int target_emulate_inquiry(struct se_task *task)
683target_emulate_inquiry(struct se_cmd *cmd)
684{ 684{
685 struct se_cmd *cmd = task->task_se_cmd;
685 struct se_device *dev = cmd->se_dev; 686 struct se_device *dev = cmd->se_dev;
686 unsigned char *buf; 687 unsigned char *buf;
687 unsigned char *cdb = cmd->t_task_cdb; 688 unsigned char *cdb = cmd->t_task_cdb;
688 int p, ret; 689 int p, ret;
689 690
690 if (!(cdb[1] & 0x1)) 691 if (!(cdb[1] & 0x1)) {
691 return target_emulate_inquiry_std(cmd); 692 ret = target_emulate_inquiry_std(cmd);
693 goto out;
694 }
692 695
693 /* 696 /*
694 * Make sure we at least have 4 bytes of INQUIRY response 697 * Make sure we at least have 4 bytes of INQUIRY response
@@ -707,22 +710,30 @@ target_emulate_inquiry(struct se_cmd *cmd)
707 710
708 buf[0] = dev->transport->get_device_type(dev); 711 buf[0] = dev->transport->get_device_type(dev);
709 712
710 for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) 713 for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) {
711 if (cdb[2] == evpd_handlers[p].page) { 714 if (cdb[2] == evpd_handlers[p].page) {
712 buf[1] = cdb[2]; 715 buf[1] = cdb[2];
713 ret = evpd_handlers[p].emulate(cmd, buf); 716 ret = evpd_handlers[p].emulate(cmd, buf);
714 transport_kunmap_first_data_page(cmd); 717 goto out_unmap;
715 return ret;
716 } 718 }
719 }
717 720
718 transport_kunmap_first_data_page(cmd);
719 pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]); 721 pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
720 return -EINVAL; 722 ret = -EINVAL;
723
724out_unmap:
725 transport_kunmap_first_data_page(cmd);
726out:
727 if (!ret) {
728 task->task_scsi_status = GOOD;
729 transport_complete_task(task, 1);
730 }
731 return ret;
721} 732}
722 733
723static int 734int target_emulate_readcapacity(struct se_task *task)
724target_emulate_readcapacity(struct se_cmd *cmd)
725{ 735{
736 struct se_cmd *cmd = task->task_se_cmd;
726 struct se_device *dev = cmd->se_dev; 737 struct se_device *dev = cmd->se_dev;
727 unsigned char *buf; 738 unsigned char *buf;
728 unsigned long long blocks_long = dev->transport->get_blocks(dev); 739 unsigned long long blocks_long = dev->transport->get_blocks(dev);
@@ -751,12 +762,14 @@ target_emulate_readcapacity(struct se_cmd *cmd)
751 762
752 transport_kunmap_first_data_page(cmd); 763 transport_kunmap_first_data_page(cmd);
753 764
765 task->task_scsi_status = GOOD;
766 transport_complete_task(task, 1);
754 return 0; 767 return 0;
755} 768}
756 769
757static int 770int target_emulate_readcapacity_16(struct se_task *task)
758target_emulate_readcapacity_16(struct se_cmd *cmd)
759{ 771{
772 struct se_cmd *cmd = task->task_se_cmd;
760 struct se_device *dev = cmd->se_dev; 773 struct se_device *dev = cmd->se_dev;
761 unsigned char *buf; 774 unsigned char *buf;
762 unsigned long long blocks = dev->transport->get_blocks(dev); 775 unsigned long long blocks = dev->transport->get_blocks(dev);
@@ -784,6 +797,8 @@ target_emulate_readcapacity_16(struct se_cmd *cmd)
784 797
785 transport_kunmap_first_data_page(cmd); 798 transport_kunmap_first_data_page(cmd);
786 799
800 task->task_scsi_status = GOOD;
801 transport_complete_task(task, 1);
787 return 0; 802 return 0;
788} 803}
789 804
@@ -922,14 +937,15 @@ target_modesense_dpofua(unsigned char *buf, int type)
922 } 937 }
923} 938}
924 939
925static int 940int target_emulate_modesense(struct se_task *task)
926target_emulate_modesense(struct se_cmd *cmd, int ten)
927{ 941{
942 struct se_cmd *cmd = task->task_se_cmd;
928 struct se_device *dev = cmd->se_dev; 943 struct se_device *dev = cmd->se_dev;
929 char *cdb = cmd->t_task_cdb; 944 char *cdb = cmd->t_task_cdb;
930 unsigned char *rbuf; 945 unsigned char *rbuf;
931 int type = dev->transport->get_device_type(dev); 946 int type = dev->transport->get_device_type(dev);
932 int offset = (ten) ? 8 : 4; 947 int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10);
948 int offset = ten ? 8 : 4;
933 int length = 0; 949 int length = 0;
934 unsigned char buf[SE_MODE_PAGE_BUF]; 950 unsigned char buf[SE_MODE_PAGE_BUF];
935 951
@@ -995,12 +1011,14 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
995 memcpy(rbuf, buf, offset); 1011 memcpy(rbuf, buf, offset);
996 transport_kunmap_first_data_page(cmd); 1012 transport_kunmap_first_data_page(cmd);
997 1013
1014 task->task_scsi_status = GOOD;
1015 transport_complete_task(task, 1);
998 return 0; 1016 return 0;
999} 1017}
1000 1018
1001static int 1019int target_emulate_request_sense(struct se_task *task)
1002target_emulate_request_sense(struct se_cmd *cmd)
1003{ 1020{
1021 struct se_cmd *cmd = task->task_se_cmd;
1004 unsigned char *cdb = cmd->t_task_cdb; 1022 unsigned char *cdb = cmd->t_task_cdb;
1005 unsigned char *buf; 1023 unsigned char *buf;
1006 u8 ua_asc = 0, ua_ascq = 0; 1024 u8 ua_asc = 0, ua_ascq = 0;
@@ -1059,7 +1077,8 @@ target_emulate_request_sense(struct se_cmd *cmd)
1059 1077
1060end: 1078end:
1061 transport_kunmap_first_data_page(cmd); 1079 transport_kunmap_first_data_page(cmd);
1062 1080 task->task_scsi_status = GOOD;
1081 transport_complete_task(task, 1);
1063 return 0; 1082 return 0;
1064} 1083}
1065 1084
@@ -1067,8 +1086,7 @@ end:
1067 * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support. 1086 * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
1068 * Note this is not used for TCM/pSCSI passthrough 1087 * Note this is not used for TCM/pSCSI passthrough
1069 */ 1088 */
1070static int 1089int target_emulate_unmap(struct se_task *task)
1071target_emulate_unmap(struct se_task *task)
1072{ 1090{
1073 struct se_cmd *cmd = task->task_se_cmd; 1091 struct se_cmd *cmd = task->task_se_cmd;
1074 struct se_device *dev = cmd->se_dev; 1092 struct se_device *dev = cmd->se_dev;
@@ -1079,6 +1097,12 @@ target_emulate_unmap(struct se_task *task)
1079 int ret = 0, offset; 1097 int ret = 0, offset;
1080 unsigned short dl, bd_dl; 1098 unsigned short dl, bd_dl;
1081 1099
1100 if (!dev->transport->do_discard) {
1101 pr_err("UNMAP emulation not supported for: %s\n",
1102 dev->transport->name);
1103 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1104 }
1105
1082 /* First UNMAP block descriptor starts at 8 byte offset */ 1106 /* First UNMAP block descriptor starts at 8 byte offset */
1083 offset = 8; 1107 offset = 8;
1084 size -= 8; 1108 size -= 8;
@@ -1110,7 +1134,10 @@ target_emulate_unmap(struct se_task *task)
1110 1134
1111err: 1135err:
1112 transport_kunmap_first_data_page(cmd); 1136 transport_kunmap_first_data_page(cmd);
1113 1137 if (!ret) {
1138 task->task_scsi_status = GOOD;
1139 transport_complete_task(task, 1);
1140 }
1114 return ret; 1141 return ret;
1115} 1142}
1116 1143
@@ -1118,14 +1145,28 @@ err:
1118 * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support. 1145 * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
1119 * Note this is not used for TCM/pSCSI passthrough 1146 * Note this is not used for TCM/pSCSI passthrough
1120 */ 1147 */
1121static int 1148int target_emulate_write_same(struct se_task *task)
1122target_emulate_write_same(struct se_task *task, u32 num_blocks)
1123{ 1149{
1124 struct se_cmd *cmd = task->task_se_cmd; 1150 struct se_cmd *cmd = task->task_se_cmd;
1125 struct se_device *dev = cmd->se_dev; 1151 struct se_device *dev = cmd->se_dev;
1126 sector_t range; 1152 sector_t range;
1127 sector_t lba = cmd->t_task_lba; 1153 sector_t lba = cmd->t_task_lba;
1154 u32 num_blocks;
1128 int ret; 1155 int ret;
1156
1157 if (!dev->transport->do_discard) {
1158 pr_err("WRITE_SAME emulation not supported"
1159 " for: %s\n", dev->transport->name);
1160 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1161 }
1162
1163 if (cmd->t_task_cdb[0] == WRITE_SAME)
1164 num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
1165 else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
1166 num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
1167 else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
1168 num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);
1169
1129 /* 1170 /*
1130 * Use the explicit range when non zero is supplied, otherwise calculate 1171 * Use the explicit range when non zero is supplied, otherwise calculate
1131 * the remaining range based on ->get_blocks() - starting LBA. 1172 * the remaining range based on ->get_blocks() - starting LBA.
@@ -1144,127 +1185,30 @@ target_emulate_write_same(struct se_task *task, u32 num_blocks)
1144 return ret; 1185 return ret;
1145 } 1186 }
1146 1187
1188 task->task_scsi_status = GOOD;
1189 transport_complete_task(task, 1);
1147 return 0; 1190 return 0;
1148} 1191}
1149 1192
1150int 1193int target_emulate_synchronize_cache(struct se_task *task)
1151transport_emulate_control_cdb(struct se_task *task)
1152{ 1194{
1153 struct se_cmd *cmd = task->task_se_cmd; 1195 struct se_device *dev = task->task_se_cmd->se_dev;
1154 struct se_device *dev = cmd->se_dev;
1155 unsigned short service_action;
1156 int ret = 0;
1157 1196
1158 switch (cmd->t_task_cdb[0]) { 1197 if (!dev->transport->do_sync_cache) {
1159 case INQUIRY: 1198 pr_err("SYNCHRONIZE_CACHE emulation not supported"
1160 ret = target_emulate_inquiry(cmd); 1199 " for: %s\n", dev->transport->name);
1161 break;
1162 case READ_CAPACITY:
1163 ret = target_emulate_readcapacity(cmd);
1164 break;
1165 case MODE_SENSE:
1166 ret = target_emulate_modesense(cmd, 0);
1167 break;
1168 case MODE_SENSE_10:
1169 ret = target_emulate_modesense(cmd, 1);
1170 break;
1171 case SERVICE_ACTION_IN:
1172 switch (cmd->t_task_cdb[1] & 0x1f) {
1173 case SAI_READ_CAPACITY_16:
1174 ret = target_emulate_readcapacity_16(cmd);
1175 break;
1176 default:
1177 pr_err("Unsupported SA: 0x%02x\n",
1178 cmd->t_task_cdb[1] & 0x1f);
1179 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1180 }
1181 break;
1182 case REQUEST_SENSE:
1183 ret = target_emulate_request_sense(cmd);
1184 break;
1185 case UNMAP:
1186 if (!dev->transport->do_discard) {
1187 pr_err("UNMAP emulation not supported for: %s\n",
1188 dev->transport->name);
1189 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1190 }
1191 ret = target_emulate_unmap(task);
1192 break;
1193 case WRITE_SAME:
1194 if (!dev->transport->do_discard) {
1195 pr_err("WRITE_SAME emulation not supported"
1196 " for: %s\n", dev->transport->name);
1197 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1198 }
1199 ret = target_emulate_write_same(task,
1200 get_unaligned_be16(&cmd->t_task_cdb[7]));
1201 break;
1202 case WRITE_SAME_16:
1203 if (!dev->transport->do_discard) {
1204 pr_err("WRITE_SAME_16 emulation not supported"
1205 " for: %s\n", dev->transport->name);
1206 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1207 }
1208 ret = target_emulate_write_same(task,
1209 get_unaligned_be32(&cmd->t_task_cdb[10]));
1210 break;
1211 case VARIABLE_LENGTH_CMD:
1212 service_action =
1213 get_unaligned_be16(&cmd->t_task_cdb[8]);
1214 switch (service_action) {
1215 case WRITE_SAME_32:
1216 if (!dev->transport->do_discard) {
1217 pr_err("WRITE_SAME_32 SA emulation not"
1218 " supported for: %s\n",
1219 dev->transport->name);
1220 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1221 }
1222 ret = target_emulate_write_same(task,
1223 get_unaligned_be32(&cmd->t_task_cdb[28]));
1224 break;
1225 default:
1226 pr_err("Unsupported VARIABLE_LENGTH_CMD SA:"
1227 " 0x%02x\n", service_action);
1228 break;
1229 }
1230 break;
1231 case SYNCHRONIZE_CACHE:
1232 case 0x91: /* SYNCHRONIZE_CACHE_16: */
1233 if (!dev->transport->do_sync_cache) {
1234 pr_err("SYNCHRONIZE_CACHE emulation not supported"
1235 " for: %s\n", dev->transport->name);
1236 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1237 }
1238 dev->transport->do_sync_cache(task);
1239 break;
1240 case ALLOW_MEDIUM_REMOVAL:
1241 case ERASE:
1242 case REZERO_UNIT:
1243 case SEEK_10:
1244 case SPACE:
1245 case START_STOP:
1246 case TEST_UNIT_READY:
1247 case VERIFY:
1248 case WRITE_FILEMARKS:
1249 break;
1250 default:
1251 pr_err("Unsupported SCSI Opcode: 0x%02x for %s\n",
1252 cmd->t_task_cdb[0], dev->transport->name);
1253 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 1200 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1254 } 1201 }
1255 1202
1256 if (ret < 0) 1203 dev->transport->do_sync_cache(task);
1257 return ret; 1204 return 0;
1258 /* 1205}
1259 * Handle the successful completion here unless a caller
1260 * has explictly requested an asychronous completion.
1261 */
1262 if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) {
1263 task->task_scsi_status = GOOD;
1264 transport_complete_task(task, 1);
1265 }
1266 1206
1267 return PYX_TRANSPORT_SENT_TO_TRANSPORT; 1207int target_emulate_noop(struct se_task *task)
1208{
1209 task->task_scsi_status = GOOD;
1210 transport_complete_task(task, 1);
1211 return 0;
1268} 1212}
1269 1213
1270/* 1214/*
diff --git a/drivers/target/target_core_cdb.h b/drivers/target/target_core_cdb.h
new file mode 100644
index 000000000000..ad6b1e393001
--- /dev/null
+++ b/drivers/target/target_core_cdb.h
@@ -0,0 +1,14 @@
1#ifndef TARGET_CORE_CDB_H
2#define TARGET_CORE_CDB_H
3
4int target_emulate_inquiry(struct se_task *task);
5int target_emulate_readcapacity(struct se_task *task);
6int target_emulate_readcapacity_16(struct se_task *task);
7int target_emulate_modesense(struct se_task *task);
8int target_emulate_request_sense(struct se_task *task);
9int target_emulate_unmap(struct se_task *task);
10int target_emulate_write_same(struct se_task *task);
11int target_emulate_synchronize_cache(struct se_task *task);
12int target_emulate_noop(struct se_task *task);
13
14#endif /* TARGET_CORE_CDB_H */
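
These prototypes are only half the story: something has to attach them to a command. The sequencer hunk that performs the attaching is outside this excerpt, so the snippet below is an assumption about that wiring (the helper name example_attach_emulation and its exact shape are invented); what is visible later in target_core_transport.c is the dispatch side, where the execution path calls cmd->execute_task in preference to dev->transport->do_task():

    /*
     * Assumed wiring: for non-passthrough backends the CDB sequencer picks an
     * emulation handler per opcode and stores it in cmd->execute_task.
     */
    static void example_attach_emulation(struct se_cmd *cmd, int passthrough)
    {
    	switch (cmd->t_task_cdb[0]) {
    	case INQUIRY:
    		if (!passthrough)
    			cmd->execute_task = target_emulate_inquiry;
    		break;
    	case READ_CAPACITY:
    		if (!passthrough)
    			cmd->execute_task = target_emulate_readcapacity;
    		break;
    	/* ... UNMAP, WRITE_SAME, SYNCHRONIZE_CACHE, etc. follow the same pattern */
    	}
    }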
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index f870c3bcfd82..28d2c808c56b 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -651,23 +651,15 @@ void core_dev_unexport(
651 lun->lun_se_dev = NULL; 651 lun->lun_se_dev = NULL;
652} 652}
653 653
654int transport_core_report_lun_response(struct se_cmd *se_cmd) 654int target_report_luns(struct se_task *se_task)
655{ 655{
656 struct se_cmd *se_cmd = se_task->task_se_cmd;
656 struct se_dev_entry *deve; 657 struct se_dev_entry *deve;
657 struct se_lun *se_lun; 658 struct se_lun *se_lun;
658 struct se_session *se_sess = se_cmd->se_sess; 659 struct se_session *se_sess = se_cmd->se_sess;
659 struct se_task *se_task;
660 unsigned char *buf; 660 unsigned char *buf;
661 u32 cdb_offset = 0, lun_count = 0, offset = 8, i; 661 u32 cdb_offset = 0, lun_count = 0, offset = 8, i;
662 662
663 list_for_each_entry(se_task, &se_cmd->t_task_list, t_list)
664 break;
665
666 if (!se_task) {
667 pr_err("Unable to locate struct se_task for struct se_cmd\n");
668 return PYX_TRANSPORT_LU_COMM_FAILURE;
669 }
670
671 buf = transport_kmap_first_data_page(se_cmd); 663 buf = transport_kmap_first_data_page(se_cmd);
672 664
673 /* 665 /*
@@ -713,6 +705,8 @@ done:
713 buf[2] = ((lun_count >> 8) & 0xff); 705 buf[2] = ((lun_count >> 8) & 0xff);
714 buf[3] = (lun_count & 0xff); 706 buf[3] = (lun_count & 0xff);
715 707
708 se_task->task_scsi_status = GOOD;
709 transport_complete_task(se_task, 1);
716 return PYX_TRANSPORT_SENT_TO_TRANSPORT; 710 return PYX_TRANSPORT_SENT_TO_TRANSPORT;
717} 711}
718 712
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 0c4f783f924c..5a4ebfc3a54f 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -116,114 +116,21 @@ static int core_scsi2_reservation_check(struct se_cmd *cmd, u32 *pr_reg_type)
116 return ret; 116 return ret;
117} 117}
118 118
119static int core_scsi2_reservation_release(struct se_cmd *cmd)
120{
121 struct se_device *dev = cmd->se_dev;
122 struct se_session *sess = cmd->se_sess;
123 struct se_portal_group *tpg = sess->se_tpg;
124
125 if (!sess || !tpg)
126 return 0;
127
128 spin_lock(&dev->dev_reservation_lock);
129 if (!dev->dev_reserved_node_acl || !sess) {
130 spin_unlock(&dev->dev_reservation_lock);
131 return 0;
132 }
133
134 if (dev->dev_reserved_node_acl != sess->se_node_acl) {
135 spin_unlock(&dev->dev_reservation_lock);
136 return 0;
137 }
138 dev->dev_reserved_node_acl = NULL;
139 dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
140 if (dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID) {
141 dev->dev_res_bin_isid = 0;
142 dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID;
143 }
144 pr_debug("SCSI-2 Released reservation for %s LUN: %u ->"
145 " MAPPED LUN: %u for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
146 cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun,
147 sess->se_node_acl->initiatorname);
148 spin_unlock(&dev->dev_reservation_lock);
149
150 return 0;
151}
152
153static int core_scsi2_reservation_reserve(struct se_cmd *cmd)
154{
155 struct se_device *dev = cmd->se_dev;
156 struct se_session *sess = cmd->se_sess;
157 struct se_portal_group *tpg = sess->se_tpg;
158
159 if ((cmd->t_task_cdb[1] & 0x01) &&
160 (cmd->t_task_cdb[1] & 0x02)) {
161 pr_err("LongIO and Obselete Bits set, returning"
162 " ILLEGAL_REQUEST\n");
163 return PYX_TRANSPORT_ILLEGAL_REQUEST;
164 }
165 /*
166 * This is currently the case for target_core_mod passthrough struct se_cmd
167 * ops
168 */
169 if (!sess || !tpg)
170 return 0;
171
172 spin_lock(&dev->dev_reservation_lock);
173 if (dev->dev_reserved_node_acl &&
174 (dev->dev_reserved_node_acl != sess->se_node_acl)) {
175 pr_err("SCSI-2 RESERVATION CONFLIFT for %s fabric\n",
176 tpg->se_tpg_tfo->get_fabric_name());
177 pr_err("Original reserver LUN: %u %s\n",
178 cmd->se_lun->unpacked_lun,
179 dev->dev_reserved_node_acl->initiatorname);
180 pr_err("Current attempt - LUN: %u -> MAPPED LUN: %u"
181 " from %s \n", cmd->se_lun->unpacked_lun,
182 cmd->se_deve->mapped_lun,
183 sess->se_node_acl->initiatorname);
184 spin_unlock(&dev->dev_reservation_lock);
185 return PYX_TRANSPORT_RESERVATION_CONFLICT;
186 }
187
188 dev->dev_reserved_node_acl = sess->se_node_acl;
189 dev->dev_flags |= DF_SPC2_RESERVATIONS;
190 if (sess->sess_bin_isid != 0) {
191 dev->dev_res_bin_isid = sess->sess_bin_isid;
192 dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID;
193 }
194 pr_debug("SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u"
195 " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
196 cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun,
197 sess->se_node_acl->initiatorname);
198 spin_unlock(&dev->dev_reservation_lock);
199
200 return 0;
201}
202
203static struct t10_pr_registration *core_scsi3_locate_pr_reg(struct se_device *, 119static struct t10_pr_registration *core_scsi3_locate_pr_reg(struct se_device *,
204 struct se_node_acl *, struct se_session *); 120 struct se_node_acl *, struct se_session *);
205static void core_scsi3_put_pr_reg(struct t10_pr_registration *); 121static void core_scsi3_put_pr_reg(struct t10_pr_registration *);
206 122
207/* 123static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd, int *ret)
208 * Setup in target_core_transport.c:transport_generic_cmd_sequencer()
209 * and called via struct se_cmd->transport_emulate_cdb() in TCM processing
210 * thread context.
211 */
212int core_scsi2_emulate_crh(struct se_cmd *cmd)
213{ 124{
214 struct se_session *se_sess = cmd->se_sess; 125 struct se_session *se_sess = cmd->se_sess;
215 struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev; 126 struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
216 struct t10_pr_registration *pr_reg; 127 struct t10_pr_registration *pr_reg;
217 struct t10_reservation *pr_tmpl = &su_dev->t10_pr; 128 struct t10_reservation *pr_tmpl = &su_dev->t10_pr;
218 unsigned char *cdb = &cmd->t_task_cdb[0];
219 int crh = (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS); 129 int crh = (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS);
220 int conflict = 0; 130 int conflict = 0;
221 131
222 if (!se_sess)
223 return 0;
224
225 if (!crh) 132 if (!crh)
226 goto after_crh; 133 return false;
227 134
228 pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, 135 pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
229 se_sess); 136 se_sess);
@@ -251,14 +158,16 @@ int core_scsi2_emulate_crh(struct se_cmd *cmd)
251 */ 158 */
252 if (pr_reg->pr_res_holder) { 159 if (pr_reg->pr_res_holder) {
253 core_scsi3_put_pr_reg(pr_reg); 160 core_scsi3_put_pr_reg(pr_reg);
254 return 0; 161 *ret = 0;
162 return false;
255 } 163 }
256 if ((pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY) || 164 if ((pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY) ||
257 (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY) || 165 (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY) ||
258 (pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) || 166 (pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
259 (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) { 167 (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
260 core_scsi3_put_pr_reg(pr_reg); 168 core_scsi3_put_pr_reg(pr_reg);
261 return 0; 169 *ret = 0;
170 return true;
262 } 171 }
263 core_scsi3_put_pr_reg(pr_reg); 172 core_scsi3_put_pr_reg(pr_reg);
264 conflict = 1; 173 conflict = 1;
@@ -282,18 +191,118 @@ int core_scsi2_emulate_crh(struct se_cmd *cmd)
282 pr_err("Received legacy SPC-2 RESERVE/RELEASE" 191 pr_err("Received legacy SPC-2 RESERVE/RELEASE"
283 " while active SPC-3 registrations exist," 192 " while active SPC-3 registrations exist,"
284 " returning RESERVATION_CONFLICT\n"); 193 " returning RESERVATION_CONFLICT\n");
285 return PYX_TRANSPORT_RESERVATION_CONFLICT; 194 *ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
195 return true;
286 } 196 }
287 197
288after_crh: 198 return false;
289 if ((cdb[0] == RESERVE) || (cdb[0] == RESERVE_10)) 199}
290 return core_scsi2_reservation_reserve(cmd); 200
291 else if ((cdb[0] == RELEASE) || (cdb[0] == RELEASE_10)) 201int target_scsi2_reservation_release(struct se_task *task)
292 return core_scsi2_reservation_release(cmd); 202{
293 else 203 struct se_cmd *cmd = task->task_se_cmd;
294 return PYX_TRANSPORT_INVALID_CDB_FIELD; 204 struct se_device *dev = cmd->se_dev;
205 struct se_session *sess = cmd->se_sess;
206 struct se_portal_group *tpg = sess->se_tpg;
207 int ret = 0;
208
209 if (!sess || !tpg)
210 goto out;
211 if (target_check_scsi2_reservation_conflict(cmd, &ret))
212 goto out;
213
214 ret = 0;
215 spin_lock(&dev->dev_reservation_lock);
216 if (!dev->dev_reserved_node_acl || !sess)
217 goto out_unlock;
218
219 if (dev->dev_reserved_node_acl != sess->se_node_acl)
220 goto out_unlock;
221
222 dev->dev_reserved_node_acl = NULL;
223 dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
224 if (dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID) {
225 dev->dev_res_bin_isid = 0;
226 dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID;
227 }
228 pr_debug("SCSI-2 Released reservation for %s LUN: %u ->"
229 " MAPPED LUN: %u for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
230 cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun,
231 sess->se_node_acl->initiatorname);
232
233out_unlock:
234 spin_unlock(&dev->dev_reservation_lock);
235out:
236 if (!ret) {
237 task->task_scsi_status = GOOD;
238 transport_complete_task(task, 1);
239 }
240 return ret;
241}
242
243int target_scsi2_reservation_reserve(struct se_task *task)
244{
245 struct se_cmd *cmd = task->task_se_cmd;
246 struct se_device *dev = cmd->se_dev;
247 struct se_session *sess = cmd->se_sess;
248 struct se_portal_group *tpg = sess->se_tpg;
249 int ret = 0;
250
251 if ((cmd->t_task_cdb[1] & 0x01) &&
252 (cmd->t_task_cdb[1] & 0x02)) {
253 pr_err("LongIO and Obselete Bits set, returning"
254 " ILLEGAL_REQUEST\n");
255 ret = PYX_TRANSPORT_ILLEGAL_REQUEST;
256 goto out;
257 }
258 /*
259 * This is currently the case for target_core_mod passthrough struct se_cmd
260 * ops
261 */
262 if (!sess || !tpg)
263 goto out;
264 if (target_check_scsi2_reservation_conflict(cmd, &ret))
265 goto out;
266
267 ret = 0;
268 spin_lock(&dev->dev_reservation_lock);
269 if (dev->dev_reserved_node_acl &&
270 (dev->dev_reserved_node_acl != sess->se_node_acl)) {
271 pr_err("SCSI-2 RESERVATION CONFLIFT for %s fabric\n",
272 tpg->se_tpg_tfo->get_fabric_name());
273 pr_err("Original reserver LUN: %u %s\n",
274 cmd->se_lun->unpacked_lun,
275 dev->dev_reserved_node_acl->initiatorname);
276 pr_err("Current attempt - LUN: %u -> MAPPED LUN: %u"
277 " from %s \n", cmd->se_lun->unpacked_lun,
278 cmd->se_deve->mapped_lun,
279 sess->se_node_acl->initiatorname);
280 ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
281 goto out_unlock;
282 }
283
284 dev->dev_reserved_node_acl = sess->se_node_acl;
285 dev->dev_flags |= DF_SPC2_RESERVATIONS;
286 if (sess->sess_bin_isid != 0) {
287 dev->dev_res_bin_isid = sess->sess_bin_isid;
288 dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID;
289 }
290 pr_debug("SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u"
291 " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
292 cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun,
293 sess->se_node_acl->initiatorname);
294
295out_unlock:
296 spin_unlock(&dev->dev_reservation_lock);
297out:
298 if (!ret) {
299 task->task_scsi_status = GOOD;
300 transport_complete_task(task, 1);
301 }
302 return ret;
295} 303}
296 304
305
297/* 306/*
298 * Begin SPC-3/SPC-4 Persistent Reservations emulation support 307 * Begin SPC-3/SPC-4 Persistent Reservations emulation support
299 * 308 *
@@ -418,12 +427,12 @@ static int core_scsi3_pr_seq_non_holder(
418 break; 427 break;
419 case RELEASE: 428 case RELEASE:
420 case RELEASE_10: 429 case RELEASE_10:
421 /* Handled by CRH=1 in core_scsi2_emulate_crh() */ 430 /* Handled by CRH=1 in target_scsi2_reservation_release() */
422 ret = 0; 431 ret = 0;
423 break; 432 break;
424 case RESERVE: 433 case RESERVE:
425 case RESERVE_10: 434 case RESERVE_10:
426 /* Handled by CRH=1 in core_scsi2_emulate_crh() */ 435 /* Handled by CRH=1 in target_scsi2_reservation_reserve() */
427 ret = 0; 436 ret = 0;
428 break; 437 break;
429 case TEST_UNIT_READY: 438 case TEST_UNIT_READY:
@@ -3739,12 +3748,33 @@ static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb)
3739/* 3748/*
3740 * See spc4r17 section 6.14 Table 170 3749 * See spc4r17 section 6.14 Table 170
3741 */ 3750 */
3742static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) 3751int target_scsi3_emulate_pr_out(struct se_task *task)
3743{ 3752{
3753 struct se_cmd *cmd = task->task_se_cmd;
3754 unsigned char *cdb = &cmd->t_task_cdb[0];
3744 unsigned char *buf; 3755 unsigned char *buf;
3745 u64 res_key, sa_res_key; 3756 u64 res_key, sa_res_key;
3746 int sa, scope, type, aptpl; 3757 int sa, scope, type, aptpl;
3747 int spec_i_pt = 0, all_tg_pt = 0, unreg = 0; 3758 int spec_i_pt = 0, all_tg_pt = 0, unreg = 0;
3759 int ret;
3760
3761 /*
3762 * Following spc2r20 5.5.1 Reservations overview:
3763 *
3764 * If a logical unit has been reserved by any RESERVE command and is
3765 * still reserved by any initiator, all PERSISTENT RESERVE IN and all
3766 * PERSISTENT RESERVE OUT commands shall conflict regardless of
3767 * initiator or service action and shall terminate with a RESERVATION
3768 * CONFLICT status.
3769 */
3770 if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS) {
3771 pr_err("Received PERSISTENT_RESERVE CDB while legacy"
3772 " SPC-2 reservation is held, returning"
3773 " RESERVATION_CONFLICT\n");
3774 ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
3775 goto out;
3776 }
3777
3748 /* 3778 /*
3749 * FIXME: A NULL struct se_session pointer means an this is not coming from 3779 * FIXME: A NULL struct se_session pointer means an this is not coming from
3750 * a $FABRIC_MOD's nexus, but from internal passthrough ops. 3780 * a $FABRIC_MOD's nexus, but from internal passthrough ops.
@@ -3755,7 +3785,8 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
3755 if (cmd->data_length < 24) { 3785 if (cmd->data_length < 24) {
3756 pr_warn("SPC-PR: Received PR OUT parameter list" 3786 pr_warn("SPC-PR: Received PR OUT parameter list"
3757 " length too small: %u\n", cmd->data_length); 3787 " length too small: %u\n", cmd->data_length);
3758 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3788 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
3789 goto out;
3759 } 3790 }
3760 /* 3791 /*
3761 * From the PERSISTENT_RESERVE_OUT command descriptor block (CDB) 3792 * From the PERSISTENT_RESERVE_OUT command descriptor block (CDB)
@@ -3788,8 +3819,11 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
3788 /* 3819 /*
3789 * SPEC_I_PT=1 is only valid for Service action: REGISTER 3820 * SPEC_I_PT=1 is only valid for Service action: REGISTER
3790 */ 3821 */
3791 if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER)) 3822 if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER)) {
3792 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3823 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
3824 goto out;
3825 }
3826
3793 /* 3827 /*
3794 * From spc4r17 section 6.14: 3828 * From spc4r17 section 6.14:
3795 * 3829 *
@@ -3803,7 +3837,8 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
3803 (cmd->data_length != 24)) { 3837 (cmd->data_length != 24)) {
3804 pr_warn("SPC-PR: Received PR OUT illegal parameter" 3838 pr_warn("SPC-PR: Received PR OUT illegal parameter"
3805 " list length: %u\n", cmd->data_length); 3839 " list length: %u\n", cmd->data_length);
3806 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3840 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
3841 goto out;
3807 } 3842 }
3808 /* 3843 /*
3809 * (core_scsi3_emulate_pro_* function parameters 3844 * (core_scsi3_emulate_pro_* function parameters
@@ -3812,35 +3847,47 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
3812 */ 3847 */
3813 switch (sa) { 3848 switch (sa) {
3814 case PRO_REGISTER: 3849 case PRO_REGISTER:
3815 return core_scsi3_emulate_pro_register(cmd, 3850 ret = core_scsi3_emulate_pro_register(cmd,
3816 res_key, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 0); 3851 res_key, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 0);
3852 break;
3817 case PRO_RESERVE: 3853 case PRO_RESERVE:
3818 return core_scsi3_emulate_pro_reserve(cmd, 3854 ret = core_scsi3_emulate_pro_reserve(cmd, type, scope, res_key);
3819 type, scope, res_key); 3855 break;
3820 case PRO_RELEASE: 3856 case PRO_RELEASE:
3821 return core_scsi3_emulate_pro_release(cmd, 3857 ret = core_scsi3_emulate_pro_release(cmd, type, scope, res_key);
3822 type, scope, res_key); 3858 break;
3823 case PRO_CLEAR: 3859 case PRO_CLEAR:
3824 return core_scsi3_emulate_pro_clear(cmd, res_key); 3860 ret = core_scsi3_emulate_pro_clear(cmd, res_key);
3861 break;
3825 case PRO_PREEMPT: 3862 case PRO_PREEMPT:
3826 return core_scsi3_emulate_pro_preempt(cmd, type, scope, 3863 ret = core_scsi3_emulate_pro_preempt(cmd, type, scope,
3827 res_key, sa_res_key, 0); 3864 res_key, sa_res_key, 0);
3865 break;
3828 case PRO_PREEMPT_AND_ABORT: 3866 case PRO_PREEMPT_AND_ABORT:
3829 return core_scsi3_emulate_pro_preempt(cmd, type, scope, 3867 ret = core_scsi3_emulate_pro_preempt(cmd, type, scope,
3830 res_key, sa_res_key, 1); 3868 res_key, sa_res_key, 1);
3869 break;
3831 case PRO_REGISTER_AND_IGNORE_EXISTING_KEY: 3870 case PRO_REGISTER_AND_IGNORE_EXISTING_KEY:
3832 return core_scsi3_emulate_pro_register(cmd, 3871 ret = core_scsi3_emulate_pro_register(cmd,
3833 0, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 1); 3872 0, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 1);
3873 break;
3834 case PRO_REGISTER_AND_MOVE: 3874 case PRO_REGISTER_AND_MOVE:
3835 return core_scsi3_emulate_pro_register_and_move(cmd, res_key, 3875 ret = core_scsi3_emulate_pro_register_and_move(cmd, res_key,
3836 sa_res_key, aptpl, unreg); 3876 sa_res_key, aptpl, unreg);
3877 break;
3837 default: 3878 default:
3838 pr_err("Unknown PERSISTENT_RESERVE_OUT service" 3879 pr_err("Unknown PERSISTENT_RESERVE_OUT service"
3839 " action: 0x%02x\n", cdb[1] & 0x1f); 3880 " action: 0x%02x\n", cdb[1] & 0x1f);
3840 return PYX_TRANSPORT_INVALID_CDB_FIELD; 3881 ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
3882 break;
3841 } 3883 }
3842 3884
3843 return PYX_TRANSPORT_INVALID_CDB_FIELD; 3885out:
3886 if (!ret) {
3887 task->task_scsi_status = GOOD;
3888 transport_complete_task(task, 1);
3889 }
3890 return ret;
3844} 3891}
3845 3892
3846/* 3893/*
@@ -4190,29 +4237,11 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
4190 return 0; 4237 return 0;
4191} 4238}
4192 4239
4193static int core_scsi3_emulate_pr_in(struct se_cmd *cmd, unsigned char *cdb) 4240int target_scsi3_emulate_pr_in(struct se_task *task)
4194{ 4241{
4195 switch (cdb[1] & 0x1f) { 4242 struct se_cmd *cmd = task->task_se_cmd;
4196 case PRI_READ_KEYS: 4243 int ret;
4197 return core_scsi3_pri_read_keys(cmd);
4198 case PRI_READ_RESERVATION:
4199 return core_scsi3_pri_read_reservation(cmd);
4200 case PRI_REPORT_CAPABILITIES:
4201 return core_scsi3_pri_report_capabilities(cmd);
4202 case PRI_READ_FULL_STATUS:
4203 return core_scsi3_pri_read_full_status(cmd);
4204 default:
4205 pr_err("Unknown PERSISTENT_RESERVE_IN service"
4206 " action: 0x%02x\n", cdb[1] & 0x1f);
4207 return PYX_TRANSPORT_INVALID_CDB_FIELD;
4208 }
4209
4210}
4211 4244
4212int core_scsi3_emulate_pr(struct se_cmd *cmd)
4213{
4214 unsigned char *cdb = &cmd->t_task_cdb[0];
4215 struct se_device *dev = cmd->se_dev;
4216 /* 4245 /*
4217 * Following spc2r20 5.5.1 Reservations overview: 4246 * Following spc2r20 5.5.1 Reservations overview:
4218 * 4247 *
@@ -4222,16 +4251,38 @@ int core_scsi3_emulate_pr(struct se_cmd *cmd)
4222 * initiator or service action and shall terminate with a RESERVATION 4251 * initiator or service action and shall terminate with a RESERVATION
4223 * CONFLICT status. 4252 * CONFLICT status.
4224 */ 4253 */
4225 if (dev->dev_flags & DF_SPC2_RESERVATIONS) { 4254 if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS) {
4226 pr_err("Received PERSISTENT_RESERVE CDB while legacy" 4255 pr_err("Received PERSISTENT_RESERVE CDB while legacy"
4227 " SPC-2 reservation is held, returning" 4256 " SPC-2 reservation is held, returning"
4228 " RESERVATION_CONFLICT\n"); 4257 " RESERVATION_CONFLICT\n");
4229 return PYX_TRANSPORT_RESERVATION_CONFLICT; 4258 return PYX_TRANSPORT_RESERVATION_CONFLICT;
4230 } 4259 }
4231 4260
4232 return (cdb[0] == PERSISTENT_RESERVE_OUT) ? 4261 switch (cmd->t_task_cdb[1] & 0x1f) {
4233 core_scsi3_emulate_pr_out(cmd, cdb) : 4262 case PRI_READ_KEYS:
4234 core_scsi3_emulate_pr_in(cmd, cdb); 4263 ret = core_scsi3_pri_read_keys(cmd);
4264 break;
4265 case PRI_READ_RESERVATION:
4266 ret = core_scsi3_pri_read_reservation(cmd);
4267 break;
4268 case PRI_REPORT_CAPABILITIES:
4269 ret = core_scsi3_pri_report_capabilities(cmd);
4270 break;
4271 case PRI_READ_FULL_STATUS:
4272 ret = core_scsi3_pri_read_full_status(cmd);
4273 break;
4274 default:
4275 pr_err("Unknown PERSISTENT_RESERVE_IN service"
4276 " action: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f);
4277 ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
4278 break;
4279 }
4280
4281 if (!ret) {
4282 task->task_scsi_status = GOOD;
4283 transport_complete_task(task, 1);
4284 }
4285 return ret;
4235} 4286}
4236 4287
4237static int core_pt_reservation_check(struct se_cmd *cmd, u32 *pr_res_type) 4288static int core_pt_reservation_check(struct se_cmd *cmd, u32 *pr_res_type)
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
index c8f47d064584..b97f6940dd05 100644
--- a/drivers/target/target_core_pr.h
+++ b/drivers/target/target_core_pr.h
@@ -47,7 +47,8 @@ extern struct kmem_cache *t10_pr_reg_cache;
47 47
48extern int core_pr_dump_initiator_port(struct t10_pr_registration *, 48extern int core_pr_dump_initiator_port(struct t10_pr_registration *,
49 char *, u32); 49 char *, u32);
50extern int core_scsi2_emulate_crh(struct se_cmd *); 50extern int target_scsi2_reservation_release(struct se_task *task);
51extern int target_scsi2_reservation_reserve(struct se_task *task);
51extern int core_scsi3_alloc_aptpl_registration( 52extern int core_scsi3_alloc_aptpl_registration(
52 struct t10_reservation *, u64, 53 struct t10_reservation *, u64,
53 unsigned char *, unsigned char *, u32, 54 unsigned char *, unsigned char *, u32,
@@ -61,7 +62,9 @@ extern void core_scsi3_free_all_registrations(struct se_device *);
61extern unsigned char *core_scsi3_pr_dump_type(int); 62extern unsigned char *core_scsi3_pr_dump_type(int);
62extern int core_scsi3_check_cdb_abort_and_preempt(struct list_head *, 63extern int core_scsi3_check_cdb_abort_and_preempt(struct list_head *,
63 struct se_cmd *); 64 struct se_cmd *);
64extern int core_scsi3_emulate_pr(struct se_cmd *); 65
66extern int target_scsi3_emulate_pr_in(struct se_task *task);
67extern int target_scsi3_emulate_pr_out(struct se_task *task);
65extern int core_setup_reservations(struct se_device *, int); 68extern int core_setup_reservations(struct se_device *, int);
66 69
67#endif /* TARGET_CORE_PR_H */ 70#endif /* TARGET_CORE_PR_H */
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index dad671dee9e9..f941b6232614 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -1091,7 +1091,7 @@ static int pscsi_do_task(struct se_task *task)
1091 1091
1092 req = blk_make_request(pdv->pdv_sd->request_queue, hbio, 1092 req = blk_make_request(pdv->pdv_sd->request_queue, hbio,
1093 GFP_KERNEL); 1093 GFP_KERNEL);
1094 if (!req) { 1094 if (IS_ERR(req)) {
1095 pr_err("pSCSI: blk_make_request() failed\n"); 1095 pr_err("pSCSI: blk_make_request() failed\n");
1096 goto fail; 1096 goto fail;
1097 } 1097 }
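
The one-line pscsi fix reflects the kernel's ERR_PTR convention: blk_make_request() never returns NULL on failure, it returns an encoded error pointer, so the old NULL check could never fire. A generic sketch of the pattern (the PTR_ERR() in the log message is an illustrative extra, not part of the patch):

    req = blk_make_request(pdv->pdv_sd->request_queue, hbio, GFP_KERNEL);
    if (IS_ERR(req)) {
    	/* the return value encodes the errno; decode it for the log only */
    	pr_err("pSCSI: blk_make_request() failed: %ld\n", PTR_ERR(req));
    	goto fail;
    }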
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 570b144a1edb..1d2aaba3f372 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -118,7 +118,7 @@ static void core_tmr_drain_tmr_list(
118 /* 118 /*
119 * Allow the received TMR to return with FUNCTION_COMPLETE. 119 * Allow the received TMR to return with FUNCTION_COMPLETE.
120 */ 120 */
121 if (tmr && (tmr_p == tmr)) 121 if (tmr_p == tmr)
122 continue; 122 continue;
123 123
124 cmd = tmr_p->task_cmd; 124 cmd = tmr_p->task_cmd;
@@ -147,19 +147,18 @@ static void core_tmr_drain_tmr_list(
147 } 147 }
148 spin_unlock(&cmd->t_state_lock); 148 spin_unlock(&cmd->t_state_lock);
149 149
150 list_move_tail(&tmr->tmr_list, &drain_tmr_list); 150 list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
151 } 151 }
152 spin_unlock_irqrestore(&dev->se_tmr_lock, flags); 152 spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
153 153
154 while (!list_empty(&drain_tmr_list)) { 154 list_for_each_entry_safe(tmr_p, tmr_pp, &drain_tmr_list, tmr_list) {
155 tmr = list_entry(drain_tmr_list.next, struct se_tmr_req, tmr_list); 155 list_del_init(&tmr_p->tmr_list);
156 list_del(&tmr->tmr_list);
157 cmd = tmr_p->task_cmd; 156 cmd = tmr_p->task_cmd;
158 157
159 pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x," 158 pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
160 " Response: 0x%02x, t_state: %d\n", 159 " Response: 0x%02x, t_state: %d\n",
161 (preempt_and_abort_list) ? "Preempt" : "", tmr, 160 (preempt_and_abort_list) ? "Preempt" : "", tmr_p,
162 tmr->function, tmr->response, cmd->t_state); 161 tmr_p->function, tmr_p->response, cmd->t_state);
163 162
164 transport_cmd_finish_abort(cmd, 1); 163 transport_cmd_finish_abort(cmd, 1);
165 } 164 }
@@ -330,16 +329,6 @@ static void core_tmr_drain_cmd_list(
330 */ 329 */
331 if (prout_cmd == cmd) 330 if (prout_cmd == cmd)
332 continue; 331 continue;
333 /*
334 * Skip direct processing of TRANSPORT_FREE_CMD_INTR for
335 * HW target mode fabrics.
336 */
337 spin_lock(&cmd->t_state_lock);
338 if (cmd->t_state == TRANSPORT_FREE_CMD_INTR) {
339 spin_unlock(&cmd->t_state_lock);
340 continue;
341 }
342 spin_unlock(&cmd->t_state_lock);
343 332
344 atomic_set(&cmd->t_transport_queue_active, 0); 333 atomic_set(&cmd->t_transport_queue_active, 0);
345 atomic_dec(&qobj->queue_cnt); 334 atomic_dec(&qobj->queue_cnt);
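
Several of the TMR commits above converge on the same splice-and-drain shape: collect the requests to abort on a private list while holding se_tmr_lock, then walk that private list with the _safe iterator so each entry is unlinked exactly once (the earlier code mixed up tmr and tmr_p and could list_del the same node twice). A simplified sketch of the resulting loop, omitting the per-command state checks the real core_tmr_drain_tmr_list() performs:

    LIST_HEAD(drain_tmr_list);

    spin_lock_irqsave(&dev->se_tmr_lock, flags);
    list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
    	if (tmr_p == tmr)	/* keep the TMR that triggered this reset */
    		continue;
    	/* (state checks elided) move the victim onto the private list */
    	list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
    }
    spin_unlock_irqrestore(&dev->se_tmr_lock, flags);

    list_for_each_entry_safe(tmr_p, tmr_pp, &drain_tmr_list, tmr_list) {
    	list_del_init(&tmr_p->tmr_list);	/* unlink exactly once */
    	transport_cmd_finish_abort(tmr_p->task_cmd, 1);
    }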
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index d75255804481..f603b12485bd 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -52,6 +52,7 @@
52#include <target/target_core_configfs.h> 52#include <target/target_core_configfs.h>
53 53
54#include "target_core_alua.h" 54#include "target_core_alua.h"
55#include "target_core_cdb.h"
55#include "target_core_hba.h" 56#include "target_core_hba.h"
56#include "target_core_pr.h" 57#include "target_core_pr.h"
57#include "target_core_ua.h" 58#include "target_core_ua.h"
@@ -268,6 +269,9 @@ struct se_session *transport_init_session(void)
268 } 269 }
269 INIT_LIST_HEAD(&se_sess->sess_list); 270 INIT_LIST_HEAD(&se_sess->sess_list);
270 INIT_LIST_HEAD(&se_sess->sess_acl_list); 271 INIT_LIST_HEAD(&se_sess->sess_acl_list);
272 INIT_LIST_HEAD(&se_sess->sess_cmd_list);
273 INIT_LIST_HEAD(&se_sess->sess_wait_list);
274 spin_lock_init(&se_sess->sess_cmd_lock);
271 275
272 return se_sess; 276 return se_sess;
273} 277}
@@ -514,13 +518,16 @@ static int transport_cmd_check_stop(
514 * Some fabric modules like tcm_loop can release 518 * Some fabric modules like tcm_loop can release
515 * their internally allocated I/O reference now and 519 * their internally allocated I/O reference now and
516 * struct se_cmd now. 520 * struct se_cmd now.
521 *
522 * Fabric modules are expected to return '1' here if the
523 * se_cmd being passed is released at this point,
524 * or zero if not being released.
517 */ 525 */
518 if (cmd->se_tfo->check_stop_free != NULL) { 526 if (cmd->se_tfo->check_stop_free != NULL) {
519 spin_unlock_irqrestore( 527 spin_unlock_irqrestore(
520 &cmd->t_state_lock, flags); 528 &cmd->t_state_lock, flags);
521 529
522 cmd->se_tfo->check_stop_free(cmd); 530 return cmd->se_tfo->check_stop_free(cmd);
523 return 1;
524 } 531 }
525 } 532 }
526 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 533 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@@ -730,6 +737,10 @@ void transport_complete_task(struct se_task *task, int success)
730 complete(&task->task_stop_comp); 737 complete(&task->task_stop_comp);
731 return; 738 return;
732 } 739 }
740
741 if (!success)
742 cmd->t_tasks_failed = 1;
743
733 /* 744 /*
734 * Decrement the outstanding t_task_cdbs_left count. The last 745 * Decrement the outstanding t_task_cdbs_left count. The last
735 * struct se_task from struct se_cmd will complete itself into the 746 * struct se_task from struct se_cmd will complete itself into the
@@ -740,7 +751,7 @@ void transport_complete_task(struct se_task *task, int success)
740 return; 751 return;
741 } 752 }
742 753
743 if (!success || cmd->t_tasks_failed) { 754 if (cmd->t_tasks_failed) {
744 if (!task->task_error_status) { 755 if (!task->task_error_status) {
745 task->task_error_status = 756 task->task_error_status =
746 PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 757 PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
@@ -908,7 +919,7 @@ void transport_remove_task_from_execute_queue(
908} 919}
909 920
910/* 921/*
911 * Handle QUEUE_FULL / -EAGAIN status 922 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
912 */ 923 */
913 924
914static void target_qf_do_work(struct work_struct *work) 925static void target_qf_do_work(struct work_struct *work)
@@ -1498,11 +1509,12 @@ void transport_init_se_cmd(
1498 INIT_LIST_HEAD(&cmd->se_ordered_node); 1509 INIT_LIST_HEAD(&cmd->se_ordered_node);
1499 INIT_LIST_HEAD(&cmd->se_qf_node); 1510 INIT_LIST_HEAD(&cmd->se_qf_node);
1500 INIT_LIST_HEAD(&cmd->se_queue_node); 1511 INIT_LIST_HEAD(&cmd->se_queue_node);
1501 1512 INIT_LIST_HEAD(&cmd->se_cmd_list);
1502 INIT_LIST_HEAD(&cmd->t_task_list); 1513 INIT_LIST_HEAD(&cmd->t_task_list);
1503 init_completion(&cmd->transport_lun_fe_stop_comp); 1514 init_completion(&cmd->transport_lun_fe_stop_comp);
1504 init_completion(&cmd->transport_lun_stop_comp); 1515 init_completion(&cmd->transport_lun_stop_comp);
1505 init_completion(&cmd->t_transport_stop_comp); 1516 init_completion(&cmd->t_transport_stop_comp);
1517 init_completion(&cmd->cmd_wait_comp);
1506 spin_lock_init(&cmd->t_state_lock); 1518 spin_lock_init(&cmd->t_state_lock);
1507 atomic_set(&cmd->transport_dev_active, 1); 1519 atomic_set(&cmd->transport_dev_active, 1);
1508 1520
@@ -1645,9 +1657,7 @@ int transport_handle_cdb_direct(
1645 * and call transport_generic_request_failure() if necessary.. 1657 * and call transport_generic_request_failure() if necessary..
1646 */ 1658 */
1647 ret = transport_generic_new_cmd(cmd); 1659 ret = transport_generic_new_cmd(cmd);
1648 if (ret == -EAGAIN) 1660 if (ret < 0) {
1649 return 0;
1650 else if (ret < 0) {
1651 cmd->transport_error_status = ret; 1661 cmd->transport_error_status = ret;
1652 transport_generic_request_failure(cmd, 0, 1662 transport_generic_request_failure(cmd, 0,
1653 (cmd->data_direction != DMA_TO_DEVICE)); 1663 (cmd->data_direction != DMA_TO_DEVICE));
@@ -1717,13 +1727,6 @@ int transport_generic_handle_tmr(
1717} 1727}
1718EXPORT_SYMBOL(transport_generic_handle_tmr); 1728EXPORT_SYMBOL(transport_generic_handle_tmr);
1719 1729
1720void transport_generic_free_cmd_intr(
1721 struct se_cmd *cmd)
1722{
1723 transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR, false);
1724}
1725EXPORT_SYMBOL(transport_generic_free_cmd_intr);
1726
1727/* 1730/*
1728 * If the task is active, request it to be stopped and sleep until it 1731 * If the task is active, request it to be stopped and sleep until it
1729 * has completed. 1732 * has completed.
@@ -1886,7 +1889,7 @@ static void transport_generic_request_failure(
1886 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); 1889 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
1887 1890
1888 ret = cmd->se_tfo->queue_status(cmd); 1891 ret = cmd->se_tfo->queue_status(cmd);
1889 if (ret == -EAGAIN) 1892 if (ret == -EAGAIN || ret == -ENOMEM)
1890 goto queue_full; 1893 goto queue_full;
1891 goto check_stop; 1894 goto check_stop;
1892 case PYX_TRANSPORT_USE_SENSE_REASON: 1895 case PYX_TRANSPORT_USE_SENSE_REASON:
@@ -1913,7 +1916,7 @@ static void transport_generic_request_failure(
1913 else { 1916 else {
1914 ret = transport_send_check_condition_and_sense(cmd, 1917 ret = transport_send_check_condition_and_sense(cmd,
1915 cmd->scsi_sense_reason, 0); 1918 cmd->scsi_sense_reason, 0);
1916 if (ret == -EAGAIN) 1919 if (ret == -EAGAIN || ret == -ENOMEM)
1917 goto queue_full; 1920 goto queue_full;
1918 } 1921 }
1919 1922
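
The two hunks above (and the matching ones in target_complete_ok_work() further down) make the core treat -ENOMEM from a fabric callback the same way as -EAGAIN: the command is parked for the QUEUE_FULL path and retried later from target_qf_do_work(). A minimal sketch of a fabric ->queue_status() using that convention follows; the example_fabric_* helpers are hypothetical, only the return-value handling comes from this series.

        /* Hypothetical fabric ->queue_status() callback.  Returning -ENOMEM
         * (or -EAGAIN) now asks the core to requeue the command through the
         * QUEUE_FULL path instead of failing it outright. */
        static int example_fabric_queue_status(struct se_cmd *se_cmd)
        {
                if (!example_fabric_response_ring_has_space(se_cmd))
                        return -ENOMEM;

                return example_fabric_send_scsi_status(se_cmd);
        }
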
@@ -2153,62 +2156,20 @@ check_depth:
2153 atomic_set(&cmd->t_transport_sent, 1); 2156 atomic_set(&cmd->t_transport_sent, 1);
2154 2157
2155 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2158 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2156 /*
2157 * The struct se_cmd->transport_emulate_cdb() function pointer is used
2158 * to grab REPORT_LUNS and other CDBs we want to handle before they hit the
2159 * struct se_subsystem_api->do_task() caller below.
2160 */
2161 if (cmd->transport_emulate_cdb) {
2162 error = cmd->transport_emulate_cdb(cmd);
2163 if (error != 0) {
2164 cmd->transport_error_status = error;
2165 spin_lock_irqsave(&cmd->t_state_lock, flags);
2166 task->task_flags &= ~TF_ACTIVE;
2167 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2168 atomic_set(&cmd->t_transport_sent, 0);
2169 transport_stop_tasks_for_cmd(cmd);
2170 atomic_inc(&dev->depth_left);
2171 transport_generic_request_failure(cmd, 0, 1);
2172 goto check_depth;
2173 }
2174 /*
2175 * Handle the successful completion for transport_emulate_cdb()
2176 * for synchronous operation, following SCF_EMULATE_CDB_ASYNC
2177 * Otherwise the caller is expected to complete the task with
2178 * proper status.
2179 */
2180 if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) {
2181 cmd->scsi_status = SAM_STAT_GOOD;
2182 task->task_scsi_status = GOOD;
2183 transport_complete_task(task, 1);
2184 }
2185 } else {
2186 /*
2187 * Currently for all virtual TCM plugins including IBLOCK, FILEIO and
2188 * RAMDISK we use the internal transport_emulate_control_cdb() logic
2189 * with struct se_subsystem_api callers for the primary SPC-3 TYPE_DISK
2190 * LUN emulation code.
2191 *
2192 * For TCM/pSCSI and all other SCF_SCSI_DATA_SG_IO_CDB I/O tasks we
2193 * call ->do_task() directly and let the underlying TCM subsystem plugin
2194 * code handle the CDB emulation.
2195 */
2196 if ((dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) &&
2197 (!(task->task_se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
2198 error = transport_emulate_control_cdb(task);
2199 else
2200 error = dev->transport->do_task(task);
2201 2159
2202 if (error != 0) { 2160 if (cmd->execute_task)
2203 cmd->transport_error_status = error; 2161 error = cmd->execute_task(task);
2204 spin_lock_irqsave(&cmd->t_state_lock, flags); 2162 else
2205 task->task_flags &= ~TF_ACTIVE; 2163 error = dev->transport->do_task(task);
2206 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2164 if (error != 0) {
2207 atomic_set(&cmd->t_transport_sent, 0); 2165 cmd->transport_error_status = error;
2208 transport_stop_tasks_for_cmd(cmd); 2166 spin_lock_irqsave(&cmd->t_state_lock, flags);
2209 atomic_inc(&dev->depth_left); 2167 task->task_flags &= ~TF_ACTIVE;
2210 transport_generic_request_failure(cmd, 0, 1); 2168 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2211 } 2169 atomic_set(&cmd->t_transport_sent, 0);
2170 transport_stop_tasks_for_cmd(cmd);
2171 atomic_inc(&dev->depth_left);
2172 transport_generic_request_failure(cmd, 0, 1);
2212 } 2173 }
2213 2174
2214 goto check_depth; 2175 goto check_depth;
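
After this hunk every piece of CDB emulation is reached through the per-command ->execute_task() pointer with the se_task as its argument, and __transport_execute_tasks() no longer special-cases synchronous completion versus SCF_EMULATE_CDB_ASYNC. Roughly, a converted handler has the shape below; the opcode name and body are illustrative, but the calling convention (se_task argument, zero on success, handler completes the task itself) matches the target_emulate_* functions wired up in the sequencer hunks further down.

        static int target_emulate_foo(struct se_task *task)
        {
                struct se_cmd *cmd = task->task_se_cmd;

                pr_debug("Emulating FOO for cmd: %p length: %u\n",
                         cmd, cmd->data_length);

                /* ... fill in the response payload for the emulated CDB;
                 * return a nonzero PYX_TRANSPORT_* code on failure ... */

                task->task_scsi_status = GOOD;
                transport_complete_task(task, 1);
                return 0;
        }
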
@@ -2642,6 +2603,13 @@ static int transport_generic_cmd_sequencer(
2642 */ 2603 */
2643 } 2604 }
2644 2605
2606 /*
2607 * If we operate in passthrough mode we skip most CDB emulation and
2608 * instead hand the commands down to the physical SCSI device.
2609 */
2610 passthrough =
2611 (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV);
2612
2645 switch (cdb[0]) { 2613 switch (cdb[0]) {
2646 case READ_6: 2614 case READ_6:
2647 sectors = transport_get_sectors_6(cdb, cmd, &sector_ret); 2615 sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
@@ -2721,9 +2689,12 @@ static int transport_generic_cmd_sequencer(
2721 cmd->t_task_lba = transport_lba_32(cdb); 2689 cmd->t_task_lba = transport_lba_32(cdb);
2722 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 2690 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2723 2691
2724 if (dev->transport->transport_type == 2692 /*
2725 TRANSPORT_PLUGIN_PHBA_PDEV) 2693 * Do not allow BIDI commands for passthrough mode.
2694 */
2695 if (passthrough)
2726 goto out_unsupported_cdb; 2696 goto out_unsupported_cdb;
2697
2727 /* 2698 /*
2728 * Setup BIDI XOR callback to be run after I/O completion. 2699 * Setup BIDI XOR callback to be run after I/O completion.
2729 */ 2700 */
@@ -2732,13 +2703,6 @@ static int transport_generic_cmd_sequencer(
2732 break; 2703 break;
2733 case VARIABLE_LENGTH_CMD: 2704 case VARIABLE_LENGTH_CMD:
2734 service_action = get_unaligned_be16(&cdb[8]); 2705 service_action = get_unaligned_be16(&cdb[8]);
2735 /*
2736 * Determine if this is TCM/PSCSI device and we should disable
2737 * internal emulation for this CDB.
2738 */
2739 passthrough = (dev->transport->transport_type ==
2740 TRANSPORT_PLUGIN_PHBA_PDEV);
2741
2742 switch (service_action) { 2706 switch (service_action) {
2743 case XDWRITEREAD_32: 2707 case XDWRITEREAD_32:
2744 sectors = transport_get_sectors_32(cdb, cmd, &sector_ret); 2708 sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
@@ -2752,8 +2716,12 @@ static int transport_generic_cmd_sequencer(
2752 cmd->t_task_lba = transport_lba_64_ext(cdb); 2716 cmd->t_task_lba = transport_lba_64_ext(cdb);
2753 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 2717 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2754 2718
2719 /*
2720 * Do not allow BIDI commands for passthrough mode.
2721 */
2755 if (passthrough) 2722 if (passthrough)
2756 goto out_unsupported_cdb; 2723 goto out_unsupported_cdb;
2724
2757 /* 2725 /*
2758 * Setup BIDI XOR callback to be run after I/O 2726 * Setup BIDI XOR callback to be run after I/O
2759 * completion. 2727 * completion.
@@ -2779,7 +2747,8 @@ static int transport_generic_cmd_sequencer(
2779 2747
2780 if (target_check_write_same_discard(&cdb[10], dev) < 0) 2748 if (target_check_write_same_discard(&cdb[10], dev) < 0)
2781 goto out_invalid_cdb_field; 2749 goto out_invalid_cdb_field;
2782 2750 if (!passthrough)
2751 cmd->execute_task = target_emulate_write_same;
2783 break; 2752 break;
2784 default: 2753 default:
2785 pr_err("VARIABLE_LENGTH_CMD service action" 2754 pr_err("VARIABLE_LENGTH_CMD service action"
@@ -2793,12 +2762,10 @@ static int transport_generic_cmd_sequencer(
2793 /* 2762 /*
2794 * Check for emulated MI_REPORT_TARGET_PGS. 2763 * Check for emulated MI_REPORT_TARGET_PGS.
2795 */ 2764 */
2796 if (cdb[1] == MI_REPORT_TARGET_PGS) { 2765 if (cdb[1] == MI_REPORT_TARGET_PGS &&
2797 cmd->transport_emulate_cdb = 2766 su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
2798 (su_dev->t10_alua.alua_type == 2767 cmd->execute_task =
2799 SPC3_ALUA_EMULATED) ? 2768 target_emulate_report_target_port_groups;
2800 core_emulate_report_target_port_groups :
2801 NULL;
2802 } 2769 }
2803 size = (cdb[6] << 24) | (cdb[7] << 16) | 2770 size = (cdb[6] << 24) | (cdb[7] << 16) |
2804 (cdb[8] << 8) | cdb[9]; 2771 (cdb[8] << 8) | cdb[9];
@@ -2819,8 +2786,15 @@ static int transport_generic_cmd_sequencer(
2819 case MODE_SENSE: 2786 case MODE_SENSE:
2820 size = cdb[4]; 2787 size = cdb[4];
2821 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2788 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2789 if (!passthrough)
2790 cmd->execute_task = target_emulate_modesense;
2822 break; 2791 break;
2823 case MODE_SENSE_10: 2792 case MODE_SENSE_10:
2793 size = (cdb[7] << 8) + cdb[8];
2794 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2795 if (!passthrough)
2796 cmd->execute_task = target_emulate_modesense;
2797 break;
2824 case GPCMD_READ_BUFFER_CAPACITY: 2798 case GPCMD_READ_BUFFER_CAPACITY:
2825 case GPCMD_SEND_OPC: 2799 case GPCMD_SEND_OPC:
2826 case LOG_SELECT: 2800 case LOG_SELECT:
@@ -2840,11 +2814,14 @@ static int transport_generic_cmd_sequencer(
2840 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2814 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2841 break; 2815 break;
2842 case PERSISTENT_RESERVE_IN: 2816 case PERSISTENT_RESERVE_IN:
2817 if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
2818 cmd->execute_task = target_scsi3_emulate_pr_in;
2819 size = (cdb[7] << 8) + cdb[8];
2820 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2821 break;
2843 case PERSISTENT_RESERVE_OUT: 2822 case PERSISTENT_RESERVE_OUT:
2844 cmd->transport_emulate_cdb = 2823 if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
2845 (su_dev->t10_pr.res_type == 2824 cmd->execute_task = target_scsi3_emulate_pr_out;
2846 SPC3_PERSISTENT_RESERVATIONS) ?
2847 core_scsi3_emulate_pr : NULL;
2848 size = (cdb[7] << 8) + cdb[8]; 2825 size = (cdb[7] << 8) + cdb[8];
2849 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2826 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2850 break; 2827 break;
@@ -2863,12 +2840,10 @@ static int transport_generic_cmd_sequencer(
2863 * 2840 *
2864 * Check for emulated MO_SET_TARGET_PGS. 2841 * Check for emulated MO_SET_TARGET_PGS.
2865 */ 2842 */
2866 if (cdb[1] == MO_SET_TARGET_PGS) { 2843 if (cdb[1] == MO_SET_TARGET_PGS &&
2867 cmd->transport_emulate_cdb = 2844 su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
2868 (su_dev->t10_alua.alua_type == 2845 cmd->execute_task =
2869 SPC3_ALUA_EMULATED) ? 2846 target_emulate_set_target_port_groups;
2870 core_emulate_set_target_port_groups :
2871 NULL;
2872 } 2847 }
2873 2848
2874 size = (cdb[6] << 24) | (cdb[7] << 16) | 2849 size = (cdb[6] << 24) | (cdb[7] << 16) |
@@ -2888,6 +2863,8 @@ static int transport_generic_cmd_sequencer(
2888 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) 2863 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
2889 cmd->sam_task_attr = MSG_HEAD_TAG; 2864 cmd->sam_task_attr = MSG_HEAD_TAG;
2890 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2865 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2866 if (!passthrough)
2867 cmd->execute_task = target_emulate_inquiry;
2891 break; 2868 break;
2892 case READ_BUFFER: 2869 case READ_BUFFER:
2893 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; 2870 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
@@ -2896,6 +2873,8 @@ static int transport_generic_cmd_sequencer(
2896 case READ_CAPACITY: 2873 case READ_CAPACITY:
2897 size = READ_CAP_LEN; 2874 size = READ_CAP_LEN;
2898 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2875 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2876 if (!passthrough)
2877 cmd->execute_task = target_emulate_readcapacity;
2899 break; 2878 break;
2900 case READ_MEDIA_SERIAL_NUMBER: 2879 case READ_MEDIA_SERIAL_NUMBER:
2901 case SECURITY_PROTOCOL_IN: 2880 case SECURITY_PROTOCOL_IN:
@@ -2904,6 +2883,21 @@ static int transport_generic_cmd_sequencer(
2904 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2883 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2905 break; 2884 break;
2906 case SERVICE_ACTION_IN: 2885 case SERVICE_ACTION_IN:
2886 switch (cmd->t_task_cdb[1] & 0x1f) {
2887 case SAI_READ_CAPACITY_16:
2888 if (!passthrough)
2889 cmd->execute_task =
2890 target_emulate_readcapacity_16;
2891 break;
2892 default:
2893 if (passthrough)
2894 break;
2895
2896 pr_err("Unsupported SA: 0x%02x\n",
2897 cmd->t_task_cdb[1] & 0x1f);
2898 goto out_unsupported_cdb;
2899 }
2900 /*FALLTHROUGH*/
2907 case ACCESS_CONTROL_IN: 2901 case ACCESS_CONTROL_IN:
2908 case ACCESS_CONTROL_OUT: 2902 case ACCESS_CONTROL_OUT:
2909 case EXTENDED_COPY: 2903 case EXTENDED_COPY:
@@ -2934,6 +2928,8 @@ static int transport_generic_cmd_sequencer(
2934 case REQUEST_SENSE: 2928 case REQUEST_SENSE:
2935 size = cdb[4]; 2929 size = cdb[4];
2936 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2930 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2931 if (!passthrough)
2932 cmd->execute_task = target_emulate_request_sense;
2937 break; 2933 break;
2938 case READ_ELEMENT_STATUS: 2934 case READ_ELEMENT_STATUS:
2939 size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9]; 2935 size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
@@ -2961,10 +2957,8 @@ static int transport_generic_cmd_sequencer(
2961 * is running in SPC_PASSTHROUGH, and wants reservations 2957 * is running in SPC_PASSTHROUGH, and wants reservations
2962 * emulation disabled. 2958 * emulation disabled.
2963 */ 2959 */
2964 cmd->transport_emulate_cdb = 2960 if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
2965 (su_dev->t10_pr.res_type != 2961 cmd->execute_task = target_scsi2_reservation_reserve;
2966 SPC_PASSTHROUGH) ?
2967 core_scsi2_emulate_crh : NULL;
2968 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; 2962 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2969 break; 2963 break;
2970 case RELEASE: 2964 case RELEASE:
@@ -2978,10 +2972,8 @@ static int transport_generic_cmd_sequencer(
2978 else 2972 else
2979 size = cmd->data_length; 2973 size = cmd->data_length;
2980 2974
2981 cmd->transport_emulate_cdb = 2975 if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
2982 (su_dev->t10_pr.res_type != 2976 cmd->execute_task = target_scsi2_reservation_release;
2983 SPC_PASSTHROUGH) ?
2984 core_scsi2_emulate_crh : NULL;
2985 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; 2977 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2986 break; 2978 break;
2987 case SYNCHRONIZE_CACHE: 2979 case SYNCHRONIZE_CACHE:
@@ -3002,16 +2994,9 @@ static int transport_generic_cmd_sequencer(
3002 size = transport_get_size(sectors, cdb, cmd); 2994 size = transport_get_size(sectors, cdb, cmd);
3003 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; 2995 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3004 2996
3005 /* 2997 if (passthrough)
3006 * For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb()
3007 */
3008 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
3009 break; 2998 break;
3010 /* 2999
3011 * Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation
3012 * for SYNCHRONIZE_CACHE* Immed=1 case in __transport_execute_tasks()
3013 */
3014 cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC;
3015 /* 3000 /*
3016 * Check to ensure that LBA + Range does not exceed past end of 3001 * Check to ensure that LBA + Range does not exceed past end of
3017 * device for IBLOCK and FILEIO ->do_sync_cache() backend calls 3002 * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
@@ -3020,10 +3005,13 @@ static int transport_generic_cmd_sequencer(
3020 if (transport_cmd_get_valid_sectors(cmd) < 0) 3005 if (transport_cmd_get_valid_sectors(cmd) < 0)
3021 goto out_invalid_cdb_field; 3006 goto out_invalid_cdb_field;
3022 } 3007 }
3008 cmd->execute_task = target_emulate_synchronize_cache;
3023 break; 3009 break;
3024 case UNMAP: 3010 case UNMAP:
3025 size = get_unaligned_be16(&cdb[7]); 3011 size = get_unaligned_be16(&cdb[7]);
3026 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 3012 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3013 if (!passthrough)
3014 cmd->execute_task = target_emulate_unmap;
3027 break; 3015 break;
3028 case WRITE_SAME_16: 3016 case WRITE_SAME_16:
3029 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); 3017 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
@@ -3042,6 +3030,8 @@ static int transport_generic_cmd_sequencer(
3042 3030
3043 if (target_check_write_same_discard(&cdb[1], dev) < 0) 3031 if (target_check_write_same_discard(&cdb[1], dev) < 0)
3044 goto out_invalid_cdb_field; 3032 goto out_invalid_cdb_field;
3033 if (!passthrough)
3034 cmd->execute_task = target_emulate_write_same;
3045 break; 3035 break;
3046 case WRITE_SAME: 3036 case WRITE_SAME:
3047 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); 3037 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
@@ -3063,26 +3053,31 @@ static int transport_generic_cmd_sequencer(
3063 */ 3053 */
3064 if (target_check_write_same_discard(&cdb[1], dev) < 0) 3054 if (target_check_write_same_discard(&cdb[1], dev) < 0)
3065 goto out_invalid_cdb_field; 3055 goto out_invalid_cdb_field;
3056 if (!passthrough)
3057 cmd->execute_task = target_emulate_write_same;
3066 break; 3058 break;
3067 case ALLOW_MEDIUM_REMOVAL: 3059 case ALLOW_MEDIUM_REMOVAL:
3068 case GPCMD_CLOSE_TRACK:
3069 case ERASE: 3060 case ERASE:
3070 case INITIALIZE_ELEMENT_STATUS:
3071 case GPCMD_LOAD_UNLOAD:
3072 case REZERO_UNIT: 3061 case REZERO_UNIT:
3073 case SEEK_10: 3062 case SEEK_10:
3074 case GPCMD_SET_SPEED:
3075 case SPACE: 3063 case SPACE:
3076 case START_STOP: 3064 case START_STOP:
3077 case TEST_UNIT_READY: 3065 case TEST_UNIT_READY:
3078 case VERIFY: 3066 case VERIFY:
3079 case WRITE_FILEMARKS: 3067 case WRITE_FILEMARKS:
3068 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3069 if (!passthrough)
3070 cmd->execute_task = target_emulate_noop;
3071 break;
3072 case GPCMD_CLOSE_TRACK:
3073 case INITIALIZE_ELEMENT_STATUS:
3074 case GPCMD_LOAD_UNLOAD:
3075 case GPCMD_SET_SPEED:
3080 case MOVE_MEDIUM: 3076 case MOVE_MEDIUM:
3081 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; 3077 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3082 break; 3078 break;
3083 case REPORT_LUNS: 3079 case REPORT_LUNS:
3084 cmd->transport_emulate_cdb = 3080 cmd->execute_task = target_report_luns;
3085 transport_core_report_lun_response;
3086 size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; 3081 size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
3087 /* 3082 /*
3088 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS 3083 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
@@ -3134,6 +3129,11 @@ static int transport_generic_cmd_sequencer(
3134 cmd->data_length = size; 3129 cmd->data_length = size;
3135 } 3130 }
3136 3131
3132 /* reject any command that we don't have a handler for */
3133 if (!(passthrough || cmd->execute_task ||
3134 (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
3135 goto out_unsupported_cdb;
3136
3137 /* Let's limit control cdbs to a page, for simplicity's sake. */ 3137 /* Let's limit control cdbs to a page, for simplicity's sake. */
3138 if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) && 3138 if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
3139 size > PAGE_SIZE) 3139 size > PAGE_SIZE)
@@ -3308,7 +3308,7 @@ static void target_complete_ok_work(struct work_struct *work)
3308 if (cmd->scsi_status) { 3308 if (cmd->scsi_status) {
3309 ret = transport_send_check_condition_and_sense( 3309 ret = transport_send_check_condition_and_sense(
3310 cmd, reason, 1); 3310 cmd, reason, 1);
3311 if (ret == -EAGAIN) 3311 if (ret == -EAGAIN || ret == -ENOMEM)
3312 goto queue_full; 3312 goto queue_full;
3313 3313
3314 transport_lun_remove_cmd(cmd); 3314 transport_lun_remove_cmd(cmd);
@@ -3333,7 +3333,7 @@ static void target_complete_ok_work(struct work_struct *work)
3333 spin_unlock(&cmd->se_lun->lun_sep_lock); 3333 spin_unlock(&cmd->se_lun->lun_sep_lock);
3334 3334
3335 ret = cmd->se_tfo->queue_data_in(cmd); 3335 ret = cmd->se_tfo->queue_data_in(cmd);
3336 if (ret == -EAGAIN) 3336 if (ret == -EAGAIN || ret == -ENOMEM)
3337 goto queue_full; 3337 goto queue_full;
3338 break; 3338 break;
3339 case DMA_TO_DEVICE: 3339 case DMA_TO_DEVICE:
@@ -3354,14 +3354,14 @@ static void target_complete_ok_work(struct work_struct *work)
3354 } 3354 }
3355 spin_unlock(&cmd->se_lun->lun_sep_lock); 3355 spin_unlock(&cmd->se_lun->lun_sep_lock);
3356 ret = cmd->se_tfo->queue_data_in(cmd); 3356 ret = cmd->se_tfo->queue_data_in(cmd);
3357 if (ret == -EAGAIN) 3357 if (ret == -EAGAIN || ret == -ENOMEM)
3358 goto queue_full; 3358 goto queue_full;
3359 break; 3359 break;
3360 } 3360 }
3361 /* Fall through for DMA_TO_DEVICE */ 3361 /* Fall through for DMA_TO_DEVICE */
3362 case DMA_NONE: 3362 case DMA_NONE:
3363 ret = cmd->se_tfo->queue_status(cmd); 3363 ret = cmd->se_tfo->queue_status(cmd);
3364 if (ret == -EAGAIN) 3364 if (ret == -EAGAIN || ret == -ENOMEM)
3365 goto queue_full; 3365 goto queue_full;
3366 break; 3366 break;
3367 default: 3367 default:
@@ -3890,7 +3890,10 @@ EXPORT_SYMBOL(transport_generic_process_write);
3890 3890
3891static void transport_write_pending_qf(struct se_cmd *cmd) 3891static void transport_write_pending_qf(struct se_cmd *cmd)
3892{ 3892{
3893 if (cmd->se_tfo->write_pending(cmd) == -EAGAIN) { 3893 int ret;
3894
3895 ret = cmd->se_tfo->write_pending(cmd);
3896 if (ret == -EAGAIN || ret == -ENOMEM) {
3894 pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", 3897 pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
3895 cmd); 3898 cmd);
3896 transport_handle_queue_full(cmd, cmd->se_dev); 3899 transport_handle_queue_full(cmd, cmd->se_dev);
@@ -3920,7 +3923,7 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
3920 * frontend know that WRITE buffers are ready. 3923 * frontend know that WRITE buffers are ready.
3921 */ 3924 */
3922 ret = cmd->se_tfo->write_pending(cmd); 3925 ret = cmd->se_tfo->write_pending(cmd);
3923 if (ret == -EAGAIN) 3926 if (ret == -EAGAIN || ret == -ENOMEM)
3924 goto queue_full; 3927 goto queue_full;
3925 else if (ret < 0) 3928 else if (ret < 0)
3926 return ret; 3929 return ret;
@@ -3931,7 +3934,7 @@ queue_full:
3931 pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd); 3934 pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
3932 cmd->t_state = TRANSPORT_COMPLETE_QF_WP; 3935 cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
3933 transport_handle_queue_full(cmd, cmd->se_dev); 3936 transport_handle_queue_full(cmd, cmd->se_dev);
3934 return ret; 3937 return 0;
3935} 3938}
3936 3939
3937/** 3940/**
@@ -3949,6 +3952,14 @@ void transport_release_cmd(struct se_cmd *cmd)
3949 core_tmr_release_req(cmd->se_tmr_req); 3952 core_tmr_release_req(cmd->se_tmr_req);
3950 if (cmd->t_task_cdb != cmd->__t_task_cdb) 3953 if (cmd->t_task_cdb != cmd->__t_task_cdb)
3951 kfree(cmd->t_task_cdb); 3954 kfree(cmd->t_task_cdb);
3955 /*
3956 * Check if target_wait_for_sess_cmds() is expecting to
3957 * release se_cmd directly here..
3958 */
3959 if (cmd->check_release != 0 && cmd->se_tfo->check_release_cmd)
3960 if (cmd->se_tfo->check_release_cmd(cmd) != 0)
3961 return;
3962
3952 cmd->se_tfo->release_cmd(cmd); 3963 cmd->se_tfo->release_cmd(cmd);
3953} 3964}
3954EXPORT_SYMBOL(transport_release_cmd); 3965EXPORT_SYMBOL(transport_release_cmd);
@@ -3976,6 +3987,114 @@ void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
3976} 3987}
3977EXPORT_SYMBOL(transport_generic_free_cmd); 3988EXPORT_SYMBOL(transport_generic_free_cmd);
3978 3989
3990/* target_get_sess_cmd - Add command to active ->sess_cmd_list
3991 * @se_sess: session to reference
3992 * @se_cmd: command descriptor to add
3993 */
3994void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
3995{
3996 unsigned long flags;
3997
3998 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
3999 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
4000 se_cmd->check_release = 1;
4001 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
4002}
4003EXPORT_SYMBOL(target_get_sess_cmd);
4004
4005/* target_put_sess_cmd - Check for active I/O shutdown or list delete
4006 * @se_sess: session to reference
4007 * @se_cmd: command descriptor to drop
4008 */
4009int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
4010{
4011 unsigned long flags;
4012
4013 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
4014 if (list_empty(&se_cmd->se_cmd_list)) {
4015 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
4016 WARN_ON(1);
4017 return 0;
4018 }
4019
4020 if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
4021 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
4022 complete(&se_cmd->cmd_wait_comp);
4023 return 1;
4024 }
4025 list_del(&se_cmd->se_cmd_list);
4026 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
4027
4028 return 0;
4029}
4030EXPORT_SYMBOL(target_put_sess_cmd);
4031
4032/* target_splice_sess_cmd_list - Split active cmds into sess_wait_list
4033 * @se_sess: session to split
4034 */
4035void target_splice_sess_cmd_list(struct se_session *se_sess)
4036{
4037 struct se_cmd *se_cmd;
4038 unsigned long flags;
4039
4040 WARN_ON(!list_empty(&se_sess->sess_wait_list));
4041 INIT_LIST_HEAD(&se_sess->sess_wait_list);
4042
4043 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
4044 se_sess->sess_tearing_down = 1;
4045
4046 list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
4047
4048 list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
4049 se_cmd->cmd_wait_set = 1;
4050
4051 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
4052}
4053EXPORT_SYMBOL(target_splice_sess_cmd_list);
4054
4055/* target_wait_for_sess_cmds - Wait for outstanding descriptors
4056 * @se_sess: session to wait for active I/O
4057 * @wait_for_tasks: Make extra transport_wait_for_tasks call
4058 */
4059void target_wait_for_sess_cmds(
4060 struct se_session *se_sess,
4061 int wait_for_tasks)
4062{
4063 struct se_cmd *se_cmd, *tmp_cmd;
4064 bool rc = false;
4065
4066 list_for_each_entry_safe(se_cmd, tmp_cmd,
4067 &se_sess->sess_wait_list, se_cmd_list) {
4068 list_del(&se_cmd->se_cmd_list);
4069
4070 pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
4071 " %d\n", se_cmd, se_cmd->t_state,
4072 se_cmd->se_tfo->get_cmd_state(se_cmd));
4073
4074 if (wait_for_tasks) {
4075 pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d,"
4076 " fabric state: %d\n", se_cmd, se_cmd->t_state,
4077 se_cmd->se_tfo->get_cmd_state(se_cmd));
4078
4079 rc = transport_wait_for_tasks(se_cmd);
4080
4081 pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d,"
4082 " fabric state: %d\n", se_cmd, se_cmd->t_state,
4083 se_cmd->se_tfo->get_cmd_state(se_cmd));
4084 }
4085
4086 if (!rc) {
4087 wait_for_completion(&se_cmd->cmd_wait_comp);
4088 pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
4089 " fabric state: %d\n", se_cmd, se_cmd->t_state,
4090 se_cmd->se_tfo->get_cmd_state(se_cmd));
4091 }
4092
4093 se_cmd->se_tfo->release_cmd(se_cmd);
4094 }
4095}
4096EXPORT_SYMBOL(target_wait_for_sess_cmds);
4097
3979/* transport_lun_wait_for_tasks(): 4098/* transport_lun_wait_for_tasks():
3980 * 4099 *
3981 * Called from ConfigFS context to stop the passed struct se_cmd to allow 4100 * Called from ConfigFS context to stop the passed struct se_cmd to allow
@@ -4152,14 +4271,14 @@ int transport_clear_lun_from_sessions(struct se_lun *lun)
4152 * Called from frontend fabric context to wait for storage engine 4271 * Called from frontend fabric context to wait for storage engine
4153 * to pause and/or release frontend generated struct se_cmd. 4272 * to pause and/or release frontend generated struct se_cmd.
4154 */ 4273 */
4155void transport_wait_for_tasks(struct se_cmd *cmd) 4274bool transport_wait_for_tasks(struct se_cmd *cmd)
4156{ 4275{
4157 unsigned long flags; 4276 unsigned long flags;
4158 4277
4159 spin_lock_irqsave(&cmd->t_state_lock, flags); 4278 spin_lock_irqsave(&cmd->t_state_lock, flags);
4160 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) { 4279 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) {
4161 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 4280 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4162 return; 4281 return false;
4163 } 4282 }
4164 /* 4283 /*
4165 * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE 4284 * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE
@@ -4167,7 +4286,7 @@ void transport_wait_for_tasks(struct se_cmd *cmd)
4167 */ 4286 */
4168 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && !cmd->se_tmr_req) { 4287 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && !cmd->se_tmr_req) {
4169 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 4288 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4170 return; 4289 return false;
4171 } 4290 }
4172 /* 4291 /*
4173 * If we are already stopped due to an external event (ie: LUN shutdown) 4292 * If we are already stopped due to an external event (ie: LUN shutdown)
@@ -4210,7 +4329,7 @@ void transport_wait_for_tasks(struct se_cmd *cmd)
4210 if (!atomic_read(&cmd->t_transport_active) || 4329 if (!atomic_read(&cmd->t_transport_active) ||
4211 atomic_read(&cmd->t_transport_aborted)) { 4330 atomic_read(&cmd->t_transport_aborted)) {
4212 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 4331 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4213 return; 4332 return false;
4214 } 4333 }
4215 4334
4216 atomic_set(&cmd->t_transport_stop, 1); 4335 atomic_set(&cmd->t_transport_stop, 1);
@@ -4235,6 +4354,8 @@ void transport_wait_for_tasks(struct se_cmd *cmd)
4235 cmd->se_tfo->get_task_tag(cmd)); 4354 cmd->se_tfo->get_task_tag(cmd));
4236 4355
4237 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 4356 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4357
4358 return true;
4238} 4359}
4239EXPORT_SYMBOL(transport_wait_for_tasks); 4360EXPORT_SYMBOL(transport_wait_for_tasks);
4240 4361
@@ -4583,9 +4704,7 @@ get_cmd:
4583 break; 4704 break;
4584 } 4705 }
4585 ret = transport_generic_new_cmd(cmd); 4706 ret = transport_generic_new_cmd(cmd);
4586 if (ret == -EAGAIN) 4707 if (ret < 0) {
4587 break;
4588 else if (ret < 0) {
4589 cmd->transport_error_status = ret; 4708 cmd->transport_error_status = ret;
4590 transport_generic_request_failure(cmd, 4709 transport_generic_request_failure(cmd,
4591 0, (cmd->data_direction != 4710 0, (cmd->data_direction !=
@@ -4595,9 +4714,6 @@ get_cmd:
4595 case TRANSPORT_PROCESS_WRITE: 4714 case TRANSPORT_PROCESS_WRITE:
4596 transport_generic_process_write(cmd); 4715 transport_generic_process_write(cmd);
4597 break; 4716 break;
4598 case TRANSPORT_FREE_CMD_INTR:
4599 transport_generic_free_cmd(cmd, 0);
4600 break;
4601 case TRANSPORT_PROCESS_TMR: 4717 case TRANSPORT_PROCESS_TMR:
4602 transport_generic_do_tmr(cmd); 4718 transport_generic_do_tmr(cmd);
4603 break; 4719 break;
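
The block of new exports above (target_get_sess_cmd, target_put_sess_cmd, target_splice_sess_cmd_list, target_wait_for_sess_cmds) gives fabric modules a common way to track every live se_cmd on the session's sess_cmd_list and drain the list when the session is torn down. A rough usage sketch; the example_fabric_* functions are hypothetical, only the target_* calls are the ones added in this diff.

        /* When a new command arrives, before handing it to the core. */
        static void example_fabric_submit(struct se_session *se_sess,
                                          struct se_cmd *se_cmd)
        {
                target_get_sess_cmd(se_sess, se_cmd);
                /* ... transport_handle_cdb_direct(se_cmd) or equivalent ... */
        }

        /* From the fabric release path, e.g. its ->check_release_cmd(). */
        static int example_fabric_check_release(struct se_cmd *se_cmd)
        {
                return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
        }

        /* On session shutdown: mark the session as tearing down, then wait
         * for every outstanding descriptor before freeing the session. */
        static void example_fabric_close_session(struct se_session *se_sess)
        {
                target_splice_sess_cmd_list(se_sess);
                target_wait_for_sess_cmds(se_sess, 0);
                /* ... deregister and free the session ... */
        }
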
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index 3749d8b4b423..e05c55100ec6 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -156,7 +156,7 @@ int ft_lport_notify(struct notifier_block *, unsigned long, void *);
156/* 156/*
157 * IO methods. 157 * IO methods.
158 */ 158 */
159void ft_check_stop_free(struct se_cmd *); 159int ft_check_stop_free(struct se_cmd *);
160void ft_release_cmd(struct se_cmd *); 160void ft_release_cmd(struct se_cmd *);
161int ft_queue_status(struct se_cmd *); 161int ft_queue_status(struct se_cmd *);
162int ft_queue_data_in(struct se_cmd *); 162int ft_queue_data_in(struct se_cmd *);
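
The ->check_stop_free() prototype change above means the callback now reports back whether the fabric released the command descriptor itself. tcm_fc (next file) frees the command via transport_generic_free_cmd() and therefore returns 1; a fabric that leaves release to the core would presumably just return 0, along these lines (the function name is hypothetical):

        /* Hypothetical fabric that lets the core keep ownership: returning 0
         * signals that nothing was freed in this callback. */
        static int example_fabric_check_stop_free(struct se_cmd *se_cmd)
        {
                return 0;
        }
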
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 6195026cc7b0..4fac37c4c615 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -112,9 +112,10 @@ void ft_release_cmd(struct se_cmd *se_cmd)
112 ft_free_cmd(cmd); 112 ft_free_cmd(cmd);
113} 113}
114 114
115void ft_check_stop_free(struct se_cmd *se_cmd) 115int ft_check_stop_free(struct se_cmd *se_cmd)
116{ 116{
117 transport_generic_free_cmd(se_cmd, 0); 117 transport_generic_free_cmd(se_cmd, 0);
118 return 1;
118} 119}
119 120
120/* 121/*