aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c2
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c4
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c156
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h25
-rw-r--r--drivers/scsi/ibmvscsi_tgt/libsrp.h5
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c38
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c4
-rw-r--r--drivers/target/iscsi/iscsi_target.c10
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c30
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c41
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.h2
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c27
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.h1
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c60
-rw-r--r--drivers/target/iscsi/iscsi_target_util.h2
-rw-r--r--drivers/target/loopback/tcm_loop.c77
-rw-r--r--drivers/target/loopback/tcm_loop.h6
-rw-r--r--drivers/target/target_core_alua.c8
-rw-r--r--drivers/target/target_core_configfs.c30
-rw-r--r--drivers/target/target_core_device.c145
-rw-r--r--drivers/target/target_core_fabric_configfs.c25
-rw-r--r--drivers/target/target_core_fabric_lib.c6
-rw-r--r--drivers/target/target_core_file.c7
-rw-r--r--drivers/target/target_core_iblock.c52
-rw-r--r--drivers/target/target_core_internal.h5
-rw-r--r--drivers/target/target_core_pr.c109
-rw-r--r--drivers/target/target_core_pscsi.c82
-rw-r--r--drivers/target/target_core_pscsi.h4
-rw-r--r--drivers/target/target_core_rd.c11
-rw-r--r--drivers/target/target_core_sbc.c65
-rw-r--r--drivers/target/target_core_spc.c42
-rw-r--r--drivers/target/target_core_tmr.c18
-rw-r--r--drivers/target/target_core_tpg.c1
-rw-r--r--drivers/target/target_core_transport.c222
-rw-r--r--drivers/target/target_core_user.c447
-rw-r--r--drivers/target/target_core_xcopy.c184
-rw-r--r--drivers/vhost/scsi.c11
-rw-r--r--drivers/xen/xen-scsiback.c36
-rw-r--r--include/scsi/scsi_proto.h1
-rw-r--r--include/target/iscsi/iscsi_target_core.h10
-rw-r--r--include/target/target_core_backend.h17
-rw-r--r--include/target/target_core_base.h11
-rw-r--r--include/target/target_core_fabric.h1
-rw-r--r--include/uapi/linux/target_core_user.h12
45 files changed, 1356 insertions, 698 deletions
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index fcbed35e95a8..0e662656ef42 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -1452,7 +1452,7 @@ static void
1452isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc) 1452isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1453{ 1453{
1454 struct isert_conn *isert_conn = wc->qp->qp_context; 1454 struct isert_conn *isert_conn = wc->qp->qp_context;
1455 struct ib_device *ib_dev = isert_conn->cm_id->device; 1455 struct ib_device *ib_dev = isert_conn->device->ib_device;
1456 1456
1457 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1457 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1458 isert_print_wc(wc, "login recv"); 1458 isert_print_wc(wc, "login recv");
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 1ced0731c140..402275be0931 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1157,8 +1157,8 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
1157 } 1157 }
1158 spin_unlock_irqrestore(&ioctx->spinlock, flags); 1158 spin_unlock_irqrestore(&ioctx->spinlock, flags);
1159 1159
1160 pr_debug("Aborting cmd with state %d and tag %lld\n", state, 1160 pr_debug("Aborting cmd with state %d -> %d and tag %lld\n", state,
1161 ioctx->cmd.tag); 1161 ioctx->state, ioctx->cmd.tag);
1162 1162
1163 switch (state) { 1163 switch (state) {
1164 case SRPT_STATE_NEW: 1164 case SRPT_STATE_NEW:
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 659ab483d716..1f75d0380516 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -155,6 +155,9 @@ static long ibmvscsis_unregister_command_q(struct scsi_info *vscsi)
155 qrc = h_free_crq(vscsi->dds.unit_id); 155 qrc = h_free_crq(vscsi->dds.unit_id);
156 switch (qrc) { 156 switch (qrc) {
157 case H_SUCCESS: 157 case H_SUCCESS:
158 spin_lock_bh(&vscsi->intr_lock);
159 vscsi->flags &= ~PREP_FOR_SUSPEND_FLAGS;
160 spin_unlock_bh(&vscsi->intr_lock);
158 break; 161 break;
159 162
160 case H_HARDWARE: 163 case H_HARDWARE:
@@ -422,6 +425,9 @@ static void ibmvscsis_disconnect(struct work_struct *work)
422 new_state = vscsi->new_state; 425 new_state = vscsi->new_state;
423 vscsi->new_state = 0; 426 vscsi->new_state = 0;
424 427
428 vscsi->flags |= DISCONNECT_SCHEDULED;
429 vscsi->flags &= ~SCHEDULE_DISCONNECT;
430
425 pr_debug("disconnect: flags 0x%x, state 0x%hx\n", vscsi->flags, 431 pr_debug("disconnect: flags 0x%x, state 0x%hx\n", vscsi->flags,
426 vscsi->state); 432 vscsi->state);
427 433
@@ -802,6 +808,13 @@ static long ibmvscsis_establish_new_q(struct scsi_info *vscsi)
802 long rc = ADAPT_SUCCESS; 808 long rc = ADAPT_SUCCESS;
803 uint format; 809 uint format;
804 810
811 rc = h_vioctl(vscsi->dds.unit_id, H_ENABLE_PREPARE_FOR_SUSPEND, 30000,
812 0, 0, 0, 0);
813 if (rc == H_SUCCESS)
814 vscsi->flags |= PREP_FOR_SUSPEND_ENABLED;
815 else if (rc != H_NOT_FOUND)
816 pr_err("Error from Enable Prepare for Suspend: %ld\n", rc);
817
805 vscsi->flags &= PRESERVE_FLAG_FIELDS; 818 vscsi->flags &= PRESERVE_FLAG_FIELDS;
806 vscsi->rsp_q_timer.timer_pops = 0; 819 vscsi->rsp_q_timer.timer_pops = 0;
807 vscsi->debit = 0; 820 vscsi->debit = 0;
@@ -951,6 +964,63 @@ static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi,
951} 964}
952 965
953/** 966/**
967 * ibmvscsis_ready_for_suspend() - Helper function to call VIOCTL
968 * @vscsi: Pointer to our adapter structure
969 * @idle: Indicates whether we were called from adapter_idle. This
970 * is important to know if we need to do a disconnect, since if
971 * we're called from adapter_idle, we're still processing the
972 * current disconnect, so we can't just call post_disconnect.
973 *
974 * This function is called when the adapter is idle when phyp has sent
975 * us a Prepare for Suspend Transport Event.
976 *
977 * EXECUTION ENVIRONMENT:
978 * Process or interrupt environment called with interrupt lock held
979 */
980static long ibmvscsis_ready_for_suspend(struct scsi_info *vscsi, bool idle)
981{
982 long rc = 0;
983 struct viosrp_crq *crq;
984
985 /* See if there is a Resume event in the queue */
986 crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
987
988 pr_debug("ready_suspend: flags 0x%x, state 0x%hx crq_valid:%x\n",
989 vscsi->flags, vscsi->state, (int)crq->valid);
990
991 if (!(vscsi->flags & PREP_FOR_SUSPEND_ABORTED) && !(crq->valid)) {
992 rc = h_vioctl(vscsi->dds.unit_id, H_READY_FOR_SUSPEND, 0, 0, 0,
993 0, 0);
994 if (rc) {
995 pr_err("Ready for Suspend Vioctl failed: %ld\n", rc);
996 rc = 0;
997 }
998 } else if (((vscsi->flags & PREP_FOR_SUSPEND_OVERWRITE) &&
999 (vscsi->flags & PREP_FOR_SUSPEND_ABORTED)) ||
1000 ((crq->valid) && ((crq->valid != VALID_TRANS_EVENT) ||
1001 (crq->format != RESUME_FROM_SUSP)))) {
1002 if (idle) {
1003 vscsi->state = ERR_DISCONNECT_RECONNECT;
1004 ibmvscsis_reset_queue(vscsi);
1005 rc = -1;
1006 } else if (vscsi->state == CONNECTED) {
1007 ibmvscsis_post_disconnect(vscsi,
1008 ERR_DISCONNECT_RECONNECT, 0);
1009 }
1010
1011 vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE;
1012
1013 if ((crq->valid) && ((crq->valid != VALID_TRANS_EVENT) ||
1014 (crq->format != RESUME_FROM_SUSP)))
1015 pr_err("Invalid element in CRQ after Prepare for Suspend");
1016 }
1017
1018 vscsi->flags &= ~(PREP_FOR_SUSPEND_PENDING | PREP_FOR_SUSPEND_ABORTED);
1019
1020 return rc;
1021}
1022
1023/**
954 * ibmvscsis_trans_event() - Handle a Transport Event 1024 * ibmvscsis_trans_event() - Handle a Transport Event
955 * @vscsi: Pointer to our adapter structure 1025 * @vscsi: Pointer to our adapter structure
956 * @crq: Pointer to CRQ entry containing the Transport Event 1026 * @crq: Pointer to CRQ entry containing the Transport Event
@@ -974,18 +1044,8 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
974 case PARTNER_FAILED: 1044 case PARTNER_FAILED:
975 case PARTNER_DEREGISTER: 1045 case PARTNER_DEREGISTER:
976 ibmvscsis_delete_client_info(vscsi, true); 1046 ibmvscsis_delete_client_info(vscsi, true);
977 break; 1047 if (crq->format == MIGRATED)
978 1048 vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE;
979 default:
980 rc = ERROR;
981 dev_err(&vscsi->dev, "trans_event: invalid format %d\n",
982 (uint)crq->format);
983 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT,
984 RESPONSE_Q_DOWN);
985 break;
986 }
987
988 if (rc == ADAPT_SUCCESS) {
989 switch (vscsi->state) { 1049 switch (vscsi->state) {
990 case NO_QUEUE: 1050 case NO_QUEUE:
991 case ERR_DISCONNECTED: 1051 case ERR_DISCONNECTED:
@@ -1034,6 +1094,60 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
1034 vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT); 1094 vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT);
1035 break; 1095 break;
1036 } 1096 }
1097 break;
1098
1099 case PREPARE_FOR_SUSPEND:
1100 pr_debug("Prep for Suspend, crq status = 0x%x\n",
1101 (int)crq->status);
1102 switch (vscsi->state) {
1103 case ERR_DISCONNECTED:
1104 case WAIT_CONNECTION:
1105 case CONNECTED:
1106 ibmvscsis_ready_for_suspend(vscsi, false);
1107 break;
1108 case SRP_PROCESSING:
1109 vscsi->resume_state = vscsi->state;
1110 vscsi->flags |= PREP_FOR_SUSPEND_PENDING;
1111 if (crq->status == CRQ_ENTRY_OVERWRITTEN)
1112 vscsi->flags |= PREP_FOR_SUSPEND_OVERWRITE;
1113 ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, 0);
1114 break;
1115 case NO_QUEUE:
1116 case UNDEFINED:
1117 case UNCONFIGURING:
1118 case WAIT_ENABLED:
1119 case ERR_DISCONNECT:
1120 case ERR_DISCONNECT_RECONNECT:
1121 case WAIT_IDLE:
1122 pr_err("Invalid state for Prepare for Suspend Trans Event: 0x%x\n",
1123 vscsi->state);
1124 break;
1125 }
1126 break;
1127
1128 case RESUME_FROM_SUSP:
1129 pr_debug("Resume from Suspend, crq status = 0x%x\n",
1130 (int)crq->status);
1131 if (vscsi->flags & PREP_FOR_SUSPEND_PENDING) {
1132 vscsi->flags |= PREP_FOR_SUSPEND_ABORTED;
1133 } else {
1134 if ((crq->status == CRQ_ENTRY_OVERWRITTEN) ||
1135 (vscsi->flags & PREP_FOR_SUSPEND_OVERWRITE)) {
1136 ibmvscsis_post_disconnect(vscsi,
1137 ERR_DISCONNECT_RECONNECT,
1138 0);
1139 vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE;
1140 }
1141 }
1142 break;
1143
1144 default:
1145 rc = ERROR;
1146 dev_err(&vscsi->dev, "trans_event: invalid format %d\n",
1147 (uint)crq->format);
1148 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT,
1149 RESPONSE_Q_DOWN);
1150 break;
1037 } 1151 }
1038 1152
1039 rc = vscsi->flags & SCHEDULE_DISCONNECT; 1153 rc = vscsi->flags & SCHEDULE_DISCONNECT;
@@ -1201,6 +1315,7 @@ static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi)
1201static void ibmvscsis_adapter_idle(struct scsi_info *vscsi) 1315static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
1202{ 1316{
1203 int free_qs = false; 1317 int free_qs = false;
1318 long rc = 0;
1204 1319
1205 pr_debug("adapter_idle: flags 0x%x, state 0x%hx\n", vscsi->flags, 1320 pr_debug("adapter_idle: flags 0x%x, state 0x%hx\n", vscsi->flags,
1206 vscsi->state); 1321 vscsi->state);
@@ -1240,7 +1355,14 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
1240 vscsi->rsp_q_timer.timer_pops = 0; 1355 vscsi->rsp_q_timer.timer_pops = 0;
1241 vscsi->debit = 0; 1356 vscsi->debit = 0;
1242 vscsi->credit = 0; 1357 vscsi->credit = 0;
1243 if (vscsi->flags & TRANS_EVENT) { 1358 if (vscsi->flags & PREP_FOR_SUSPEND_PENDING) {
1359 vscsi->state = vscsi->resume_state;
1360 vscsi->resume_state = 0;
1361 rc = ibmvscsis_ready_for_suspend(vscsi, true);
1362 vscsi->flags &= ~DISCONNECT_SCHEDULED;
1363 if (rc)
1364 break;
1365 } else if (vscsi->flags & TRANS_EVENT) {
1244 vscsi->state = WAIT_CONNECTION; 1366 vscsi->state = WAIT_CONNECTION;
1245 vscsi->flags &= PRESERVE_FLAG_FIELDS; 1367 vscsi->flags &= PRESERVE_FLAG_FIELDS;
1246 } else { 1368 } else {
@@ -3792,8 +3914,16 @@ static struct se_portal_group *ibmvscsis_make_tpg(struct se_wwn *wwn,
3792{ 3914{
3793 struct ibmvscsis_tport *tport = 3915 struct ibmvscsis_tport *tport =
3794 container_of(wwn, struct ibmvscsis_tport, tport_wwn); 3916 container_of(wwn, struct ibmvscsis_tport, tport_wwn);
3917 u16 tpgt;
3795 int rc; 3918 int rc;
3796 3919
3920 if (strstr(name, "tpgt_") != name)
3921 return ERR_PTR(-EINVAL);
3922 rc = kstrtou16(name + 5, 0, &tpgt);
3923 if (rc)
3924 return ERR_PTR(rc);
3925 tport->tport_tpgt = tpgt;
3926
3797 tport->releasing = false; 3927 tport->releasing = false;
3798 3928
3799 rc = core_tpg_register(&tport->tport_wwn, &tport->se_tpg, 3929 rc = core_tpg_register(&tport->tport_wwn, &tport->se_tpg,
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
index b4391a8de456..cc96c2731134 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
@@ -262,6 +262,14 @@ struct scsi_info {
262#define DISCONNECT_SCHEDULED 0x00800 262#define DISCONNECT_SCHEDULED 0x00800
263 /* remove function is sleeping */ 263 /* remove function is sleeping */
264#define CFG_SLEEPING 0x01000 264#define CFG_SLEEPING 0x01000
265 /* Register for Prepare for Suspend Transport Events */
266#define PREP_FOR_SUSPEND_ENABLED 0x02000
267 /* Prepare for Suspend event sent */
268#define PREP_FOR_SUSPEND_PENDING 0x04000
269 /* Resume from Suspend event sent */
270#define PREP_FOR_SUSPEND_ABORTED 0x08000
271 /* Prepare for Suspend event overwrote another CRQ entry */
272#define PREP_FOR_SUSPEND_OVERWRITE 0x10000
265 u32 flags; 273 u32 flags;
266 /* adapter lock */ 274 /* adapter lock */
267 spinlock_t intr_lock; 275 spinlock_t intr_lock;
@@ -272,6 +280,7 @@ struct scsi_info {
272 /* used in crq, to tag what iu the response is for */ 280 /* used in crq, to tag what iu the response is for */
273 u64 empty_iu_tag; 281 u64 empty_iu_tag;
274 uint new_state; 282 uint new_state;
283 uint resume_state;
275 /* control block for the response queue timer */ 284 /* control block for the response queue timer */
276 struct timer_cb rsp_q_timer; 285 struct timer_cb rsp_q_timer;
277 /* keep last client to enable proper accounting */ 286 /* keep last client to enable proper accounting */
@@ -324,8 +333,13 @@ struct scsi_info {
324#define TARGET_STOP(VSCSI) (long)(((VSCSI)->state & DONT_PROCESS_STATE) | \ 333#define TARGET_STOP(VSCSI) (long)(((VSCSI)->state & DONT_PROCESS_STATE) | \
325 ((VSCSI)->flags & BLOCK)) 334 ((VSCSI)->flags & BLOCK))
326 335
336#define PREP_FOR_SUSPEND_FLAGS (PREP_FOR_SUSPEND_ENABLED | \
337 PREP_FOR_SUSPEND_PENDING | \
338 PREP_FOR_SUSPEND_ABORTED | \
339 PREP_FOR_SUSPEND_OVERWRITE)
340
327/* flag bit that are not reset during disconnect */ 341/* flag bit that are not reset during disconnect */
328#define PRESERVE_FLAG_FIELDS 0 342#define PRESERVE_FLAG_FIELDS (PREP_FOR_SUSPEND_FLAGS)
329 343
330#define vio_iu(IUE) ((union viosrp_iu *)((IUE)->sbuf->buf)) 344#define vio_iu(IUE) ((union viosrp_iu *)((IUE)->sbuf->buf))
331 345
@@ -333,8 +347,15 @@ struct scsi_info {
333#define WRITE_CMD(cdb) (((cdb)[0] & 0x1F) == 0xA) 347#define WRITE_CMD(cdb) (((cdb)[0] & 0x1F) == 0xA)
334 348
335#ifndef H_GET_PARTNER_INFO 349#ifndef H_GET_PARTNER_INFO
336#define H_GET_PARTNER_INFO 0x0000000000000008LL 350#define H_GET_PARTNER_INFO 0x0000000000000008LL
351#endif
352#ifndef H_ENABLE_PREPARE_FOR_SUSPEND
353#define H_ENABLE_PREPARE_FOR_SUSPEND 0x000000000000001DLL
337#endif 354#endif
355#ifndef H_READY_FOR_SUSPEND
356#define H_READY_FOR_SUSPEND 0x000000000000001ELL
357#endif
358
338 359
339#define h_copy_rdma(l, sa, sb, da, db) \ 360#define h_copy_rdma(l, sa, sb, da, db) \
340 plpar_hcall_norets(H_COPY_RDMA, l, sa, sb, da, db) 361 plpar_hcall_norets(H_COPY_RDMA, l, sa, sb, da, db)
diff --git a/drivers/scsi/ibmvscsi_tgt/libsrp.h b/drivers/scsi/ibmvscsi_tgt/libsrp.h
index 4696f331453e..9fec55b36322 100644
--- a/drivers/scsi/ibmvscsi_tgt/libsrp.h
+++ b/drivers/scsi/ibmvscsi_tgt/libsrp.h
@@ -30,10 +30,13 @@ enum srp_trans_event {
30 UNUSED_FORMAT = 0, 30 UNUSED_FORMAT = 0,
31 PARTNER_FAILED = 1, 31 PARTNER_FAILED = 1,
32 PARTNER_DEREGISTER = 2, 32 PARTNER_DEREGISTER = 2,
33 MIGRATED = 6 33 MIGRATED = 6,
34 PREPARE_FOR_SUSPEND = 9,
35 RESUME_FROM_SUSP = 0xA
34}; 36};
35 37
36enum srp_status { 38enum srp_status {
39 CRQ_ENTRY_OVERWRITTEN = 0x20,
37 HEADER_DESCRIPTOR = 0xF1, 40 HEADER_DESCRIPTOR = 0xF1,
38 PING = 0xF5, 41 PING = 0xF5,
39 PING_RESPONSE = 0xF6 42 PING_RESPONSE = 0xF6
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 2a0173e5d10e..c2dc836dc484 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1874,36 +1874,13 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
1874 struct abts_recv_from_24xx *abts, struct fc_port *sess) 1874 struct abts_recv_from_24xx *abts, struct fc_port *sess)
1875{ 1875{
1876 struct qla_hw_data *ha = vha->hw; 1876 struct qla_hw_data *ha = vha->hw;
1877 struct se_session *se_sess = sess->se_sess;
1878 struct qla_tgt_mgmt_cmd *mcmd; 1877 struct qla_tgt_mgmt_cmd *mcmd;
1879 struct qla_tgt_cmd *cmd;
1880 struct se_cmd *se_cmd;
1881 int rc; 1878 int rc;
1882 bool found_lun = false;
1883 unsigned long flags;
1884
1885 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
1886 list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
1887 if (se_cmd->tag == abts->exchange_addr_to_abort) {
1888 found_lun = true;
1889 break;
1890 }
1891 }
1892 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
1893 1879
1894 /* cmd not in LIO lists, look in qla list */ 1880 if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
1895 if (!found_lun) { 1881 /* send TASK_ABORT response immediately */
1896 if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) { 1882 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_CMPL, false);
1897 /* send TASK_ABORT response immediately */ 1883 return 0;
1898 qlt_24xx_send_abts_resp(ha->base_qpair, abts,
1899 FCP_TMF_CMPL, false);
1900 return 0;
1901 } else {
1902 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf081,
1903 "unable to find cmd in driver or LIO for tag 0x%x\n",
1904 abts->exchange_addr_to_abort);
1905 return -ENOENT;
1906 }
1907 } 1884 }
1908 1885
1909 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f, 1886 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
@@ -1919,14 +1896,17 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
1919 } 1896 }
1920 memset(mcmd, 0, sizeof(*mcmd)); 1897 memset(mcmd, 0, sizeof(*mcmd));
1921 1898
1922 cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
1923 mcmd->sess = sess; 1899 mcmd->sess = sess;
1924 memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts)); 1900 memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
1925 mcmd->reset_count = ha->base_qpair->chip_reset; 1901 mcmd->reset_count = ha->base_qpair->chip_reset;
1926 mcmd->tmr_func = QLA_TGT_ABTS; 1902 mcmd->tmr_func = QLA_TGT_ABTS;
1927 mcmd->qpair = ha->base_qpair; 1903 mcmd->qpair = ha->base_qpair;
1928 1904
1929 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, cmd->unpacked_lun, mcmd->tmr_func, 1905 /*
1906 * LUN is looked up by target-core internally based on the passed
1907 * abts->exchange_addr_to_abort tag.
1908 */
1909 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, 0, mcmd->tmr_func,
1930 abts->exchange_addr_to_abort); 1910 abts->exchange_addr_to_abort);
1931 if (rc != 0) { 1911 if (rc != 0) {
1932 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052, 1912 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index c4b414833b86..b20da0d27ad7 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -600,11 +600,13 @@ static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, u64 lun,
600 struct fc_port *sess = mcmd->sess; 600 struct fc_port *sess = mcmd->sess;
601 struct se_cmd *se_cmd = &mcmd->se_cmd; 601 struct se_cmd *se_cmd = &mcmd->se_cmd;
602 int transl_tmr_func = 0; 602 int transl_tmr_func = 0;
603 int flags = TARGET_SCF_ACK_KREF;
603 604
604 switch (tmr_func) { 605 switch (tmr_func) {
605 case QLA_TGT_ABTS: 606 case QLA_TGT_ABTS:
606 pr_debug("%ld: ABTS received\n", sess->vha->host_no); 607 pr_debug("%ld: ABTS received\n", sess->vha->host_no);
607 transl_tmr_func = TMR_ABORT_TASK; 608 transl_tmr_func = TMR_ABORT_TASK;
609 flags |= TARGET_SCF_LOOKUP_LUN_FROM_TAG;
608 break; 610 break;
609 case QLA_TGT_2G_ABORT_TASK: 611 case QLA_TGT_2G_ABORT_TASK:
610 pr_debug("%ld: 2G Abort Task received\n", sess->vha->host_no); 612 pr_debug("%ld: 2G Abort Task received\n", sess->vha->host_no);
@@ -637,7 +639,7 @@ static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, u64 lun,
637 } 639 }
638 640
639 return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd, 641 return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd,
640 transl_tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF); 642 transl_tmr_func, GFP_ATOMIC, tag, flags);
641} 643}
642 644
643static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd) 645static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 3fdca2cdd8da..74e4975dd1b1 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -488,15 +488,13 @@ EXPORT_SYMBOL(iscsit_queue_rsp);
488 488
489void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 489void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
490{ 490{
491 bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD);
492
493 spin_lock_bh(&conn->cmd_lock); 491 spin_lock_bh(&conn->cmd_lock);
494 if (!list_empty(&cmd->i_conn_node) && 492 if (!list_empty(&cmd->i_conn_node) &&
495 !(cmd->se_cmd.transport_state & CMD_T_FABRIC_STOP)) 493 !(cmd->se_cmd.transport_state & CMD_T_FABRIC_STOP))
496 list_del_init(&cmd->i_conn_node); 494 list_del_init(&cmd->i_conn_node);
497 spin_unlock_bh(&conn->cmd_lock); 495 spin_unlock_bh(&conn->cmd_lock);
498 496
499 __iscsit_free_cmd(cmd, scsi_cmd, true); 497 __iscsit_free_cmd(cmd, true);
500} 498}
501EXPORT_SYMBOL(iscsit_aborted_task); 499EXPORT_SYMBOL(iscsit_aborted_task);
502 500
@@ -1251,12 +1249,8 @@ int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
1251 * execution. These exceptions are processed in CmdSN order using 1249 * execution. These exceptions are processed in CmdSN order using
1252 * iscsit_check_received_cmdsn() in iscsit_get_immediate_data() below. 1250 * iscsit_check_received_cmdsn() in iscsit_get_immediate_data() below.
1253 */ 1251 */
1254 if (cmd->sense_reason) { 1252 if (cmd->sense_reason)
1255 if (cmd->reject_reason)
1256 return 0;
1257
1258 return 1; 1253 return 1;
1259 }
1260 /* 1254 /*
1261 * Call directly into transport_generic_new_cmd() to perform 1255 * Call directly into transport_generic_new_cmd() to perform
1262 * the backend memory allocation. 1256 * the backend memory allocation.
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 535a8e06a401..0dd4c45f7575 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -781,6 +781,7 @@ DEF_TPG_ATTRIB(default_erl);
781DEF_TPG_ATTRIB(t10_pi); 781DEF_TPG_ATTRIB(t10_pi);
782DEF_TPG_ATTRIB(fabric_prot_type); 782DEF_TPG_ATTRIB(fabric_prot_type);
783DEF_TPG_ATTRIB(tpg_enabled_sendtargets); 783DEF_TPG_ATTRIB(tpg_enabled_sendtargets);
784DEF_TPG_ATTRIB(login_keys_workaround);
784 785
785static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = { 786static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
786 &iscsi_tpg_attrib_attr_authentication, 787 &iscsi_tpg_attrib_attr_authentication,
@@ -796,6 +797,7 @@ static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
796 &iscsi_tpg_attrib_attr_t10_pi, 797 &iscsi_tpg_attrib_attr_t10_pi,
797 &iscsi_tpg_attrib_attr_fabric_prot_type, 798 &iscsi_tpg_attrib_attr_fabric_prot_type,
798 &iscsi_tpg_attrib_attr_tpg_enabled_sendtargets, 799 &iscsi_tpg_attrib_attr_tpg_enabled_sendtargets,
800 &iscsi_tpg_attrib_attr_login_keys_workaround,
799 NULL, 801 NULL,
800}; 802};
801 803
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 6f88b31242b0..7a6751fecd32 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -655,28 +655,6 @@ err:
655 iscsit_deaccess_np(np, tpg, tpg_np); 655 iscsit_deaccess_np(np, tpg, tpg_np);
656} 656}
657 657
658static void iscsi_target_do_cleanup(struct work_struct *work)
659{
660 struct iscsi_conn *conn = container_of(work,
661 struct iscsi_conn, login_cleanup_work.work);
662 struct sock *sk = conn->sock->sk;
663 struct iscsi_login *login = conn->login;
664 struct iscsi_np *np = login->np;
665 struct iscsi_portal_group *tpg = conn->tpg;
666 struct iscsi_tpg_np *tpg_np = conn->tpg_np;
667
668 pr_debug("Entering iscsi_target_do_cleanup\n");
669
670 cancel_delayed_work_sync(&conn->login_work);
671 conn->orig_state_change(sk);
672
673 iscsi_target_restore_sock_callbacks(conn);
674 iscsi_target_login_drop(conn, login);
675 iscsit_deaccess_np(np, tpg, tpg_np);
676
677 pr_debug("iscsi_target_do_cleanup done()\n");
678}
679
680static void iscsi_target_sk_state_change(struct sock *sk) 658static void iscsi_target_sk_state_change(struct sock *sk)
681{ 659{
682 struct iscsi_conn *conn; 660 struct iscsi_conn *conn;
@@ -886,7 +864,8 @@ static int iscsi_target_handle_csg_zero(
886 SENDER_TARGET, 864 SENDER_TARGET,
887 login->rsp_buf, 865 login->rsp_buf,
888 &login->rsp_length, 866 &login->rsp_length,
889 conn->param_list); 867 conn->param_list,
868 conn->tpg->tpg_attrib.login_keys_workaround);
890 if (ret < 0) 869 if (ret < 0)
891 return -1; 870 return -1;
892 871
@@ -956,7 +935,8 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log
956 SENDER_TARGET, 935 SENDER_TARGET,
957 login->rsp_buf, 936 login->rsp_buf,
958 &login->rsp_length, 937 &login->rsp_length,
959 conn->param_list); 938 conn->param_list,
939 conn->tpg->tpg_attrib.login_keys_workaround);
960 if (ret < 0) { 940 if (ret < 0) {
961 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, 941 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
962 ISCSI_LOGIN_STATUS_INIT_ERR); 942 ISCSI_LOGIN_STATUS_INIT_ERR);
@@ -1082,7 +1062,6 @@ int iscsi_target_locate_portal(
1082 int sessiontype = 0, ret = 0, tag_num, tag_size; 1062 int sessiontype = 0, ret = 0, tag_num, tag_size;
1083 1063
1084 INIT_DELAYED_WORK(&conn->login_work, iscsi_target_do_login_rx); 1064 INIT_DELAYED_WORK(&conn->login_work, iscsi_target_do_login_rx);
1085 INIT_DELAYED_WORK(&conn->login_cleanup_work, iscsi_target_do_cleanup);
1086 iscsi_target_set_sock_callbacks(conn); 1065 iscsi_target_set_sock_callbacks(conn);
1087 1066
1088 login->np = np; 1067 login->np = np;
@@ -1331,7 +1310,6 @@ int iscsi_target_start_negotiation(
1331 1310
1332 if (ret < 0) { 1311 if (ret < 0) {
1333 cancel_delayed_work_sync(&conn->login_work); 1312 cancel_delayed_work_sync(&conn->login_work);
1334 cancel_delayed_work_sync(&conn->login_cleanup_work);
1335 iscsi_target_restore_sock_callbacks(conn); 1313 iscsi_target_restore_sock_callbacks(conn);
1336 iscsi_remove_failed_auth_entry(conn); 1314 iscsi_remove_failed_auth_entry(conn);
1337 } 1315 }
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index fce627628200..caab1045742d 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -765,7 +765,8 @@ static int iscsi_check_for_auth_key(char *key)
765 return 0; 765 return 0;
766} 766}
767 767
768static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param) 768static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param,
769 bool keys_workaround)
769{ 770{
770 if (IS_TYPE_BOOL_AND(param)) { 771 if (IS_TYPE_BOOL_AND(param)) {
771 if (!strcmp(param->value, NO)) 772 if (!strcmp(param->value, NO))
@@ -773,19 +774,31 @@ static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param)
773 } else if (IS_TYPE_BOOL_OR(param)) { 774 } else if (IS_TYPE_BOOL_OR(param)) {
774 if (!strcmp(param->value, YES)) 775 if (!strcmp(param->value, YES))
775 SET_PSTATE_REPLY_OPTIONAL(param); 776 SET_PSTATE_REPLY_OPTIONAL(param);
776 /* 777
777 * Required for gPXE iSCSI boot client 778 if (keys_workaround) {
778 */ 779 /*
779 if (!strcmp(param->name, IMMEDIATEDATA)) 780 * Required for gPXE iSCSI boot client
780 SET_PSTATE_REPLY_OPTIONAL(param); 781 */
782 if (!strcmp(param->name, IMMEDIATEDATA))
783 SET_PSTATE_REPLY_OPTIONAL(param);
784 }
781 } else if (IS_TYPE_NUMBER(param)) { 785 } else if (IS_TYPE_NUMBER(param)) {
782 if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) 786 if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
783 SET_PSTATE_REPLY_OPTIONAL(param); 787 SET_PSTATE_REPLY_OPTIONAL(param);
784 /* 788
785 * Required for gPXE iSCSI boot client 789 if (keys_workaround) {
786 */ 790 /*
787 if (!strcmp(param->name, MAXCONNECTIONS)) 791 * Required for Mellanox Flexboot PXE boot ROM
788 SET_PSTATE_REPLY_OPTIONAL(param); 792 */
793 if (!strcmp(param->name, FIRSTBURSTLENGTH))
794 SET_PSTATE_REPLY_OPTIONAL(param);
795
796 /*
797 * Required for gPXE iSCSI boot client
798 */
799 if (!strcmp(param->name, MAXCONNECTIONS))
800 SET_PSTATE_REPLY_OPTIONAL(param);
801 }
789 } else if (IS_PHASE_DECLARATIVE(param)) 802 } else if (IS_PHASE_DECLARATIVE(param))
790 SET_PSTATE_REPLY_OPTIONAL(param); 803 SET_PSTATE_REPLY_OPTIONAL(param);
791} 804}
@@ -1422,7 +1435,8 @@ int iscsi_encode_text_output(
1422 u8 sender, 1435 u8 sender,
1423 char *textbuf, 1436 char *textbuf,
1424 u32 *length, 1437 u32 *length,
1425 struct iscsi_param_list *param_list) 1438 struct iscsi_param_list *param_list,
1439 bool keys_workaround)
1426{ 1440{
1427 char *output_buf = NULL; 1441 char *output_buf = NULL;
1428 struct iscsi_extra_response *er; 1442 struct iscsi_extra_response *er;
@@ -1458,7 +1472,8 @@ int iscsi_encode_text_output(
1458 *length += 1; 1472 *length += 1;
1459 output_buf = textbuf + *length; 1473 output_buf = textbuf + *length;
1460 SET_PSTATE_PROPOSER(param); 1474 SET_PSTATE_PROPOSER(param);
1461 iscsi_check_proposer_for_optional_reply(param); 1475 iscsi_check_proposer_for_optional_reply(param,
1476 keys_workaround);
1462 pr_debug("Sending key: %s=%s\n", 1477 pr_debug("Sending key: %s=%s\n",
1463 param->name, param->value); 1478 param->name, param->value);
1464 } 1479 }
diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h
index 9962ccf0ccd7..c47b73f57528 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.h
+++ b/drivers/target/iscsi/iscsi_target_parameters.h
@@ -46,7 +46,7 @@ extern int iscsi_extract_key_value(char *, char **, char **);
46extern int iscsi_update_param_value(struct iscsi_param *, char *); 46extern int iscsi_update_param_value(struct iscsi_param *, char *);
47extern int iscsi_decode_text_input(u8, u8, char *, u32, struct iscsi_conn *); 47extern int iscsi_decode_text_input(u8, u8, char *, u32, struct iscsi_conn *);
48extern int iscsi_encode_text_output(u8, u8, char *, u32 *, 48extern int iscsi_encode_text_output(u8, u8, char *, u32 *,
49 struct iscsi_param_list *); 49 struct iscsi_param_list *, bool);
50extern int iscsi_check_negotiated_keys(struct iscsi_param_list *); 50extern int iscsi_check_negotiated_keys(struct iscsi_param_list *);
51extern void iscsi_set_connection_parameters(struct iscsi_conn_ops *, 51extern void iscsi_set_connection_parameters(struct iscsi_conn_ops *,
52 struct iscsi_param_list *); 52 struct iscsi_param_list *);
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 2e7e08dbda48..594d07a1e995 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -227,6 +227,7 @@ static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg)
227 a->t10_pi = TA_DEFAULT_T10_PI; 227 a->t10_pi = TA_DEFAULT_T10_PI;
228 a->fabric_prot_type = TA_DEFAULT_FABRIC_PROT_TYPE; 228 a->fabric_prot_type = TA_DEFAULT_FABRIC_PROT_TYPE;
229 a->tpg_enabled_sendtargets = TA_DEFAULT_TPG_ENABLED_SENDTARGETS; 229 a->tpg_enabled_sendtargets = TA_DEFAULT_TPG_ENABLED_SENDTARGETS;
230 a->login_keys_workaround = TA_DEFAULT_LOGIN_KEYS_WORKAROUND;
230} 231}
231 232
232int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg) 233int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg)
@@ -311,11 +312,9 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
311 struct iscsi_tiqn *tiqn = tpg->tpg_tiqn; 312 struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
312 int ret; 313 int ret;
313 314
314 spin_lock(&tpg->tpg_state_lock);
315 if (tpg->tpg_state == TPG_STATE_ACTIVE) { 315 if (tpg->tpg_state == TPG_STATE_ACTIVE) {
316 pr_err("iSCSI target portal group: %hu is already" 316 pr_err("iSCSI target portal group: %hu is already"
317 " active, ignoring request.\n", tpg->tpgt); 317 " active, ignoring request.\n", tpg->tpgt);
318 spin_unlock(&tpg->tpg_state_lock);
319 return -EINVAL; 318 return -EINVAL;
320 } 319 }
321 /* 320 /*
@@ -324,10 +323,8 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
324 * is enforced (as per default), and remove the NONE option. 323 * is enforced (as per default), and remove the NONE option.
325 */ 324 */
326 param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list); 325 param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
327 if (!param) { 326 if (!param)
328 spin_unlock(&tpg->tpg_state_lock);
329 return -EINVAL; 327 return -EINVAL;
330 }
331 328
332 if (tpg->tpg_attrib.authentication) { 329 if (tpg->tpg_attrib.authentication) {
333 if (!strcmp(param->value, NONE)) { 330 if (!strcmp(param->value, NONE)) {
@@ -341,6 +338,7 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
341 goto err; 338 goto err;
342 } 339 }
343 340
341 spin_lock(&tpg->tpg_state_lock);
344 tpg->tpg_state = TPG_STATE_ACTIVE; 342 tpg->tpg_state = TPG_STATE_ACTIVE;
345 spin_unlock(&tpg->tpg_state_lock); 343 spin_unlock(&tpg->tpg_state_lock);
346 344
@@ -353,7 +351,6 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
353 return 0; 351 return 0;
354 352
355err: 353err:
356 spin_unlock(&tpg->tpg_state_lock);
357 return ret; 354 return ret;
358} 355}
359 356
@@ -899,3 +896,21 @@ int iscsit_ta_tpg_enabled_sendtargets(
899 896
900 return 0; 897 return 0;
901} 898}
899
900int iscsit_ta_login_keys_workaround(
901 struct iscsi_portal_group *tpg,
902 u32 flag)
903{
904 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
905
906 if ((flag != 0) && (flag != 1)) {
907 pr_err("Illegal value %d\n", flag);
908 return -EINVAL;
909 }
910
911 a->login_keys_workaround = flag;
912 pr_debug("iSCSI_TPG[%hu] - TPG enabled bit for login keys workaround: %s ",
913 tpg->tpgt, (a->login_keys_workaround) ? "ON" : "OFF");
914
915 return 0;
916}
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
index ceba29851167..59fd3cabe89d 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -48,5 +48,6 @@ extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32);
48extern int iscsit_ta_t10_pi(struct iscsi_portal_group *, u32); 48extern int iscsit_ta_t10_pi(struct iscsi_portal_group *, u32);
49extern int iscsit_ta_fabric_prot_type(struct iscsi_portal_group *, u32); 49extern int iscsit_ta_fabric_prot_type(struct iscsi_portal_group *, u32);
50extern int iscsit_ta_tpg_enabled_sendtargets(struct iscsi_portal_group *, u32); 50extern int iscsit_ta_tpg_enabled_sendtargets(struct iscsi_portal_group *, u32);
51extern int iscsit_ta_login_keys_workaround(struct iscsi_portal_group *, u32);
51 52
52#endif /* ISCSI_TARGET_TPG_H */ 53#endif /* ISCSI_TARGET_TPG_H */
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 7d3e2fcc26a0..1e36f83b5961 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -167,6 +167,7 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state)
167 167
168 cmd->se_cmd.map_tag = tag; 168 cmd->se_cmd.map_tag = tag;
169 cmd->conn = conn; 169 cmd->conn = conn;
170 cmd->data_direction = DMA_NONE;
170 INIT_LIST_HEAD(&cmd->i_conn_node); 171 INIT_LIST_HEAD(&cmd->i_conn_node);
171 INIT_LIST_HEAD(&cmd->datain_list); 172 INIT_LIST_HEAD(&cmd->datain_list);
172 INIT_LIST_HEAD(&cmd->cmd_r2t_list); 173 INIT_LIST_HEAD(&cmd->cmd_r2t_list);
@@ -711,19 +712,16 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd)
711} 712}
712EXPORT_SYMBOL(iscsit_release_cmd); 713EXPORT_SYMBOL(iscsit_release_cmd);
713 714
714void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd, 715void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool check_queues)
715 bool check_queues)
716{ 716{
717 struct iscsi_conn *conn = cmd->conn; 717 struct iscsi_conn *conn = cmd->conn;
718 718
719 if (scsi_cmd) { 719 if (cmd->data_direction == DMA_TO_DEVICE) {
720 if (cmd->data_direction == DMA_TO_DEVICE) { 720 iscsit_stop_dataout_timer(cmd);
721 iscsit_stop_dataout_timer(cmd); 721 iscsit_free_r2ts_from_list(cmd);
722 iscsit_free_r2ts_from_list(cmd);
723 }
724 if (cmd->data_direction == DMA_FROM_DEVICE)
725 iscsit_free_all_datain_reqs(cmd);
726 } 722 }
723 if (cmd->data_direction == DMA_FROM_DEVICE)
724 iscsit_free_all_datain_reqs(cmd);
727 725
728 if (conn && check_queues) { 726 if (conn && check_queues) {
729 iscsit_remove_cmd_from_immediate_queue(cmd, conn); 727 iscsit_remove_cmd_from_immediate_queue(cmd, conn);
@@ -736,50 +734,18 @@ void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd,
736 734
737void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown) 735void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
738{ 736{
739 struct se_cmd *se_cmd = NULL; 737 struct se_cmd *se_cmd = cmd->se_cmd.se_tfo ? &cmd->se_cmd : NULL;
740 int rc; 738 int rc;
741 bool op_scsi = false; 739
742 /* 740 __iscsit_free_cmd(cmd, shutdown);
743 * Determine if a struct se_cmd is associated with 741 if (se_cmd) {
744 * this struct iscsi_cmd.
745 */
746 switch (cmd->iscsi_opcode) {
747 case ISCSI_OP_SCSI_CMD:
748 op_scsi = true;
749 /*
750 * Fallthrough
751 */
752 case ISCSI_OP_SCSI_TMFUNC:
753 se_cmd = &cmd->se_cmd;
754 __iscsit_free_cmd(cmd, op_scsi, shutdown);
755 rc = transport_generic_free_cmd(se_cmd, shutdown); 742 rc = transport_generic_free_cmd(se_cmd, shutdown);
756 if (!rc && shutdown && se_cmd->se_sess) { 743 if (!rc && shutdown && se_cmd->se_sess) {
757 __iscsit_free_cmd(cmd, op_scsi, shutdown); 744 __iscsit_free_cmd(cmd, shutdown);
758 target_put_sess_cmd(se_cmd); 745 target_put_sess_cmd(se_cmd);
759 } 746 }
760 break; 747 } else {
761 case ISCSI_OP_REJECT:
762 /*
763 * Handle special case for REJECT when iscsi_add_reject*() has
764 * overwritten the original iscsi_opcode assignment, and the
765 * associated cmd->se_cmd needs to be released.
766 */
767 if (cmd->se_cmd.se_tfo != NULL) {
768 se_cmd = &cmd->se_cmd;
769 __iscsit_free_cmd(cmd, true, shutdown);
770
771 rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
772 if (!rc && shutdown && se_cmd->se_sess) {
773 __iscsit_free_cmd(cmd, true, shutdown);
774 target_put_sess_cmd(se_cmd);
775 }
776 break;
777 }
778 /* Fall-through */
779 default:
780 __iscsit_free_cmd(cmd, false, shutdown);
781 iscsit_release_cmd(cmd); 748 iscsit_release_cmd(cmd);
782 break;
783 } 749 }
784} 750}
785EXPORT_SYMBOL(iscsit_free_cmd); 751EXPORT_SYMBOL(iscsit_free_cmd);
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index 9e4197af8708..425160565d0c 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -37,7 +37,7 @@ extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_co
37extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *); 37extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *);
38extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *); 38extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *);
39extern void iscsit_release_cmd(struct iscsi_cmd *); 39extern void iscsit_release_cmd(struct iscsi_cmd *);
40extern void __iscsit_free_cmd(struct iscsi_cmd *, bool, bool); 40extern void __iscsit_free_cmd(struct iscsi_cmd *, bool);
41extern void iscsit_free_cmd(struct iscsi_cmd *, bool); 41extern void iscsit_free_cmd(struct iscsi_cmd *, bool);
42extern int iscsit_check_session_usage_count(struct iscsi_session *); 42extern int iscsit_check_session_usage_count(struct iscsi_session *);
43extern void iscsit_dec_session_usage_count(struct iscsi_session *); 43extern void iscsit_dec_session_usage_count(struct iscsi_session *);
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 5091b31b3e56..b6a913e38b30 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -51,19 +51,7 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd);
51 */ 51 */
52static int tcm_loop_check_stop_free(struct se_cmd *se_cmd) 52static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
53{ 53{
54 /* 54 return transport_generic_free_cmd(se_cmd, 0);
55 * Do not release struct se_cmd's containing a valid TMR
56 * pointer. These will be released directly in tcm_loop_device_reset()
57 * with transport_generic_free_cmd().
58 */
59 if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
60 return 0;
61 /*
62 * Release the struct se_cmd, which will make a callback to release
63 * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd()
64 */
65 transport_generic_free_cmd(se_cmd, 0);
66 return 1;
67} 55}
68 56
69static void tcm_loop_release_cmd(struct se_cmd *se_cmd) 57static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
@@ -218,10 +206,8 @@ static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
218{ 206{
219 struct se_cmd *se_cmd = NULL; 207 struct se_cmd *se_cmd = NULL;
220 struct se_session *se_sess; 208 struct se_session *se_sess;
221 struct se_portal_group *se_tpg;
222 struct tcm_loop_nexus *tl_nexus; 209 struct tcm_loop_nexus *tl_nexus;
223 struct tcm_loop_cmd *tl_cmd = NULL; 210 struct tcm_loop_cmd *tl_cmd = NULL;
224 struct tcm_loop_tmr *tl_tmr = NULL;
225 int ret = TMR_FUNCTION_FAILED, rc; 211 int ret = TMR_FUNCTION_FAILED, rc;
226 212
227 /* 213 /*
@@ -240,55 +226,29 @@ static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
240 return ret; 226 return ret;
241 } 227 }
242 228
243 tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL); 229 init_completion(&tl_cmd->tmr_done);
244 if (!tl_tmr) {
245 pr_err("Unable to allocate memory for tl_tmr\n");
246 goto release;
247 }
248 init_waitqueue_head(&tl_tmr->tl_tmr_wait);
249 230
250 se_cmd = &tl_cmd->tl_se_cmd; 231 se_cmd = &tl_cmd->tl_se_cmd;
251 se_tpg = &tl_tpg->tl_se_tpg;
252 se_sess = tl_tpg->tl_nexus->se_sess; 232 se_sess = tl_tpg->tl_nexus->se_sess;
253 /*
254 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
255 */
256 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
257 DMA_NONE, TCM_SIMPLE_TAG,
258 &tl_cmd->tl_sense_buf[0]);
259 233
260 rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL); 234 rc = target_submit_tmr(se_cmd, se_sess, tl_cmd->tl_sense_buf, lun,
235 NULL, tmr, GFP_KERNEL, task,
236 TARGET_SCF_ACK_KREF);
261 if (rc < 0) 237 if (rc < 0)
262 goto release; 238 goto release;
239 wait_for_completion(&tl_cmd->tmr_done);
240 ret = se_cmd->se_tmr_req->response;
241 target_put_sess_cmd(se_cmd);
263 242
264 if (tmr == TMR_ABORT_TASK) 243out:
265 se_cmd->se_tmr_req->ref_task_tag = task; 244 return ret;
266 245
267 /*
268 * Locate the underlying TCM struct se_lun
269 */
270 if (transport_lookup_tmr_lun(se_cmd, lun) < 0) {
271 ret = TMR_LUN_DOES_NOT_EXIST;
272 goto release;
273 }
274 /*
275 * Queue the TMR to TCM Core and sleep waiting for
276 * tcm_loop_queue_tm_rsp() to wake us up.
277 */
278 transport_generic_handle_tmr(se_cmd);
279 wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
280 /*
281 * The TMR LUN_RESET has completed, check the response status and
282 * then release allocations.
283 */
284 ret = se_cmd->se_tmr_req->response;
285release: 246release:
286 if (se_cmd) 247 if (se_cmd)
287 transport_generic_free_cmd(se_cmd, 1); 248 transport_generic_free_cmd(se_cmd, 0);
288 else 249 else
289 kmem_cache_free(tcm_loop_cmd_cache, tl_cmd); 250 kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
290 kfree(tl_tmr); 251 goto out;
291 return ret;
292} 252}
293 253
294static int tcm_loop_abort_task(struct scsi_cmnd *sc) 254static int tcm_loop_abort_task(struct scsi_cmnd *sc)
@@ -669,14 +629,11 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd)
669 629
670static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd) 630static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
671{ 631{
672 struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; 632 struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
673 struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr; 633 struct tcm_loop_cmd, tl_se_cmd);
674 /* 634
675 * The SCSI EH thread will be sleeping on se_tmr->tl_tmr_wait, go ahead 635 /* Wake up tcm_loop_issue_tmr(). */
676 * and wake up the wait_queue_head_t in tcm_loop_device_reset() 636 complete(&tl_cmd->tmr_done);
677 */
678 atomic_set(&tl_tmr->tmr_complete, 1);
679 wake_up(&tl_tmr->tl_tmr_wait);
680} 637}
681 638
682static void tcm_loop_aborted_task(struct se_cmd *se_cmd) 639static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
index a8a230b4e6b5..3acc43c05117 100644
--- a/drivers/target/loopback/tcm_loop.h
+++ b/drivers/target/loopback/tcm_loop.h
@@ -16,15 +16,11 @@ struct tcm_loop_cmd {
16 /* The TCM I/O descriptor that is accessed via container_of() */ 16 /* The TCM I/O descriptor that is accessed via container_of() */
17 struct se_cmd tl_se_cmd; 17 struct se_cmd tl_se_cmd;
18 struct work_struct work; 18 struct work_struct work;
19 struct completion tmr_done;
19 /* Sense buffer that will be mapped into outgoing status */ 20 /* Sense buffer that will be mapped into outgoing status */
20 unsigned char tl_sense_buf[TRANSPORT_SENSE_BUFFER]; 21 unsigned char tl_sense_buf[TRANSPORT_SENSE_BUFFER];
21}; 22};
22 23
23struct tcm_loop_tmr {
24 atomic_t tmr_complete;
25 wait_queue_head_t tl_tmr_wait;
26};
27
28struct tcm_loop_nexus { 24struct tcm_loop_nexus {
29 /* 25 /*
30 * Pointer to TCM session for I_T Nexus 26 * Pointer to TCM session for I_T Nexus
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index fc4a9c303d55..a91b7c25ffd4 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -205,8 +205,8 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
205 /* 205 /*
206 * TARGET PORT GROUP 206 * TARGET PORT GROUP
207 */ 207 */
208 buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff); 208 put_unaligned_be16(tg_pt_gp->tg_pt_gp_id, &buf[off]);
209 buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff); 209 off += 2;
210 210
211 off++; /* Skip over Reserved */ 211 off++; /* Skip over Reserved */
212 /* 212 /*
@@ -235,8 +235,8 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
235 /* 235 /*
236 * Set RELATIVE TARGET PORT IDENTIFIER 236 * Set RELATIVE TARGET PORT IDENTIFIER
237 */ 237 */
238 buf[off++] = ((lun->lun_rtpi >> 8) & 0xff); 238 put_unaligned_be16(lun->lun_rtpi, &buf[off]);
239 buf[off++] = (lun->lun_rtpi & 0xff); 239 off += 2;
240 rd_len += 4; 240 rd_len += 4;
241 } 241 }
242 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 242 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 0326607e5ab8..7e87d952bb7a 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -1085,6 +1085,24 @@ static ssize_t block_size_store(struct config_item *item,
1085 return count; 1085 return count;
1086} 1086}
1087 1087
1088static ssize_t alua_support_show(struct config_item *item, char *page)
1089{
1090 struct se_dev_attrib *da = to_attrib(item);
1091 u8 flags = da->da_dev->transport->transport_flags;
1092
1093 return snprintf(page, PAGE_SIZE, "%d\n",
1094 flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ? 0 : 1);
1095}
1096
1097static ssize_t pgr_support_show(struct config_item *item, char *page)
1098{
1099 struct se_dev_attrib *da = to_attrib(item);
1100 u8 flags = da->da_dev->transport->transport_flags;
1101
1102 return snprintf(page, PAGE_SIZE, "%d\n",
1103 flags & TRANSPORT_FLAG_PASSTHROUGH_PGR ? 0 : 1);
1104}
1105
1088CONFIGFS_ATTR(, emulate_model_alias); 1106CONFIGFS_ATTR(, emulate_model_alias);
1089CONFIGFS_ATTR(, emulate_dpo); 1107CONFIGFS_ATTR(, emulate_dpo);
1090CONFIGFS_ATTR(, emulate_fua_write); 1108CONFIGFS_ATTR(, emulate_fua_write);
@@ -1116,6 +1134,8 @@ CONFIGFS_ATTR(, unmap_granularity);
1116CONFIGFS_ATTR(, unmap_granularity_alignment); 1134CONFIGFS_ATTR(, unmap_granularity_alignment);
1117CONFIGFS_ATTR(, unmap_zeroes_data); 1135CONFIGFS_ATTR(, unmap_zeroes_data);
1118CONFIGFS_ATTR(, max_write_same_len); 1136CONFIGFS_ATTR(, max_write_same_len);
1137CONFIGFS_ATTR_RO(, alua_support);
1138CONFIGFS_ATTR_RO(, pgr_support);
1119 1139
1120/* 1140/*
1121 * dev_attrib attributes for devices using the target core SBC/SPC 1141 * dev_attrib attributes for devices using the target core SBC/SPC
@@ -1154,6 +1174,8 @@ struct configfs_attribute *sbc_attrib_attrs[] = {
1154 &attr_unmap_granularity_alignment, 1174 &attr_unmap_granularity_alignment,
1155 &attr_unmap_zeroes_data, 1175 &attr_unmap_zeroes_data,
1156 &attr_max_write_same_len, 1176 &attr_max_write_same_len,
1177 &attr_alua_support,
1178 &attr_pgr_support,
1157 NULL, 1179 NULL,
1158}; 1180};
1159EXPORT_SYMBOL(sbc_attrib_attrs); 1181EXPORT_SYMBOL(sbc_attrib_attrs);
@@ -1168,6 +1190,8 @@ struct configfs_attribute *passthrough_attrib_attrs[] = {
1168 &attr_hw_block_size, 1190 &attr_hw_block_size,
1169 &attr_hw_max_sectors, 1191 &attr_hw_max_sectors,
1170 &attr_hw_queue_depth, 1192 &attr_hw_queue_depth,
1193 &attr_alua_support,
1194 &attr_pgr_support,
1171 NULL, 1195 NULL,
1172}; 1196};
1173EXPORT_SYMBOL(passthrough_attrib_attrs); 1197EXPORT_SYMBOL(passthrough_attrib_attrs);
@@ -2236,7 +2260,11 @@ static void target_core_dev_release(struct config_item *item)
2236 target_free_device(dev); 2260 target_free_device(dev);
2237} 2261}
2238 2262
2239static struct configfs_item_operations target_core_dev_item_ops = { 2263/*
2264 * Used in target_core_fabric_configfs.c to verify valid se_device symlink
2265 * within target_fabric_port_link()
2266 */
2267struct configfs_item_operations target_core_dev_item_ops = {
2240 .release = target_core_dev_release, 2268 .release = target_core_dev_release,
2241}; 2269};
2242 2270
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 8add07f387f9..e8dd6da164b2 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -49,8 +49,9 @@
49#include "target_core_pr.h" 49#include "target_core_pr.h"
50#include "target_core_ua.h" 50#include "target_core_ua.h"
51 51
52DEFINE_MUTEX(g_device_mutex); 52static DEFINE_MUTEX(device_mutex);
53LIST_HEAD(g_device_list); 53static LIST_HEAD(device_list);
54static DEFINE_IDR(devices_idr);
54 55
55static struct se_hba *lun0_hba; 56static struct se_hba *lun0_hba;
56/* not static, needed by tpg.c */ 57/* not static, needed by tpg.c */
@@ -168,11 +169,20 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
168 rcu_read_lock(); 169 rcu_read_lock();
169 deve = target_nacl_find_deve(nacl, unpacked_lun); 170 deve = target_nacl_find_deve(nacl, unpacked_lun);
170 if (deve) { 171 if (deve) {
171 se_cmd->se_lun = rcu_dereference(deve->se_lun);
172 se_lun = rcu_dereference(deve->se_lun); 172 se_lun = rcu_dereference(deve->se_lun);
173
174 if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
175 se_lun = NULL;
176 goto out_unlock;
177 }
178
179 se_cmd->se_lun = rcu_dereference(deve->se_lun);
173 se_cmd->pr_res_key = deve->pr_res_key; 180 se_cmd->pr_res_key = deve->pr_res_key;
174 se_cmd->orig_fe_lun = unpacked_lun; 181 se_cmd->orig_fe_lun = unpacked_lun;
182 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
183 se_cmd->lun_ref_active = true;
175 } 184 }
185out_unlock:
176 rcu_read_unlock(); 186 rcu_read_unlock();
177 187
178 if (!se_lun) { 188 if (!se_lun) {
@@ -182,9 +192,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
182 unpacked_lun); 192 unpacked_lun);
183 return -ENODEV; 193 return -ENODEV;
184 } 194 }
185 /*
186 * XXX: Add percpu se_lun->lun_ref reference count for TMR
187 */
188 se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev); 195 se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
189 se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev); 196 se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);
190 197
@@ -756,19 +763,16 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
756 if (!dev) 763 if (!dev)
757 return NULL; 764 return NULL;
758 765
759 dev->dev_link_magic = SE_DEV_LINK_MAGIC;
760 dev->se_hba = hba; 766 dev->se_hba = hba;
761 dev->transport = hba->backend->ops; 767 dev->transport = hba->backend->ops;
762 dev->prot_length = sizeof(struct t10_pi_tuple); 768 dev->prot_length = sizeof(struct t10_pi_tuple);
763 dev->hba_index = hba->hba_index; 769 dev->hba_index = hba->hba_index;
764 770
765 INIT_LIST_HEAD(&dev->dev_list);
766 INIT_LIST_HEAD(&dev->dev_sep_list); 771 INIT_LIST_HEAD(&dev->dev_sep_list);
767 INIT_LIST_HEAD(&dev->dev_tmr_list); 772 INIT_LIST_HEAD(&dev->dev_tmr_list);
768 INIT_LIST_HEAD(&dev->delayed_cmd_list); 773 INIT_LIST_HEAD(&dev->delayed_cmd_list);
769 INIT_LIST_HEAD(&dev->state_list); 774 INIT_LIST_HEAD(&dev->state_list);
770 INIT_LIST_HEAD(&dev->qf_cmd_list); 775 INIT_LIST_HEAD(&dev->qf_cmd_list);
771 INIT_LIST_HEAD(&dev->g_dev_node);
772 spin_lock_init(&dev->execute_task_lock); 776 spin_lock_init(&dev->execute_task_lock);
773 spin_lock_init(&dev->delayed_cmd_lock); 777 spin_lock_init(&dev->delayed_cmd_lock);
774 spin_lock_init(&dev->dev_reservation_lock); 778 spin_lock_init(&dev->dev_reservation_lock);
@@ -851,7 +855,7 @@ bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
851 attrib->unmap_granularity = q->limits.discard_granularity / block_size; 855 attrib->unmap_granularity = q->limits.discard_granularity / block_size;
852 attrib->unmap_granularity_alignment = q->limits.discard_alignment / 856 attrib->unmap_granularity_alignment = q->limits.discard_alignment /
853 block_size; 857 block_size;
854 attrib->unmap_zeroes_data = 0; 858 attrib->unmap_zeroes_data = (q->limits.max_write_zeroes_sectors);
855 return true; 859 return true;
856} 860}
857EXPORT_SYMBOL(target_configure_unmap_from_queue); 861EXPORT_SYMBOL(target_configure_unmap_from_queue);
@@ -875,10 +879,79 @@ sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
875} 879}
876EXPORT_SYMBOL(target_to_linux_sector); 880EXPORT_SYMBOL(target_to_linux_sector);
877 881
882/**
883 * target_find_device - find a se_device by its dev_index
884 * @id: dev_index
885 * @do_depend: true if caller needs target_depend_item to be done
886 *
887 * If do_depend is true, the caller must do a target_undepend_item
888 * when finished using the device.
889 *
890 * If do_depend is false, the caller must be called in a configfs
891 * callback or during removal.
892 */
893struct se_device *target_find_device(int id, bool do_depend)
894{
895 struct se_device *dev;
896
897 mutex_lock(&device_mutex);
898 dev = idr_find(&devices_idr, id);
899 if (dev && do_depend && target_depend_item(&dev->dev_group.cg_item))
900 dev = NULL;
901 mutex_unlock(&device_mutex);
902 return dev;
903}
904EXPORT_SYMBOL(target_find_device);
905
906struct devices_idr_iter {
907 int (*fn)(struct se_device *dev, void *data);
908 void *data;
909};
910
911static int target_devices_idr_iter(int id, void *p, void *data)
912{
913 struct devices_idr_iter *iter = data;
914 struct se_device *dev = p;
915
916 /*
917 * We add the device early to the idr, so it can be used
918 * by backend modules during configuration. We do not want
919 * to allow other callers to access partially setup devices,
920 * so we skip them here.
921 */
922 if (!(dev->dev_flags & DF_CONFIGURED))
923 return 0;
924
925 return iter->fn(dev, iter->data);
926}
927
928/**
929 * target_for_each_device - iterate over configured devices
930 * @fn: iterator function
931 * @data: pointer to data that will be passed to fn
932 *
933 * fn must return 0 to continue looping over devices. non-zero will break
934 * from the loop and return that value to the caller.
935 */
936int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
937 void *data)
938{
939 struct devices_idr_iter iter;
940 int ret;
941
942 iter.fn = fn;
943 iter.data = data;
944
945 mutex_lock(&device_mutex);
946 ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
947 mutex_unlock(&device_mutex);
948 return ret;
949}
950
878int target_configure_device(struct se_device *dev) 951int target_configure_device(struct se_device *dev)
879{ 952{
880 struct se_hba *hba = dev->se_hba; 953 struct se_hba *hba = dev->se_hba;
881 int ret; 954 int ret, id;
882 955
883 if (dev->dev_flags & DF_CONFIGURED) { 956 if (dev->dev_flags & DF_CONFIGURED) {
884 pr_err("se_dev->se_dev_ptr already set for storage" 957 pr_err("se_dev->se_dev_ptr already set for storage"
@@ -886,9 +959,26 @@ int target_configure_device(struct se_device *dev)
886 return -EEXIST; 959 return -EEXIST;
887 } 960 }
888 961
962 /*
963 * Add early so modules like tcmu can use during its
964 * configuration.
965 */
966 mutex_lock(&device_mutex);
967 /*
968 * Use cyclic to try and avoid collisions with devices
969 * that were recently removed.
970 */
971 id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL);
972 mutex_unlock(&device_mutex);
973 if (id < 0) {
974 ret = -ENOMEM;
975 goto out;
976 }
977 dev->dev_index = id;
978
889 ret = dev->transport->configure_device(dev); 979 ret = dev->transport->configure_device(dev);
890 if (ret) 980 if (ret)
891 goto out; 981 goto out_free_index;
892 /* 982 /*
893 * XXX: there is not much point to have two different values here.. 983 * XXX: there is not much point to have two different values here..
894 */ 984 */
@@ -903,12 +993,11 @@ int target_configure_device(struct se_device *dev)
903 dev->dev_attrib.hw_block_size); 993 dev->dev_attrib.hw_block_size);
904 dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors; 994 dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;
905 995
906 dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
907 dev->creation_time = get_jiffies_64(); 996 dev->creation_time = get_jiffies_64();
908 997
909 ret = core_setup_alua(dev); 998 ret = core_setup_alua(dev);
910 if (ret) 999 if (ret)
911 goto out; 1000 goto out_free_index;
912 1001
913 /* 1002 /*
914 * Startup the struct se_device processing thread 1003 * Startup the struct se_device processing thread
@@ -946,16 +1035,16 @@ int target_configure_device(struct se_device *dev)
946 hba->dev_count++; 1035 hba->dev_count++;
947 spin_unlock(&hba->device_lock); 1036 spin_unlock(&hba->device_lock);
948 1037
949 mutex_lock(&g_device_mutex);
950 list_add_tail(&dev->g_dev_node, &g_device_list);
951 mutex_unlock(&g_device_mutex);
952
953 dev->dev_flags |= DF_CONFIGURED; 1038 dev->dev_flags |= DF_CONFIGURED;
954 1039
955 return 0; 1040 return 0;
956 1041
957out_free_alua: 1042out_free_alua:
958 core_alua_free_lu_gp_mem(dev); 1043 core_alua_free_lu_gp_mem(dev);
1044out_free_index:
1045 mutex_lock(&device_mutex);
1046 idr_remove(&devices_idr, dev->dev_index);
1047 mutex_unlock(&device_mutex);
959out: 1048out:
960 se_release_vpd_for_dev(dev); 1049 se_release_vpd_for_dev(dev);
961 return ret; 1050 return ret;
@@ -970,9 +1059,11 @@ void target_free_device(struct se_device *dev)
970 if (dev->dev_flags & DF_CONFIGURED) { 1059 if (dev->dev_flags & DF_CONFIGURED) {
971 destroy_workqueue(dev->tmr_wq); 1060 destroy_workqueue(dev->tmr_wq);
972 1061
973 mutex_lock(&g_device_mutex); 1062 dev->transport->destroy_device(dev);
974 list_del(&dev->g_dev_node); 1063
975 mutex_unlock(&g_device_mutex); 1064 mutex_lock(&device_mutex);
1065 idr_remove(&devices_idr, dev->dev_index);
1066 mutex_unlock(&device_mutex);
976 1067
977 spin_lock(&hba->device_lock); 1068 spin_lock(&hba->device_lock);
978 hba->dev_count--; 1069 hba->dev_count--;
@@ -1087,19 +1178,19 @@ passthrough_parse_cdb(struct se_cmd *cmd,
1087 TRANSPORT_FLAG_PASSTHROUGH_PGR)) { 1178 TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
1088 if (cdb[0] == PERSISTENT_RESERVE_IN) { 1179 if (cdb[0] == PERSISTENT_RESERVE_IN) {
1089 cmd->execute_cmd = target_scsi3_emulate_pr_in; 1180 cmd->execute_cmd = target_scsi3_emulate_pr_in;
1090 size = (cdb[7] << 8) + cdb[8]; 1181 size = get_unaligned_be16(&cdb[7]);
1091 return target_cmd_size_check(cmd, size); 1182 return target_cmd_size_check(cmd, size);
1092 } 1183 }
1093 if (cdb[0] == PERSISTENT_RESERVE_OUT) { 1184 if (cdb[0] == PERSISTENT_RESERVE_OUT) {
1094 cmd->execute_cmd = target_scsi3_emulate_pr_out; 1185 cmd->execute_cmd = target_scsi3_emulate_pr_out;
1095 size = (cdb[7] << 8) + cdb[8]; 1186 size = get_unaligned_be32(&cdb[5]);
1096 return target_cmd_size_check(cmd, size); 1187 return target_cmd_size_check(cmd, size);
1097 } 1188 }
1098 1189
1099 if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) { 1190 if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) {
1100 cmd->execute_cmd = target_scsi2_reservation_release; 1191 cmd->execute_cmd = target_scsi2_reservation_release;
1101 if (cdb[0] == RELEASE_10) 1192 if (cdb[0] == RELEASE_10)
1102 size = (cdb[7] << 8) | cdb[8]; 1193 size = get_unaligned_be16(&cdb[7]);
1103 else 1194 else
1104 size = cmd->data_length; 1195 size = cmd->data_length;
1105 return target_cmd_size_check(cmd, size); 1196 return target_cmd_size_check(cmd, size);
@@ -1107,7 +1198,7 @@ passthrough_parse_cdb(struct se_cmd *cmd,
1107 if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) { 1198 if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) {
1108 cmd->execute_cmd = target_scsi2_reservation_reserve; 1199 cmd->execute_cmd = target_scsi2_reservation_reserve;
1109 if (cdb[0] == RESERVE_10) 1200 if (cdb[0] == RESERVE_10)
1110 size = (cdb[7] << 8) | cdb[8]; 1201 size = get_unaligned_be16(&cdb[7]);
1111 else 1202 else
1112 size = cmd->data_length; 1203 size = cmd->data_length;
1113 return target_cmd_size_check(cmd, size); 1204 return target_cmd_size_check(cmd, size);
@@ -1126,7 +1217,7 @@ passthrough_parse_cdb(struct se_cmd *cmd,
1126 case WRITE_16: 1217 case WRITE_16:
1127 case WRITE_VERIFY: 1218 case WRITE_VERIFY:
1128 case WRITE_VERIFY_12: 1219 case WRITE_VERIFY_12:
1129 case 0x8e: /* WRITE_VERIFY_16 */ 1220 case WRITE_VERIFY_16:
1130 case COMPARE_AND_WRITE: 1221 case COMPARE_AND_WRITE:
1131 case XDWRITEREAD_10: 1222 case XDWRITEREAD_10:
1132 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 1223 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -1135,7 +1226,7 @@ passthrough_parse_cdb(struct se_cmd *cmd,
1135 switch (get_unaligned_be16(&cdb[8])) { 1226 switch (get_unaligned_be16(&cdb[8])) {
1136 case READ_32: 1227 case READ_32:
1137 case WRITE_32: 1228 case WRITE_32:
1138 case 0x0c: /* WRITE_VERIFY_32 */ 1229 case WRITE_VERIFY_32:
1139 case XDWRITEREAD_32: 1230 case XDWRITEREAD_32:
1140 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 1231 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
1141 break; 1232 break;
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index d1e6cab8e3d3..e9e917cc6441 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -65,6 +65,8 @@ static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf)
65 pr_debug("Setup generic %s\n", __stringify(_name)); \ 65 pr_debug("Setup generic %s\n", __stringify(_name)); \
66} 66}
67 67
68static struct configfs_item_operations target_fabric_port_item_ops;
69
68/* Start of tfc_tpg_mappedlun_cit */ 70/* Start of tfc_tpg_mappedlun_cit */
69 71
70static int target_fabric_mappedlun_link( 72static int target_fabric_mappedlun_link(
@@ -72,19 +74,20 @@ static int target_fabric_mappedlun_link(
72 struct config_item *lun_ci) 74 struct config_item *lun_ci)
73{ 75{
74 struct se_dev_entry *deve; 76 struct se_dev_entry *deve;
75 struct se_lun *lun = container_of(to_config_group(lun_ci), 77 struct se_lun *lun;
76 struct se_lun, lun_group);
77 struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci), 78 struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci),
78 struct se_lun_acl, se_lun_group); 79 struct se_lun_acl, se_lun_group);
79 struct se_portal_group *se_tpg; 80 struct se_portal_group *se_tpg;
80 struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s; 81 struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s;
81 bool lun_access_ro; 82 bool lun_access_ro;
82 83
83 if (lun->lun_link_magic != SE_LUN_LINK_MAGIC) { 84 if (!lun_ci->ci_type ||
84 pr_err("Bad lun->lun_link_magic, not a valid lun_ci pointer:" 85 lun_ci->ci_type->ct_item_ops != &target_fabric_port_item_ops) {
85 " %p to struct lun: %p\n", lun_ci, lun); 86 pr_err("Bad lun_ci, not a valid lun_ci pointer: %p\n", lun_ci);
86 return -EFAULT; 87 return -EFAULT;
87 } 88 }
89 lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group);
90
88 /* 91 /*
89 * Ensure that the source port exists 92 * Ensure that the source port exists
90 */ 93 */
@@ -620,6 +623,8 @@ static struct configfs_attribute *target_fabric_port_attrs[] = {
620 NULL, 623 NULL,
621}; 624};
622 625
626extern struct configfs_item_operations target_core_dev_item_ops;
627
623static int target_fabric_port_link( 628static int target_fabric_port_link(
624 struct config_item *lun_ci, 629 struct config_item *lun_ci,
625 struct config_item *se_dev_ci) 630 struct config_item *se_dev_ci)
@@ -628,16 +633,16 @@ static int target_fabric_port_link(
628 struct se_lun *lun = container_of(to_config_group(lun_ci), 633 struct se_lun *lun = container_of(to_config_group(lun_ci),
629 struct se_lun, lun_group); 634 struct se_lun, lun_group);
630 struct se_portal_group *se_tpg; 635 struct se_portal_group *se_tpg;
631 struct se_device *dev = 636 struct se_device *dev;
632 container_of(to_config_group(se_dev_ci), struct se_device, dev_group);
633 struct target_fabric_configfs *tf; 637 struct target_fabric_configfs *tf;
634 int ret; 638 int ret;
635 639
636 if (dev->dev_link_magic != SE_DEV_LINK_MAGIC) { 640 if (!se_dev_ci->ci_type ||
637 pr_err("Bad dev->dev_link_magic, not a valid se_dev_ci pointer:" 641 se_dev_ci->ci_type->ct_item_ops != &target_core_dev_item_ops) {
638 " %p to struct se_device: %p\n", se_dev_ci, dev); 642 pr_err("Bad se_dev_ci, not a valid se_dev_ci pointer: %p\n", se_dev_ci);
639 return -EFAULT; 643 return -EFAULT;
640 } 644 }
645 dev = container_of(to_config_group(se_dev_ci), struct se_device, dev_group);
641 646
642 if (!(dev->dev_flags & DF_CONFIGURED)) { 647 if (!(dev->dev_flags & DF_CONFIGURED)) {
643 pr_err("se_device not configured yet, cannot port link\n"); 648 pr_err("se_device not configured yet, cannot port link\n");
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index cb6497ce4b61..508da345b73f 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -34,6 +34,7 @@
34#include <linux/ctype.h> 34#include <linux/ctype.h>
35#include <linux/spinlock.h> 35#include <linux/spinlock.h>
36#include <linux/export.h> 36#include <linux/export.h>
37#include <asm/unaligned.h>
37 38
38#include <scsi/scsi_proto.h> 39#include <scsi/scsi_proto.h>
39 40
@@ -216,8 +217,7 @@ static int iscsi_get_pr_transport_id(
216 if (padding != 0) 217 if (padding != 0)
217 len += padding; 218 len += padding;
218 219
219 buf[2] = ((len >> 8) & 0xff); 220 put_unaligned_be16(len, &buf[2]);
220 buf[3] = (len & 0xff);
221 /* 221 /*
222 * Increment value for total payload + header length for 222 * Increment value for total payload + header length for
223 * full status descriptor 223 * full status descriptor
@@ -306,7 +306,7 @@ static char *iscsi_parse_pr_out_transport_id(
306 */ 306 */
307 if (out_tid_len) { 307 if (out_tid_len) {
308 /* The shift works thanks to integer promotion rules */ 308 /* The shift works thanks to integer promotion rules */
309 add_len = (buf[2] << 8) | buf[3]; 309 add_len = get_unaligned_be16(&buf[2]);
310 310
311 tid_len = strlen(&buf[4]); 311 tid_len = strlen(&buf[4]);
312 tid_len += 4; /* Add four bytes for iSCSI Transport ID header */ 312 tid_len += 4; /* Add four bytes for iSCSI Transport ID header */
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index e921948415c7..24cf11d9e50a 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -237,13 +237,17 @@ static void fd_dev_call_rcu(struct rcu_head *p)
237 237
238static void fd_free_device(struct se_device *dev) 238static void fd_free_device(struct se_device *dev)
239{ 239{
240 call_rcu(&dev->rcu_head, fd_dev_call_rcu);
241}
242
243static void fd_destroy_device(struct se_device *dev)
244{
240 struct fd_dev *fd_dev = FD_DEV(dev); 245 struct fd_dev *fd_dev = FD_DEV(dev);
241 246
242 if (fd_dev->fd_file) { 247 if (fd_dev->fd_file) {
243 filp_close(fd_dev->fd_file, NULL); 248 filp_close(fd_dev->fd_file, NULL);
244 fd_dev->fd_file = NULL; 249 fd_dev->fd_file = NULL;
245 } 250 }
246 call_rcu(&dev->rcu_head, fd_dev_call_rcu);
247} 251}
248 252
249static int fd_do_rw(struct se_cmd *cmd, struct file *fd, 253static int fd_do_rw(struct se_cmd *cmd, struct file *fd,
@@ -826,6 +830,7 @@ static const struct target_backend_ops fileio_ops = {
826 .detach_hba = fd_detach_hba, 830 .detach_hba = fd_detach_hba,
827 .alloc_device = fd_alloc_device, 831 .alloc_device = fd_alloc_device,
828 .configure_device = fd_configure_device, 832 .configure_device = fd_configure_device,
833 .destroy_device = fd_destroy_device,
829 .free_device = fd_free_device, 834 .free_device = fd_free_device,
830 .parse_cdb = fd_parse_cdb, 835 .parse_cdb = fd_parse_cdb,
831 .set_configfs_dev_params = fd_set_configfs_dev_params, 836 .set_configfs_dev_params = fd_set_configfs_dev_params,
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index c05d38016556..ee7c7fa55dad 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -86,6 +86,7 @@ static int iblock_configure_device(struct se_device *dev)
86 struct block_device *bd = NULL; 86 struct block_device *bd = NULL;
87 struct blk_integrity *bi; 87 struct blk_integrity *bi;
88 fmode_t mode; 88 fmode_t mode;
89 unsigned int max_write_zeroes_sectors;
89 int ret = -ENOMEM; 90 int ret = -ENOMEM;
90 91
91 if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) { 92 if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
@@ -129,7 +130,11 @@ static int iblock_configure_device(struct se_device *dev)
129 * Enable write same emulation for IBLOCK and use 0xFFFF as 130 * Enable write same emulation for IBLOCK and use 0xFFFF as
130 * the smaller WRITE_SAME(10) only has a two-byte block count. 131 * the smaller WRITE_SAME(10) only has a two-byte block count.
131 */ 132 */
132 dev->dev_attrib.max_write_same_len = 0xFFFF; 133 max_write_zeroes_sectors = bdev_write_zeroes_sectors(bd);
134 if (max_write_zeroes_sectors)
135 dev->dev_attrib.max_write_same_len = max_write_zeroes_sectors;
136 else
137 dev->dev_attrib.max_write_same_len = 0xFFFF;
133 138
134 if (blk_queue_nonrot(q)) 139 if (blk_queue_nonrot(q))
135 dev->dev_attrib.is_nonrot = 1; 140 dev->dev_attrib.is_nonrot = 1;
@@ -185,14 +190,17 @@ static void iblock_dev_call_rcu(struct rcu_head *p)
185 190
186static void iblock_free_device(struct se_device *dev) 191static void iblock_free_device(struct se_device *dev)
187{ 192{
193 call_rcu(&dev->rcu_head, iblock_dev_call_rcu);
194}
195
196static void iblock_destroy_device(struct se_device *dev)
197{
188 struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 198 struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
189 199
190 if (ib_dev->ibd_bd != NULL) 200 if (ib_dev->ibd_bd != NULL)
191 blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); 201 blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
192 if (ib_dev->ibd_bio_set != NULL) 202 if (ib_dev->ibd_bio_set != NULL)
193 bioset_free(ib_dev->ibd_bio_set); 203 bioset_free(ib_dev->ibd_bio_set);
194
195 call_rcu(&dev->rcu_head, iblock_dev_call_rcu);
196} 204}
197 205
198static unsigned long long iblock_emulate_read_cap_with_block_size( 206static unsigned long long iblock_emulate_read_cap_with_block_size(
@@ -415,28 +423,31 @@ iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
415} 423}
416 424
417static sense_reason_t 425static sense_reason_t
418iblock_execute_write_same_direct(struct block_device *bdev, struct se_cmd *cmd) 426iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
419{ 427{
420 struct se_device *dev = cmd->se_dev; 428 struct se_device *dev = cmd->se_dev;
421 struct scatterlist *sg = &cmd->t_data_sg[0]; 429 struct scatterlist *sg = &cmd->t_data_sg[0];
422 struct page *page = NULL; 430 unsigned char *buf, zero = 0x00, *p = &zero;
423 int ret; 431 int rc, ret;
424 432
425 if (sg->offset) { 433 buf = kmap(sg_page(sg)) + sg->offset;
426 page = alloc_page(GFP_KERNEL); 434 if (!buf)
427 if (!page) 435 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
428 return TCM_OUT_OF_RESOURCES; 436 /*
429 sg_copy_to_buffer(sg, cmd->t_data_nents, page_address(page), 437 * Fall back to block_execute_write_same() slow-path if
430 dev->dev_attrib.block_size); 438 * incoming WRITE_SAME payload does not contain zeros.
431 } 439 */
440 rc = memcmp(buf, p, cmd->data_length);
441 kunmap(sg_page(sg));
432 442
433 ret = blkdev_issue_write_same(bdev, 443 if (rc)
444 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
445
446 ret = blkdev_issue_zeroout(bdev,
434 target_to_linux_sector(dev, cmd->t_task_lba), 447 target_to_linux_sector(dev, cmd->t_task_lba),
435 target_to_linux_sector(dev, 448 target_to_linux_sector(dev,
436 sbc_get_write_same_sectors(cmd)), 449 sbc_get_write_same_sectors(cmd)),
437 GFP_KERNEL, page ? page : sg_page(sg)); 450 GFP_KERNEL, false);
438 if (page)
439 __free_page(page);
440 if (ret) 451 if (ret)
441 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 452 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
442 453
@@ -472,8 +483,10 @@ iblock_execute_write_same(struct se_cmd *cmd)
472 return TCM_INVALID_CDB_FIELD; 483 return TCM_INVALID_CDB_FIELD;
473 } 484 }
474 485
475 if (bdev_write_same(bdev)) 486 if (bdev_write_zeroes_sectors(bdev)) {
476 return iblock_execute_write_same_direct(bdev, cmd); 487 if (!iblock_execute_zero_out(bdev, cmd))
488 return 0;
489 }
477 490
478 ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); 491 ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
479 if (!ibr) 492 if (!ibr)
@@ -848,6 +861,7 @@ static const struct target_backend_ops iblock_ops = {
848 .detach_hba = iblock_detach_hba, 861 .detach_hba = iblock_detach_hba,
849 .alloc_device = iblock_alloc_device, 862 .alloc_device = iblock_alloc_device,
850 .configure_device = iblock_configure_device, 863 .configure_device = iblock_configure_device,
864 .destroy_device = iblock_destroy_device,
851 .free_device = iblock_free_device, 865 .free_device = iblock_free_device,
852 .parse_cdb = iblock_parse_cdb, 866 .parse_cdb = iblock_parse_cdb,
853 .set_configfs_dev_params = iblock_set_configfs_dev_params, 867 .set_configfs_dev_params = iblock_set_configfs_dev_params,
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 0912de7c0cf8..f30e8ac13386 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -56,9 +56,6 @@ struct target_fabric_configfs {
56extern struct t10_alua_lu_gp *default_lu_gp; 56extern struct t10_alua_lu_gp *default_lu_gp;
57 57
58/* target_core_device.c */ 58/* target_core_device.c */
59extern struct mutex g_device_mutex;
60extern struct list_head g_device_list;
61
62int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev); 59int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev);
63struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16); 60struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
64void target_pr_kref_release(struct kref *); 61void target_pr_kref_release(struct kref *);
@@ -87,6 +84,8 @@ void core_dev_release_virtual_lun0(void);
87struct se_device *target_alloc_device(struct se_hba *hba, const char *name); 84struct se_device *target_alloc_device(struct se_hba *hba, const char *name);
88int target_configure_device(struct se_device *dev); 85int target_configure_device(struct se_device *dev);
89void target_free_device(struct se_device *); 86void target_free_device(struct se_device *);
87int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
88 void *data);
90 89
91/* target_core_configfs.c */ 90/* target_core_configfs.c */
92void target_setup_backend_cits(struct target_backend *); 91void target_setup_backend_cits(struct target_backend *);
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 129ca572673c..6d5def64db61 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -1562,10 +1562,7 @@ core_scsi3_decode_spec_i_port(
1562 * first extract TransportID Parameter Data Length, and make sure 1562 * first extract TransportID Parameter Data Length, and make sure
1563 * the value matches up to the SCSI expected data transfer length. 1563 * the value matches up to the SCSI expected data transfer length.
1564 */ 1564 */
1565 tpdl = (buf[24] & 0xff) << 24; 1565 tpdl = get_unaligned_be32(&buf[24]);
1566 tpdl |= (buf[25] & 0xff) << 16;
1567 tpdl |= (buf[26] & 0xff) << 8;
1568 tpdl |= buf[27] & 0xff;
1569 1566
1570 if ((tpdl + 28) != cmd->data_length) { 1567 if ((tpdl + 28) != cmd->data_length) {
1571 pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header" 1568 pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header"
@@ -3221,12 +3218,8 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
3221 goto out_put_pr_reg; 3218 goto out_put_pr_reg;
3222 } 3219 }
3223 3220
3224 rtpi = (buf[18] & 0xff) << 8; 3221 rtpi = get_unaligned_be16(&buf[18]);
3225 rtpi |= buf[19] & 0xff; 3222 tid_len = get_unaligned_be32(&buf[20]);
3226 tid_len = (buf[20] & 0xff) << 24;
3227 tid_len |= (buf[21] & 0xff) << 16;
3228 tid_len |= (buf[22] & 0xff) << 8;
3229 tid_len |= buf[23] & 0xff;
3230 transport_kunmap_data_sg(cmd); 3223 transport_kunmap_data_sg(cmd);
3231 buf = NULL; 3224 buf = NULL;
3232 3225
@@ -3552,16 +3545,6 @@ out_put_pr_reg:
3552 return ret; 3545 return ret;
3553} 3546}
3554 3547
3555static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb)
3556{
3557 unsigned int __v1, __v2;
3558
3559 __v1 = (cdb[0] << 24) | (cdb[1] << 16) | (cdb[2] << 8) | cdb[3];
3560 __v2 = (cdb[4] << 24) | (cdb[5] << 16) | (cdb[6] << 8) | cdb[7];
3561
3562 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
3563}
3564
3565/* 3548/*
3566 * See spc4r17 section 6.14 Table 170 3549 * See spc4r17 section 6.14 Table 170
3567 */ 3550 */
@@ -3602,7 +3585,7 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd)
3602 if (cmd->data_length < 24) { 3585 if (cmd->data_length < 24) {
3603 pr_warn("SPC-PR: Received PR OUT parameter list" 3586 pr_warn("SPC-PR: Received PR OUT parameter list"
3604 " length too small: %u\n", cmd->data_length); 3587 " length too small: %u\n", cmd->data_length);
3605 return TCM_INVALID_PARAMETER_LIST; 3588 return TCM_PARAMETER_LIST_LENGTH_ERROR;
3606 } 3589 }
3607 3590
3608 /* 3591 /*
@@ -3619,8 +3602,8 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd)
3619 /* 3602 /*
3620 * From PERSISTENT_RESERVE_OUT parameter list (payload) 3603 * From PERSISTENT_RESERVE_OUT parameter list (payload)
3621 */ 3604 */
3622 res_key = core_scsi3_extract_reservation_key(&buf[0]); 3605 res_key = get_unaligned_be64(&buf[0]);
3623 sa_res_key = core_scsi3_extract_reservation_key(&buf[8]); 3606 sa_res_key = get_unaligned_be64(&buf[8]);
3624 /* 3607 /*
3625 * REGISTER_AND_MOVE uses a different SA parameter list containing 3608 * REGISTER_AND_MOVE uses a different SA parameter list containing
3626 * SCSI TransportIDs. 3609 * SCSI TransportIDs.
@@ -3646,7 +3629,7 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd)
3646 /* 3629 /*
3647 * SPEC_I_PT=1 is only valid for Service action: REGISTER 3630 * SPEC_I_PT=1 is only valid for Service action: REGISTER
3648 */ 3631 */
3649 if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER)) 3632 if (spec_i_pt && (sa != PRO_REGISTER))
3650 return TCM_INVALID_PARAMETER_LIST; 3633 return TCM_INVALID_PARAMETER_LIST;
3651 3634
3652 /* 3635 /*
@@ -3658,11 +3641,11 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd)
3658 * the sense key set to ILLEGAL REQUEST, and the additional sense 3641 * the sense key set to ILLEGAL REQUEST, and the additional sense
3659 * code set to PARAMETER LIST LENGTH ERROR. 3642 * code set to PARAMETER LIST LENGTH ERROR.
3660 */ 3643 */
3661 if (!spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER_AND_MOVE) && 3644 if (!spec_i_pt && (sa != PRO_REGISTER_AND_MOVE) &&
3662 (cmd->data_length != 24)) { 3645 (cmd->data_length != 24)) {
3663 pr_warn("SPC-PR: Received PR OUT illegal parameter" 3646 pr_warn("SPC-PR: Received PR OUT illegal parameter"
3664 " list length: %u\n", cmd->data_length); 3647 " list length: %u\n", cmd->data_length);
3665 return TCM_INVALID_PARAMETER_LIST; 3648 return TCM_PARAMETER_LIST_LENGTH_ERROR;
3666 } 3649 }
3667 3650
3668 /* 3651 /*
@@ -3702,7 +3685,7 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd)
3702 break; 3685 break;
3703 default: 3686 default:
3704 pr_err("Unknown PERSISTENT_RESERVE_OUT service" 3687 pr_err("Unknown PERSISTENT_RESERVE_OUT service"
3705 " action: 0x%02x\n", cdb[1] & 0x1f); 3688 " action: 0x%02x\n", sa);
3706 return TCM_INVALID_CDB_FIELD; 3689 return TCM_INVALID_CDB_FIELD;
3707 } 3690 }
3708 3691
@@ -3734,10 +3717,7 @@ core_scsi3_pri_read_keys(struct se_cmd *cmd)
3734 if (!buf) 3717 if (!buf)
3735 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3718 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3736 3719
3737 buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff); 3720 put_unaligned_be32(dev->t10_pr.pr_generation, buf);
3738 buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff);
3739 buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
3740 buf[3] = (dev->t10_pr.pr_generation & 0xff);
3741 3721
3742 spin_lock(&dev->t10_pr.registration_lock); 3722 spin_lock(&dev->t10_pr.registration_lock);
3743 list_for_each_entry(pr_reg, &dev->t10_pr.registration_list, 3723 list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
@@ -3749,23 +3729,13 @@ core_scsi3_pri_read_keys(struct se_cmd *cmd)
3749 if ((add_len + 8) > (cmd->data_length - 8)) 3729 if ((add_len + 8) > (cmd->data_length - 8))
3750 break; 3730 break;
3751 3731
3752 buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff); 3732 put_unaligned_be64(pr_reg->pr_res_key, &buf[off]);
3753 buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff); 3733 off += 8;
3754 buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff);
3755 buf[off++] = ((pr_reg->pr_res_key >> 32) & 0xff);
3756 buf[off++] = ((pr_reg->pr_res_key >> 24) & 0xff);
3757 buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff);
3758 buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff);
3759 buf[off++] = (pr_reg->pr_res_key & 0xff);
3760
3761 add_len += 8; 3734 add_len += 8;
3762 } 3735 }
3763 spin_unlock(&dev->t10_pr.registration_lock); 3736 spin_unlock(&dev->t10_pr.registration_lock);
3764 3737
3765 buf[4] = ((add_len >> 24) & 0xff); 3738 put_unaligned_be32(add_len, &buf[4]);
3766 buf[5] = ((add_len >> 16) & 0xff);
3767 buf[6] = ((add_len >> 8) & 0xff);
3768 buf[7] = (add_len & 0xff);
3769 3739
3770 transport_kunmap_data_sg(cmd); 3740 transport_kunmap_data_sg(cmd);
3771 3741
@@ -3796,10 +3766,7 @@ core_scsi3_pri_read_reservation(struct se_cmd *cmd)
3796 if (!buf) 3766 if (!buf)
3797 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3767 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3798 3768
3799 buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff); 3769 put_unaligned_be32(dev->t10_pr.pr_generation, &buf[0]);
3800 buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff);
3801 buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
3802 buf[3] = (dev->t10_pr.pr_generation & 0xff);
3803 3770
3804 spin_lock(&dev->dev_reservation_lock); 3771 spin_lock(&dev->dev_reservation_lock);
3805 pr_reg = dev->dev_pr_res_holder; 3772 pr_reg = dev->dev_pr_res_holder;
@@ -3807,10 +3774,7 @@ core_scsi3_pri_read_reservation(struct se_cmd *cmd)
3807 /* 3774 /*
3808 * Set the hardcoded Additional Length 3775 * Set the hardcoded Additional Length
3809 */ 3776 */
3810 buf[4] = ((add_len >> 24) & 0xff); 3777 put_unaligned_be32(add_len, &buf[4]);
3811 buf[5] = ((add_len >> 16) & 0xff);
3812 buf[6] = ((add_len >> 8) & 0xff);
3813 buf[7] = (add_len & 0xff);
3814 3778
3815 if (cmd->data_length < 22) 3779 if (cmd->data_length < 22)
3816 goto err; 3780 goto err;
@@ -3837,14 +3801,7 @@ core_scsi3_pri_read_reservation(struct se_cmd *cmd)
3837 else 3801 else
3838 pr_res_key = pr_reg->pr_res_key; 3802 pr_res_key = pr_reg->pr_res_key;
3839 3803
3840 buf[8] = ((pr_res_key >> 56) & 0xff); 3804 put_unaligned_be64(pr_res_key, &buf[8]);
3841 buf[9] = ((pr_res_key >> 48) & 0xff);
3842 buf[10] = ((pr_res_key >> 40) & 0xff);
3843 buf[11] = ((pr_res_key >> 32) & 0xff);
3844 buf[12] = ((pr_res_key >> 24) & 0xff);
3845 buf[13] = ((pr_res_key >> 16) & 0xff);
3846 buf[14] = ((pr_res_key >> 8) & 0xff);
3847 buf[15] = (pr_res_key & 0xff);
3848 /* 3805 /*
3849 * Set the SCOPE and TYPE 3806 * Set the SCOPE and TYPE
3850 */ 3807 */
@@ -3882,8 +3839,7 @@ core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
3882 if (!buf) 3839 if (!buf)
3883 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3840 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3884 3841
3885 buf[0] = ((add_len >> 8) & 0xff); 3842 put_unaligned_be16(add_len, &buf[0]);
3886 buf[1] = (add_len & 0xff);
3887 buf[2] |= 0x10; /* CRH: Compatible Reservation Hanlding bit. */ 3843 buf[2] |= 0x10; /* CRH: Compatible Reservation Hanlding bit. */
3888 buf[2] |= 0x08; /* SIP_C: Specify Initiator Ports Capable bit */ 3844 buf[2] |= 0x08; /* SIP_C: Specify Initiator Ports Capable bit */
3889 buf[2] |= 0x04; /* ATP_C: All Target Ports Capable bit */ 3845 buf[2] |= 0x04; /* ATP_C: All Target Ports Capable bit */
@@ -3947,10 +3903,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
3947 if (!buf) 3903 if (!buf)
3948 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3904 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3949 3905
3950 buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff); 3906 put_unaligned_be32(dev->t10_pr.pr_generation, &buf[0]);
3951 buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff);
3952 buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
3953 buf[3] = (dev->t10_pr.pr_generation & 0xff);
3954 3907
3955 spin_lock(&dev->dev_reservation_lock); 3908 spin_lock(&dev->dev_reservation_lock);
3956 if (dev->dev_pr_res_holder) { 3909 if (dev->dev_pr_res_holder) {
@@ -3992,14 +3945,8 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
3992 /* 3945 /*
3993 * Set RESERVATION KEY 3946 * Set RESERVATION KEY
3994 */ 3947 */
3995 buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff); 3948 put_unaligned_be64(pr_reg->pr_res_key, &buf[off]);
3996 buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff); 3949 off += 8;
3997 buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff);
3998 buf[off++] = ((pr_reg->pr_res_key >> 32) & 0xff);
3999 buf[off++] = ((pr_reg->pr_res_key >> 24) & 0xff);
4000 buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff);
4001 buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff);
4002 buf[off++] = (pr_reg->pr_res_key & 0xff);
4003 off += 4; /* Skip Over Reserved area */ 3950 off += 4; /* Skip Over Reserved area */
4004 3951
4005 /* 3952 /*
@@ -4041,8 +3988,8 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
4041 if (!pr_reg->pr_reg_all_tg_pt) { 3988 if (!pr_reg->pr_reg_all_tg_pt) {
4042 u16 sep_rtpi = pr_reg->tg_pt_sep_rtpi; 3989 u16 sep_rtpi = pr_reg->tg_pt_sep_rtpi;
4043 3990
4044 buf[off++] = ((sep_rtpi >> 8) & 0xff); 3991 put_unaligned_be16(sep_rtpi, &buf[off]);
4045 buf[off++] = (sep_rtpi & 0xff); 3992 off += 2;
4046 } else 3993 } else
4047 off += 2; /* Skip over RELATIVE TARGET PORT IDENTIFIER */ 3994 off += 2; /* Skip over RELATIVE TARGET PORT IDENTIFIER */
4048 3995
@@ -4062,10 +4009,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
4062 /* 4009 /*
4063 * Set the ADDITIONAL DESCRIPTOR LENGTH 4010 * Set the ADDITIONAL DESCRIPTOR LENGTH
4064 */ 4011 */
4065 buf[off++] = ((desc_len >> 24) & 0xff); 4012 put_unaligned_be32(desc_len, &buf[off]);
4066 buf[off++] = ((desc_len >> 16) & 0xff);
4067 buf[off++] = ((desc_len >> 8) & 0xff);
4068 buf[off++] = (desc_len & 0xff);
4069 /* 4013 /*
4070 * Size of full desctipor header minus TransportID 4014 * Size of full desctipor header minus TransportID
4071 * containing $FABRIC_MOD specific) initiator device/port 4015 * containing $FABRIC_MOD specific) initiator device/port
@@ -4082,10 +4026,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
4082 /* 4026 /*
4083 * Set ADDITIONAL_LENGTH 4027 * Set ADDITIONAL_LENGTH
4084 */ 4028 */
4085 buf[4] = ((add_len >> 24) & 0xff); 4029 put_unaligned_be32(add_len, &buf[4]);
4086 buf[5] = ((add_len >> 16) & 0xff);
4087 buf[6] = ((add_len >> 8) & 0xff);
4088 buf[7] = (add_len & 0xff);
4089 4030
4090 transport_kunmap_data_sg(cmd); 4031 transport_kunmap_data_sg(cmd);
4091 4032
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index ceec0211e84e..7c69b4a9694d 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -168,7 +168,7 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,
168 /* 168 /*
169 * If MODE_SENSE still returns zero, set the default value to 1024. 169 * If MODE_SENSE still returns zero, set the default value to 1024.
170 */ 170 */
171 sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]); 171 sdev->sector_size = get_unaligned_be24(&buf[9]);
172out_free: 172out_free:
173 if (!sdev->sector_size) 173 if (!sdev->sector_size)
174 sdev->sector_size = 1024; 174 sdev->sector_size = 1024;
@@ -209,8 +209,7 @@ pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
209 cdb[0] = INQUIRY; 209 cdb[0] = INQUIRY;
210 cdb[1] = 0x01; /* Query VPD */ 210 cdb[1] = 0x01; /* Query VPD */
211 cdb[2] = 0x80; /* Unit Serial Number */ 211 cdb[2] = 0x80; /* Unit Serial Number */
212 cdb[3] = (INQUIRY_VPD_SERIAL_LEN >> 8) & 0xff; 212 put_unaligned_be16(INQUIRY_VPD_SERIAL_LEN, &cdb[3]);
213 cdb[4] = (INQUIRY_VPD_SERIAL_LEN & 0xff);
214 213
215 ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, 214 ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
216 INQUIRY_VPD_SERIAL_LEN, NULL, HZ, 1, NULL); 215 INQUIRY_VPD_SERIAL_LEN, NULL, HZ, 1, NULL);
@@ -245,8 +244,7 @@ pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev,
245 cdb[0] = INQUIRY; 244 cdb[0] = INQUIRY;
246 cdb[1] = 0x01; /* Query VPD */ 245 cdb[1] = 0x01; /* Query VPD */
247 cdb[2] = 0x83; /* Device Identifier */ 246 cdb[2] = 0x83; /* Device Identifier */
248 cdb[3] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN >> 8) & 0xff; 247 put_unaligned_be16(INQUIRY_VPD_DEVICE_IDENTIFIER_LEN, &cdb[3]);
249 cdb[4] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN & 0xff);
250 248
251 ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, 249 ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
252 INQUIRY_VPD_DEVICE_IDENTIFIER_LEN, 250 INQUIRY_VPD_DEVICE_IDENTIFIER_LEN,
@@ -254,7 +252,7 @@ pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev,
254 if (ret) 252 if (ret)
255 goto out; 253 goto out;
256 254
257 page_len = (buf[2] << 8) | buf[3]; 255 page_len = get_unaligned_be16(&buf[2]);
258 while (page_len > 0) { 256 while (page_len > 0) {
259 /* Grab a pointer to the Identification descriptor */ 257 /* Grab a pointer to the Identification descriptor */
260 page_83 = &buf[off]; 258 page_83 = &buf[off];
@@ -384,7 +382,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
384 spin_unlock_irq(sh->host_lock); 382 spin_unlock_irq(sh->host_lock);
385 /* 383 /*
386 * Claim exclusive struct block_device access to struct scsi_device 384 * Claim exclusive struct block_device access to struct scsi_device
387 * for TYPE_DISK using supplied udev_path 385 * for TYPE_DISK and TYPE_ZBC using supplied udev_path
388 */ 386 */
389 bd = blkdev_get_by_path(dev->udev_path, 387 bd = blkdev_get_by_path(dev->udev_path,
390 FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv); 388 FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
@@ -402,8 +400,9 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
402 return ret; 400 return ret;
403 } 401 }
404 402
405 pr_debug("CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%llu\n", 403 pr_debug("CORE_PSCSI[%d] - Added TYPE_%s for %d:%d:%d:%llu\n",
406 phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun); 404 phv->phv_host_id, sd->type == TYPE_DISK ? "DISK" : "ZBC",
405 sh->host_no, sd->channel, sd->id, sd->lun);
407 return 0; 406 return 0;
408} 407}
409 408
@@ -522,6 +521,7 @@ static int pscsi_configure_device(struct se_device *dev)
522 */ 521 */
523 switch (sd->type) { 522 switch (sd->type) {
524 case TYPE_DISK: 523 case TYPE_DISK:
524 case TYPE_ZBC:
525 ret = pscsi_create_type_disk(dev, sd); 525 ret = pscsi_create_type_disk(dev, sd);
526 break; 526 break;
527 default: 527 default:
@@ -566,6 +566,11 @@ static void pscsi_dev_call_rcu(struct rcu_head *p)
566 566
567static void pscsi_free_device(struct se_device *dev) 567static void pscsi_free_device(struct se_device *dev)
568{ 568{
569 call_rcu(&dev->rcu_head, pscsi_dev_call_rcu);
570}
571
572static void pscsi_destroy_device(struct se_device *dev)
573{
569 struct pscsi_dev_virt *pdv = PSCSI_DEV(dev); 574 struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
570 struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; 575 struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
571 struct scsi_device *sd = pdv->pdv_sd; 576 struct scsi_device *sd = pdv->pdv_sd;
@@ -573,9 +578,11 @@ static void pscsi_free_device(struct se_device *dev)
573 if (sd) { 578 if (sd) {
574 /* 579 /*
575 * Release exclusive pSCSI internal struct block_device claim for 580 * Release exclusive pSCSI internal struct block_device claim for
576 * struct scsi_device with TYPE_DISK from pscsi_create_type_disk() 581 * struct scsi_device with TYPE_DISK or TYPE_ZBC
582 * from pscsi_create_type_disk()
577 */ 583 */
578 if ((sd->type == TYPE_DISK) && pdv->pdv_bd) { 584 if ((sd->type == TYPE_DISK || sd->type == TYPE_ZBC) &&
585 pdv->pdv_bd) {
579 blkdev_put(pdv->pdv_bd, 586 blkdev_put(pdv->pdv_bd,
580 FMODE_WRITE|FMODE_READ|FMODE_EXCL); 587 FMODE_WRITE|FMODE_READ|FMODE_EXCL);
581 pdv->pdv_bd = NULL; 588 pdv->pdv_bd = NULL;
@@ -594,15 +601,13 @@ static void pscsi_free_device(struct se_device *dev)
594 601
595 pdv->pdv_sd = NULL; 602 pdv->pdv_sd = NULL;
596 } 603 }
597 call_rcu(&dev->rcu_head, pscsi_dev_call_rcu);
598} 604}
599 605
600static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg, 606static void pscsi_complete_cmd(struct se_cmd *cmd, u8 scsi_status,
601 unsigned char *sense_buffer) 607 unsigned char *req_sense)
602{ 608{
603 struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev); 609 struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
604 struct scsi_device *sd = pdv->pdv_sd; 610 struct scsi_device *sd = pdv->pdv_sd;
605 int result;
606 struct pscsi_plugin_task *pt = cmd->priv; 611 struct pscsi_plugin_task *pt = cmd->priv;
607 unsigned char *cdb; 612 unsigned char *cdb;
608 /* 613 /*
@@ -613,7 +618,6 @@ static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg,
613 return; 618 return;
614 619
615 cdb = &pt->pscsi_cdb[0]; 620 cdb = &pt->pscsi_cdb[0];
616 result = pt->pscsi_result;
617 /* 621 /*
618 * Hack to make sure that Write-Protect modepage is set if R/O mode is 622 * Hack to make sure that Write-Protect modepage is set if R/O mode is
619 * forced. 623 * forced.
@@ -622,7 +626,7 @@ static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg,
622 goto after_mode_sense; 626 goto after_mode_sense;
623 627
624 if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) && 628 if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
625 (status_byte(result) << 1) == SAM_STAT_GOOD) { 629 scsi_status == SAM_STAT_GOOD) {
626 bool read_only = target_lun_is_rdonly(cmd); 630 bool read_only = target_lun_is_rdonly(cmd);
627 631
628 if (read_only) { 632 if (read_only) {
@@ -657,40 +661,36 @@ after_mode_sense:
657 * storage engine. 661 * storage engine.
658 */ 662 */
659 if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) && 663 if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) &&
660 (status_byte(result) << 1) == SAM_STAT_GOOD) { 664 scsi_status == SAM_STAT_GOOD) {
661 unsigned char *buf; 665 unsigned char *buf;
662 u16 bdl; 666 u16 bdl;
663 u32 blocksize; 667 u32 blocksize;
664 668
665 buf = sg_virt(&sg[0]); 669 buf = sg_virt(&cmd->t_data_sg[0]);
666 if (!buf) { 670 if (!buf) {
667 pr_err("Unable to get buf for scatterlist\n"); 671 pr_err("Unable to get buf for scatterlist\n");
668 goto after_mode_select; 672 goto after_mode_select;
669 } 673 }
670 674
671 if (cdb[0] == MODE_SELECT) 675 if (cdb[0] == MODE_SELECT)
672 bdl = (buf[3]); 676 bdl = buf[3];
673 else 677 else
674 bdl = (buf[6] << 8) | (buf[7]); 678 bdl = get_unaligned_be16(&buf[6]);
675 679
676 if (!bdl) 680 if (!bdl)
677 goto after_mode_select; 681 goto after_mode_select;
678 682
679 if (cdb[0] == MODE_SELECT) 683 if (cdb[0] == MODE_SELECT)
680 blocksize = (buf[9] << 16) | (buf[10] << 8) | 684 blocksize = get_unaligned_be24(&buf[9]);
681 (buf[11]);
682 else 685 else
683 blocksize = (buf[13] << 16) | (buf[14] << 8) | 686 blocksize = get_unaligned_be24(&buf[13]);
684 (buf[15]);
685 687
686 sd->sector_size = blocksize; 688 sd->sector_size = blocksize;
687 } 689 }
688after_mode_select: 690after_mode_select:
689 691
690 if (sense_buffer && (status_byte(result) & CHECK_CONDITION)) { 692 if (scsi_status == SAM_STAT_CHECK_CONDITION)
691 memcpy(sense_buffer, pt->pscsi_sense, TRANSPORT_SENSE_BUFFER); 693 transport_copy_sense_to_cmd(cmd, req_sense);
692 cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
693 }
694} 694}
695 695
696enum { 696enum {
@@ -1002,7 +1002,8 @@ pscsi_execute_cmd(struct se_cmd *cmd)
1002 req->end_io_data = cmd; 1002 req->end_io_data = cmd;
1003 scsi_req(req)->cmd_len = scsi_command_size(pt->pscsi_cdb); 1003 scsi_req(req)->cmd_len = scsi_command_size(pt->pscsi_cdb);
1004 scsi_req(req)->cmd = &pt->pscsi_cdb[0]; 1004 scsi_req(req)->cmd = &pt->pscsi_cdb[0];
1005 if (pdv->pdv_sd->type == TYPE_DISK) 1005 if (pdv->pdv_sd->type == TYPE_DISK ||
1006 pdv->pdv_sd->type == TYPE_ZBC)
1006 req->timeout = PS_TIMEOUT_DISK; 1007 req->timeout = PS_TIMEOUT_DISK;
1007 else 1008 else
1008 req->timeout = PS_TIMEOUT_OTHER; 1009 req->timeout = PS_TIMEOUT_OTHER;
@@ -1047,30 +1048,29 @@ static void pscsi_req_done(struct request *req, blk_status_t status)
1047{ 1048{
1048 struct se_cmd *cmd = req->end_io_data; 1049 struct se_cmd *cmd = req->end_io_data;
1049 struct pscsi_plugin_task *pt = cmd->priv; 1050 struct pscsi_plugin_task *pt = cmd->priv;
1051 int result = scsi_req(req)->result;
1052 u8 scsi_status = status_byte(result) << 1;
1050 1053
1051 pt->pscsi_result = scsi_req(req)->result; 1054 if (scsi_status) {
1052 pt->pscsi_resid = scsi_req(req)->resid_len;
1053
1054 cmd->scsi_status = status_byte(pt->pscsi_result) << 1;
1055 if (cmd->scsi_status) {
1056 pr_debug("PSCSI Status Byte exception at cmd: %p CDB:" 1055 pr_debug("PSCSI Status Byte exception at cmd: %p CDB:"
1057 " 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0], 1056 " 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0],
1058 pt->pscsi_result); 1057 result);
1059 } 1058 }
1060 1059
1061 switch (host_byte(pt->pscsi_result)) { 1060 pscsi_complete_cmd(cmd, scsi_status, scsi_req(req)->sense);
1061
1062 switch (host_byte(result)) {
1062 case DID_OK: 1063 case DID_OK:
1063 target_complete_cmd(cmd, cmd->scsi_status); 1064 target_complete_cmd(cmd, scsi_status);
1064 break; 1065 break;
1065 default: 1066 default:
1066 pr_debug("PSCSI Host Byte exception at cmd: %p CDB:" 1067 pr_debug("PSCSI Host Byte exception at cmd: %p CDB:"
1067 " 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0], 1068 " 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0],
1068 pt->pscsi_result); 1069 result);
1069 target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION); 1070 target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
1070 break; 1071 break;
1071 } 1072 }
1072 1073
1073 memcpy(pt->pscsi_sense, scsi_req(req)->sense, TRANSPORT_SENSE_BUFFER);
1074 __blk_put_request(req->q, req); 1074 __blk_put_request(req->q, req);
1075 kfree(pt); 1075 kfree(pt);
1076} 1076}
@@ -1086,8 +1086,8 @@ static const struct target_backend_ops pscsi_ops = {
1086 .pmode_enable_hba = pscsi_pmode_enable_hba, 1086 .pmode_enable_hba = pscsi_pmode_enable_hba,
1087 .alloc_device = pscsi_alloc_device, 1087 .alloc_device = pscsi_alloc_device,
1088 .configure_device = pscsi_configure_device, 1088 .configure_device = pscsi_configure_device,
1089 .destroy_device = pscsi_destroy_device,
1089 .free_device = pscsi_free_device, 1090 .free_device = pscsi_free_device,
1090 .transport_complete = pscsi_transport_complete,
1091 .parse_cdb = pscsi_parse_cdb, 1091 .parse_cdb = pscsi_parse_cdb,
1092 .set_configfs_dev_params = pscsi_set_configfs_dev_params, 1092 .set_configfs_dev_params = pscsi_set_configfs_dev_params,
1093 .show_configfs_dev_params = pscsi_show_configfs_dev_params, 1093 .show_configfs_dev_params = pscsi_show_configfs_dev_params,
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
index 8a02fa47c7e8..b86fb0e1b783 100644
--- a/drivers/target/target_core_pscsi.h
+++ b/drivers/target/target_core_pscsi.h
@@ -23,10 +23,6 @@ struct scsi_device;
23struct Scsi_Host; 23struct Scsi_Host;
24 24
25struct pscsi_plugin_task { 25struct pscsi_plugin_task {
26 unsigned char pscsi_sense[TRANSPORT_SENSE_BUFFER];
27 int pscsi_direction;
28 int pscsi_result;
29 u32 pscsi_resid;
30 unsigned char pscsi_cdb[0]; 26 unsigned char pscsi_cdb[0];
31} ____cacheline_aligned; 27} ____cacheline_aligned;
32 28
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 20253d04103f..a6e8106abd6f 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -339,10 +339,14 @@ static void rd_dev_call_rcu(struct rcu_head *p)
339 339
340static void rd_free_device(struct se_device *dev) 340static void rd_free_device(struct se_device *dev)
341{ 341{
342 call_rcu(&dev->rcu_head, rd_dev_call_rcu);
343}
344
345static void rd_destroy_device(struct se_device *dev)
346{
342 struct rd_dev *rd_dev = RD_DEV(dev); 347 struct rd_dev *rd_dev = RD_DEV(dev);
343 348
344 rd_release_device_space(rd_dev); 349 rd_release_device_space(rd_dev);
345 call_rcu(&dev->rcu_head, rd_dev_call_rcu);
346} 350}
347 351
348static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page) 352static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
@@ -554,7 +558,7 @@ static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
554 struct rd_dev *rd_dev = RD_DEV(dev); 558 struct rd_dev *rd_dev = RD_DEV(dev);
555 char *orig, *ptr, *opts; 559 char *orig, *ptr, *opts;
556 substring_t args[MAX_OPT_ARGS]; 560 substring_t args[MAX_OPT_ARGS];
557 int ret = 0, arg, token; 561 int arg, token;
558 562
559 opts = kstrdup(page, GFP_KERNEL); 563 opts = kstrdup(page, GFP_KERNEL);
560 if (!opts) 564 if (!opts)
@@ -589,7 +593,7 @@ static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
589 } 593 }
590 594
591 kfree(orig); 595 kfree(orig);
592 return (!ret) ? count : ret; 596 return count;
593} 597}
594 598
595static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b) 599static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
@@ -651,6 +655,7 @@ static const struct target_backend_ops rd_mcp_ops = {
651 .detach_hba = rd_detach_hba, 655 .detach_hba = rd_detach_hba,
652 .alloc_device = rd_alloc_device, 656 .alloc_device = rd_alloc_device,
653 .configure_device = rd_configure_device, 657 .configure_device = rd_configure_device,
658 .destroy_device = rd_destroy_device,
654 .free_device = rd_free_device, 659 .free_device = rd_free_device,
655 .parse_cdb = rd_parse_cdb, 660 .parse_cdb = rd_parse_cdb,
656 .set_configfs_dev_params = rd_set_configfs_dev_params, 661 .set_configfs_dev_params = rd_set_configfs_dev_params,
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index dc9456e7dac9..750a04ed0e93 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -71,14 +71,8 @@ sbc_emulate_readcapacity(struct se_cmd *cmd)
71 else 71 else
72 blocks = (u32)blocks_long; 72 blocks = (u32)blocks_long;
73 73
74 buf[0] = (blocks >> 24) & 0xff; 74 put_unaligned_be32(blocks, &buf[0]);
75 buf[1] = (blocks >> 16) & 0xff; 75 put_unaligned_be32(dev->dev_attrib.block_size, &buf[4]);
76 buf[2] = (blocks >> 8) & 0xff;
77 buf[3] = blocks & 0xff;
78 buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff;
79 buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff;
80 buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff;
81 buf[7] = dev->dev_attrib.block_size & 0xff;
82 76
83 rbuf = transport_kmap_data_sg(cmd); 77 rbuf = transport_kmap_data_sg(cmd);
84 if (rbuf) { 78 if (rbuf) {
@@ -102,18 +96,8 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
102 unsigned long long blocks = dev->transport->get_blocks(dev); 96 unsigned long long blocks = dev->transport->get_blocks(dev);
103 97
104 memset(buf, 0, sizeof(buf)); 98 memset(buf, 0, sizeof(buf));
105 buf[0] = (blocks >> 56) & 0xff; 99 put_unaligned_be64(blocks, &buf[0]);
106 buf[1] = (blocks >> 48) & 0xff; 100 put_unaligned_be32(dev->dev_attrib.block_size, &buf[8]);
107 buf[2] = (blocks >> 40) & 0xff;
108 buf[3] = (blocks >> 32) & 0xff;
109 buf[4] = (blocks >> 24) & 0xff;
110 buf[5] = (blocks >> 16) & 0xff;
111 buf[6] = (blocks >> 8) & 0xff;
112 buf[7] = blocks & 0xff;
113 buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff;
114 buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
115 buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
116 buf[11] = dev->dev_attrib.block_size & 0xff;
117 /* 101 /*
118 * Set P_TYPE and PROT_EN bits for DIF support 102 * Set P_TYPE and PROT_EN bits for DIF support
119 */ 103 */
@@ -134,8 +118,8 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
134 118
135 if (dev->transport->get_alignment_offset_lbas) { 119 if (dev->transport->get_alignment_offset_lbas) {
136 u16 lalba = dev->transport->get_alignment_offset_lbas(dev); 120 u16 lalba = dev->transport->get_alignment_offset_lbas(dev);
137 buf[14] = (lalba >> 8) & 0x3f; 121
138 buf[15] = lalba & 0xff; 122 put_unaligned_be16(lalba, &buf[14]);
139 } 123 }
140 124
141 /* 125 /*
@@ -262,18 +246,17 @@ static inline u32 transport_get_sectors_6(unsigned char *cdb)
262 246
263static inline u32 transport_get_sectors_10(unsigned char *cdb) 247static inline u32 transport_get_sectors_10(unsigned char *cdb)
264{ 248{
265 return (u32)(cdb[7] << 8) + cdb[8]; 249 return get_unaligned_be16(&cdb[7]);
266} 250}
267 251
268static inline u32 transport_get_sectors_12(unsigned char *cdb) 252static inline u32 transport_get_sectors_12(unsigned char *cdb)
269{ 253{
270 return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9]; 254 return get_unaligned_be32(&cdb[6]);
271} 255}
272 256
273static inline u32 transport_get_sectors_16(unsigned char *cdb) 257static inline u32 transport_get_sectors_16(unsigned char *cdb)
274{ 258{
275 return (u32)(cdb[10] << 24) + (cdb[11] << 16) + 259 return get_unaligned_be32(&cdb[10]);
276 (cdb[12] << 8) + cdb[13];
277} 260}
278 261
279/* 262/*
@@ -281,29 +264,23 @@ static inline u32 transport_get_sectors_16(unsigned char *cdb)
281 */ 264 */
282static inline u32 transport_get_sectors_32(unsigned char *cdb) 265static inline u32 transport_get_sectors_32(unsigned char *cdb)
283{ 266{
284 return (u32)(cdb[28] << 24) + (cdb[29] << 16) + 267 return get_unaligned_be32(&cdb[28]);
285 (cdb[30] << 8) + cdb[31];
286 268
287} 269}
288 270
289static inline u32 transport_lba_21(unsigned char *cdb) 271static inline u32 transport_lba_21(unsigned char *cdb)
290{ 272{
291 return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3]; 273 return get_unaligned_be24(&cdb[1]) & 0x1fffff;
292} 274}
293 275
294static inline u32 transport_lba_32(unsigned char *cdb) 276static inline u32 transport_lba_32(unsigned char *cdb)
295{ 277{
296 return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; 278 return get_unaligned_be32(&cdb[2]);
297} 279}
298 280
299static inline unsigned long long transport_lba_64(unsigned char *cdb) 281static inline unsigned long long transport_lba_64(unsigned char *cdb)
300{ 282{
301 unsigned int __v1, __v2; 283 return get_unaligned_be64(&cdb[2]);
302
303 __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
304 __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
305
306 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
307} 284}
308 285
309/* 286/*
@@ -311,12 +288,7 @@ static inline unsigned long long transport_lba_64(unsigned char *cdb)
311 */ 288 */
312static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) 289static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
313{ 290{
314 unsigned int __v1, __v2; 291 return get_unaligned_be64(&cdb[12]);
315
316 __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
317 __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
318
319 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
320} 292}
321 293
322static sense_reason_t 294static sense_reason_t
@@ -1005,6 +977,12 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
1005 break; 977 break;
1006 } 978 }
1007 case COMPARE_AND_WRITE: 979 case COMPARE_AND_WRITE:
980 if (!dev->dev_attrib.emulate_caw) {
981 pr_err_ratelimited("se_device %s/%s (vpd_unit_serial %s) reject"
982 " COMPARE_AND_WRITE\n", dev->se_hba->backend->ops->name,
983 dev->dev_group.cg_item.ci_name, dev->t10_wwn.unit_serial);
984 return TCM_UNSUPPORTED_SCSI_OPCODE;
985 }
1008 sectors = cdb[13]; 986 sectors = cdb[13];
1009 /* 987 /*
1010 * Currently enforce COMPARE_AND_WRITE for a single sector 988 * Currently enforce COMPARE_AND_WRITE for a single sector
@@ -1045,8 +1023,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
1045 cmd->t_task_cdb[1] & 0x1f); 1023 cmd->t_task_cdb[1] & 0x1f);
1046 return TCM_INVALID_CDB_FIELD; 1024 return TCM_INVALID_CDB_FIELD;
1047 } 1025 }
1048 size = (cdb[10] << 24) | (cdb[11] << 16) | 1026 size = get_unaligned_be32(&cdb[10]);
1049 (cdb[12] << 8) | cdb[13];
1050 break; 1027 break;
1051 case SYNCHRONIZE_CACHE: 1028 case SYNCHRONIZE_CACHE:
1052 case SYNCHRONIZE_CACHE_16: 1029 case SYNCHRONIZE_CACHE_16:
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 2a91ed3ef380..cb0461a10808 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -287,8 +287,8 @@ check_t10_vend_desc:
287 /* Skip over Obsolete field in RTPI payload 287 /* Skip over Obsolete field in RTPI payload
288 * in Table 472 */ 288 * in Table 472 */
289 off += 2; 289 off += 2;
290 buf[off++] = ((lun->lun_rtpi >> 8) & 0xff); 290 put_unaligned_be16(lun->lun_rtpi, &buf[off]);
291 buf[off++] = (lun->lun_rtpi & 0xff); 291 off += 2;
292 len += 8; /* Header size + Designation descriptor */ 292 len += 8; /* Header size + Designation descriptor */
293 /* 293 /*
294 * Target port group identifier, see spc4r17 294 * Target port group identifier, see spc4r17
@@ -316,8 +316,8 @@ check_t10_vend_desc:
316 off++; /* Skip over Reserved */ 316 off++; /* Skip over Reserved */
317 buf[off++] = 4; /* DESIGNATOR LENGTH */ 317 buf[off++] = 4; /* DESIGNATOR LENGTH */
318 off += 2; /* Skip over Reserved Field */ 318 off += 2; /* Skip over Reserved Field */
319 buf[off++] = ((tg_pt_gp_id >> 8) & 0xff); 319 put_unaligned_be16(tg_pt_gp_id, &buf[off]);
320 buf[off++] = (tg_pt_gp_id & 0xff); 320 off += 2;
321 len += 8; /* Header size + Designation descriptor */ 321 len += 8; /* Header size + Designation descriptor */
322 /* 322 /*
323 * Logical Unit Group identifier, see spc4r17 323 * Logical Unit Group identifier, see spc4r17
@@ -343,8 +343,8 @@ check_lu_gp:
343 off++; /* Skip over Reserved */ 343 off++; /* Skip over Reserved */
344 buf[off++] = 4; /* DESIGNATOR LENGTH */ 344 buf[off++] = 4; /* DESIGNATOR LENGTH */
345 off += 2; /* Skip over Reserved Field */ 345 off += 2; /* Skip over Reserved Field */
346 buf[off++] = ((lu_gp_id >> 8) & 0xff); 346 put_unaligned_be16(lu_gp_id, &buf[off]);
347 buf[off++] = (lu_gp_id & 0xff); 347 off += 2;
348 len += 8; /* Header size + Designation descriptor */ 348 len += 8; /* Header size + Designation descriptor */
349 /* 349 /*
350 * SCSI name string designator, see spc4r17 350 * SCSI name string designator, see spc4r17
@@ -431,8 +431,7 @@ check_scsi_name:
431 /* Header size + Designation descriptor */ 431 /* Header size + Designation descriptor */
432 len += (scsi_target_len + 4); 432 len += (scsi_target_len + 4);
433 } 433 }
434 buf[2] = ((len >> 8) & 0xff); 434 put_unaligned_be16(len, &buf[2]); /* Page Length for VPD 0x83 */
435 buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
436 return 0; 435 return 0;
437} 436}
438EXPORT_SYMBOL(spc_emulate_evpd_83); 437EXPORT_SYMBOL(spc_emulate_evpd_83);
@@ -1288,7 +1287,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
1288 cmd->execute_cmd = spc_emulate_modeselect; 1287 cmd->execute_cmd = spc_emulate_modeselect;
1289 break; 1288 break;
1290 case MODE_SELECT_10: 1289 case MODE_SELECT_10:
1291 *size = (cdb[7] << 8) + cdb[8]; 1290 *size = get_unaligned_be16(&cdb[7]);
1292 cmd->execute_cmd = spc_emulate_modeselect; 1291 cmd->execute_cmd = spc_emulate_modeselect;
1293 break; 1292 break;
1294 case MODE_SENSE: 1293 case MODE_SENSE:
@@ -1296,25 +1295,25 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
1296 cmd->execute_cmd = spc_emulate_modesense; 1295 cmd->execute_cmd = spc_emulate_modesense;
1297 break; 1296 break;
1298 case MODE_SENSE_10: 1297 case MODE_SENSE_10:
1299 *size = (cdb[7] << 8) + cdb[8]; 1298 *size = get_unaligned_be16(&cdb[7]);
1300 cmd->execute_cmd = spc_emulate_modesense; 1299 cmd->execute_cmd = spc_emulate_modesense;
1301 break; 1300 break;
1302 case LOG_SELECT: 1301 case LOG_SELECT:
1303 case LOG_SENSE: 1302 case LOG_SENSE:
1304 *size = (cdb[7] << 8) + cdb[8]; 1303 *size = get_unaligned_be16(&cdb[7]);
1305 break; 1304 break;
1306 case PERSISTENT_RESERVE_IN: 1305 case PERSISTENT_RESERVE_IN:
1307 *size = (cdb[7] << 8) + cdb[8]; 1306 *size = get_unaligned_be16(&cdb[7]);
1308 cmd->execute_cmd = target_scsi3_emulate_pr_in; 1307 cmd->execute_cmd = target_scsi3_emulate_pr_in;
1309 break; 1308 break;
1310 case PERSISTENT_RESERVE_OUT: 1309 case PERSISTENT_RESERVE_OUT:
1311 *size = (cdb[7] << 8) + cdb[8]; 1310 *size = get_unaligned_be32(&cdb[5]);
1312 cmd->execute_cmd = target_scsi3_emulate_pr_out; 1311 cmd->execute_cmd = target_scsi3_emulate_pr_out;
1313 break; 1312 break;
1314 case RELEASE: 1313 case RELEASE:
1315 case RELEASE_10: 1314 case RELEASE_10:
1316 if (cdb[0] == RELEASE_10) 1315 if (cdb[0] == RELEASE_10)
1317 *size = (cdb[7] << 8) | cdb[8]; 1316 *size = get_unaligned_be16(&cdb[7]);
1318 else 1317 else
1319 *size = cmd->data_length; 1318 *size = cmd->data_length;
1320 1319
@@ -1327,7 +1326,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
1327 * Assume the passthrough or $FABRIC_MOD will tell us about it. 1326 * Assume the passthrough or $FABRIC_MOD will tell us about it.
1328 */ 1327 */
1329 if (cdb[0] == RESERVE_10) 1328 if (cdb[0] == RESERVE_10)
1330 *size = (cdb[7] << 8) | cdb[8]; 1329 *size = get_unaligned_be16(&cdb[7]);
1331 else 1330 else
1332 *size = cmd->data_length; 1331 *size = cmd->data_length;
1333 1332
@@ -1338,7 +1337,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
1338 cmd->execute_cmd = spc_emulate_request_sense; 1337 cmd->execute_cmd = spc_emulate_request_sense;
1339 break; 1338 break;
1340 case INQUIRY: 1339 case INQUIRY:
1341 *size = (cdb[3] << 8) + cdb[4]; 1340 *size = get_unaligned_be16(&cdb[3]);
1342 1341
1343 /* 1342 /*
1344 * Do implicit HEAD_OF_QUEUE processing for INQUIRY. 1343 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
@@ -1349,7 +1348,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
1349 break; 1348 break;
1350 case SECURITY_PROTOCOL_IN: 1349 case SECURITY_PROTOCOL_IN:
1351 case SECURITY_PROTOCOL_OUT: 1350 case SECURITY_PROTOCOL_OUT:
1352 *size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; 1351 *size = get_unaligned_be32(&cdb[6]);
1353 break; 1352 break;
1354 case EXTENDED_COPY: 1353 case EXTENDED_COPY:
1355 *size = get_unaligned_be32(&cdb[10]); 1354 *size = get_unaligned_be32(&cdb[10]);
@@ -1361,19 +1360,18 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
1361 break; 1360 break;
1362 case READ_ATTRIBUTE: 1361 case READ_ATTRIBUTE:
1363 case WRITE_ATTRIBUTE: 1362 case WRITE_ATTRIBUTE:
1364 *size = (cdb[10] << 24) | (cdb[11] << 16) | 1363 *size = get_unaligned_be32(&cdb[10]);
1365 (cdb[12] << 8) | cdb[13];
1366 break; 1364 break;
1367 case RECEIVE_DIAGNOSTIC: 1365 case RECEIVE_DIAGNOSTIC:
1368 case SEND_DIAGNOSTIC: 1366 case SEND_DIAGNOSTIC:
1369 *size = (cdb[3] << 8) | cdb[4]; 1367 *size = get_unaligned_be16(&cdb[3]);
1370 break; 1368 break;
1371 case WRITE_BUFFER: 1369 case WRITE_BUFFER:
1372 *size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; 1370 *size = get_unaligned_be24(&cdb[6]);
1373 break; 1371 break;
1374 case REPORT_LUNS: 1372 case REPORT_LUNS:
1375 cmd->execute_cmd = spc_emulate_report_luns; 1373 cmd->execute_cmd = spc_emulate_report_luns;
1376 *size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; 1374 *size = get_unaligned_be32(&cdb[6]);
1377 /* 1375 /*
1378 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS 1376 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
1379 * See spc4r17 section 5.3 1377 * See spc4r17 section 5.3
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 13f47bf4d16b..e22847bd79b9 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -355,20 +355,10 @@ static void core_tmr_drain_state_list(
355 cmd = list_entry(drain_task_list.next, struct se_cmd, state_list); 355 cmd = list_entry(drain_task_list.next, struct se_cmd, state_list);
356 list_del_init(&cmd->state_list); 356 list_del_init(&cmd->state_list);
357 357
358 pr_debug("LUN_RESET: %s cmd: %p" 358 target_show_cmd("LUN_RESET: ", cmd);
359 " ITT/CmdSN: 0x%08llx/0x%08x, i_state: %d, t_state: %d" 359 pr_debug("LUN_RESET: ITT[0x%08llx] - %s pr_res_key: 0x%016Lx\n",
360 "cdb: 0x%02x\n", 360 cmd->tag, (preempt_and_abort_list) ? "preempt" : "",
361 (preempt_and_abort_list) ? "Preempt" : "", cmd, 361 cmd->pr_res_key);
362 cmd->tag, 0,
363 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
364 cmd->t_task_cdb[0]);
365 pr_debug("LUN_RESET: ITT[0x%08llx] - pr_res_key: 0x%016Lx"
366 " -- CMD_T_ACTIVE: %d"
367 " CMD_T_STOP: %d CMD_T_SENT: %d\n",
368 cmd->tag, cmd->pr_res_key,
369 (cmd->transport_state & CMD_T_ACTIVE) != 0,
370 (cmd->transport_state & CMD_T_STOP) != 0,
371 (cmd->transport_state & CMD_T_SENT) != 0);
372 362
373 /* 363 /*
374 * If the command may be queued onto a workqueue cancel it now. 364 * If the command may be queued onto a workqueue cancel it now.
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 310d9e55c6eb..36913734c6bc 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -576,7 +576,6 @@ struct se_lun *core_tpg_alloc_lun(
576 return ERR_PTR(-ENOMEM); 576 return ERR_PTR(-ENOMEM);
577 } 577 }
578 lun->unpacked_lun = unpacked_lun; 578 lun->unpacked_lun = unpacked_lun;
579 lun->lun_link_magic = SE_LUN_LINK_MAGIC;
580 atomic_set(&lun->lun_acl_count, 0); 579 atomic_set(&lun->lun_acl_count, 0);
581 init_completion(&lun->lun_ref_comp); 580 init_completion(&lun->lun_ref_comp);
582 init_completion(&lun->lun_shutdown_comp); 581 init_completion(&lun->lun_shutdown_comp);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 1bdc10651bcd..97fed9a298bd 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -704,23 +704,43 @@ static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
704 return cmd->sense_buffer; 704 return cmd->sense_buffer;
705} 705}
706 706
707void transport_copy_sense_to_cmd(struct se_cmd *cmd, unsigned char *sense)
708{
709 unsigned char *cmd_sense_buf;
710 unsigned long flags;
711
712 spin_lock_irqsave(&cmd->t_state_lock, flags);
713 cmd_sense_buf = transport_get_sense_buffer(cmd);
714 if (!cmd_sense_buf) {
715 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
716 return;
717 }
718
719 cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
720 memcpy(cmd_sense_buf, sense, cmd->scsi_sense_length);
721 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
722}
723EXPORT_SYMBOL(transport_copy_sense_to_cmd);
724
707void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) 725void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
708{ 726{
709 struct se_device *dev = cmd->se_dev; 727 struct se_device *dev = cmd->se_dev;
710 int success = scsi_status == GOOD; 728 int success;
711 unsigned long flags; 729 unsigned long flags;
712 730
713 cmd->scsi_status = scsi_status; 731 cmd->scsi_status = scsi_status;
714 732
715
716 spin_lock_irqsave(&cmd->t_state_lock, flags); 733 spin_lock_irqsave(&cmd->t_state_lock, flags);
717 734 switch (cmd->scsi_status) {
718 if (dev && dev->transport->transport_complete) { 735 case SAM_STAT_CHECK_CONDITION:
719 dev->transport->transport_complete(cmd,
720 cmd->t_data_sg,
721 transport_get_sense_buffer(cmd));
722 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) 736 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
723 success = 1; 737 success = 1;
738 else
739 success = 0;
740 break;
741 default:
742 success = 1;
743 break;
724 } 744 }
725 745
726 /* 746 /*
@@ -730,6 +750,15 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
730 if (cmd->transport_state & CMD_T_ABORTED || 750 if (cmd->transport_state & CMD_T_ABORTED ||
731 cmd->transport_state & CMD_T_STOP) { 751 cmd->transport_state & CMD_T_STOP) {
732 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 752 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
753 /*
754 * If COMPARE_AND_WRITE was stopped by __transport_wait_for_tasks(),
755 * release se_device->caw_sem obtained by sbc_compare_and_write()
756 * since target_complete_ok_work() or target_complete_failure_work()
757 * won't be called to invoke the normal CAW completion callbacks.
758 */
759 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
760 up(&dev->caw_sem);
761 }
733 complete_all(&cmd->t_transport_stop_comp); 762 complete_all(&cmd->t_transport_stop_comp);
734 return; 763 return;
735 } else if (!success) { 764 } else if (!success) {
@@ -1239,6 +1268,7 @@ void transport_init_se_cmd(
1239 init_completion(&cmd->t_transport_stop_comp); 1268 init_completion(&cmd->t_transport_stop_comp);
1240 init_completion(&cmd->cmd_wait_comp); 1269 init_completion(&cmd->cmd_wait_comp);
1241 spin_lock_init(&cmd->t_state_lock); 1270 spin_lock_init(&cmd->t_state_lock);
1271 INIT_WORK(&cmd->work, NULL);
1242 kref_init(&cmd->cmd_kref); 1272 kref_init(&cmd->cmd_kref);
1243 1273
1244 cmd->se_tfo = tfo; 1274 cmd->se_tfo = tfo;
@@ -1590,9 +1620,33 @@ static void target_complete_tmr_failure(struct work_struct *work)
1590 se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST; 1620 se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
1591 se_cmd->se_tfo->queue_tm_rsp(se_cmd); 1621 se_cmd->se_tfo->queue_tm_rsp(se_cmd);
1592 1622
1623 transport_lun_remove_cmd(se_cmd);
1593 transport_cmd_check_stop_to_fabric(se_cmd); 1624 transport_cmd_check_stop_to_fabric(se_cmd);
1594} 1625}
1595 1626
1627static bool target_lookup_lun_from_tag(struct se_session *se_sess, u64 tag,
1628 u64 *unpacked_lun)
1629{
1630 struct se_cmd *se_cmd;
1631 unsigned long flags;
1632 bool ret = false;
1633
1634 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
1635 list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
1636 if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
1637 continue;
1638
1639 if (se_cmd->tag == tag) {
1640 *unpacked_lun = se_cmd->orig_fe_lun;
1641 ret = true;
1642 break;
1643 }
1644 }
1645 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
1646
1647 return ret;
1648}
1649
1596/** 1650/**
1597 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd 1651 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
1598 * for TMR CDBs 1652 * for TMR CDBs
@@ -1640,19 +1694,31 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
1640 core_tmr_release_req(se_cmd->se_tmr_req); 1694 core_tmr_release_req(se_cmd->se_tmr_req);
1641 return ret; 1695 return ret;
1642 } 1696 }
1697 /*
1698 * If this is ABORT_TASK with no explicit fabric provided LUN,
1699 * go ahead and search active session tags for a match to figure
1700 * out unpacked_lun for the original se_cmd.
1701 */
1702 if (tm_type == TMR_ABORT_TASK && (flags & TARGET_SCF_LOOKUP_LUN_FROM_TAG)) {
1703 if (!target_lookup_lun_from_tag(se_sess, tag, &unpacked_lun))
1704 goto failure;
1705 }
1643 1706
1644 ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun); 1707 ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
1645 if (ret) { 1708 if (ret)
1646 /* 1709 goto failure;
1647 * For callback during failure handling, push this work off 1710
1648 * to process context with TMR_LUN_DOES_NOT_EXIST status.
1649 */
1650 INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
1651 schedule_work(&se_cmd->work);
1652 return 0;
1653 }
1654 transport_generic_handle_tmr(se_cmd); 1711 transport_generic_handle_tmr(se_cmd);
1655 return 0; 1712 return 0;
1713
1714 /*
1715 * For callback during failure handling, push this work off
1716 * to process context with TMR_LUN_DOES_NOT_EXIST status.
1717 */
1718failure:
1719 INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
1720 schedule_work(&se_cmd->work);
1721 return 0;
1656} 1722}
1657EXPORT_SYMBOL(target_submit_tmr); 1723EXPORT_SYMBOL(target_submit_tmr);
1658 1724
@@ -1667,15 +1733,9 @@ void transport_generic_request_failure(struct se_cmd *cmd,
1667 if (transport_check_aborted_status(cmd, 1)) 1733 if (transport_check_aborted_status(cmd, 1))
1668 return; 1734 return;
1669 1735
1670 pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx" 1736 pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
1671 " CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]); 1737 sense_reason);
1672 pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n", 1738 target_show_cmd("-----[ ", cmd);
1673 cmd->se_tfo->get_cmd_state(cmd),
1674 cmd->t_state, sense_reason);
1675 pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
1676 (cmd->transport_state & CMD_T_ACTIVE) != 0,
1677 (cmd->transport_state & CMD_T_STOP) != 0,
1678 (cmd->transport_state & CMD_T_SENT) != 0);
1679 1739
1680 /* 1740 /*
1681 * For SAM Task Attribute emulation for failed struct se_cmd 1741 * For SAM Task Attribute emulation for failed struct se_cmd
@@ -2668,6 +2728,108 @@ int target_put_sess_cmd(struct se_cmd *se_cmd)
2668} 2728}
2669EXPORT_SYMBOL(target_put_sess_cmd); 2729EXPORT_SYMBOL(target_put_sess_cmd);
2670 2730
2731static const char *data_dir_name(enum dma_data_direction d)
2732{
2733 switch (d) {
2734 case DMA_BIDIRECTIONAL: return "BIDI";
2735 case DMA_TO_DEVICE: return "WRITE";
2736 case DMA_FROM_DEVICE: return "READ";
2737 case DMA_NONE: return "NONE";
2738 }
2739
2740 return "(?)";
2741}
2742
2743static const char *cmd_state_name(enum transport_state_table t)
2744{
2745 switch (t) {
2746 case TRANSPORT_NO_STATE: return "NO_STATE";
2747 case TRANSPORT_NEW_CMD: return "NEW_CMD";
2748 case TRANSPORT_WRITE_PENDING: return "WRITE_PENDING";
2749 case TRANSPORT_PROCESSING: return "PROCESSING";
2750 case TRANSPORT_COMPLETE: return "COMPLETE";
2751 case TRANSPORT_ISTATE_PROCESSING:
2752 return "ISTATE_PROCESSING";
2753 case TRANSPORT_COMPLETE_QF_WP: return "COMPLETE_QF_WP";
2754 case TRANSPORT_COMPLETE_QF_OK: return "COMPLETE_QF_OK";
2755 case TRANSPORT_COMPLETE_QF_ERR: return "COMPLETE_QF_ERR";
2756 }
2757
2758 return "(?)";
2759}
2760
2761static void target_append_str(char **str, const char *txt)
2762{
2763 char *prev = *str;
2764
2765 *str = *str ? kasprintf(GFP_ATOMIC, "%s,%s", *str, txt) :
2766 kstrdup(txt, GFP_ATOMIC);
2767 kfree(prev);
2768}
2769
2770/*
2771 * Convert a transport state bitmask into a string. The caller is
2772 * responsible for freeing the returned pointer.
2773 */
2774static char *target_ts_to_str(u32 ts)
2775{
2776 char *str = NULL;
2777
2778 if (ts & CMD_T_ABORTED)
2779 target_append_str(&str, "aborted");
2780 if (ts & CMD_T_ACTIVE)
2781 target_append_str(&str, "active");
2782 if (ts & CMD_T_COMPLETE)
2783 target_append_str(&str, "complete");
2784 if (ts & CMD_T_SENT)
2785 target_append_str(&str, "sent");
2786 if (ts & CMD_T_STOP)
2787 target_append_str(&str, "stop");
2788 if (ts & CMD_T_FABRIC_STOP)
2789 target_append_str(&str, "fabric_stop");
2790
2791 return str;
2792}
2793
2794static const char *target_tmf_name(enum tcm_tmreq_table tmf)
2795{
2796 switch (tmf) {
2797 case TMR_ABORT_TASK: return "ABORT_TASK";
2798 case TMR_ABORT_TASK_SET: return "ABORT_TASK_SET";
2799 case TMR_CLEAR_ACA: return "CLEAR_ACA";
2800 case TMR_CLEAR_TASK_SET: return "CLEAR_TASK_SET";
2801 case TMR_LUN_RESET: return "LUN_RESET";
2802 case TMR_TARGET_WARM_RESET: return "TARGET_WARM_RESET";
2803 case TMR_TARGET_COLD_RESET: return "TARGET_COLD_RESET";
2804 case TMR_UNKNOWN: break;
2805 }
2806 return "(?)";
2807}
2808
2809void target_show_cmd(const char *pfx, struct se_cmd *cmd)
2810{
2811 char *ts_str = target_ts_to_str(cmd->transport_state);
2812 const u8 *cdb = cmd->t_task_cdb;
2813 struct se_tmr_req *tmf = cmd->se_tmr_req;
2814
2815 if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
2816 pr_debug("%scmd %#02x:%#02x with tag %#llx dir %s i_state %d t_state %s len %d refcnt %d transport_state %s\n",
2817 pfx, cdb[0], cdb[1], cmd->tag,
2818 data_dir_name(cmd->data_direction),
2819 cmd->se_tfo->get_cmd_state(cmd),
2820 cmd_state_name(cmd->t_state), cmd->data_length,
2821 kref_read(&cmd->cmd_kref), ts_str);
2822 } else {
2823 pr_debug("%stmf %s with tag %#llx ref_task_tag %#llx i_state %d t_state %s refcnt %d transport_state %s\n",
2824 pfx, target_tmf_name(tmf->function), cmd->tag,
2825 tmf->ref_task_tag, cmd->se_tfo->get_cmd_state(cmd),
2826 cmd_state_name(cmd->t_state),
2827 kref_read(&cmd->cmd_kref), ts_str);
2828 }
2829 kfree(ts_str);
2830}
2831EXPORT_SYMBOL(target_show_cmd);
2832
2671/* target_sess_cmd_list_set_waiting - Flag all commands in 2833/* target_sess_cmd_list_set_waiting - Flag all commands in
2672 * sess_cmd_list to complete cmd_wait_comp. Set 2834 * sess_cmd_list to complete cmd_wait_comp. Set
2673 * sess_tearing_down so no more commands are queued. 2835 * sess_tearing_down so no more commands are queued.
@@ -2812,13 +2974,13 @@ __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
2812 2974
2813 cmd->transport_state |= CMD_T_STOP; 2975 cmd->transport_state |= CMD_T_STOP;
2814 2976
2815 pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d," 2977 target_show_cmd("wait_for_tasks: Stopping ", cmd);
2816 " t_state: %d, CMD_T_STOP\n", cmd, cmd->tag,
2817 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
2818 2978
2819 spin_unlock_irqrestore(&cmd->t_state_lock, *flags); 2979 spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
2820 2980
2821 wait_for_completion(&cmd->t_transport_stop_comp); 2981 while (!wait_for_completion_timeout(&cmd->t_transport_stop_comp,
2982 180 * HZ))
2983 target_show_cmd("wait for tasks: ", cmd);
2822 2984
2823 spin_lock_irqsave(&cmd->t_state_lock, *flags); 2985 spin_lock_irqsave(&cmd->t_state_lock, *flags);
2824 cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); 2986 cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
@@ -3201,6 +3363,7 @@ static void target_tmr_work(struct work_struct *work)
3201 cmd->se_tfo->queue_tm_rsp(cmd); 3363 cmd->se_tfo->queue_tm_rsp(cmd);
3202 3364
3203check_stop: 3365check_stop:
3366 transport_lun_remove_cmd(cmd);
3204 transport_cmd_check_stop_to_fabric(cmd); 3367 transport_cmd_check_stop_to_fabric(cmd);
3205} 3368}
3206 3369
@@ -3223,6 +3386,7 @@ int transport_generic_handle_tmr(
3223 pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d" 3386 pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d"
3224 "ref_tag: %llu tag: %llu\n", cmd->se_tmr_req->function, 3387 "ref_tag: %llu tag: %llu\n", cmd->se_tmr_req->function,
3225 cmd->se_tmr_req->ref_task_tag, cmd->tag); 3388 cmd->se_tmr_req->ref_task_tag, cmd->tag);
3389 transport_lun_remove_cmd(cmd);
3226 transport_cmd_check_stop_to_fabric(cmd); 3390 transport_cmd_check_stop_to_fabric(cmd);
3227 return 0; 3391 return 0;
3228 } 3392 }
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index beb5f098f32d..80ee130f8253 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -87,6 +87,8 @@
87/* Default maximum of the global data blocks(512K * PAGE_SIZE) */ 87/* Default maximum of the global data blocks(512K * PAGE_SIZE) */
88#define TCMU_GLOBAL_MAX_BLOCKS (512 * 1024) 88#define TCMU_GLOBAL_MAX_BLOCKS (512 * 1024)
89 89
90static u8 tcmu_kern_cmd_reply_supported;
91
90static struct device *tcmu_root_device; 92static struct device *tcmu_root_device;
91 93
92struct tcmu_hba { 94struct tcmu_hba {
@@ -95,6 +97,13 @@ struct tcmu_hba {
95 97
96#define TCMU_CONFIG_LEN 256 98#define TCMU_CONFIG_LEN 256
97 99
/*
 * Tracks the single netlink command currently outstanding to the
 * userspace daemon for a tcmu device.  The genl reply handler records
 * the daemon's result in @status and signals @complete.
 */
struct tcmu_nl_cmd {
	/* wake up thread waiting for reply */
	struct completion complete;
	int cmd;	/* TCMU_CMD_* in flight, or TCMU_CMD_UNSPEC when idle */
	int status;	/* result reported by the daemon's *_DONE reply */
};
106
98struct tcmu_dev { 107struct tcmu_dev {
99 struct list_head node; 108 struct list_head node;
100 struct kref kref; 109 struct kref kref;
@@ -135,6 +144,11 @@ struct tcmu_dev {
135 struct timer_list timeout; 144 struct timer_list timeout;
136 unsigned int cmd_time_out; 145 unsigned int cmd_time_out;
137 146
147 spinlock_t nl_cmd_lock;
148 struct tcmu_nl_cmd curr_nl_cmd;
149 /* wake up threads waiting on curr_nl_cmd */
150 wait_queue_head_t nl_cmd_wq;
151
138 char dev_config[TCMU_CONFIG_LEN]; 152 char dev_config[TCMU_CONFIG_LEN];
139}; 153};
140 154
@@ -178,16 +192,128 @@ static const struct genl_multicast_group tcmu_mcgrps[] = {
178 [TCMU_MCGRP_CONFIG] = { .name = "config", }, 192 [TCMU_MCGRP_CONFIG] = { .name = "config", },
179}; 193};
180 194
/* Validation policy for netlink attributes received from userspace. */
static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = {
	[TCMU_ATTR_DEVICE]	= { .type = NLA_STRING },
	[TCMU_ATTR_MINOR]	= { .type = NLA_U32 },
	[TCMU_ATTR_CMD_STATUS]	= { .type = NLA_S32 },
	[TCMU_ATTR_DEVICE_ID]	= { .type = NLA_U32 },
	[TCMU_ATTR_SUPP_KERN_CMD_REPLY] = { .type = NLA_U8 },
};
202
203static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd)
204{
205 struct se_device *dev;
206 struct tcmu_dev *udev;
207 struct tcmu_nl_cmd *nl_cmd;
208 int dev_id, rc, ret = 0;
209 bool is_removed = (completed_cmd == TCMU_CMD_REMOVED_DEVICE);
210
211 if (!info->attrs[TCMU_ATTR_CMD_STATUS] ||
212 !info->attrs[TCMU_ATTR_DEVICE_ID]) {
213 printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n");
214 return -EINVAL;
215 }
216
217 dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]);
218 rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]);
219
220 dev = target_find_device(dev_id, !is_removed);
221 if (!dev) {
222 printk(KERN_ERR "tcmu nl cmd %u/%u completion could not find device with dev id %u.\n",
223 completed_cmd, rc, dev_id);
224 return -ENODEV;
225 }
226 udev = TCMU_DEV(dev);
227
228 spin_lock(&udev->nl_cmd_lock);
229 nl_cmd = &udev->curr_nl_cmd;
230
231 pr_debug("genl cmd done got id %d curr %d done %d rc %d\n", dev_id,
232 nl_cmd->cmd, completed_cmd, rc);
233
234 if (nl_cmd->cmd != completed_cmd) {
235 printk(KERN_ERR "Mismatched commands (Expecting reply for %d. Current %d).\n",
236 completed_cmd, nl_cmd->cmd);
237 ret = -EINVAL;
238 } else {
239 nl_cmd->status = rc;
240 }
241
242 spin_unlock(&udev->nl_cmd_lock);
243 if (!is_removed)
244 target_undepend_item(&dev->dev_group.cg_item);
245 if (!ret)
246 complete(&nl_cmd->complete);
247 return ret;
248}
249
/* Netlink doit handler: daemon acknowledged a device removal event. */
static int tcmu_genl_rm_dev_done(struct sk_buff *skb, struct genl_info *info)
{
	return tcmu_genl_cmd_done(info, TCMU_CMD_REMOVED_DEVICE);
}
254
/* Netlink doit handler: daemon acknowledged a device addition event. */
static int tcmu_genl_add_dev_done(struct sk_buff *skb, struct genl_info *info)
{
	return tcmu_genl_cmd_done(info, TCMU_CMD_ADDED_DEVICE);
}
259
/* Netlink doit handler: daemon acknowledged a device reconfiguration. */
static int tcmu_genl_reconfig_dev_done(struct sk_buff *skb,
				       struct genl_info *info)
{
	return tcmu_genl_cmd_done(info, TCMU_CMD_RECONFIG_DEVICE);
}
265
266static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info)
267{
268 if (info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]) {
269 tcmu_kern_cmd_reply_supported =
270 nla_get_u8(info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]);
271 printk(KERN_INFO "tcmu daemon: command reply support %u.\n",
272 tcmu_kern_cmd_reply_supported);
273 }
274
275 return 0;
276}
277
/*
 * Generic netlink operations accepted from userspace.  All require
 * GENL_ADMIN_PERM: SET_FEATURES lets the daemon opt in to command
 * replies, and the three *_DONE ops acknowledge device add/remove/
 * reconfigure events sent by tcmu_netlink_event().
 */
static const struct genl_ops tcmu_genl_ops[] = {
	{
		.cmd	= TCMU_CMD_SET_FEATURES,
		.flags	= GENL_ADMIN_PERM,
		.policy	= tcmu_attr_policy,
		.doit	= tcmu_genl_set_features,
	},
	{
		.cmd	= TCMU_CMD_ADDED_DEVICE_DONE,
		.flags	= GENL_ADMIN_PERM,
		.policy	= tcmu_attr_policy,
		.doit	= tcmu_genl_add_dev_done,
	},
	{
		.cmd	= TCMU_CMD_REMOVED_DEVICE_DONE,
		.flags	= GENL_ADMIN_PERM,
		.policy	= tcmu_attr_policy,
		.doit	= tcmu_genl_rm_dev_done,
	},
	{
		.cmd	= TCMU_CMD_RECONFIG_DEVICE_DONE,
		.flags	= GENL_ADMIN_PERM,
		.policy	= tcmu_attr_policy,
		.doit	= tcmu_genl_reconfig_dev_done,
	},
};
304
181/* Our generic netlink family */ 305/* Our generic netlink family */
182static struct genl_family tcmu_genl_family __ro_after_init = { 306static struct genl_family tcmu_genl_family __ro_after_init = {
183 .module = THIS_MODULE, 307 .module = THIS_MODULE,
184 .hdrsize = 0, 308 .hdrsize = 0,
185 .name = "TCM-USER", 309 .name = "TCM-USER",
186 .version = 1, 310 .version = 2,
187 .maxattr = TCMU_ATTR_MAX, 311 .maxattr = TCMU_ATTR_MAX,
188 .mcgrps = tcmu_mcgrps, 312 .mcgrps = tcmu_mcgrps,
189 .n_mcgrps = ARRAY_SIZE(tcmu_mcgrps), 313 .n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
190 .netnsok = true, 314 .netnsok = true,
315 .ops = tcmu_genl_ops,
316 .n_ops = ARRAY_SIZE(tcmu_genl_ops),
191}; 317};
192 318
193#define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index)) 319#define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index))
@@ -216,7 +342,6 @@ static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
216 342
217 page = radix_tree_lookup(&udev->data_blocks, dbi); 343 page = radix_tree_lookup(&udev->data_blocks, dbi);
218 if (!page) { 344 if (!page) {
219
220 if (atomic_add_return(1, &global_db_count) > 345 if (atomic_add_return(1, &global_db_count) >
221 TCMU_GLOBAL_MAX_BLOCKS) { 346 TCMU_GLOBAL_MAX_BLOCKS) {
222 atomic_dec(&global_db_count); 347 atomic_dec(&global_db_count);
@@ -226,14 +351,11 @@ static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
226 /* try to get new page from the mm */ 351 /* try to get new page from the mm */
227 page = alloc_page(GFP_KERNEL); 352 page = alloc_page(GFP_KERNEL);
228 if (!page) 353 if (!page)
229 return false; 354 goto err_alloc;
230 355
231 ret = radix_tree_insert(&udev->data_blocks, dbi, page); 356 ret = radix_tree_insert(&udev->data_blocks, dbi, page);
232 if (ret) { 357 if (ret)
233 __free_page(page); 358 goto err_insert;
234 return false;
235 }
236
237 } 359 }
238 360
239 if (dbi > udev->dbi_max) 361 if (dbi > udev->dbi_max)
@@ -243,6 +365,11 @@ static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
243 tcmu_cmd_set_dbi(tcmu_cmd, dbi); 365 tcmu_cmd_set_dbi(tcmu_cmd, dbi);
244 366
245 return true; 367 return true;
368err_insert:
369 __free_page(page);
370err_alloc:
371 atomic_dec(&global_db_count);
372 return false;
246} 373}
247 374
248static bool tcmu_get_empty_blocks(struct tcmu_dev *udev, 375static bool tcmu_get_empty_blocks(struct tcmu_dev *udev,
@@ -401,7 +528,7 @@ static inline size_t get_block_offset_user(struct tcmu_dev *dev,
401 DATA_BLOCK_SIZE - remaining; 528 DATA_BLOCK_SIZE - remaining;
402} 529}
403 530
404static inline size_t iov_tail(struct tcmu_dev *udev, struct iovec *iov) 531static inline size_t iov_tail(struct iovec *iov)
405{ 532{
406 return (size_t)iov->iov_base + iov->iov_len; 533 return (size_t)iov->iov_base + iov->iov_len;
407} 534}
@@ -437,10 +564,10 @@ static int scatter_data_area(struct tcmu_dev *udev,
437 to_offset = get_block_offset_user(udev, dbi, 564 to_offset = get_block_offset_user(udev, dbi,
438 block_remaining); 565 block_remaining);
439 offset = DATA_BLOCK_SIZE - block_remaining; 566 offset = DATA_BLOCK_SIZE - block_remaining;
440 to = (void *)(unsigned long)to + offset; 567 to += offset;
441 568
442 if (*iov_cnt != 0 && 569 if (*iov_cnt != 0 &&
443 to_offset == iov_tail(udev, *iov)) { 570 to_offset == iov_tail(*iov)) {
444 (*iov)->iov_len += copy_bytes; 571 (*iov)->iov_len += copy_bytes;
445 } else { 572 } else {
446 new_iov(iov, iov_cnt, udev); 573 new_iov(iov, iov_cnt, udev);
@@ -510,7 +637,7 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
510 copy_bytes = min_t(size_t, sg_remaining, 637 copy_bytes = min_t(size_t, sg_remaining,
511 block_remaining); 638 block_remaining);
512 offset = DATA_BLOCK_SIZE - block_remaining; 639 offset = DATA_BLOCK_SIZE - block_remaining;
513 from = (void *)(unsigned long)from + offset; 640 from += offset;
514 tcmu_flush_dcache_range(from, copy_bytes); 641 tcmu_flush_dcache_range(from, copy_bytes);
515 memcpy(to + sg->length - sg_remaining, from, 642 memcpy(to + sg->length - sg_remaining, from,
516 copy_bytes); 643 copy_bytes);
@@ -596,10 +723,7 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
596 } 723 }
597 } 724 }
598 725
599 if (!tcmu_get_empty_blocks(udev, cmd)) 726 return tcmu_get_empty_blocks(udev, cmd);
600 return false;
601
602 return true;
603} 727}
604 728
605static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt) 729static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt)
@@ -699,25 +823,24 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
699 size_t pad_size = head_to_end(cmd_head, udev->cmdr_size); 823 size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);
700 824
701 entry = (void *) mb + CMDR_OFF + cmd_head; 825 entry = (void *) mb + CMDR_OFF + cmd_head;
702 tcmu_flush_dcache_range(entry, sizeof(*entry));
703 tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD); 826 tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD);
704 tcmu_hdr_set_len(&entry->hdr.len_op, pad_size); 827 tcmu_hdr_set_len(&entry->hdr.len_op, pad_size);
705 entry->hdr.cmd_id = 0; /* not used for PAD */ 828 entry->hdr.cmd_id = 0; /* not used for PAD */
706 entry->hdr.kflags = 0; 829 entry->hdr.kflags = 0;
707 entry->hdr.uflags = 0; 830 entry->hdr.uflags = 0;
831 tcmu_flush_dcache_range(entry, sizeof(*entry));
708 832
709 UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size); 833 UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
834 tcmu_flush_dcache_range(mb, sizeof(*mb));
710 835
711 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ 836 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
712 WARN_ON(cmd_head != 0); 837 WARN_ON(cmd_head != 0);
713 } 838 }
714 839
715 entry = (void *) mb + CMDR_OFF + cmd_head; 840 entry = (void *) mb + CMDR_OFF + cmd_head;
716 tcmu_flush_dcache_range(entry, sizeof(*entry)); 841 memset(entry, 0, command_size);
717 tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD); 842 tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
718 entry->hdr.cmd_id = tcmu_cmd->cmd_id; 843 entry->hdr.cmd_id = tcmu_cmd->cmd_id;
719 entry->hdr.kflags = 0;
720 entry->hdr.uflags = 0;
721 844
722 /* Handle allocating space from the data area */ 845 /* Handle allocating space from the data area */
723 tcmu_cmd_reset_dbi_cur(tcmu_cmd); 846 tcmu_cmd_reset_dbi_cur(tcmu_cmd);
@@ -736,11 +859,10 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
736 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 859 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
737 } 860 }
738 entry->req.iov_cnt = iov_cnt; 861 entry->req.iov_cnt = iov_cnt;
739 entry->req.iov_dif_cnt = 0;
740 862
741 /* Handle BIDI commands */ 863 /* Handle BIDI commands */
864 iov_cnt = 0;
742 if (se_cmd->se_cmd_flags & SCF_BIDI) { 865 if (se_cmd->se_cmd_flags & SCF_BIDI) {
743 iov_cnt = 0;
744 iov++; 866 iov++;
745 ret = scatter_data_area(udev, tcmu_cmd, 867 ret = scatter_data_area(udev, tcmu_cmd,
746 se_cmd->t_bidi_data_sg, 868 se_cmd->t_bidi_data_sg,
@@ -753,8 +875,8 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
753 pr_err("tcmu: alloc and scatter bidi data failed\n"); 875 pr_err("tcmu: alloc and scatter bidi data failed\n");
754 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 876 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
755 } 877 }
756 entry->req.iov_bidi_cnt = iov_cnt;
757 } 878 }
879 entry->req.iov_bidi_cnt = iov_cnt;
758 880
759 /* 881 /*
760 * Recalaulate the command's base size and size according 882 * Recalaulate the command's base size and size according
@@ -830,8 +952,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
830 cmd->se_cmd); 952 cmd->se_cmd);
831 entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION; 953 entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
832 } else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) { 954 } else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
833 memcpy(se_cmd->sense_buffer, entry->rsp.sense_buffer, 955 transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer);
834 se_cmd->scsi_sense_length);
835 } else if (se_cmd->se_cmd_flags & SCF_BIDI) { 956 } else if (se_cmd->se_cmd_flags & SCF_BIDI) {
836 /* Get Data-In buffer before clean up */ 957 /* Get Data-In buffer before clean up */
837 gather_data_area(udev, cmd, true); 958 gather_data_area(udev, cmd, true);
@@ -989,6 +1110,9 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
989 setup_timer(&udev->timeout, tcmu_device_timedout, 1110 setup_timer(&udev->timeout, tcmu_device_timedout,
990 (unsigned long)udev); 1111 (unsigned long)udev);
991 1112
1113 init_waitqueue_head(&udev->nl_cmd_wq);
1114 spin_lock_init(&udev->nl_cmd_lock);
1115
992 return &udev->se_dev; 1116 return &udev->se_dev;
993} 1117}
994 1118
@@ -1140,6 +1264,7 @@ static int tcmu_open(struct uio_info *info, struct inode *inode)
1140 return -EBUSY; 1264 return -EBUSY;
1141 1265
1142 udev->inode = inode; 1266 udev->inode = inode;
1267 kref_get(&udev->kref);
1143 1268
1144 pr_debug("open\n"); 1269 pr_debug("open\n");
1145 1270
@@ -1171,12 +1296,59 @@ static int tcmu_release(struct uio_info *info, struct inode *inode)
1171 clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags); 1296 clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);
1172 1297
1173 pr_debug("close\n"); 1298 pr_debug("close\n");
1174 /* release ref from configure */ 1299 /* release ref from open */
1175 kref_put(&udev->kref, tcmu_dev_kref_release); 1300 kref_put(&udev->kref, tcmu_dev_kref_release);
1176 return 0; 1301 return 0;
1177} 1302}
1178 1303
1179static int tcmu_netlink_event(enum tcmu_genl_cmd cmd, const char *name, int minor) 1304static void tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
1305{
1306 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
1307
1308 if (!tcmu_kern_cmd_reply_supported)
1309 return;
1310relock:
1311 spin_lock(&udev->nl_cmd_lock);
1312
1313 if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
1314 spin_unlock(&udev->nl_cmd_lock);
1315 pr_debug("sleeping for open nl cmd\n");
1316 wait_event(udev->nl_cmd_wq, (nl_cmd->cmd == TCMU_CMD_UNSPEC));
1317 goto relock;
1318 }
1319
1320 memset(nl_cmd, 0, sizeof(*nl_cmd));
1321 nl_cmd->cmd = cmd;
1322 init_completion(&nl_cmd->complete);
1323
1324 spin_unlock(&udev->nl_cmd_lock);
1325}
1326
1327static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
1328{
1329 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
1330 int ret;
1331 DEFINE_WAIT(__wait);
1332
1333 if (!tcmu_kern_cmd_reply_supported)
1334 return 0;
1335
1336 pr_debug("sleeping for nl reply\n");
1337 wait_for_completion(&nl_cmd->complete);
1338
1339 spin_lock(&udev->nl_cmd_lock);
1340 nl_cmd->cmd = TCMU_CMD_UNSPEC;
1341 ret = nl_cmd->status;
1342 nl_cmd->status = 0;
1343 spin_unlock(&udev->nl_cmd_lock);
1344
1345 wake_up_all(&udev->nl_cmd_wq);
1346
1347 return ret;;
1348}
1349
1350static int tcmu_netlink_event(struct tcmu_dev *udev, enum tcmu_genl_cmd cmd,
1351 int reconfig_attr, const void *reconfig_data)
1180{ 1352{
1181 struct sk_buff *skb; 1353 struct sk_buff *skb;
1182 void *msg_header; 1354 void *msg_header;
@@ -1190,22 +1362,51 @@ static int tcmu_netlink_event(enum tcmu_genl_cmd cmd, const char *name, int mino
1190 if (!msg_header) 1362 if (!msg_header)
1191 goto free_skb; 1363 goto free_skb;
1192 1364
1193 ret = nla_put_string(skb, TCMU_ATTR_DEVICE, name); 1365 ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name);
1366 if (ret < 0)
1367 goto free_skb;
1368
1369 ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor);
1194 if (ret < 0) 1370 if (ret < 0)
1195 goto free_skb; 1371 goto free_skb;
1196 1372
1197 ret = nla_put_u32(skb, TCMU_ATTR_MINOR, minor); 1373 ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index);
1198 if (ret < 0) 1374 if (ret < 0)
1199 goto free_skb; 1375 goto free_skb;
1200 1376
1377 if (cmd == TCMU_CMD_RECONFIG_DEVICE) {
1378 switch (reconfig_attr) {
1379 case TCMU_ATTR_DEV_CFG:
1380 ret = nla_put_string(skb, reconfig_attr, reconfig_data);
1381 break;
1382 case TCMU_ATTR_DEV_SIZE:
1383 ret = nla_put_u64_64bit(skb, reconfig_attr,
1384 *((u64 *)reconfig_data),
1385 TCMU_ATTR_PAD);
1386 break;
1387 case TCMU_ATTR_WRITECACHE:
1388 ret = nla_put_u8(skb, reconfig_attr,
1389 *((u8 *)reconfig_data));
1390 break;
1391 default:
1392 BUG();
1393 }
1394
1395 if (ret < 0)
1396 goto free_skb;
1397 }
1398
1201 genlmsg_end(skb, msg_header); 1399 genlmsg_end(skb, msg_header);
1202 1400
1401 tcmu_init_genl_cmd_reply(udev, cmd);
1402
1203 ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0, 1403 ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
1204 TCMU_MCGRP_CONFIG, GFP_KERNEL); 1404 TCMU_MCGRP_CONFIG, GFP_KERNEL);
1205
1206 /* We don't care if no one is listening */ 1405 /* We don't care if no one is listening */
1207 if (ret == -ESRCH) 1406 if (ret == -ESRCH)
1208 ret = 0; 1407 ret = 0;
1408 if (!ret)
1409 ret = tcmu_wait_genl_cmd_reply(udev);
1209 1410
1210 return ret; 1411 return ret;
1211free_skb: 1412free_skb:
@@ -1213,19 +1414,14 @@ free_skb:
1213 return ret; 1414 return ret;
1214} 1415}
1215 1416
1216static int tcmu_configure_device(struct se_device *dev) 1417static int tcmu_update_uio_info(struct tcmu_dev *udev)
1217{ 1418{
1218 struct tcmu_dev *udev = TCMU_DEV(dev);
1219 struct tcmu_hba *hba = udev->hba->hba_ptr; 1419 struct tcmu_hba *hba = udev->hba->hba_ptr;
1220 struct uio_info *info; 1420 struct uio_info *info;
1221 struct tcmu_mailbox *mb; 1421 size_t size, used;
1222 size_t size;
1223 size_t used;
1224 int ret = 0;
1225 char *str; 1422 char *str;
1226 1423
1227 info = &udev->uio_info; 1424 info = &udev->uio_info;
1228
1229 size = snprintf(NULL, 0, "tcm-user/%u/%s/%s", hba->host_id, udev->name, 1425 size = snprintf(NULL, 0, "tcm-user/%u/%s/%s", hba->host_id, udev->name,
1230 udev->dev_config); 1426 udev->dev_config);
1231 size += 1; /* for \0 */ 1427 size += 1; /* for \0 */
@@ -1234,12 +1430,27 @@ static int tcmu_configure_device(struct se_device *dev)
1234 return -ENOMEM; 1430 return -ENOMEM;
1235 1431
1236 used = snprintf(str, size, "tcm-user/%u/%s", hba->host_id, udev->name); 1432 used = snprintf(str, size, "tcm-user/%u/%s", hba->host_id, udev->name);
1237
1238 if (udev->dev_config[0]) 1433 if (udev->dev_config[0])
1239 snprintf(str + used, size - used, "/%s", udev->dev_config); 1434 snprintf(str + used, size - used, "/%s", udev->dev_config);
1240 1435
1241 info->name = str; 1436 info->name = str;
1242 1437
1438 return 0;
1439}
1440
1441static int tcmu_configure_device(struct se_device *dev)
1442{
1443 struct tcmu_dev *udev = TCMU_DEV(dev);
1444 struct uio_info *info;
1445 struct tcmu_mailbox *mb;
1446 int ret = 0;
1447
1448 ret = tcmu_update_uio_info(udev);
1449 if (ret)
1450 return ret;
1451
1452 info = &udev->uio_info;
1453
1243 udev->mb_addr = vzalloc(CMDR_SIZE); 1454 udev->mb_addr = vzalloc(CMDR_SIZE);
1244 if (!udev->mb_addr) { 1455 if (!udev->mb_addr) {
1245 ret = -ENOMEM; 1456 ret = -ENOMEM;
@@ -1290,6 +1501,8 @@ static int tcmu_configure_device(struct se_device *dev)
1290 /* Other attributes can be configured in userspace */ 1501 /* Other attributes can be configured in userspace */
1291 if (!dev->dev_attrib.hw_max_sectors) 1502 if (!dev->dev_attrib.hw_max_sectors)
1292 dev->dev_attrib.hw_max_sectors = 128; 1503 dev->dev_attrib.hw_max_sectors = 128;
1504 if (!dev->dev_attrib.emulate_write_cache)
1505 dev->dev_attrib.emulate_write_cache = 0;
1293 dev->dev_attrib.hw_queue_depth = 128; 1506 dev->dev_attrib.hw_queue_depth = 128;
1294 1507
1295 /* 1508 /*
@@ -1298,8 +1511,7 @@ static int tcmu_configure_device(struct se_device *dev)
1298 */ 1511 */
1299 kref_get(&udev->kref); 1512 kref_get(&udev->kref);
1300 1513
1301 ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name, 1514 ret = tcmu_netlink_event(udev, TCMU_CMD_ADDED_DEVICE, 0, NULL);
1302 udev->uio_info.uio_dev->minor);
1303 if (ret) 1515 if (ret)
1304 goto err_netlink; 1516 goto err_netlink;
1305 1517
@@ -1355,6 +1567,14 @@ static void tcmu_blocks_release(struct tcmu_dev *udev)
1355static void tcmu_free_device(struct se_device *dev) 1567static void tcmu_free_device(struct se_device *dev)
1356{ 1568{
1357 struct tcmu_dev *udev = TCMU_DEV(dev); 1569 struct tcmu_dev *udev = TCMU_DEV(dev);
1570
1571 /* release ref from init */
1572 kref_put(&udev->kref, tcmu_dev_kref_release);
1573}
1574
1575static void tcmu_destroy_device(struct se_device *dev)
1576{
1577 struct tcmu_dev *udev = TCMU_DEV(dev);
1358 struct tcmu_cmd *cmd; 1578 struct tcmu_cmd *cmd;
1359 bool all_expired = true; 1579 bool all_expired = true;
1360 int i; 1580 int i;
@@ -1379,14 +1599,11 @@ static void tcmu_free_device(struct se_device *dev)
1379 1599
1380 tcmu_blocks_release(udev); 1600 tcmu_blocks_release(udev);
1381 1601
1382 if (tcmu_dev_configured(udev)) { 1602 tcmu_netlink_event(udev, TCMU_CMD_REMOVED_DEVICE, 0, NULL);
1383 tcmu_netlink_event(TCMU_CMD_REMOVED_DEVICE, udev->uio_info.name,
1384 udev->uio_info.uio_dev->minor);
1385 1603
1386 uio_unregister_device(&udev->uio_info); 1604 uio_unregister_device(&udev->uio_info);
1387 }
1388 1605
1389 /* release ref from init */ 1606 /* release ref from configure */
1390 kref_put(&udev->kref, tcmu_dev_kref_release); 1607 kref_put(&udev->kref, tcmu_dev_kref_release);
1391} 1608}
1392 1609
@@ -1546,6 +1763,129 @@ static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *pag
1546} 1763}
1547CONFIGFS_ATTR(tcmu_, cmd_time_out); 1764CONFIGFS_ATTR(tcmu_, cmd_time_out);
1548 1765
1766static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
1767{
1768 struct se_dev_attrib *da = container_of(to_config_group(item),
1769 struct se_dev_attrib, da_group);
1770 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
1771
1772 return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config);
1773}
1774
1775static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
1776 size_t count)
1777{
1778 struct se_dev_attrib *da = container_of(to_config_group(item),
1779 struct se_dev_attrib, da_group);
1780 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
1781 int ret, len;
1782
1783 len = strlen(page);
1784 if (!len || len > TCMU_CONFIG_LEN - 1)
1785 return -EINVAL;
1786
1787 /* Check if device has been configured before */
1788 if (tcmu_dev_configured(udev)) {
1789 ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE,
1790 TCMU_ATTR_DEV_CFG, page);
1791 if (ret) {
1792 pr_err("Unable to reconfigure device\n");
1793 return ret;
1794 }
1795 strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);
1796
1797 ret = tcmu_update_uio_info(udev);
1798 if (ret)
1799 return ret;
1800 return count;
1801 }
1802 strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);
1803
1804 return count;
1805}
1806CONFIGFS_ATTR(tcmu_, dev_config);
1807
1808static ssize_t tcmu_dev_size_show(struct config_item *item, char *page)
1809{
1810 struct se_dev_attrib *da = container_of(to_config_group(item),
1811 struct se_dev_attrib, da_group);
1812 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
1813
1814 return snprintf(page, PAGE_SIZE, "%zu\n", udev->dev_size);
1815}
1816
1817static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
1818 size_t count)
1819{
1820 struct se_dev_attrib *da = container_of(to_config_group(item),
1821 struct se_dev_attrib, da_group);
1822 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
1823 u64 val;
1824 int ret;
1825
1826 ret = kstrtou64(page, 0, &val);
1827 if (ret < 0)
1828 return ret;
1829
1830 /* Check if device has been configured before */
1831 if (tcmu_dev_configured(udev)) {
1832 ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE,
1833 TCMU_ATTR_DEV_SIZE, &val);
1834 if (ret) {
1835 pr_err("Unable to reconfigure device\n");
1836 return ret;
1837 }
1838 }
1839 udev->dev_size = val;
1840 return count;
1841}
1842CONFIGFS_ATTR(tcmu_, dev_size);
1843
1844static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
1845 char *page)
1846{
1847 struct se_dev_attrib *da = container_of(to_config_group(item),
1848 struct se_dev_attrib, da_group);
1849
1850 return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache);
1851}
1852
1853static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
1854 const char *page, size_t count)
1855{
1856 struct se_dev_attrib *da = container_of(to_config_group(item),
1857 struct se_dev_attrib, da_group);
1858 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
1859 u8 val;
1860 int ret;
1861
1862 ret = kstrtou8(page, 0, &val);
1863 if (ret < 0)
1864 return ret;
1865
1866 /* Check if device has been configured before */
1867 if (tcmu_dev_configured(udev)) {
1868 ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE,
1869 TCMU_ATTR_WRITECACHE, &val);
1870 if (ret) {
1871 pr_err("Unable to reconfigure device\n");
1872 return ret;
1873 }
1874 }
1875
1876 da->emulate_write_cache = val;
1877 return count;
1878}
1879CONFIGFS_ATTR(tcmu_, emulate_write_cache);
1880
/*
 * TCMU-specific configfs attributes; merged with the generic
 * passthrough attribute set into tcmu_attrs at module init.
 */
static struct configfs_attribute *tcmu_attrib_attrs[] = {
	&tcmu_attr_cmd_time_out,
	&tcmu_attr_dev_config,
	&tcmu_attr_dev_size,
	&tcmu_attr_emulate_write_cache,
	NULL,
};
1888
1549static struct configfs_attribute **tcmu_attrs; 1889static struct configfs_attribute **tcmu_attrs;
1550 1890
1551static struct target_backend_ops tcmu_ops = { 1891static struct target_backend_ops tcmu_ops = {
@@ -1556,6 +1896,7 @@ static struct target_backend_ops tcmu_ops = {
1556 .detach_hba = tcmu_detach_hba, 1896 .detach_hba = tcmu_detach_hba,
1557 .alloc_device = tcmu_alloc_device, 1897 .alloc_device = tcmu_alloc_device,
1558 .configure_device = tcmu_configure_device, 1898 .configure_device = tcmu_configure_device,
1899 .destroy_device = tcmu_destroy_device,
1559 .free_device = tcmu_free_device, 1900 .free_device = tcmu_free_device,
1560 .parse_cdb = tcmu_parse_cdb, 1901 .parse_cdb = tcmu_parse_cdb,
1561 .set_configfs_dev_params = tcmu_set_configfs_dev_params, 1902 .set_configfs_dev_params = tcmu_set_configfs_dev_params,
@@ -1573,7 +1914,7 @@ static int unmap_thread_fn(void *data)
1573 struct page *page; 1914 struct page *page;
1574 int i; 1915 int i;
1575 1916
1576 while (1) { 1917 while (!kthread_should_stop()) {
1577 DEFINE_WAIT(__wait); 1918 DEFINE_WAIT(__wait);
1578 1919
1579 prepare_to_wait(&unmap_wait, &__wait, TASK_INTERRUPTIBLE); 1920 prepare_to_wait(&unmap_wait, &__wait, TASK_INTERRUPTIBLE);
@@ -1645,7 +1986,7 @@ static int unmap_thread_fn(void *data)
1645 1986
1646static int __init tcmu_module_init(void) 1987static int __init tcmu_module_init(void)
1647{ 1988{
1648 int ret, i, len = 0; 1989 int ret, i, k, len = 0;
1649 1990
1650 BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0); 1991 BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
1651 1992
@@ -1670,7 +2011,10 @@ static int __init tcmu_module_init(void)
1670 for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) { 2011 for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
1671 len += sizeof(struct configfs_attribute *); 2012 len += sizeof(struct configfs_attribute *);
1672 } 2013 }
1673 len += sizeof(struct configfs_attribute *) * 2; 2014 for (i = 0; tcmu_attrib_attrs[i] != NULL; i++) {
2015 len += sizeof(struct configfs_attribute *);
2016 }
2017 len += sizeof(struct configfs_attribute *);
1674 2018
1675 tcmu_attrs = kzalloc(len, GFP_KERNEL); 2019 tcmu_attrs = kzalloc(len, GFP_KERNEL);
1676 if (!tcmu_attrs) { 2020 if (!tcmu_attrs) {
@@ -1681,7 +2025,10 @@ static int __init tcmu_module_init(void)
1681 for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) { 2025 for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
1682 tcmu_attrs[i] = passthrough_attrib_attrs[i]; 2026 tcmu_attrs[i] = passthrough_attrib_attrs[i];
1683 } 2027 }
1684 tcmu_attrs[i] = &tcmu_attr_cmd_time_out; 2028 for (k = 0; tcmu_attrib_attrs[k] != NULL; k++) {
2029 tcmu_attrs[i] = tcmu_attrib_attrs[k];
2030 i++;
2031 }
1685 tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs; 2032 tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;
1686 2033
1687 ret = transport_backend_register(&tcmu_ops); 2034 ret = transport_backend_register(&tcmu_ops);
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index cac5a20a4de0..9ee89e00cd77 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -40,6 +40,8 @@
40 40
41static struct workqueue_struct *xcopy_wq = NULL; 41static struct workqueue_struct *xcopy_wq = NULL;
42 42
43static sense_reason_t target_parse_xcopy_cmd(struct xcopy_op *xop);
44
43static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf) 45static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
44{ 46{
45 int off = 0; 47 int off = 0;
@@ -53,48 +55,60 @@ static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
53 return 0; 55 return 0;
54} 56}
55 57
56static int target_xcopy_locate_se_dev_e4(const unsigned char *dev_wwn, 58struct xcopy_dev_search_info {
57 struct se_device **found_dev) 59 const unsigned char *dev_wwn;
60 struct se_device *found_dev;
61};
62
63static int target_xcopy_locate_se_dev_e4_iter(struct se_device *se_dev,
64 void *data)
58{ 65{
59 struct se_device *se_dev; 66 struct xcopy_dev_search_info *info = data;
60 unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN]; 67 unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
61 int rc; 68 int rc;
62 69
63 mutex_lock(&g_device_mutex); 70 if (!se_dev->dev_attrib.emulate_3pc)
64 list_for_each_entry(se_dev, &g_device_list, g_dev_node) { 71 return 0;
65 72
66 if (!se_dev->dev_attrib.emulate_3pc) 73 memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
67 continue; 74 target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);
68 75
69 memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN); 76 rc = memcmp(&tmp_dev_wwn[0], info->dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
70 target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]); 77 if (rc != 0)
78 return 0;
71 79
72 rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN); 80 info->found_dev = se_dev;
73 if (rc != 0) 81 pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev);
74 continue;
75 82
76 *found_dev = se_dev; 83 rc = target_depend_item(&se_dev->dev_group.cg_item);
77 pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev); 84 if (rc != 0) {
85 pr_err("configfs_depend_item attempt failed: %d for se_dev: %p\n",
86 rc, se_dev);
87 return rc;
88 }
78 89
79 rc = target_depend_item(&se_dev->dev_group.cg_item); 90 pr_debug("Called configfs_depend_item for se_dev: %p se_dev->se_dev_group: %p\n",
80 if (rc != 0) { 91 se_dev, &se_dev->dev_group);
81 pr_err("configfs_depend_item attempt failed:" 92 return 1;
82 " %d for se_dev: %p\n", rc, se_dev); 93}
83 mutex_unlock(&g_device_mutex);
84 return rc;
85 }
86 94
87 pr_debug("Called configfs_depend_item for se_dev: %p" 95static int target_xcopy_locate_se_dev_e4(const unsigned char *dev_wwn,
88 " se_dev->se_dev_group: %p\n", se_dev, 96 struct se_device **found_dev)
89 &se_dev->dev_group); 97{
98 struct xcopy_dev_search_info info;
99 int ret;
100
101 memset(&info, 0, sizeof(info));
102 info.dev_wwn = dev_wwn;
90 103
91 mutex_unlock(&g_device_mutex); 104 ret = target_for_each_device(target_xcopy_locate_se_dev_e4_iter, &info);
105 if (ret == 1) {
106 *found_dev = info.found_dev;
92 return 0; 107 return 0;
108 } else {
109 pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
110 return -EINVAL;
93 } 111 }
94 mutex_unlock(&g_device_mutex);
95
96 pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
97 return -EINVAL;
98} 112}
99 113
100static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop, 114static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
@@ -311,9 +325,7 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op
311 (unsigned long long)xop->dst_lba); 325 (unsigned long long)xop->dst_lba);
312 326
313 if (dc != 0) { 327 if (dc != 0) {
314 xop->dbl = (desc[29] & 0xff) << 16; 328 xop->dbl = get_unaligned_be24(&desc[29]);
315 xop->dbl |= (desc[30] & 0xff) << 8;
316 xop->dbl |= desc[31] & 0xff;
317 329
318 pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl); 330 pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl);
319 } 331 }
@@ -781,13 +793,24 @@ static int target_xcopy_write_destination(
781static void target_xcopy_do_work(struct work_struct *work) 793static void target_xcopy_do_work(struct work_struct *work)
782{ 794{
783 struct xcopy_op *xop = container_of(work, struct xcopy_op, xop_work); 795 struct xcopy_op *xop = container_of(work, struct xcopy_op, xop_work);
784 struct se_device *src_dev = xop->src_dev, *dst_dev = xop->dst_dev;
785 struct se_cmd *ec_cmd = xop->xop_se_cmd; 796 struct se_cmd *ec_cmd = xop->xop_se_cmd;
786 sector_t src_lba = xop->src_lba, dst_lba = xop->dst_lba, end_lba; 797 struct se_device *src_dev, *dst_dev;
798 sector_t src_lba, dst_lba, end_lba;
787 unsigned int max_sectors; 799 unsigned int max_sectors;
788 int rc; 800 int rc = 0;
789 unsigned short nolb = xop->nolb, cur_nolb, max_nolb, copied_nolb = 0; 801 unsigned short nolb, cur_nolb, max_nolb, copied_nolb = 0;
802
803 if (target_parse_xcopy_cmd(xop) != TCM_NO_SENSE)
804 goto err_free;
790 805
806 if (WARN_ON_ONCE(!xop->src_dev) || WARN_ON_ONCE(!xop->dst_dev))
807 goto err_free;
808
809 src_dev = xop->src_dev;
810 dst_dev = xop->dst_dev;
811 src_lba = xop->src_lba;
812 dst_lba = xop->dst_lba;
813 nolb = xop->nolb;
791 end_lba = src_lba + nolb; 814 end_lba = src_lba + nolb;
792 /* 815 /*
793 * Break up XCOPY I/O into hw_max_sectors sized I/O based on the 816 * Break up XCOPY I/O into hw_max_sectors sized I/O based on the
@@ -855,6 +878,8 @@ static void target_xcopy_do_work(struct work_struct *work)
855 878
856out: 879out:
857 xcopy_pt_undepend_remotedev(xop); 880 xcopy_pt_undepend_remotedev(xop);
881
882err_free:
858 kfree(xop); 883 kfree(xop);
859 /* 884 /*
860 * Don't override an error scsi status if it has already been set 885 * Don't override an error scsi status if it has already been set
@@ -867,48 +892,22 @@ out:
867 target_complete_cmd(ec_cmd, ec_cmd->scsi_status); 892 target_complete_cmd(ec_cmd, ec_cmd->scsi_status);
868} 893}
869 894
870sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) 895/*
896 * Returns TCM_NO_SENSE upon success or a sense code != TCM_NO_SENSE if parsing
897 * fails.
898 */
899static sense_reason_t target_parse_xcopy_cmd(struct xcopy_op *xop)
871{ 900{
872 struct se_device *dev = se_cmd->se_dev; 901 struct se_cmd *se_cmd = xop->xop_se_cmd;
873 struct xcopy_op *xop = NULL;
874 unsigned char *p = NULL, *seg_desc; 902 unsigned char *p = NULL, *seg_desc;
875 unsigned int list_id, list_id_usage, sdll, inline_dl, sa; 903 unsigned int list_id, list_id_usage, sdll, inline_dl;
876 sense_reason_t ret = TCM_INVALID_PARAMETER_LIST; 904 sense_reason_t ret = TCM_INVALID_PARAMETER_LIST;
877 int rc; 905 int rc;
878 unsigned short tdll; 906 unsigned short tdll;
879 907
880 if (!dev->dev_attrib.emulate_3pc) {
881 pr_err("EXTENDED_COPY operation explicitly disabled\n");
882 return TCM_UNSUPPORTED_SCSI_OPCODE;
883 }
884
885 sa = se_cmd->t_task_cdb[1] & 0x1f;
886 if (sa != 0x00) {
887 pr_err("EXTENDED_COPY(LID4) not supported\n");
888 return TCM_UNSUPPORTED_SCSI_OPCODE;
889 }
890
891 if (se_cmd->data_length == 0) {
892 target_complete_cmd(se_cmd, SAM_STAT_GOOD);
893 return TCM_NO_SENSE;
894 }
895 if (se_cmd->data_length < XCOPY_HDR_LEN) {
896 pr_err("XCOPY parameter truncation: length %u < hdr_len %u\n",
897 se_cmd->data_length, XCOPY_HDR_LEN);
898 return TCM_PARAMETER_LIST_LENGTH_ERROR;
899 }
900
901 xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
902 if (!xop) {
903 pr_err("Unable to allocate xcopy_op\n");
904 return TCM_OUT_OF_RESOURCES;
905 }
906 xop->xop_se_cmd = se_cmd;
907
908 p = transport_kmap_data_sg(se_cmd); 908 p = transport_kmap_data_sg(se_cmd);
909 if (!p) { 909 if (!p) {
910 pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n"); 910 pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n");
911 kfree(xop);
912 return TCM_OUT_OF_RESOURCES; 911 return TCM_OUT_OF_RESOURCES;
913 } 912 }
914 913
@@ -977,18 +976,57 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
977 pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc, 976 pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc,
978 rc * XCOPY_TARGET_DESC_LEN); 977 rc * XCOPY_TARGET_DESC_LEN);
979 transport_kunmap_data_sg(se_cmd); 978 transport_kunmap_data_sg(se_cmd);
980
981 INIT_WORK(&xop->xop_work, target_xcopy_do_work);
982 queue_work(xcopy_wq, &xop->xop_work);
983 return TCM_NO_SENSE; 979 return TCM_NO_SENSE;
984 980
985out: 981out:
986 if (p) 982 if (p)
987 transport_kunmap_data_sg(se_cmd); 983 transport_kunmap_data_sg(se_cmd);
988 kfree(xop);
989 return ret; 984 return ret;
990} 985}
991 986
987sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
988{
989 struct se_device *dev = se_cmd->se_dev;
990 struct xcopy_op *xop;
991 unsigned int sa;
992
993 if (!dev->dev_attrib.emulate_3pc) {
994 pr_err("EXTENDED_COPY operation explicitly disabled\n");
995 return TCM_UNSUPPORTED_SCSI_OPCODE;
996 }
997
998 sa = se_cmd->t_task_cdb[1] & 0x1f;
999 if (sa != 0x00) {
1000 pr_err("EXTENDED_COPY(LID4) not supported\n");
1001 return TCM_UNSUPPORTED_SCSI_OPCODE;
1002 }
1003
1004 if (se_cmd->data_length == 0) {
1005 target_complete_cmd(se_cmd, SAM_STAT_GOOD);
1006 return TCM_NO_SENSE;
1007 }
1008 if (se_cmd->data_length < XCOPY_HDR_LEN) {
1009 pr_err("XCOPY parameter truncation: length %u < hdr_len %u\n",
1010 se_cmd->data_length, XCOPY_HDR_LEN);
1011 return TCM_PARAMETER_LIST_LENGTH_ERROR;
1012 }
1013
1014 xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
1015 if (!xop)
1016 goto err;
1017 xop->xop_se_cmd = se_cmd;
1018 INIT_WORK(&xop->xop_work, target_xcopy_do_work);
1019 if (WARN_ON_ONCE(!queue_work(xcopy_wq, &xop->xop_work)))
1020 goto free;
1021 return TCM_NO_SENSE;
1022
1023free:
1024 kfree(xop);
1025
1026err:
1027 return TCM_OUT_OF_RESOURCES;
1028}
1029
992static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd) 1030static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd)
993{ 1031{
994 unsigned char *p; 1032 unsigned char *p;
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index ff02a942c4d5..046f6d280af5 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -496,14 +496,12 @@ static void vhost_scsi_evt_work(struct vhost_work *work)
496 struct vhost_scsi *vs = container_of(work, struct vhost_scsi, 496 struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
497 vs_event_work); 497 vs_event_work);
498 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; 498 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
499 struct vhost_scsi_evt *evt; 499 struct vhost_scsi_evt *evt, *t;
500 struct llist_node *llnode; 500 struct llist_node *llnode;
501 501
502 mutex_lock(&vq->mutex); 502 mutex_lock(&vq->mutex);
503 llnode = llist_del_all(&vs->vs_event_list); 503 llnode = llist_del_all(&vs->vs_event_list);
504 while (llnode) { 504 llist_for_each_entry_safe(evt, t, llnode, list) {
505 evt = llist_entry(llnode, struct vhost_scsi_evt, list);
506 llnode = llist_next(llnode);
507 vhost_scsi_do_evt_work(vs, evt); 505 vhost_scsi_do_evt_work(vs, evt);
508 vhost_scsi_free_evt(vs, evt); 506 vhost_scsi_free_evt(vs, evt);
509 } 507 }
@@ -529,10 +527,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
529 527
530 bitmap_zero(signal, VHOST_SCSI_MAX_VQ); 528 bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
531 llnode = llist_del_all(&vs->vs_completion_list); 529 llnode = llist_del_all(&vs->vs_completion_list);
532 while (llnode) { 530 llist_for_each_entry(cmd, llnode, tvc_completion_list) {
533 cmd = llist_entry(llnode, struct vhost_scsi_cmd,
534 tvc_completion_list);
535 llnode = llist_next(llnode);
536 se_cmd = &cmd->tvc_se_cmd; 531 se_cmd = &cmd->tvc_se_cmd;
537 532
538 pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__, 533 pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index d6950e0802b7..7bc88fd43cfc 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -134,11 +134,8 @@ struct vscsibk_pend {
134 struct page *pages[VSCSI_MAX_GRANTS]; 134 struct page *pages[VSCSI_MAX_GRANTS];
135 135
136 struct se_cmd se_cmd; 136 struct se_cmd se_cmd;
137};
138 137
139struct scsiback_tmr { 138 struct completion tmr_done;
140 atomic_t tmr_complete;
141 wait_queue_head_t tmr_wait;
142}; 139};
143 140
144#define VSCSI_DEFAULT_SESSION_TAGS 128 141#define VSCSI_DEFAULT_SESSION_TAGS 128
@@ -599,36 +596,28 @@ static void scsiback_device_action(struct vscsibk_pend *pending_req,
599 struct scsiback_tpg *tpg = pending_req->v2p->tpg; 596 struct scsiback_tpg *tpg = pending_req->v2p->tpg;
600 struct scsiback_nexus *nexus = tpg->tpg_nexus; 597 struct scsiback_nexus *nexus = tpg->tpg_nexus;
601 struct se_cmd *se_cmd = &pending_req->se_cmd; 598 struct se_cmd *se_cmd = &pending_req->se_cmd;
602 struct scsiback_tmr *tmr;
603 u64 unpacked_lun = pending_req->v2p->lun; 599 u64 unpacked_lun = pending_req->v2p->lun;
604 int rc, err = FAILED; 600 int rc, err = FAILED;
605 601
606 tmr = kzalloc(sizeof(struct scsiback_tmr), GFP_KERNEL); 602 init_completion(&pending_req->tmr_done);
607 if (!tmr) {
608 target_put_sess_cmd(se_cmd);
609 goto err;
610 }
611
612 init_waitqueue_head(&tmr->tmr_wait);
613 603
614 rc = target_submit_tmr(&pending_req->se_cmd, nexus->tvn_se_sess, 604 rc = target_submit_tmr(&pending_req->se_cmd, nexus->tvn_se_sess,
615 &pending_req->sense_buffer[0], 605 &pending_req->sense_buffer[0],
616 unpacked_lun, tmr, act, GFP_KERNEL, 606 unpacked_lun, NULL, act, GFP_KERNEL,
617 tag, TARGET_SCF_ACK_KREF); 607 tag, TARGET_SCF_ACK_KREF);
618 if (rc) 608 if (rc)
619 goto err; 609 goto err;
620 610
621 wait_event(tmr->tmr_wait, atomic_read(&tmr->tmr_complete)); 611 wait_for_completion(&pending_req->tmr_done);
622 612
623 err = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ? 613 err = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ?
624 SUCCESS : FAILED; 614 SUCCESS : FAILED;
625 615
626 scsiback_do_resp_with_sense(NULL, err, 0, pending_req); 616 scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
627 transport_generic_free_cmd(&pending_req->se_cmd, 1); 617 transport_generic_free_cmd(&pending_req->se_cmd, 0);
628 return; 618 return;
619
629err: 620err:
630 if (tmr)
631 kfree(tmr);
632 scsiback_do_resp_with_sense(NULL, err, 0, pending_req); 621 scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
633} 622}
634 623
@@ -1389,12 +1378,6 @@ static int scsiback_check_stop_free(struct se_cmd *se_cmd)
1389static void scsiback_release_cmd(struct se_cmd *se_cmd) 1378static void scsiback_release_cmd(struct se_cmd *se_cmd)
1390{ 1379{
1391 struct se_session *se_sess = se_cmd->se_sess; 1380 struct se_session *se_sess = se_cmd->se_sess;
1392 struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
1393
1394 if (se_tmr && se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
1395 struct scsiback_tmr *tmr = se_tmr->fabric_tmr_ptr;
1396 kfree(tmr);
1397 }
1398 1381
1399 percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag); 1382 percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
1400} 1383}
@@ -1455,11 +1438,10 @@ static int scsiback_queue_status(struct se_cmd *se_cmd)
1455 1438
1456static void scsiback_queue_tm_rsp(struct se_cmd *se_cmd) 1439static void scsiback_queue_tm_rsp(struct se_cmd *se_cmd)
1457{ 1440{
1458 struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; 1441 struct vscsibk_pend *pending_req = container_of(se_cmd,
1459 struct scsiback_tmr *tmr = se_tmr->fabric_tmr_ptr; 1442 struct vscsibk_pend, se_cmd);
1460 1443
1461 atomic_set(&tmr->tmr_complete, 1); 1444 complete(&pending_req->tmr_done);
1462 wake_up(&tmr->tmr_wait);
1463} 1445}
1464 1446
1465static void scsiback_aborted_task(struct se_cmd *se_cmd) 1447static void scsiback_aborted_task(struct se_cmd *se_cmd)
diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h
index 8260700d662b..8c285d9a06d8 100644
--- a/include/scsi/scsi_proto.h
+++ b/include/scsi/scsi_proto.h
@@ -158,6 +158,7 @@
158#define READ_32 0x09 158#define READ_32 0x09
159#define VERIFY_32 0x0a 159#define VERIFY_32 0x0a
160#define WRITE_32 0x0b 160#define WRITE_32 0x0b
161#define WRITE_VERIFY_32 0x0c
161#define WRITE_SAME_32 0x0d 162#define WRITE_SAME_32 0x0d
162#define ATA_32 0x1ff0 163#define ATA_32 0x1ff0
163 164
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 5f17fb770477..0ca1fb08805b 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -66,6 +66,14 @@ struct sock;
66#define TA_DEFAULT_FABRIC_PROT_TYPE 0 66#define TA_DEFAULT_FABRIC_PROT_TYPE 0
67/* TPG status needs to be enabled to return sendtargets discovery endpoint info */ 67/* TPG status needs to be enabled to return sendtargets discovery endpoint info */
68#define TA_DEFAULT_TPG_ENABLED_SENDTARGETS 1 68#define TA_DEFAULT_TPG_ENABLED_SENDTARGETS 1
69/*
70 * Used to control the sending of keys with optional to respond state bit,
71 * as a workaround for non RFC compliant initiators,that do not propose,
72 * nor respond to specific keys required for login to complete.
73 *
74 * See iscsi_check_proposer_for_optional_reply() for more details.
75 */
76#define TA_DEFAULT_LOGIN_KEYS_WORKAROUND 1
69 77
70#define ISCSI_IOV_DATA_BUFFER 5 78#define ISCSI_IOV_DATA_BUFFER 5
71 79
@@ -560,7 +568,6 @@ struct iscsi_conn {
560#define LOGIN_FLAGS_INITIAL_PDU 8 568#define LOGIN_FLAGS_INITIAL_PDU 8
561 unsigned long login_flags; 569 unsigned long login_flags;
562 struct delayed_work login_work; 570 struct delayed_work login_work;
563 struct delayed_work login_cleanup_work;
564 struct iscsi_login *login; 571 struct iscsi_login *login;
565 struct timer_list nopin_timer; 572 struct timer_list nopin_timer;
566 struct timer_list nopin_response_timer; 573 struct timer_list nopin_response_timer;
@@ -769,6 +776,7 @@ struct iscsi_tpg_attrib {
769 u8 t10_pi; 776 u8 t10_pi;
770 u32 fabric_prot_type; 777 u32 fabric_prot_type;
771 u32 tpg_enabled_sendtargets; 778 u32 tpg_enabled_sendtargets;
779 u32 login_keys_workaround;
772 struct iscsi_portal_group *tpg; 780 struct iscsi_portal_group *tpg;
773}; 781};
774 782
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index e475531565fd..e150e391878b 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -2,6 +2,7 @@
2#define TARGET_CORE_BACKEND_H 2#define TARGET_CORE_BACKEND_H
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <asm/unaligned.h>
5#include <target/target_core_base.h> 6#include <target/target_core_base.h>
6 7
7#define TRANSPORT_FLAG_PASSTHROUGH 0x1 8#define TRANSPORT_FLAG_PASSTHROUGH 0x1
@@ -29,16 +30,13 @@ struct target_backend_ops {
29 30
30 struct se_device *(*alloc_device)(struct se_hba *, const char *); 31 struct se_device *(*alloc_device)(struct se_hba *, const char *);
31 int (*configure_device)(struct se_device *); 32 int (*configure_device)(struct se_device *);
33 void (*destroy_device)(struct se_device *);
32 void (*free_device)(struct se_device *device); 34 void (*free_device)(struct se_device *device);
33 35
34 ssize_t (*set_configfs_dev_params)(struct se_device *, 36 ssize_t (*set_configfs_dev_params)(struct se_device *,
35 const char *, ssize_t); 37 const char *, ssize_t);
36 ssize_t (*show_configfs_dev_params)(struct se_device *, char *); 38 ssize_t (*show_configfs_dev_params)(struct se_device *, char *);
37 39
38 void (*transport_complete)(struct se_cmd *cmd,
39 struct scatterlist *,
40 unsigned char *);
41
42 sense_reason_t (*parse_cdb)(struct se_cmd *cmd); 40 sense_reason_t (*parse_cdb)(struct se_cmd *cmd);
43 u32 (*get_device_type)(struct se_device *); 41 u32 (*get_device_type)(struct se_device *);
44 sector_t (*get_blocks)(struct se_device *); 42 sector_t (*get_blocks)(struct se_device *);
@@ -71,6 +69,8 @@ void target_backend_unregister(const struct target_backend_ops *);
71void target_complete_cmd(struct se_cmd *, u8); 69void target_complete_cmd(struct se_cmd *, u8);
72void target_complete_cmd_with_length(struct se_cmd *, u8, int); 70void target_complete_cmd_with_length(struct se_cmd *, u8, int);
73 71
72void transport_copy_sense_to_cmd(struct se_cmd *, unsigned char *);
73
74sense_reason_t spc_parse_cdb(struct se_cmd *cmd, unsigned int *size); 74sense_reason_t spc_parse_cdb(struct se_cmd *cmd, unsigned int *size);
75sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd); 75sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd);
76sense_reason_t spc_emulate_inquiry_std(struct se_cmd *, unsigned char *); 76sense_reason_t spc_emulate_inquiry_std(struct se_cmd *, unsigned char *);
@@ -104,9 +104,18 @@ bool target_lun_is_rdonly(struct se_cmd *);
104sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd, 104sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
105 sense_reason_t (*exec_cmd)(struct se_cmd *cmd)); 105 sense_reason_t (*exec_cmd)(struct se_cmd *cmd));
106 106
107struct se_device *target_find_device(int id, bool do_depend);
108
107bool target_sense_desc_format(struct se_device *dev); 109bool target_sense_desc_format(struct se_device *dev);
108sector_t target_to_linux_sector(struct se_device *dev, sector_t lb); 110sector_t target_to_linux_sector(struct se_device *dev, sector_t lb);
109bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib, 111bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
110 struct request_queue *q); 112 struct request_queue *q);
111 113
114
115/* Only use get_unaligned_be24() if reading p - 1 is allowed. */
116static inline uint32_t get_unaligned_be24(const uint8_t *const p)
117{
118 return get_unaligned_be32(p - 1) & 0xffffffU;
119}
120
112#endif /* TARGET_CORE_BACKEND_H */ 121#endif /* TARGET_CORE_BACKEND_H */
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 0c1dce2ac6f0..516764febeb7 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -188,7 +188,8 @@ enum target_sc_flags_table {
188 TARGET_SCF_BIDI_OP = 0x01, 188 TARGET_SCF_BIDI_OP = 0x01,
189 TARGET_SCF_ACK_KREF = 0x02, 189 TARGET_SCF_ACK_KREF = 0x02,
190 TARGET_SCF_UNKNOWN_SIZE = 0x04, 190 TARGET_SCF_UNKNOWN_SIZE = 0x04,
191 TARGET_SCF_USE_CPUID = 0x08, 191 TARGET_SCF_USE_CPUID = 0x08,
192 TARGET_SCF_LOOKUP_LUN_FROM_TAG = 0x10,
192}; 193};
193 194
194/* fabric independent task management function values */ 195/* fabric independent task management function values */
@@ -218,7 +219,6 @@ enum tcm_tmrsp_table {
218 */ 219 */
219typedef enum { 220typedef enum {
220 SCSI_INST_INDEX, 221 SCSI_INST_INDEX,
221 SCSI_DEVICE_INDEX,
222 SCSI_AUTH_INTR_INDEX, 222 SCSI_AUTH_INTR_INDEX,
223 SCSI_INDEX_TYPE_MAX 223 SCSI_INDEX_TYPE_MAX
224} scsi_index_t; 224} scsi_index_t;
@@ -701,8 +701,6 @@ struct scsi_port_stats {
701 701
702struct se_lun { 702struct se_lun {
703 u64 unpacked_lun; 703 u64 unpacked_lun;
704#define SE_LUN_LINK_MAGIC 0xffff7771
705 u32 lun_link_magic;
706 bool lun_shutdown; 704 bool lun_shutdown;
707 bool lun_access_ro; 705 bool lun_access_ro;
708 u32 lun_index; 706 u32 lun_index;
@@ -746,8 +744,6 @@ struct se_dev_stat_grps {
746}; 744};
747 745
748struct se_device { 746struct se_device {
749#define SE_DEV_LINK_MAGIC 0xfeeddeef
750 u32 dev_link_magic;
751 /* RELATIVE TARGET PORT IDENTIFER Counter */ 747 /* RELATIVE TARGET PORT IDENTIFER Counter */
752 u16 dev_rpti_counter; 748 u16 dev_rpti_counter;
753 /* Used for SAM Task Attribute ordering */ 749 /* Used for SAM Task Attribute ordering */
@@ -800,7 +796,6 @@ struct se_device {
800 struct list_head delayed_cmd_list; 796 struct list_head delayed_cmd_list;
801 struct list_head state_list; 797 struct list_head state_list;
802 struct list_head qf_cmd_list; 798 struct list_head qf_cmd_list;
803 struct list_head g_dev_node;
804 /* Pointer to associated SE HBA */ 799 /* Pointer to associated SE HBA */
805 struct se_hba *se_hba; 800 struct se_hba *se_hba;
806 /* T10 Inquiry and VPD WWN Information */ 801 /* T10 Inquiry and VPD WWN Information */
@@ -819,8 +814,6 @@ struct se_device {
819 unsigned char udev_path[SE_UDEV_PATH_LEN]; 814 unsigned char udev_path[SE_UDEV_PATH_LEN];
820 /* Pointer to template of function pointers for transport */ 815 /* Pointer to template of function pointers for transport */
821 const struct target_backend_ops *transport; 816 const struct target_backend_ops *transport;
822 /* Linked list for struct se_hba struct se_device list */
823 struct list_head dev_list;
824 struct se_lun xcopy_lun; 817 struct se_lun xcopy_lun;
825 /* Protection Information */ 818 /* Protection Information */
826 int prot_length; 819 int prot_length;
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index d7dd1427fe0d..33d2e3e5773c 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -160,6 +160,7 @@ int target_get_sess_cmd(struct se_cmd *, bool);
160int target_put_sess_cmd(struct se_cmd *); 160int target_put_sess_cmd(struct se_cmd *);
161void target_sess_cmd_list_set_waiting(struct se_session *); 161void target_sess_cmd_list_set_waiting(struct se_session *);
162void target_wait_for_sess_cmds(struct se_session *); 162void target_wait_for_sess_cmds(struct se_session *);
163void target_show_cmd(const char *pfx, struct se_cmd *cmd);
163 164
164int core_alua_check_nonop_delay(struct se_cmd *); 165int core_alua_check_nonop_delay(struct se_cmd *);
165 166
diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h
index af17b4154ef6..24a1c4ec2248 100644
--- a/include/uapi/linux/target_core_user.h
+++ b/include/uapi/linux/target_core_user.h
@@ -130,6 +130,11 @@ enum tcmu_genl_cmd {
130 TCMU_CMD_UNSPEC, 130 TCMU_CMD_UNSPEC,
131 TCMU_CMD_ADDED_DEVICE, 131 TCMU_CMD_ADDED_DEVICE,
132 TCMU_CMD_REMOVED_DEVICE, 132 TCMU_CMD_REMOVED_DEVICE,
133 TCMU_CMD_RECONFIG_DEVICE,
134 TCMU_CMD_ADDED_DEVICE_DONE,
135 TCMU_CMD_REMOVED_DEVICE_DONE,
136 TCMU_CMD_RECONFIG_DEVICE_DONE,
137 TCMU_CMD_SET_FEATURES,
133 __TCMU_CMD_MAX, 138 __TCMU_CMD_MAX,
134}; 139};
135#define TCMU_CMD_MAX (__TCMU_CMD_MAX - 1) 140#define TCMU_CMD_MAX (__TCMU_CMD_MAX - 1)
@@ -138,6 +143,13 @@ enum tcmu_genl_attr {
138 TCMU_ATTR_UNSPEC, 143 TCMU_ATTR_UNSPEC,
139 TCMU_ATTR_DEVICE, 144 TCMU_ATTR_DEVICE,
140 TCMU_ATTR_MINOR, 145 TCMU_ATTR_MINOR,
146 TCMU_ATTR_PAD,
147 TCMU_ATTR_DEV_CFG,
148 TCMU_ATTR_DEV_SIZE,
149 TCMU_ATTR_WRITECACHE,
150 TCMU_ATTR_CMD_STATUS,
151 TCMU_ATTR_DEVICE_ID,
152 TCMU_ATTR_SUPP_KERN_CMD_REPLY,
141 __TCMU_ATTR_MAX, 153 __TCMU_ATTR_MAX,
142}; 154};
143#define TCMU_ATTR_MAX (__TCMU_ATTR_MAX - 1) 155#define TCMU_ATTR_MAX (__TCMU_ATTR_MAX - 1)