 drivers/infiniband/ulp/iser/iser_initiator.c   |  34
 drivers/infiniband/ulp/isert/ib_isert.c        |  70
 drivers/infiniband/ulp/isert/ib_isert.h        |   2
 drivers/scsi/Kconfig                           |   1
 drivers/scsi/libiscsi.c                        |  18
 drivers/scsi/qla2xxx/qla_target.c              | 195
 drivers/scsi/qla2xxx/qla_target.h              |   6
 drivers/scsi/qla2xxx/tcm_qla2xxx.c             |   6
 drivers/scsi/qla2xxx/tcm_qla2xxx.h             |   5
 drivers/scsi/virtio_scsi.c                     |  86
 drivers/target/iscsi/iscsi_target.c            |  29
 drivers/target/iscsi/iscsi_target_auth.c       |  74
 drivers/target/iscsi/iscsi_target_auth.h       |   1
 drivers/target/iscsi/iscsi_target_login.c      |   2
 drivers/target/iscsi/iscsi_target_nego.c       |  12
 drivers/target/iscsi/iscsi_target_parameters.c |  14
 drivers/target/iscsi/iscsi_target_tpg.c        |   8
 drivers/target/iscsi/iscsi_target_tpg.h        |   1
 drivers/target/loopback/tcm_loop.c             |  15
 drivers/target/target_core_sbc.c               |  68
 drivers/target/target_core_spc.c               |  18
 drivers/target/target_core_transport.c         |  37
 drivers/target/target_core_xcopy.c             |  10
 drivers/target/tcm_fc/tfc_cmd.c                |  19
 drivers/target/tcm_fc/tfc_io.c                 |  17
 drivers/vhost/scsi.c                           | 308
 include/linux/virtio_scsi.h                    |  15
 include/scsi/scsi_cmnd.h                       |  17
 include/target/iscsi/iscsi_transport.h         |   3
 include/target/target_core_backend.h           |   1
 30 files changed, 728 insertions(+), 364 deletions(-)
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 2e2d903db838..8d44a4060634 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -41,11 +41,11 @@
 #include "iscsi_iser.h"
 
 /* Register user buffer memory and initialize passive rdma
- * dto descriptor. Total data size is stored in
- * iser_task->data[ISER_DIR_IN].data_len
+ * dto descriptor. Data size is stored in
+ * task->data[ISER_DIR_IN].data_len, Protection size
+ * os stored in task->prot[ISER_DIR_IN].data_len
  */
-static int iser_prepare_read_cmd(struct iscsi_task *task,
-				 unsigned int edtl)
+static int iser_prepare_read_cmd(struct iscsi_task *task)
 
 {
 	struct iscsi_iser_task *iser_task = task->dd_data;
@@ -73,14 +73,6 @@ static int iser_prepare_read_cmd(struct iscsi_task *task,
 		return err;
 	}
 
-	if (edtl > iser_task->data[ISER_DIR_IN].data_len) {
-		iser_err("Total data length: %ld, less than EDTL: "
-			 "%d, in READ cmd BHS itt: %d, conn: 0x%p\n",
-			 iser_task->data[ISER_DIR_IN].data_len, edtl,
-			 task->itt, iser_task->ib_conn);
-		return -EINVAL;
-	}
-
 	err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_IN);
 	if (err) {
 		iser_err("Failed to set up Data-IN RDMA\n");
@@ -100,8 +92,9 @@ static int iser_prepare_read_cmd(struct iscsi_task *task,
 }
 
 /* Register user buffer memory and initialize passive rdma
- * dto descriptor. Total data size is stored in
- * task->data[ISER_DIR_OUT].data_len
+ * dto descriptor. Data size is stored in
+ * task->data[ISER_DIR_OUT].data_len, Protection size
+ * is stored at task->prot[ISER_DIR_OUT].data_len
  */
 static int
 iser_prepare_write_cmd(struct iscsi_task *task,
@@ -135,14 +128,6 @@ iser_prepare_write_cmd(struct iscsi_task *task,
 		return err;
 	}
 
-	if (edtl > iser_task->data[ISER_DIR_OUT].data_len) {
-		iser_err("Total data length: %ld, less than EDTL: %d, "
-			 "in WRITE cmd BHS itt: %d, conn: 0x%p\n",
-			 iser_task->data[ISER_DIR_OUT].data_len,
-			 edtl, task->itt, task->conn);
-		return -EINVAL;
-	}
-
 	err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_OUT);
 	if (err != 0) {
 		iser_err("Failed to register write cmd RDMA mem\n");
@@ -417,11 +402,12 @@ int iser_send_command(struct iscsi_conn *conn,
 	if (scsi_prot_sg_count(sc)) {
 		prot_buf->buf = scsi_prot_sglist(sc);
 		prot_buf->size = scsi_prot_sg_count(sc);
-		prot_buf->data_len = sc->prot_sdb->length;
+		prot_buf->data_len = data_buf->data_len >>
+				     ilog2(sc->device->sector_size) * 8;
 	}
 
 	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
-		err = iser_prepare_read_cmd(task, edtl);
+		err = iser_prepare_read_cmd(task);
 		if (err)
 			goto send_command_error;
 	}
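
The two hunks deleted above drop iSER's per-command EDTL sanity check, and the prot_buf sizing in iser_send_command() now derives the protection length from the data length and the sector size. The underlying accounting is that, with T10 protection information (PI) on the wire, every logical block carries an extra protection tuple, so the expected transfer length legitimately exceeds the data buffer length. A standalone sketch of that arithmetic (the 8-byte DIF tuple and 512-byte sectors are assumptions for the example, not taken from the driver):

    #include <stdio.h>

    /* Protection bytes that accompany data_len bytes of payload, assuming
     * one 8-byte DIF tuple per logical block. */
    static unsigned int pi_bytes(unsigned int data_len, unsigned int sector_size)
    {
        return (data_len / sector_size) * 8;
    }

    int main(void)
    {
        unsigned int data_len = 4096;   /* eight 512-byte sectors */
        unsigned int prot = pi_bytes(data_len, 512);

        printf("data=%u prot=%u wire total=%u\n",
               data_len, prot, data_len + prot);
        /* prints: data=4096 prot=64 wire total=4160 */
        return 0;
    }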
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index b9d647468b99..d4c7928a0f36 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -663,8 +663,9 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 
 	pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;
 	if (pi_support && !device->pi_capable) {
-		pr_err("Protection information requested but not supported\n");
-		ret = -EINVAL;
+		pr_err("Protection information requested but not supported, "
+		       "rejecting connect request\n");
+		ret = rdma_reject(cma_id, NULL, 0);
 		goto out_mr;
 	}
 
@@ -787,14 +788,12 @@ isert_disconnect_work(struct work_struct *work)
 		isert_put_conn(isert_conn);
 		return;
 	}
-	if (!isert_conn->logout_posted) {
-		pr_debug("Calling rdma_disconnect for !logout_posted from"
-			 " isert_disconnect_work\n");
+
+	if (isert_conn->disconnect) {
+		/* Send DREQ/DREP towards our initiator */
 		rdma_disconnect(isert_conn->conn_cm_id);
-		mutex_unlock(&isert_conn->conn_mutex);
-		iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
-		goto wake_up;
 	}
+
 	mutex_unlock(&isert_conn->conn_mutex);
 
 wake_up:
@@ -803,10 +802,11 @@ wake_up:
 }
 
 static void
-isert_disconnected_handler(struct rdma_cm_id *cma_id)
+isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
 {
 	struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;
 
+	isert_conn->disconnect = disconnect;
 	INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
 	schedule_work(&isert_conn->conn_logout_work);
 }
@@ -815,29 +815,28 @@ static int
 isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 {
 	int ret = 0;
+	bool disconnect = false;
 
 	pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
 		 event->event, event->status, cma_id->context, cma_id);
 
 	switch (event->event) {
 	case RDMA_CM_EVENT_CONNECT_REQUEST:
-		pr_debug("RDMA_CM_EVENT_CONNECT_REQUEST: >>>>>>>>>>>>>>>\n");
 		ret = isert_connect_request(cma_id, event);
 		break;
 	case RDMA_CM_EVENT_ESTABLISHED:
-		pr_debug("RDMA_CM_EVENT_ESTABLISHED >>>>>>>>>>>>>>\n");
 		isert_connected_handler(cma_id);
 		break;
-	case RDMA_CM_EVENT_DISCONNECTED:
-		pr_debug("RDMA_CM_EVENT_DISCONNECTED: >>>>>>>>>>>>>>\n");
-		isert_disconnected_handler(cma_id);
-		break;
-	case RDMA_CM_EVENT_DEVICE_REMOVAL:
-	case RDMA_CM_EVENT_ADDR_CHANGE:
+	case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
+	case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
+	case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
+		disconnect = true;
+	case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
+		isert_disconnected_handler(cma_id, disconnect);
 		break;
 	case RDMA_CM_EVENT_CONNECT_ERROR:
 	default:
-		pr_err("Unknown RDMA CMA event: %d\n", event->event);
+		pr_err("Unhandled RDMA CMA event: %d\n", event->event);
 		break;
 	}
 
@@ -1054,7 +1053,9 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
 	}
 	if (!login->login_failed) {
 		if (login->login_complete) {
-			if (isert_conn->conn_device->use_fastreg) {
+			if (!conn->sess->sess_ops->SessionType &&
+			    isert_conn->conn_device->use_fastreg) {
+				/* Normal Session and fastreg is used */
 				u8 pi_support = login->np->tpg_np->tpg->tpg_attrib.t10_pi;
 
 				ret = isert_conn_create_fastreg_pool(isert_conn,
@@ -1824,11 +1825,8 @@ isert_do_control_comp(struct work_struct *work)
 		break;
 	case ISTATE_SEND_LOGOUTRSP:
 		pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
-		/*
-		 * Call atomic_dec(&isert_conn->post_send_buf_count)
-		 * from isert_wait_conn()
-		 */
-		isert_conn->logout_posted = true;
+
+		atomic_dec(&isert_conn->post_send_buf_count);
 		iscsit_logout_post_handler(cmd, cmd->conn);
 		break;
 	case ISTATE_SEND_TEXTRSP:
@@ -2034,6 +2032,8 @@ isert_cq_rx_comp_err(struct isert_conn *isert_conn)
 	isert_conn->state = ISER_CONN_DOWN;
 	mutex_unlock(&isert_conn->conn_mutex);
 
+	iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+
 	complete(&isert_conn->conn_wait_comp_err);
 }
 
@@ -2320,7 +2320,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 	int rc;
 
 	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
-	rc = iscsit_build_text_rsp(cmd, conn, hdr);
+	rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
 	if (rc < 0)
 		return rc;
 
@@ -3156,9 +3156,14 @@ accept_wait:
 		return -ENODEV;
 
 	spin_lock_bh(&np->np_thread_lock);
-	if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
 		spin_unlock_bh(&np->np_thread_lock);
-		pr_debug("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
+		pr_debug("np_thread_state %d for isert_accept_np\n",
+			 np->np_thread_state);
+		/**
+		 * No point in stalling here when np_thread
+		 * is in state RESET/SHUTDOWN/EXIT - bail
+		 **/
 		return -ENODEV;
 	}
 	spin_unlock_bh(&np->np_thread_lock);
@@ -3208,15 +3213,9 @@ static void isert_wait_conn(struct iscsi_conn *conn)
 	struct isert_conn *isert_conn = conn->context;
 
 	pr_debug("isert_wait_conn: Starting \n");
-	/*
-	 * Decrement post_send_buf_count for special case when called
-	 * from isert_do_control_comp() -> iscsit_logout_post_handler()
-	 */
-	mutex_lock(&isert_conn->conn_mutex);
-	if (isert_conn->logout_posted)
-		atomic_dec(&isert_conn->post_send_buf_count);
 
-	if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
+	mutex_lock(&isert_conn->conn_mutex);
+	if (isert_conn->conn_cm_id) {
 		pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
 		rdma_disconnect(isert_conn->conn_cm_id);
 	}
@@ -3293,6 +3292,7 @@ destroy_rx_wq:
 
 static void __exit isert_exit(void)
{
+	flush_scheduled_work();
 	destroy_workqueue(isert_comp_wq);
 	destroy_workqueue(isert_rx_wq);
 	iscsit_unregister_transport(&iser_target_transport);
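
The isert_cma_handler() rework above funnels four CM events into a single isert_disconnected_handler() call and uses deliberate case fall-through so that only genuine teardown events (address change, disconnect, device removal) set disconnect = true, while TIMEWAIT_EXIT reuses the handler with the flag left false. A compilable sketch of that dispatch pattern, with event names shortened and the handler reduced to a printf (illustrative only):

    #include <stdbool.h>
    #include <stdio.h>

    enum cm_event { ADDR_CHANGE, DISCONNECTED, DEVICE_REMOVAL, TIMEWAIT_EXIT };

    static void handle(enum cm_event ev)
    {
        bool disconnect = false;

        switch (ev) {
        case ADDR_CHANGE:
        case DISCONNECTED:
        case DEVICE_REMOVAL:
            disconnect = true;
            /* fall through: all teardown events share one handler */
        case TIMEWAIT_EXIT:
            printf("disconnected_handler(disconnect=%d)\n", disconnect);
            break;
        }
    }

    int main(void)
    {
        handle(DISCONNECTED);   /* prints disconnect=1 */
        handle(TIMEWAIT_EXIT);  /* prints disconnect=0 */
        return 0;
    }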
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index da6612e68000..04f51f7bf614 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -116,7 +116,6 @@ struct isert_device;
 
 struct isert_conn {
 	enum iser_conn_state state;
-	bool logout_posted;
 	int post_recv_buf_count;
 	atomic_t post_send_buf_count;
 	u32 responder_resources;
@@ -151,6 +150,7 @@ struct isert_conn {
 #define ISERT_COMP_BATCH_COUNT 8
 	int conn_comp_batch;
 	struct llist_head conn_comp_llist;
+	bool disconnect;
 };
 
 #define ISERT_MAX_CQ 64
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 02832d64d918..baca5897039f 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1773,6 +1773,7 @@ config SCSI_BFA_FC
 config SCSI_VIRTIO
 	tristate "virtio-scsi support"
 	depends on VIRTIO
+	select BLK_DEV_INTEGRITY
 	help
 	  This is the virtual HBA driver for virtio. If the kernel will
 	  be used in a virtual machine, say Y or M.
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index ecd7bd304efe..3d1bc67bac9d 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -338,7 +338,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
 	struct iscsi_session *session = conn->session;
 	struct scsi_cmnd *sc = task->sc;
 	struct iscsi_scsi_req *hdr;
-	unsigned hdrlength, cmd_len;
+	unsigned hdrlength, cmd_len, transfer_length;
 	itt_t itt;
 	int rc;
 
@@ -391,11 +391,11 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
 	if (scsi_get_prot_op(sc) != SCSI_PROT_NORMAL)
 		task->protected = true;
 
+	transfer_length = scsi_transfer_length(sc);
+	hdr->data_length = cpu_to_be32(transfer_length);
 	if (sc->sc_data_direction == DMA_TO_DEVICE) {
-		unsigned out_len = scsi_out(sc)->length;
 		struct iscsi_r2t_info *r2t = &task->unsol_r2t;
 
-		hdr->data_length = cpu_to_be32(out_len);
 		hdr->flags |= ISCSI_FLAG_CMD_WRITE;
 		/*
 		 * Write counters:
@@ -414,18 +414,19 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
 		memset(r2t, 0, sizeof(*r2t));
 
 		if (session->imm_data_en) {
-			if (out_len >= session->first_burst)
+			if (transfer_length >= session->first_burst)
 				task->imm_count = min(session->first_burst,
 						      conn->max_xmit_dlength);
 			else
-				task->imm_count = min(out_len,
+				task->imm_count = min(transfer_length,
 						      conn->max_xmit_dlength);
 			hton24(hdr->dlength, task->imm_count);
 		} else
 			zero_data(hdr->dlength);
 
 		if (!session->initial_r2t_en) {
-			r2t->data_length = min(session->first_burst, out_len) -
+			r2t->data_length = min(session->first_burst,
+					       transfer_length) -
 					       task->imm_count;
 			r2t->data_offset = task->imm_count;
 			r2t->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
@@ -438,7 +439,6 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
 	} else {
 		hdr->flags |= ISCSI_FLAG_CMD_FINAL;
 		zero_data(hdr->dlength);
-		hdr->data_length = cpu_to_be32(scsi_in(sc)->length);
 
 		if (sc->sc_data_direction == DMA_FROM_DEVICE)
 			hdr->flags |= ISCSI_FLAG_CMD_READ;
@@ -466,7 +466,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
 		  scsi_bidi_cmnd(sc) ? "bidirectional" :
 		  sc->sc_data_direction == DMA_TO_DEVICE ?
 		  "write" : "read", conn->id, sc, sc->cmnd[0],
-		  task->itt, scsi_bufflen(sc),
+		  task->itt, transfer_length,
 		  scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0,
 		  session->cmdsn,
 		  session->max_cmdsn - session->exp_cmdsn + 1);
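
With hdr->data_length now taken once from scsi_transfer_length(sc) (the diffstat shows include/scsi/scsi_cmnd.h gaining a helper in this same series), a single transfer_length value drives both the immediate-data and the unsolicited R2T accounting for WRITEs. A standalone sketch of that math, under assumed negotiated parameters (FirstBurstLength=64 KiB, MaxXmitDataSegmentLength=8 KiB; the values are illustrative):

    #include <stdio.h>

    static unsigned int min_u(unsigned int a, unsigned int b)
    {
        return a < b ? a : b;
    }

    int main(void)
    {
        unsigned int transfer_length = 131072;  /* 128 KiB WRITE */
        unsigned int first_burst = 65536;       /* FirstBurstLength */
        unsigned int max_xmit_dlength = 8192;   /* per-PDU limit */

        /* Immediate data: capped by both FirstBurst and the PDU limit */
        unsigned int imm_count = transfer_length >= first_burst ?
            min_u(first_burst, max_xmit_dlength) :
            min_u(transfer_length, max_xmit_dlength);

        /* Unsolicited data-out that follows the immediate payload */
        unsigned int r2t_len = min_u(first_burst, transfer_length) - imm_count;

        printf("immediate=%u unsolicited=%u\n", imm_count, r2t_len);
        /* prints: immediate=8192 unsolicited=57344 */
        return 0;
    }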
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index b1d10f9935c7..8d85ed8d8917 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -104,7 +104,6 @@ static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
 /*
  * Global Variables
  */
-static struct kmem_cache *qla_tgt_cmd_cachep;
 static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
 static mempool_t *qla_tgt_mgmt_cmd_mempool;
 static struct workqueue_struct *qla_tgt_wq;
@@ -2705,6 +2704,8 @@ done:
 
 void qlt_free_cmd(struct qla_tgt_cmd *cmd)
 {
+	struct qla_tgt_sess *sess = cmd->sess;
+
 	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
 	    "%s: se_cmd[%p] ox_id %04x\n",
 	    __func__, &cmd->se_cmd,
@@ -2713,7 +2714,12 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd)
 	BUG_ON(cmd->sg_mapped);
 	if (unlikely(cmd->free_sg))
 		kfree(cmd->sg);
-	kmem_cache_free(qla_tgt_cmd_cachep, cmd);
+
+	if (!sess || !sess->se_sess) {
+		WARN_ON(1);
+		return;
+	}
+	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
 }
 EXPORT_SYMBOL(qlt_free_cmd);
 
@@ -3075,13 +3081,12 @@ static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *,
 /*
  * Process context for I/O path into tcm_qla2xxx code
  */
-static void qlt_do_work(struct work_struct *work)
+static void __qlt_do_work(struct qla_tgt_cmd *cmd)
 {
-	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
 	scsi_qla_host_t *vha = cmd->vha;
 	struct qla_hw_data *ha = vha->hw;
 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
-	struct qla_tgt_sess *sess = NULL;
+	struct qla_tgt_sess *sess = cmd->sess;
 	struct atio_from_isp *atio = &cmd->atio;
 	unsigned char *cdb;
 	unsigned long flags;
@@ -3091,41 +3096,6 @@ static void qlt_do_work(struct work_struct *work)
 	if (tgt->tgt_stop)
 		goto out_term;
 
-	spin_lock_irqsave(&ha->hardware_lock, flags);
-	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
-	    atio->u.isp24.fcp_hdr.s_id);
-	/* Do kref_get() before dropping qla_hw_data->hardware_lock. */
-	if (sess)
-		kref_get(&sess->se_sess->sess_kref);
-	spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
-	if (unlikely(!sess)) {
-		uint8_t *s_id = atio->u.isp24.fcp_hdr.s_id;
-
-		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
-		    "qla_target(%d): Unable to find wwn login"
-		    " (s_id %x:%x:%x), trying to create it manually\n",
-		    vha->vp_idx, s_id[0], s_id[1], s_id[2]);
-
-		if (atio->u.raw.entry_count > 1) {
-			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
-			    "Dropping multy entry cmd %p\n", cmd);
-			goto out_term;
-		}
-
-		mutex_lock(&vha->vha_tgt.tgt_mutex);
-		sess = qlt_make_local_sess(vha, s_id);
-		/* sess has an extra creation ref. */
-		mutex_unlock(&vha->vha_tgt.tgt_mutex);
-
-		if (!sess)
-			goto out_term;
-	}
-
-	cmd->sess = sess;
-	cmd->loop_id = sess->loop_id;
-	cmd->conf_compl_supported = sess->conf_compl_supported;
-
 	cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
 	cmd->tag = atio->u.isp24.exchange_addr;
 	cmd->unpacked_lun = scsilun_to_int(
@@ -3153,8 +3123,8 @@
 	    cmd, &cmd->se_cmd, cmd->unpacked_lun, cmd->tag, data_length,
 	    cmd->atio.u.isp24.fcp_hdr.ox_id);
 
-	ret = vha->hw->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
+	ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
 	    fcp_task_attr, data_dir, bidi);
 	if (ret != 0)
 		goto out_term;
 	/*
@@ -3173,17 +3143,114 @@ out_term:
 	 */
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
-	kmem_cache_free(qla_tgt_cmd_cachep, cmd);
-	if (sess)
+	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
+	ha->tgt.tgt_ops->put_sess(sess);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static void qlt_do_work(struct work_struct *work)
+{
+	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+
+	__qlt_do_work(cmd);
+}
+
+static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
+	struct qla_tgt_sess *sess,
+	struct atio_from_isp *atio)
+{
+	struct se_session *se_sess = sess->se_sess;
+	struct qla_tgt_cmd *cmd;
+	int tag;
+
+	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+	if (tag < 0)
+		return NULL;
+
+	cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
+	memset(cmd, 0, sizeof(struct qla_tgt_cmd));
+
+	memcpy(&cmd->atio, atio, sizeof(*atio));
+	cmd->state = QLA_TGT_STATE_NEW;
+	cmd->tgt = vha->vha_tgt.qla_tgt;
+	cmd->vha = vha;
+	cmd->se_cmd.map_tag = tag;
+	cmd->sess = sess;
+	cmd->loop_id = sess->loop_id;
+	cmd->conf_compl_supported = sess->conf_compl_supported;
+
+	return cmd;
+}
+
+static void qlt_send_busy(struct scsi_qla_host *, struct atio_from_isp *,
+	uint16_t);
+
+static void qlt_create_sess_from_atio(struct work_struct *work)
+{
+	struct qla_tgt_sess_op *op = container_of(work,
+					struct qla_tgt_sess_op, work);
+	scsi_qla_host_t *vha = op->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt_sess *sess;
+	struct qla_tgt_cmd *cmd;
+	unsigned long flags;
+	uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
+	    "qla_target(%d): Unable to find wwn login"
+	    " (s_id %x:%x:%x), trying to create it manually\n",
+	    vha->vp_idx, s_id[0], s_id[1], s_id[2]);
+
+	if (op->atio.u.raw.entry_count > 1) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
+		    "Dropping multy entry atio %p\n", &op->atio);
+		goto out_term;
+	}
+
+	mutex_lock(&vha->vha_tgt.tgt_mutex);
+	sess = qlt_make_local_sess(vha, s_id);
+	/* sess has an extra creation ref. */
+	mutex_unlock(&vha->vha_tgt.tgt_mutex);
+
+	if (!sess)
+		goto out_term;
+	/*
+	 * Now obtain a pre-allocated session tag using the original op->atio
+	 * packet header, and dispatch into __qlt_do_work() using the existing
+	 * process context.
+	 */
+	cmd = qlt_get_tag(vha, sess, &op->atio);
+	if (!cmd) {
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+		qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY);
 		ha->tgt.tgt_ops->put_sess(sess);
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+		kfree(op);
+		return;
+	}
+	/*
+	 * __qlt_do_work() will call ha->tgt.tgt_ops->put_sess() to release
+	 * the extra reference taken above by qlt_make_local_sess()
+	 */
+	__qlt_do_work(cmd);
+	kfree(op);
+	return;
+
+out_term:
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	qlt_send_term_exchange(vha, NULL, &op->atio, 1);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	kfree(op);
+
 }
 
 /* ha->hardware_lock supposed to be held on entry */
 static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
 	struct atio_from_isp *atio)
 {
+	struct qla_hw_data *ha = vha->hw;
 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+	struct qla_tgt_sess *sess;
 	struct qla_tgt_cmd *cmd;
 
 	if (unlikely(tgt->tgt_stop)) {
@@ -3192,18 +3259,31 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
 		return -EFAULT;
 	}
 
-	cmd = kmem_cache_zalloc(qla_tgt_cmd_cachep, GFP_ATOMIC);
+	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
+	if (unlikely(!sess)) {
+		struct qla_tgt_sess_op *op = kzalloc(sizeof(struct qla_tgt_sess_op),
+						     GFP_ATOMIC);
+		if (!op)
+			return -ENOMEM;
+
+		memcpy(&op->atio, atio, sizeof(*atio));
+		INIT_WORK(&op->work, qlt_create_sess_from_atio);
+		queue_work(qla_tgt_wq, &op->work);
+		return 0;
+	}
+	/*
+	 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
+	 */
+	kref_get(&sess->se_sess->sess_kref);
+
+	cmd = qlt_get_tag(vha, sess, atio);
 	if (!cmd) {
 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e,
 		    "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
+		ha->tgt.tgt_ops->put_sess(sess);
 		return -ENOMEM;
 	}
 
-	memcpy(&cmd->atio, atio, sizeof(*atio));
-	cmd->state = QLA_TGT_STATE_NEW;
-	cmd->tgt = vha->vha_tgt.qla_tgt;
-	cmd->vha = vha;
-
 	INIT_WORK(&cmd->work, qlt_do_work);
 	queue_work(qla_tgt_wq, &cmd->work);
 	return 0;
@@ -5501,23 +5581,13 @@ int __init qlt_init(void)
 	if (!QLA_TGT_MODE_ENABLED())
 		return 0;
 
-	qla_tgt_cmd_cachep = kmem_cache_create("qla_tgt_cmd_cachep",
-	    sizeof(struct qla_tgt_cmd), __alignof__(struct qla_tgt_cmd), 0,
-	    NULL);
-	if (!qla_tgt_cmd_cachep) {
-		ql_log(ql_log_fatal, NULL, 0xe06c,
-		    "kmem_cache_create for qla_tgt_cmd_cachep failed\n");
-		return -ENOMEM;
-	}
-
 	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
 	    sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
 	    qla_tgt_mgmt_cmd), 0, NULL);
 	if (!qla_tgt_mgmt_cmd_cachep) {
 		ql_log(ql_log_fatal, NULL, 0xe06d,
 		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
-		ret = -ENOMEM;
-		goto out;
+		return -ENOMEM;
 	}
 
 	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
@@ -5545,8 +5615,6 @@ out_cmd_mempool:
 	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
 out_mgmt_cmd_cachep:
 	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
-out:
-	kmem_cache_destroy(qla_tgt_cmd_cachep);
 	return ret;
 }
 
@@ -5558,5 +5626,4 @@ void qlt_exit(void)
 	destroy_workqueue(qla_tgt_wq);
 	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
 	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
-	kmem_cache_destroy(qla_tgt_cmd_cachep);
 }
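
The qla_target.c changes above retire the global qla_tgt_cmd kmem_cache: commands now come out of a per-session array preallocated by the TCM core and are indexed by percpu_ida tags, with pool exhaustion reported to the initiator as SAM_STAT_BUSY instead of a failed allocation. A minimal sketch of that tag-pool pattern; my_cmd and my_session are illustrative stand-ins, not the driver's structures:

    #include <linux/percpu_ida.h>
    #include <linux/sched.h>
    #include <linux/string.h>

    struct my_cmd {
        int tag;
        /* ... per-command state ... */
    };

    struct my_session {
        struct percpu_ida tag_pool; /* percpu_ida_init(&tag_pool, nr) */
        struct my_cmd *cmd_map;     /* nr preallocated entries */
    };

    static struct my_cmd *my_cmd_get(struct my_session *s)
    {
        int tag = percpu_ida_alloc(&s->tag_pool, TASK_RUNNING);

        if (tag < 0)
            return NULL;            /* pool empty: answer BUSY */

        memset(&s->cmd_map[tag], 0, sizeof(struct my_cmd));
        s->cmd_map[tag].tag = tag;
        return &s->cmd_map[tag];
    }

    static void my_cmd_put(struct my_session *s, struct my_cmd *cmd)
    {
        percpu_ida_free(&s->tag_pool, cmd->tag);
    }

The companion tcm_qla2xxx hunks below size that pool from ha->fw_xcb_count when the firmware reports a worst-case IOCB count, fall back to TCM_QLA2XXX_DEFAULT_TAGS (2088) otherwise, and hand the count plus sizeof(struct qla_tgt_cmd) to transport_init_session_tags().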
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index f873e10451d2..5c9f185a8ebd 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -870,6 +870,12 @@ struct qla_tgt {
 	struct list_head tgt_list_entry;
 };
 
+struct qla_tgt_sess_op {
+	struct scsi_qla_host *vha;
+	struct atio_from_isp atio;
+	struct work_struct work;
+};
+
 /*
  * Equivilant to IT Nexus (Initiator-Target)
  */
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 896cb23adb77..e2beab962096 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -1501,6 +1501,8 @@ static int tcm_qla2xxx_check_initiator_node_acl(
 	struct qla_tgt_sess *sess = qla_tgt_sess;
 	unsigned char port_name[36];
 	unsigned long flags;
+	int num_tags = (ha->fw_xcb_count) ? ha->fw_xcb_count :
+		       TCM_QLA2XXX_DEFAULT_TAGS;
 
 	lport = vha->vha_tgt.target_lport_ptr;
 	if (!lport) {
@@ -1518,7 +1520,9 @@ static int tcm_qla2xxx_check_initiator_node_acl(
 	}
 	se_tpg = &tpg->se_tpg;
 
-	se_sess = transport_init_session(TARGET_PROT_NORMAL);
+	se_sess = transport_init_session_tags(num_tags,
+					      sizeof(struct qla_tgt_cmd),
+					      TARGET_PROT_NORMAL);
 	if (IS_ERR(se_sess)) {
 		pr_err("Unable to initialize struct se_session\n");
 		return PTR_ERR(se_sess);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index 33aaac8c7d59..10c002145648 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -4,6 +4,11 @@
 #define TCM_QLA2XXX_VERSION	"v0.1"
 /* length of ASCII WWPNs including pad */
 #define TCM_QLA2XXX_NAMELEN	32
+/*
+ * Number of pre-allocated per-session tags, based upon the worst-case
+ * per port number of iocbs
+ */
+#define TCM_QLA2XXX_DEFAULT_TAGS 2088
 
 #include "qla_target.h"
 
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 99fdb9403944..89ee5929eb6d 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -23,6 +23,7 @@
 #include <linux/virtio_config.h>
 #include <linux/virtio_scsi.h>
 #include <linux/cpu.h>
+#include <linux/blkdev.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_cmnd.h>
@@ -37,6 +38,7 @@ struct virtio_scsi_cmd {
 	struct completion *comp;
 	union {
 		struct virtio_scsi_cmd_req       cmd;
+		struct virtio_scsi_cmd_req_pi    cmd_pi;
 		struct virtio_scsi_ctrl_tmf_req  tmf;
 		struct virtio_scsi_ctrl_an_req   an;
 	} req;
@@ -399,7 +401,7 @@ static int virtscsi_add_cmd(struct virtqueue *vq,
 			    size_t req_size, size_t resp_size)
 {
 	struct scsi_cmnd *sc = cmd->sc;
-	struct scatterlist *sgs[4], req, resp;
+	struct scatterlist *sgs[6], req, resp;
 	struct sg_table *out, *in;
 	unsigned out_num = 0, in_num = 0;
 
@@ -417,16 +419,24 @@
 	sgs[out_num++] = &req;
 
 	/* Data-out buffer. */
-	if (out)
+	if (out) {
+		/* Place WRITE protection SGLs before Data OUT payload */
+		if (scsi_prot_sg_count(sc))
+			sgs[out_num++] = scsi_prot_sglist(sc);
 		sgs[out_num++] = out->sgl;
+	}
 
 	/* Response header. */
 	sg_init_one(&resp, &cmd->resp, resp_size);
 	sgs[out_num + in_num++] = &resp;
 
 	/* Data-in buffer */
-	if (in)
+	if (in) {
+		/* Place READ protection SGLs before Data IN payload */
+		if (scsi_prot_sg_count(sc))
+			sgs[out_num + in_num++] = scsi_prot_sglist(sc);
 		sgs[out_num + in_num++] = in->sgl;
+	}
 
 	return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC);
 }
@@ -451,12 +461,45 @@ static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq,
 	return err;
 }
 
+static void virtio_scsi_init_hdr(struct virtio_scsi_cmd_req *cmd,
+				 struct scsi_cmnd *sc)
+{
+	cmd->lun[0] = 1;
+	cmd->lun[1] = sc->device->id;
+	cmd->lun[2] = (sc->device->lun >> 8) | 0x40;
+	cmd->lun[3] = sc->device->lun & 0xff;
+	cmd->tag = (unsigned long)sc;
+	cmd->task_attr = VIRTIO_SCSI_S_SIMPLE;
+	cmd->prio = 0;
+	cmd->crn = 0;
+}
+
+static void virtio_scsi_init_hdr_pi(struct virtio_scsi_cmd_req_pi *cmd_pi,
+				    struct scsi_cmnd *sc)
+{
+	struct request *rq = sc->request;
+	struct blk_integrity *bi;
+
+	virtio_scsi_init_hdr((struct virtio_scsi_cmd_req *)cmd_pi, sc);
+
+	if (!rq || !scsi_prot_sg_count(sc))
+		return;
+
+	bi = blk_get_integrity(rq->rq_disk);
+
+	if (sc->sc_data_direction == DMA_TO_DEVICE)
+		cmd_pi->pi_bytesout = blk_rq_sectors(rq) * bi->tuple_size;
+	else if (sc->sc_data_direction == DMA_FROM_DEVICE)
+		cmd_pi->pi_bytesin = blk_rq_sectors(rq) * bi->tuple_size;
+}
+
 static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
 				 struct virtio_scsi_vq *req_vq,
 				 struct scsi_cmnd *sc)
 {
 	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
 	struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
+	int req_size;
 
 	BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
 
@@ -468,22 +511,20 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
 
 	memset(cmd, 0, sizeof(*cmd));
 	cmd->sc = sc;
-	cmd->req.cmd = (struct virtio_scsi_cmd_req){
-		.lun[0] = 1,
-		.lun[1] = sc->device->id,
-		.lun[2] = (sc->device->lun >> 8) | 0x40,
-		.lun[3] = sc->device->lun & 0xff,
-		.tag = (unsigned long)sc,
-		.task_attr = VIRTIO_SCSI_S_SIMPLE,
-		.prio = 0,
-		.crn = 0,
-	};
 
 	BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
-	memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
 
-	if (virtscsi_kick_cmd(req_vq, cmd,
-			      sizeof cmd->req.cmd, sizeof cmd->resp.cmd) != 0)
+	if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) {
+		virtio_scsi_init_hdr_pi(&cmd->req.cmd_pi, sc);
+		memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len);
+		req_size = sizeof(cmd->req.cmd_pi);
+	} else {
+		virtio_scsi_init_hdr(&cmd->req.cmd, sc);
+		memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
+		req_size = sizeof(cmd->req.cmd);
+	}
+
+	if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)) != 0)
 		return SCSI_MLQUEUE_HOST_BUSY;
 	return 0;
 }
@@ -820,7 +861,7 @@ static int virtscsi_probe(struct virtio_device *vdev)
 {
 	struct Scsi_Host *shost;
 	struct virtio_scsi *vscsi;
-	int err;
+	int err, host_prot;
 	u32 sg_elems, num_targets;
 	u32 cmd_per_lun;
 	u32 num_queues;
@@ -870,6 +911,16 @@ static int virtscsi_probe(struct virtio_device *vdev)
 	shost->max_id = num_targets;
 	shost->max_channel = 0;
 	shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
+
+	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
+		host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
+			    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
+			    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
+
+		scsi_host_set_prot(shost, host_prot);
+		scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
+	}
+
 	err = scsi_add_host(shost, &vdev->dev);
 	if (err)
 		goto scsi_add_host_failed;
@@ -939,6 +990,7 @@ static struct virtio_device_id id_table[] = {
 static unsigned int features[] = {
 	VIRTIO_SCSI_F_HOTPLUG,
 	VIRTIO_SCSI_F_CHANGE,
+	VIRTIO_SCSI_F_T10_PI,
 };
 
 static struct virtio_driver virtio_scsi_driver = {
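
On the initiator side, virtio-scsi negotiates VIRTIO_SCSI_F_T10_PI and, when the device offers it, switches to the extended virtio_scsi_cmd_req_pi header whose pi_bytesout/pi_bytesin fields announce how many integrity bytes travel with the payload; the protection scatterlists are queued immediately before the data, which is why sgs[] grows from 4 to 6 entries. The byte counts are simply the request's 512-byte wire sectors times the integrity tuple size. A worked example, assuming the 8-byte DIF tuple:

    #include <stdio.h>

    int main(void)
    {
        unsigned int rq_sectors = 8;    /* 4 KiB request, 512-byte sectors */
        unsigned int tuple_size = 8;    /* DIF tuple bytes per sector */

        printf("pi_bytesout=%u\n", rq_sectors * tuple_size);
        /* prints: pi_bytesout=64 */
        return 0;
    }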
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 9189bc0a87ae..5663f4d19d02 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -300,7 +300,7 @@ bool iscsit_check_np_match(
 		port = ntohs(sock_in->sin_port);
 	}
 
-	if ((ip_match == true) && (np->np_port == port) &&
+	if (ip_match && (np->np_port == port) &&
 	    (np->np_network_transport == network_transport))
 		return true;
 
@@ -325,7 +325,7 @@ static struct iscsi_np *iscsit_get_np(
 		}
 
 		match = iscsit_check_np_match(sockaddr, np, network_transport);
-		if (match == true) {
+		if (match) {
 			/*
 			 * Increment the np_exports reference count now to
 			 * prevent iscsit_del_np() below from being called
@@ -1121,7 +1121,7 @@ iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
 	/*
 	 * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
 	 */
-	if (dump_payload == true)
+	if (dump_payload)
 		goto after_immediate_data;
 
 	immed_ret = iscsit_handle_immediate_data(cmd, hdr,
@@ -3390,7 +3390,9 @@ static bool iscsit_check_inaddr_any(struct iscsi_np *np)
 
 #define SENDTARGETS_BUF_LIMIT 32768U
 
-static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
+static int
+iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
+				  enum iscsit_transport_type network_transport)
 {
 	char *payload = NULL;
 	struct iscsi_conn *conn = cmd->conn;
@@ -3467,6 +3469,9 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
 			struct iscsi_np *np = tpg_np->tpg_np;
 			bool inaddr_any = iscsit_check_inaddr_any(np);
 
+			if (np->np_network_transport != network_transport)
+				continue;
+
 			if (!target_name_printed) {
 				len = sprintf(buf, "TargetName=%s",
 					      tiqn->tiqn);
@@ -3485,10 +3490,8 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
 
 			len = sprintf(buf, "TargetAddress="
 				      "%s:%hu,%hu",
-				      (inaddr_any == false) ?
-						np->np_ip : conn->local_ip,
-				      (inaddr_any == false) ?
-						np->np_port : conn->local_port,
+				      inaddr_any ? conn->local_ip : np->np_ip,
+				      inaddr_any ? conn->local_port : np->np_port,
 				      tpg->tpgt);
 			len += 1;
 
@@ -3520,11 +3523,12 @@ eob:
 
 int
 iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
-		      struct iscsi_text_rsp *hdr)
+		      struct iscsi_text_rsp *hdr,
+		      enum iscsit_transport_type network_transport)
 {
 	int text_length, padding;
 
-	text_length = iscsit_build_sendtargets_response(cmd);
+	text_length = iscsit_build_sendtargets_response(cmd, network_transport);
 	if (text_length < 0)
 		return text_length;
 
@@ -3562,7 +3566,7 @@ static int iscsit_send_text_rsp(
 	u32 tx_size = 0;
 	int text_length, iov_count = 0, rc;
 
-	rc = iscsit_build_text_rsp(cmd, conn, hdr);
+	rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_TCP);
 	if (rc < 0)
 		return rc;
 
@@ -4234,8 +4238,6 @@ int iscsit_close_connection(
 	if (conn->conn_transport->iscsit_wait_conn)
 		conn->conn_transport->iscsit_wait_conn(conn);
 
-	iscsit_free_queue_reqs_for_conn(conn);
-
 	/*
 	 * During Connection recovery drop unacknowledged out of order
 	 * commands for this connection, and prepare the other commands
@@ -4252,6 +4254,7 @@ int iscsit_close_connection(
 		iscsit_clear_ooo_cmdsns_for_conn(conn);
 		iscsit_release_commands_from_conn(conn);
 	}
+	iscsit_free_queue_reqs_for_conn(conn);
 
 	/*
 	 * Handle decrementing session or connection usage count if
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index de77d9aa22c6..19b842c3e0b3 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -71,6 +71,40 @@ static void chap_gen_challenge(
 			challenge_asciihex);
 }
 
+static int chap_check_algorithm(const char *a_str)
+{
+	char *tmp, *orig, *token;
+
+	tmp = kstrdup(a_str, GFP_KERNEL);
+	if (!tmp) {
+		pr_err("Memory allocation failed for CHAP_A temporary buffer\n");
+		return CHAP_DIGEST_UNKNOWN;
+	}
+	orig = tmp;
+
+	token = strsep(&tmp, "=");
+	if (!token)
+		goto out;
+
+	if (strcmp(token, "CHAP_A")) {
+		pr_err("Unable to locate CHAP_A key\n");
+		goto out;
+	}
+	while (token) {
+		token = strsep(&tmp, ",");
+		if (!token)
+			goto out;
+
+		if (!strncmp(token, "5", 1)) {
+			pr_debug("Selected MD5 Algorithm\n");
+			kfree(orig);
+			return CHAP_DIGEST_MD5;
+		}
+	}
+out:
+	kfree(orig);
+	return CHAP_DIGEST_UNKNOWN;
+}
 
 static struct iscsi_chap *chap_server_open(
 	struct iscsi_conn *conn,
@@ -79,6 +113,7 @@ static struct iscsi_chap *chap_server_open(
 	char *aic_str,
 	unsigned int *aic_len)
 {
+	int ret;
 	struct iscsi_chap *chap;
 
 	if (!(auth->naf_flags & NAF_USERID_SET) ||
@@ -93,21 +128,24 @@ static struct iscsi_chap *chap_server_open(
 		return NULL;
 
 	chap = conn->auth_protocol;
-	/*
-	 * We only support MD5 MDA presently.
-	 */
-	if (strncmp(a_str, "CHAP_A=5", 8)) {
-		pr_err("CHAP_A is not MD5.\n");
+	ret = chap_check_algorithm(a_str);
+	switch (ret) {
+	case CHAP_DIGEST_MD5:
+		pr_debug("[server] Got CHAP_A=5\n");
+		/*
+		 * Send back CHAP_A set to MD5.
+		 */
+		*aic_len = sprintf(aic_str, "CHAP_A=5");
+		*aic_len += 1;
+		chap->digest_type = CHAP_DIGEST_MD5;
+		pr_debug("[server] Sending CHAP_A=%d\n", chap->digest_type);
+		break;
+	case CHAP_DIGEST_UNKNOWN:
+	default:
+		pr_err("Unsupported CHAP_A value\n");
 		return NULL;
 	}
-	pr_debug("[server] Got CHAP_A=5\n");
-	/*
-	 * Send back CHAP_A set to MD5.
-	 */
-	*aic_len = sprintf(aic_str, "CHAP_A=5");
-	*aic_len += 1;
-	chap->digest_type = CHAP_DIGEST_MD5;
-	pr_debug("[server] Sending CHAP_A=%d\n", chap->digest_type);
+
 	/*
 	 * Set Identifier.
 	 */
@@ -314,6 +352,16 @@ static int chap_server_compute_md5(
 		goto out;
 	}
 	/*
+	 * During mutual authentication, the CHAP_C generated by the
+	 * initiator must not match the original CHAP_C generated by
+	 * the target.
+	 */
+	if (!memcmp(challenge_binhex, chap->challenge, CHAP_CHALLENGE_LENGTH)) {
+		pr_err("initiator CHAP_C matches target CHAP_C, failing"
+		       " login attempt\n");
+		goto out;
+	}
+	/*
 	 * Generate CHAP_N and CHAP_R for mutual authentication.
 	 */
 	tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
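
chap_check_algorithm() replaces the old strncmp(a_str, "CHAP_A=5", 8) test, which rejected any initiator that proposed an algorithm preference list such as CHAP_A=6,5 even though MD5 was on it. A userspace sketch of the same strsep()-based scan, with a simplified return convention (1 if MD5 is offered, else 0):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int offers_md5(const char *a_str)
    {
        char *tmp = strdup(a_str), *orig = tmp, *token;
        int found = 0;

        token = strsep(&tmp, "=");              /* key: "CHAP_A" */
        if (token && !strcmp(token, "CHAP_A"))
            while ((token = strsep(&tmp, ",")) != NULL)
                if (!strncmp(token, "5", 1)) {  /* 5 == MD5 digest */
                    found = 1;
                    break;
                }
        free(orig);
        return found;
    }

    int main(void)
    {
        printf("%d %d\n", offers_md5("CHAP_A=6,5"), offers_md5("CHAP_A=7"));
        /* prints: 1 0 */
        return 0;
    }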
diff --git a/drivers/target/iscsi/iscsi_target_auth.h b/drivers/target/iscsi/iscsi_target_auth.h
index 2f463c09626d..d22f7b96a06c 100644
--- a/drivers/target/iscsi/iscsi_target_auth.h
+++ b/drivers/target/iscsi/iscsi_target_auth.h
@@ -1,6 +1,7 @@
 #ifndef _ISCSI_CHAP_H_
 #define _ISCSI_CHAP_H_
 
+#define CHAP_DIGEST_UNKNOWN	0
 #define CHAP_DIGEST_MD5		5
 #define CHAP_DIGEST_SHA		6
 
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index d9b1d88e1ad3..fecb69535a15 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1145,7 +1145,7 @@ iscsit_conn_set_transport(struct iscsi_conn *conn, struct iscsit_transport *t)
 void iscsi_target_login_sess_out(struct iscsi_conn *conn,
 		struct iscsi_np *np, bool zero_tsih, bool new_sess)
 {
-	if (new_sess == false)
+	if (!new_sess)
 		goto old_sess_out;
 
 	pr_err("iSCSI Login negotiation failed.\n");
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 75b685960e80..62a095f36bf2 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -404,7 +404,7 @@ static void iscsi_target_sk_data_ready(struct sock *sk)
 	}
 
 	rc = schedule_delayed_work(&conn->login_work, 0);
-	if (rc == false) {
+	if (!rc) {
 		pr_debug("iscsi_target_sk_data_ready, schedule_delayed_work"
 			" got false\n");
 	}
@@ -513,7 +513,7 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
 		state = (tpg->tpg_state == TPG_STATE_ACTIVE);
 		spin_unlock(&tpg->tpg_state_lock);
 
-		if (state == false) {
+		if (!state) {
 			pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n");
 			iscsi_target_restore_sock_callbacks(conn);
 			iscsi_target_login_drop(conn, login);
@@ -528,7 +528,7 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
 	state = iscsi_target_sk_state_check(sk);
 	read_unlock_bh(&sk->sk_callback_lock);
 
-	if (state == false) {
+	if (!state) {
 		pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
 		iscsi_target_restore_sock_callbacks(conn);
 		iscsi_target_login_drop(conn, login);
@@ -773,6 +773,12 @@ static int iscsi_target_handle_csg_zero(
 	}
 
 		goto do_auth;
+	} else if (!payload_length) {
+		pr_err("Initiator sent zero length security payload,"
+		       " login failed\n");
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+				    ISCSI_LOGIN_STATUS_AUTH_FAILED);
+		return -1;
 	}
 
 	if (login->first_request)
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 4d2e23fc76fd..02f9de26f38a 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -474,10 +474,10 @@ int iscsi_set_keys_to_negotiate(
 		if (!strcmp(param->name, AUTHMETHOD)) {
 			SET_PSTATE_NEGOTIATE(param);
 		} else if (!strcmp(param->name, HEADERDIGEST)) {
-			if (iser == false)
+			if (!iser)
 				SET_PSTATE_NEGOTIATE(param);
 		} else if (!strcmp(param->name, DATADIGEST)) {
-			if (iser == false)
+			if (!iser)
 				SET_PSTATE_NEGOTIATE(param);
 		} else if (!strcmp(param->name, MAXCONNECTIONS)) {
 			SET_PSTATE_NEGOTIATE(param);
@@ -497,7 +497,7 @@ int iscsi_set_keys_to_negotiate(
 		} else if (!strcmp(param->name, IMMEDIATEDATA)) {
 			SET_PSTATE_NEGOTIATE(param);
 		} else if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) {
-			if (iser == false)
+			if (!iser)
 				SET_PSTATE_NEGOTIATE(param);
 		} else if (!strcmp(param->name, MAXXMITDATASEGMENTLENGTH)) {
 			continue;
@@ -528,13 +528,13 @@ int iscsi_set_keys_to_negotiate(
 		} else if (!strcmp(param->name, OFMARKINT)) {
 			SET_PSTATE_NEGOTIATE(param);
 		} else if (!strcmp(param->name, RDMAEXTENSIONS)) {
-			if (iser == true)
+			if (iser)
 				SET_PSTATE_NEGOTIATE(param);
 		} else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) {
-			if (iser == true)
+			if (iser)
 				SET_PSTATE_NEGOTIATE(param);
 		} else if (!strcmp(param->name, TARGETRECVDATASEGMENTLENGTH)) {
-			if (iser == true)
+			if (iser)
 				SET_PSTATE_NEGOTIATE(param);
 		}
 	}
@@ -1605,7 +1605,7 @@ int iscsi_decode_text_input(
 
 	tmpbuf = kzalloc(length + 1, GFP_KERNEL);
 	if (!tmpbuf) {
-		pr_err("Unable to allocate memory for tmpbuf.\n");
+		pr_err("Unable to allocate %u + 1 bytes for tmpbuf.\n", length);
 		return -1;
 	}
 
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 1431e8400d28..c3cb5c15efda 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -189,7 +189,7 @@ static void iscsit_clear_tpg_np_login_thread(
 	iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg, shutdown);
 }
 
-void iscsit_clear_tpg_np_login_threads(
+static void iscsit_clear_tpg_np_login_threads(
 	struct iscsi_portal_group *tpg,
 	bool shutdown)
 {
@@ -276,8 +276,6 @@ int iscsit_tpg_del_portal_group(
 	tpg->tpg_state = TPG_STATE_INACTIVE;
 	spin_unlock(&tpg->tpg_state_lock);
 
-	iscsit_clear_tpg_np_login_threads(tpg, true);
-
 	if (iscsit_release_sessions_for_tpg(tpg, force) < 0) {
 		pr_err("Unable to delete iSCSI Target Portal Group:"
 			" %hu while active sessions exist, and force=0\n",
@@ -453,7 +451,7 @@ static bool iscsit_tpg_check_network_portal(
 
 		match = iscsit_check_np_match(sockaddr, np,
 				network_transport);
-		if (match == true)
+		if (match)
 			break;
 	}
 	spin_unlock(&tpg->tpg_np_lock);
@@ -475,7 +473,7 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
 
 	if (!tpg_np_parent) {
 		if (iscsit_tpg_check_network_portal(tpg->tpg_tiqn, sockaddr,
-				network_transport) == true) {
+				network_transport)) {
 			pr_err("Network Portal: %s already exists on a"
 				" different TPG on %s\n", ip_str,
 				tpg->tpg_tiqn->tiqn);
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
index 0a182f2aa8a2..e7265337bc43 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -8,7 +8,6 @@ extern struct iscsi_portal_group *iscsit_get_tpg_from_np(struct iscsi_tiqn *,
8 struct iscsi_np *, struct iscsi_tpg_np **); 8 struct iscsi_np *, struct iscsi_tpg_np **);
9extern int iscsit_get_tpg(struct iscsi_portal_group *); 9extern int iscsit_get_tpg(struct iscsi_portal_group *);
10extern void iscsit_put_tpg(struct iscsi_portal_group *); 10extern void iscsit_put_tpg(struct iscsi_portal_group *);
11extern void iscsit_clear_tpg_np_login_threads(struct iscsi_portal_group *, bool);
12extern void iscsit_tpg_dump_params(struct iscsi_portal_group *); 11extern void iscsit_tpg_dump_params(struct iscsi_portal_group *);
13extern int iscsit_tpg_add_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *); 12extern int iscsit_tpg_add_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *);
14extern int iscsit_tpg_del_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *, 13extern int iscsit_tpg_del_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *,
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 73ab75ddaf42..6d2f37578b29 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -179,7 +179,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
179 struct tcm_loop_hba *tl_hba; 179 struct tcm_loop_hba *tl_hba;
180 struct tcm_loop_tpg *tl_tpg; 180 struct tcm_loop_tpg *tl_tpg;
181 struct scatterlist *sgl_bidi = NULL; 181 struct scatterlist *sgl_bidi = NULL;
182 u32 sgl_bidi_count = 0; 182 u32 sgl_bidi_count = 0, transfer_length;
183 int rc; 183 int rc;
184 184
185 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); 185 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
@@ -213,12 +213,21 @@ static void tcm_loop_submission_work(struct work_struct *work)
213 213
214 } 214 }
215 215
216 if (!scsi_prot_sg_count(sc) && scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) 216 transfer_length = scsi_transfer_length(sc);
217 if (!scsi_prot_sg_count(sc) &&
218 scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
217 se_cmd->prot_pto = true; 219 se_cmd->prot_pto = true;
220 /*
 221 * the loopback transport doesn't support the
 222 * WRITE_GENERATE and READ_STRIP protection
 223 * information operations, so proceed unprotected.
224 */
225 transfer_length = scsi_bufflen(sc);
226 }
218 227
219 rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd, 228 rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
220 &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun, 229 &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
221 scsi_bufflen(sc), tcm_loop_sam_attr(sc), 230 transfer_length, tcm_loop_sam_attr(sc),
222 sc->sc_data_direction, 0, 231 sc->sc_data_direction, 0,
223 scsi_sglist(sc), scsi_sg_count(sc), 232 scsi_sglist(sc), scsi_sg_count(sc),
224 sgl_bidi, sgl_bidi_count, 233 sgl_bidi, sgl_bidi_count,
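
The tcm_loop change above keys the submitted length off scsi_transfer_length() (added to scsi_cmnd.h later in this patch), falling back to scsi_bufflen() when the command carries a protection operation the loopback fabric cannot service. A minimal userspace sketch of that selection; all names below are local to the example, not the kernel API:

	#include <stdbool.h>
	#include <stdio.h>

	/* PI on the wire: submit data + PI; otherwise pure data only */
	static unsigned int pick_transfer_length(unsigned int data_len,
						 unsigned int pi_len,
						 bool prot_supported)
	{
		return prot_supported ? data_len + pi_len : data_len;
	}

	int main(void)
	{
		/* 8 x 512-byte sectors, 8 bytes of PI per sector */
		printf("%u\n", pick_transfer_length(4096, 64, true));	/* 4160 */
		printf("%u\n", pick_transfer_length(4096, 64, false));	/* 4096 */
		return 0;
	}
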
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index e0229592ec55..bd78d9235ac6 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -81,7 +81,7 @@ sbc_emulate_readcapacity(struct se_cmd *cmd)
81 transport_kunmap_data_sg(cmd); 81 transport_kunmap_data_sg(cmd);
82 } 82 }
83 83
84 target_complete_cmd(cmd, GOOD); 84 target_complete_cmd_with_length(cmd, GOOD, 8);
85 return 0; 85 return 0;
86} 86}
87 87
@@ -137,7 +137,7 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
137 transport_kunmap_data_sg(cmd); 137 transport_kunmap_data_sg(cmd);
138 } 138 }
139 139
140 target_complete_cmd(cmd, GOOD); 140 target_complete_cmd_with_length(cmd, GOOD, 32);
141 return 0; 141 return 0;
142} 142}
143 143
@@ -176,24 +176,6 @@ static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
176 return cmd->se_dev->dev_attrib.block_size * sectors; 176 return cmd->se_dev->dev_attrib.block_size * sectors;
177} 177}
178 178
179static int sbc_check_valid_sectors(struct se_cmd *cmd)
180{
181 struct se_device *dev = cmd->se_dev;
182 unsigned long long end_lba;
183 u32 sectors;
184
185 sectors = cmd->data_length / dev->dev_attrib.block_size;
186 end_lba = dev->transport->get_blocks(dev) + 1;
187
188 if (cmd->t_task_lba + sectors > end_lba) {
189 pr_err("target: lba %llu, sectors %u exceeds end lba %llu\n",
190 cmd->t_task_lba, sectors, end_lba);
191 return -EINVAL;
192 }
193
194 return 0;
195}
196
197static inline u32 transport_get_sectors_6(unsigned char *cdb) 179static inline u32 transport_get_sectors_6(unsigned char *cdb)
198{ 180{
199 /* 181 /*
@@ -665,8 +647,19 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
665 647
666 cmd->prot_type = dev->dev_attrib.pi_prot_type; 648 cmd->prot_type = dev->dev_attrib.pi_prot_type;
667 cmd->prot_length = dev->prot_length * sectors; 649 cmd->prot_length = dev->prot_length * sectors;
668 pr_debug("%s: prot_type=%d, prot_length=%d prot_op=%d prot_checks=%d\n", 650
 669 __func__, cmd->prot_type, cmd->prot_length, 651 /*
 652 * In case protection information exists over the wire,
 653 * we modify the command data length to describe pure data.
 654 * The actual transfer length is data length + protection
 655 * length.
 656 */
657 if (protect)
658 cmd->data_length = sectors * dev->dev_attrib.block_size;
659
660 pr_debug("%s: prot_type=%d, data_length=%d, prot_length=%d "
661 "prot_op=%d prot_checks=%d\n",
662 __func__, cmd->prot_type, cmd->data_length, cmd->prot_length,
670 cmd->prot_op, cmd->prot_checks); 663 cmd->prot_op, cmd->prot_checks);
671 664
672 return true; 665 return true;
@@ -877,15 +870,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
877 break; 870 break;
878 case SYNCHRONIZE_CACHE: 871 case SYNCHRONIZE_CACHE:
879 case SYNCHRONIZE_CACHE_16: 872 case SYNCHRONIZE_CACHE_16:
880 if (!ops->execute_sync_cache) {
881 size = 0;
882 cmd->execute_cmd = sbc_emulate_noop;
883 break;
884 }
885
886 /*
887 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
888 */
889 if (cdb[0] == SYNCHRONIZE_CACHE) { 873 if (cdb[0] == SYNCHRONIZE_CACHE) {
890 sectors = transport_get_sectors_10(cdb); 874 sectors = transport_get_sectors_10(cdb);
891 cmd->t_task_lba = transport_lba_32(cdb); 875 cmd->t_task_lba = transport_lba_32(cdb);
@@ -893,18 +877,12 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
893 sectors = transport_get_sectors_16(cdb); 877 sectors = transport_get_sectors_16(cdb);
894 cmd->t_task_lba = transport_lba_64(cdb); 878 cmd->t_task_lba = transport_lba_64(cdb);
895 } 879 }
896 880 if (ops->execute_sync_cache) {
897 size = sbc_get_size(cmd, sectors); 881 cmd->execute_cmd = ops->execute_sync_cache;
898 882 goto check_lba;
899 /*
900 * Check to ensure that LBA + Range does not exceed past end of
901 * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
902 */
903 if (cmd->t_task_lba || sectors) {
904 if (sbc_check_valid_sectors(cmd) < 0)
905 return TCM_ADDRESS_OUT_OF_RANGE;
906 } 883 }
907 cmd->execute_cmd = ops->execute_sync_cache; 884 size = 0;
885 cmd->execute_cmd = sbc_emulate_noop;
908 break; 886 break;
909 case UNMAP: 887 case UNMAP:
910 if (!ops->execute_unmap) 888 if (!ops->execute_unmap)
@@ -947,8 +925,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
947 break; 925 break;
948 case VERIFY: 926 case VERIFY:
949 size = 0; 927 size = 0;
928 sectors = transport_get_sectors_10(cdb);
929 cmd->t_task_lba = transport_lba_32(cdb);
950 cmd->execute_cmd = sbc_emulate_noop; 930 cmd->execute_cmd = sbc_emulate_noop;
951 break; 931 goto check_lba;
952 case REZERO_UNIT: 932 case REZERO_UNIT:
953 case SEEK_6: 933 case SEEK_6:
954 case SEEK_10: 934 case SEEK_10:
@@ -988,7 +968,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
988 dev->dev_attrib.hw_max_sectors); 968 dev->dev_attrib.hw_max_sectors);
989 return TCM_INVALID_CDB_FIELD; 969 return TCM_INVALID_CDB_FIELD;
990 } 970 }
991 971check_lba:
992 end_lba = dev->transport->get_blocks(dev) + 1; 972 end_lba = dev->transport->get_blocks(dev) + 1;
993 if (cmd->t_task_lba + sectors > end_lba) { 973 if (cmd->t_task_lba + sectors > end_lba) {
994 pr_err("cmd exceeds last lba %llu " 974 pr_err("cmd exceeds last lba %llu "
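
The sbc_parse_cdb() rework above drops the sbc_check_valid_sectors() helper in favour of a shared check_lba path that SYNCHRONIZE_CACHE and VERIFY now jump to. A standalone sketch of the bound check, assuming get_blocks() returns the last addressable LBA as it does in target-core:

	#include <stdbool.h>
	#include <stdio.h>

	/* end_lba is last LBA + 1, so lba + sectors may equal but not exceed it */
	static bool lba_range_ok(unsigned long long lba, unsigned int sectors,
				 unsigned long long last_lba)
	{
		unsigned long long end_lba = last_lba + 1;

		return lba + sectors <= end_lba;
	}

	int main(void)
	{
		printf("%d\n", lba_range_ok(0, 8, 1023));	/* 1: in range */
		printf("%d\n", lba_range_ok(1020, 8, 1023));	/* 0: exceeds */
		return 0;
	}
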
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 8653666612a8..6cd7222738fc 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -129,15 +129,10 @@ static sense_reason_t
129spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) 129spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
130{ 130{
131 struct se_device *dev = cmd->se_dev; 131 struct se_device *dev = cmd->se_dev;
132 u16 len = 0; 132 u16 len;
133 133
134 if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) { 134 if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
135 u32 unit_serial_len; 135 len = sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial);
136
137 unit_serial_len = strlen(dev->t10_wwn.unit_serial);
138 unit_serial_len++; /* For NULL Terminator */
139
140 len += sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial);
141 len++; /* Extra Byte for NULL Terminator */ 136 len++; /* Extra Byte for NULL Terminator */
142 buf[3] = len; 137 buf[3] = len;
143 } 138 }
@@ -721,6 +716,7 @@ spc_emulate_inquiry(struct se_cmd *cmd)
721 unsigned char *buf; 716 unsigned char *buf;
722 sense_reason_t ret; 717 sense_reason_t ret;
723 int p; 718 int p;
719 int len = 0;
724 720
725 buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL); 721 buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL);
726 if (!buf) { 722 if (!buf) {
@@ -742,6 +738,7 @@ spc_emulate_inquiry(struct se_cmd *cmd)
742 } 738 }
743 739
744 ret = spc_emulate_inquiry_std(cmd, buf); 740 ret = spc_emulate_inquiry_std(cmd, buf);
741 len = buf[4] + 5;
745 goto out; 742 goto out;
746 } 743 }
747 744
@@ -749,6 +746,7 @@ spc_emulate_inquiry(struct se_cmd *cmd)
749 if (cdb[2] == evpd_handlers[p].page) { 746 if (cdb[2] == evpd_handlers[p].page) {
750 buf[1] = cdb[2]; 747 buf[1] = cdb[2];
751 ret = evpd_handlers[p].emulate(cmd, buf); 748 ret = evpd_handlers[p].emulate(cmd, buf);
749 len = get_unaligned_be16(&buf[2]) + 4;
752 goto out; 750 goto out;
753 } 751 }
754 } 752 }
@@ -765,7 +763,7 @@ out:
765 kfree(buf); 763 kfree(buf);
766 764
767 if (!ret) 765 if (!ret)
768 target_complete_cmd(cmd, GOOD); 766 target_complete_cmd_with_length(cmd, GOOD, len);
769 return ret; 767 return ret;
770} 768}
771 769
@@ -1103,7 +1101,7 @@ set_length:
1103 transport_kunmap_data_sg(cmd); 1101 transport_kunmap_data_sg(cmd);
1104 } 1102 }
1105 1103
1106 target_complete_cmd(cmd, GOOD); 1104 target_complete_cmd_with_length(cmd, GOOD, length);
1107 return 0; 1105 return 0;
1108} 1106}
1109 1107
@@ -1279,7 +1277,7 @@ done:
1279 buf[3] = (lun_count & 0xff); 1277 buf[3] = (lun_count & 0xff);
1280 transport_kunmap_data_sg(cmd); 1278 transport_kunmap_data_sg(cmd);
1281 1279
1282 target_complete_cmd(cmd, GOOD); 1280 target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8);
1283 return 0; 1281 return 0;
1284} 1282}
1285EXPORT_SYMBOL(spc_emulate_report_luns); 1283EXPORT_SYMBOL(spc_emulate_report_luns);
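
The spc changes report the actual emulated payload size so target_complete_cmd_with_length() can compute residuals: a standard INQUIRY response is buf[4] + 5 (ADDITIONAL LENGTH plus the 5 header bytes), an EVPD page is its big-endian length at bytes 2-3 plus the 4-byte header, and REPORT LUNS is 8 + lun_count * 8. A self-contained worked example of that arithmetic, with made-up buffers:

	#include <stdint.h>
	#include <stdio.h>

	static unsigned int std_inquiry_len(const uint8_t *buf)
	{
		return buf[4] + 5;		/* ADDITIONAL LENGTH + header */
	}

	static unsigned int evpd_page_len(const uint8_t *buf)
	{
		return ((buf[2] << 8) | buf[3]) + 4;	/* be16 length + header */
	}

	int main(void)
	{
		uint8_t std[36] = { [4] = 31 };		/* typical 36-byte INQUIRY */
		uint8_t vpd[8]  = { [2] = 0, [3] = 4 };	/* 4 bytes of page data */

		printf("%u %u %u\n", std_inquiry_len(std), evpd_page_len(vpd),
		       8 + 2 * 8 /* REPORT LUNS, two LUNs */);	/* 36 8 24 */
		return 0;
	}
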
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 2179feed0d63..7fa62fc93e0b 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -504,7 +504,7 @@ void transport_deregister_session(struct se_session *se_sess)
504 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group 504 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
505 * removal context. 505 * removal context.
506 */ 506 */
507 if (se_nacl && comp_nacl == true) 507 if (se_nacl && comp_nacl)
508 target_put_nacl(se_nacl); 508 target_put_nacl(se_nacl);
509 509
510 transport_free_session(se_sess); 510 transport_free_session(se_sess);
@@ -562,7 +562,7 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
562 562
563 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 563 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
564 564
565 complete(&cmd->t_transport_stop_comp); 565 complete_all(&cmd->t_transport_stop_comp);
566 return 1; 566 return 1;
567 } 567 }
568 568
@@ -687,7 +687,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
687 if (cmd->transport_state & CMD_T_ABORTED && 687 if (cmd->transport_state & CMD_T_ABORTED &&
688 cmd->transport_state & CMD_T_STOP) { 688 cmd->transport_state & CMD_T_STOP) {
689 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 689 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
690 complete(&cmd->t_transport_stop_comp); 690 complete_all(&cmd->t_transport_stop_comp);
691 return; 691 return;
692 } else if (!success) { 692 } else if (!success) {
693 INIT_WORK(&cmd->work, target_complete_failure_work); 693 INIT_WORK(&cmd->work, target_complete_failure_work);
@@ -703,6 +703,23 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
703} 703}
704EXPORT_SYMBOL(target_complete_cmd); 704EXPORT_SYMBOL(target_complete_cmd);
705 705
706void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
707{
708 if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
709 if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
710 cmd->residual_count += cmd->data_length - length;
711 } else {
712 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
713 cmd->residual_count = cmd->data_length - length;
714 }
715
716 cmd->data_length = length;
717 }
718
719 target_complete_cmd(cmd, scsi_status);
720}
721EXPORT_SYMBOL(target_complete_cmd_with_length);
722
706static void target_add_to_state_list(struct se_cmd *cmd) 723static void target_add_to_state_list(struct se_cmd *cmd)
707{ 724{
708 struct se_device *dev = cmd->se_dev; 725 struct se_device *dev = cmd->se_dev;
@@ -1761,7 +1778,7 @@ void target_execute_cmd(struct se_cmd *cmd)
1761 cmd->se_tfo->get_task_tag(cmd)); 1778 cmd->se_tfo->get_task_tag(cmd));
1762 1779
1763 spin_unlock_irq(&cmd->t_state_lock); 1780 spin_unlock_irq(&cmd->t_state_lock);
1764 complete(&cmd->t_transport_stop_comp); 1781 complete_all(&cmd->t_transport_stop_comp);
1765 return; 1782 return;
1766 } 1783 }
1767 1784
@@ -2363,7 +2380,7 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
2363 * fabric acknowledgement that requires two target_put_sess_cmd() 2380 * fabric acknowledgement that requires two target_put_sess_cmd()
2364 * invocations before se_cmd descriptor release. 2381 * invocations before se_cmd descriptor release.
2365 */ 2382 */
2366 if (ack_kref == true) { 2383 if (ack_kref) {
2367 kref_get(&se_cmd->cmd_kref); 2384 kref_get(&se_cmd->cmd_kref);
2368 se_cmd->se_cmd_flags |= SCF_ACK_KREF; 2385 se_cmd->se_cmd_flags |= SCF_ACK_KREF;
2369 } 2386 }
@@ -2407,6 +2424,10 @@ static void target_release_cmd_kref(struct kref *kref)
2407 */ 2424 */
2408int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) 2425int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
2409{ 2426{
2427 if (!se_sess) {
2428 se_cmd->se_tfo->release_cmd(se_cmd);
2429 return 1;
2430 }
2410 return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref, 2431 return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref,
2411 &se_sess->sess_cmd_lock); 2432 &se_sess->sess_cmd_lock);
2412} 2433}
@@ -2934,6 +2955,12 @@ static void target_tmr_work(struct work_struct *work)
2934int transport_generic_handle_tmr( 2955int transport_generic_handle_tmr(
2935 struct se_cmd *cmd) 2956 struct se_cmd *cmd)
2936{ 2957{
2958 unsigned long flags;
2959
2960 spin_lock_irqsave(&cmd->t_state_lock, flags);
2961 cmd->transport_state |= CMD_T_ACTIVE;
2962 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2963
2937 INIT_WORK(&cmd->work, target_tmr_work); 2964 INIT_WORK(&cmd->work, target_tmr_work);
2938 queue_work(cmd->se_dev->tmr_wq, &cmd->work); 2965 queue_work(cmd->se_dev->tmr_wq, &cmd->work);
2939 return 0; 2966 return 0;
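
target_complete_cmd_with_length() above clamps cmd->data_length and accounts the difference as an underflow residual whenever the emulated payload is shorter than the initiator's allocation length. A standalone model of that accounting; the struct below is local to the sketch, not struct se_cmd:

	#include <stdbool.h>
	#include <stdio.h>

	struct cmd_model {
		unsigned int data_length;
		unsigned int residual_count;
		bool underflow;		/* models SCF_UNDERFLOW_BIT */
	};

	static void complete_with_length(struct cmd_model *cmd, unsigned int length)
	{
		if (length < cmd->data_length) {
			if (cmd->underflow) {
				cmd->residual_count += cmd->data_length - length;
			} else {
				cmd->underflow = true;
				cmd->residual_count = cmd->data_length - length;
			}
			cmd->data_length = length;
		}
	}

	int main(void)
	{
		struct cmd_model cmd = { .data_length = 255 };

		complete_with_length(&cmd, 36);	/* INQUIRY into a 255-byte buffer */
		printf("data_length=%u residual=%u\n",
		       cmd.data_length, cmd.residual_count);	/* 36, 219 */
		return 0;
	}
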
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 669c536fd959..e9186cdf35e9 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -70,7 +70,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
70 unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn; 70 unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn;
71 int rc; 71 int rc;
72 72
73 if (src == true) 73 if (src)
74 dev_wwn = &xop->dst_tid_wwn[0]; 74 dev_wwn = &xop->dst_tid_wwn[0];
75 else 75 else
76 dev_wwn = &xop->src_tid_wwn[0]; 76 dev_wwn = &xop->src_tid_wwn[0];
@@ -88,7 +88,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
88 if (rc != 0) 88 if (rc != 0)
89 continue; 89 continue;
90 90
91 if (src == true) { 91 if (src) {
92 xop->dst_dev = se_dev; 92 xop->dst_dev = se_dev;
93 pr_debug("XCOPY 0xe4: Setting xop->dst_dev: %p from located" 93 pr_debug("XCOPY 0xe4: Setting xop->dst_dev: %p from located"
94 " se_dev\n", xop->dst_dev); 94 " se_dev\n", xop->dst_dev);
@@ -166,7 +166,7 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op
166 return -EINVAL; 166 return -EINVAL;
167 } 167 }
168 168
169 if (src == true) { 169 if (src) {
170 memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN); 170 memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
171 /* 171 /*
172 * Determine if the source designator matches the local device 172 * Determine if the source designator matches the local device
@@ -236,7 +236,7 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
236 /* 236 /*
237 * Assume target descriptors are in source -> destination order.. 237 * Assume target descriptors are in source -> destination order..
238 */ 238 */
239 if (src == true) 239 if (src)
240 src = false; 240 src = false;
241 else 241 else
242 src = true; 242 src = true;
@@ -560,7 +560,7 @@ static int target_xcopy_init_pt_lun(
560 * reservations. The pt_cmd->se_lun pointer will be setup from within 560 * reservations. The pt_cmd->se_lun pointer will be setup from within
561 * target_xcopy_setup_pt_port() 561 * target_xcopy_setup_pt_port()
562 */ 562 */
563 if (remote_port == false) { 563 if (!remote_port) {
564 pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH; 564 pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH;
565 return 0; 565 return 0;
566 } 566 }
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index f5fd515b2bee..be0c0d08c56a 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -128,6 +128,7 @@ int ft_queue_status(struct se_cmd *se_cmd)
128 struct fc_lport *lport; 128 struct fc_lport *lport;
129 struct fc_exch *ep; 129 struct fc_exch *ep;
130 size_t len; 130 size_t len;
131 int rc;
131 132
132 if (cmd->aborted) 133 if (cmd->aborted)
133 return 0; 134 return 0;
@@ -137,9 +138,10 @@ int ft_queue_status(struct se_cmd *se_cmd)
137 len = sizeof(*fcp) + se_cmd->scsi_sense_length; 138 len = sizeof(*fcp) + se_cmd->scsi_sense_length;
138 fp = fc_frame_alloc(lport, len); 139 fp = fc_frame_alloc(lport, len);
139 if (!fp) { 140 if (!fp) {
140 /* XXX shouldn't just drop it - requeue and retry? */ 141 se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
141 return 0; 142 return -ENOMEM;
142 } 143 }
144
143 fcp = fc_frame_payload_get(fp, len); 145 fcp = fc_frame_payload_get(fp, len);
144 memset(fcp, 0, len); 146 memset(fcp, 0, len);
145 fcp->resp.fr_status = se_cmd->scsi_status; 147 fcp->resp.fr_status = se_cmd->scsi_status;
@@ -170,7 +172,18 @@ int ft_queue_status(struct se_cmd *se_cmd)
170 fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP, 172 fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
171 FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0); 173 FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);
172 174
173 lport->tt.seq_send(lport, cmd->seq, fp); 175 rc = lport->tt.seq_send(lport, cmd->seq, fp);
176 if (rc) {
177 pr_info_ratelimited("%s: Failed to send response frame %p, "
178 "xid <0x%x>\n", __func__, fp, ep->xid);
179 /*
180 * Generate a TASK_SET_FULL status to notify the initiator
 181 * to reduce its queue_depth after the se_cmd response has
182 * been re-queued by target-core.
183 */
184 se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
185 return -ENOMEM;
186 }
174 lport->tt.exch_done(cmd->seq); 187 lport->tt.exch_done(cmd->seq);
175 return 0; 188 return 0;
176} 189}
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index e415af32115a..97b486c3dda1 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -82,6 +82,10 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
82 82
83 if (cmd->aborted) 83 if (cmd->aborted)
84 return 0; 84 return 0;
85
86 if (se_cmd->scsi_status == SAM_STAT_TASK_SET_FULL)
87 goto queue_status;
88
85 ep = fc_seq_exch(cmd->seq); 89 ep = fc_seq_exch(cmd->seq);
86 lport = ep->lp; 90 lport = ep->lp;
87 cmd->seq = lport->tt.seq_start_next(cmd->seq); 91 cmd->seq = lport->tt.seq_start_next(cmd->seq);
@@ -178,14 +182,23 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
178 FC_TYPE_FCP, f_ctl, fh_off); 182 FC_TYPE_FCP, f_ctl, fh_off);
179 error = lport->tt.seq_send(lport, seq, fp); 183 error = lport->tt.seq_send(lport, seq, fp);
180 if (error) { 184 if (error) {
181 /* XXX For now, initiator will retry */ 185 pr_info_ratelimited("%s: Failed to send frame %p, "
182 pr_err_ratelimited("%s: Failed to send frame %p, "
183 "xid <0x%x>, remaining %zu, " 186 "xid <0x%x>, remaining %zu, "
184 "lso_max <0x%x>\n", 187 "lso_max <0x%x>\n",
185 __func__, fp, ep->xid, 188 __func__, fp, ep->xid,
186 remaining, lport->lso_max); 189 remaining, lport->lso_max);
190 /*
191 * Go ahead and set TASK_SET_FULL status ignoring the
192 * rest of the DataIN, and immediately attempt to
193 * send the response via ft_queue_status() in order
 194 * to notify the initiator that it should reduce its
 195 * per-LUN queue_depth.
196 */
197 se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
198 break;
187 } 199 }
188 } 200 }
201queue_status:
189 return ft_queue_status(se_cmd); 202 return ft_queue_status(se_cmd);
190} 203}
191 204
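
Across the two tcm_fc files, a failed lport->tt.seq_send() now marks the command SAM_STAT_TASK_SET_FULL: ft_queue_data_in() abandons the remaining DataIN and falls through to ft_queue_status(), whose status frame tells the initiator to shrink its queue depth. A compact userspace model of that flow, with a deliberately failing send stub in place of libfc:

	#include <stdbool.h>
	#include <stdio.h>

	#define SAM_STAT_TASK_SET_FULL 0x28

	static bool seq_send(int frame) { return frame != 2; /* fail frame 2 */ }

	static void queue_data_in(unsigned char *scsi_status, int nframes)
	{
		for (int i = 0; i < nframes; i++) {
			if (!seq_send(i)) {
				*scsi_status = SAM_STAT_TASK_SET_FULL;
				break;	/* skip the rest of the DataIN */
			}
		}
		/* caller now sends the status frame via queue_status() */
	}

	int main(void)
	{
		unsigned char status = 0;

		queue_data_in(&status, 4);
		printf("status=0x%02x\n", status);	/* 0x28: TASK_SET_FULL */
		return 0;
	}
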
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index e9c280f55819..4f4ffa4c604e 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -57,7 +57,8 @@
57#define TCM_VHOST_MAX_CDB_SIZE 32 57#define TCM_VHOST_MAX_CDB_SIZE 32
58#define TCM_VHOST_DEFAULT_TAGS 256 58#define TCM_VHOST_DEFAULT_TAGS 256
59#define TCM_VHOST_PREALLOC_SGLS 2048 59#define TCM_VHOST_PREALLOC_SGLS 2048
60#define TCM_VHOST_PREALLOC_PAGES 2048 60#define TCM_VHOST_PREALLOC_UPAGES 2048
61#define TCM_VHOST_PREALLOC_PROT_SGLS 512
61 62
62struct vhost_scsi_inflight { 63struct vhost_scsi_inflight {
63 /* Wait for the flush operation to finish */ 64 /* Wait for the flush operation to finish */
@@ -79,10 +80,12 @@ struct tcm_vhost_cmd {
79 u64 tvc_tag; 80 u64 tvc_tag;
80 /* The number of scatterlists associated with this cmd */ 81 /* The number of scatterlists associated with this cmd */
81 u32 tvc_sgl_count; 82 u32 tvc_sgl_count;
83 u32 tvc_prot_sgl_count;
82 /* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */ 84 /* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */
83 u32 tvc_lun; 85 u32 tvc_lun;
84 /* Pointer to the SGL formatted memory from virtio-scsi */ 86 /* Pointer to the SGL formatted memory from virtio-scsi */
85 struct scatterlist *tvc_sgl; 87 struct scatterlist *tvc_sgl;
88 struct scatterlist *tvc_prot_sgl;
86 struct page **tvc_upages; 89 struct page **tvc_upages;
87 /* Pointer to response */ 90 /* Pointer to response */
88 struct virtio_scsi_cmd_resp __user *tvc_resp; 91 struct virtio_scsi_cmd_resp __user *tvc_resp;
@@ -166,7 +169,8 @@ enum {
166}; 169};
167 170
168enum { 171enum {
169 VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) 172 VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
173 (1ULL << VIRTIO_SCSI_F_T10_PI)
170}; 174};
171 175
172#define VHOST_SCSI_MAX_TARGET 256 176#define VHOST_SCSI_MAX_TARGET 256
@@ -456,12 +460,16 @@ static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
456 struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd, 460 struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
457 struct tcm_vhost_cmd, tvc_se_cmd); 461 struct tcm_vhost_cmd, tvc_se_cmd);
458 struct se_session *se_sess = se_cmd->se_sess; 462 struct se_session *se_sess = se_cmd->se_sess;
463 int i;
459 464
460 if (tv_cmd->tvc_sgl_count) { 465 if (tv_cmd->tvc_sgl_count) {
461 u32 i;
462 for (i = 0; i < tv_cmd->tvc_sgl_count; i++) 466 for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
463 put_page(sg_page(&tv_cmd->tvc_sgl[i])); 467 put_page(sg_page(&tv_cmd->tvc_sgl[i]));
464 } 468 }
469 if (tv_cmd->tvc_prot_sgl_count) {
470 for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
471 put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
472 }
465 473
466 tcm_vhost_put_inflight(tv_cmd->inflight); 474 tcm_vhost_put_inflight(tv_cmd->inflight);
467 percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag); 475 percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
@@ -713,16 +721,14 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
713} 721}
714 722
715static struct tcm_vhost_cmd * 723static struct tcm_vhost_cmd *
716vhost_scsi_get_tag(struct vhost_virtqueue *vq, 724vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg,
717 struct tcm_vhost_tpg *tpg, 725 unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
718 struct virtio_scsi_cmd_req *v_req, 726 u32 exp_data_len, int data_direction)
719 u32 exp_data_len,
720 int data_direction)
721{ 727{
722 struct tcm_vhost_cmd *cmd; 728 struct tcm_vhost_cmd *cmd;
723 struct tcm_vhost_nexus *tv_nexus; 729 struct tcm_vhost_nexus *tv_nexus;
724 struct se_session *se_sess; 730 struct se_session *se_sess;
725 struct scatterlist *sg; 731 struct scatterlist *sg, *prot_sg;
726 struct page **pages; 732 struct page **pages;
727 int tag; 733 int tag;
728 734
@@ -741,19 +747,24 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq,
741 747
742 cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag]; 748 cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag];
743 sg = cmd->tvc_sgl; 749 sg = cmd->tvc_sgl;
750 prot_sg = cmd->tvc_prot_sgl;
744 pages = cmd->tvc_upages; 751 pages = cmd->tvc_upages;
745 memset(cmd, 0, sizeof(struct tcm_vhost_cmd)); 752 memset(cmd, 0, sizeof(struct tcm_vhost_cmd));
746 753
747 cmd->tvc_sgl = sg; 754 cmd->tvc_sgl = sg;
755 cmd->tvc_prot_sgl = prot_sg;
748 cmd->tvc_upages = pages; 756 cmd->tvc_upages = pages;
749 cmd->tvc_se_cmd.map_tag = tag; 757 cmd->tvc_se_cmd.map_tag = tag;
750 cmd->tvc_tag = v_req->tag; 758 cmd->tvc_tag = scsi_tag;
751 cmd->tvc_task_attr = v_req->task_attr; 759 cmd->tvc_lun = lun;
760 cmd->tvc_task_attr = task_attr;
752 cmd->tvc_exp_data_len = exp_data_len; 761 cmd->tvc_exp_data_len = exp_data_len;
753 cmd->tvc_data_direction = data_direction; 762 cmd->tvc_data_direction = data_direction;
754 cmd->tvc_nexus = tv_nexus; 763 cmd->tvc_nexus = tv_nexus;
755 cmd->inflight = tcm_vhost_get_inflight(vq); 764 cmd->inflight = tcm_vhost_get_inflight(vq);
756 765
766 memcpy(cmd->tvc_cdb, cdb, TCM_VHOST_MAX_CDB_SIZE);
767
757 return cmd; 768 return cmd;
758} 769}
759 770
@@ -767,35 +778,28 @@ vhost_scsi_map_to_sgl(struct tcm_vhost_cmd *tv_cmd,
767 struct scatterlist *sgl, 778 struct scatterlist *sgl,
768 unsigned int sgl_count, 779 unsigned int sgl_count,
769 struct iovec *iov, 780 struct iovec *iov,
770 int write) 781 struct page **pages,
782 bool write)
771{ 783{
772 unsigned int npages = 0, pages_nr, offset, nbytes; 784 unsigned int npages = 0, pages_nr, offset, nbytes;
773 struct scatterlist *sg = sgl; 785 struct scatterlist *sg = sgl;
774 void __user *ptr = iov->iov_base; 786 void __user *ptr = iov->iov_base;
775 size_t len = iov->iov_len; 787 size_t len = iov->iov_len;
776 struct page **pages;
777 int ret, i; 788 int ret, i;
778 789
779 if (sgl_count > TCM_VHOST_PREALLOC_SGLS) {
780 pr_err("vhost_scsi_map_to_sgl() psgl_count: %u greater than"
781 " preallocated TCM_VHOST_PREALLOC_SGLS: %u\n",
782 sgl_count, TCM_VHOST_PREALLOC_SGLS);
783 return -ENOBUFS;
784 }
785
786 pages_nr = iov_num_pages(iov); 790 pages_nr = iov_num_pages(iov);
787 if (pages_nr > sgl_count) 791 if (pages_nr > sgl_count) {
792 pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
793 " sgl_count: %u\n", pages_nr, sgl_count);
788 return -ENOBUFS; 794 return -ENOBUFS;
789 795 }
790 if (pages_nr > TCM_VHOST_PREALLOC_PAGES) { 796 if (pages_nr > TCM_VHOST_PREALLOC_UPAGES) {
791 pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than" 797 pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
792 " preallocated TCM_VHOST_PREALLOC_PAGES: %u\n", 798 " preallocated TCM_VHOST_PREALLOC_UPAGES: %u\n",
793 pages_nr, TCM_VHOST_PREALLOC_PAGES); 799 pages_nr, TCM_VHOST_PREALLOC_UPAGES);
794 return -ENOBUFS; 800 return -ENOBUFS;
795 } 801 }
796 802
797 pages = tv_cmd->tvc_upages;
798
799 ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages); 803 ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
800 /* No pages were pinned */ 804 /* No pages were pinned */
801 if (ret < 0) 805 if (ret < 0)
@@ -825,33 +829,32 @@ out:
825static int 829static int
826vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd, 830vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd,
827 struct iovec *iov, 831 struct iovec *iov,
828 unsigned int niov, 832 int niov,
829 int write) 833 bool write)
830{ 834{
831 int ret; 835 struct scatterlist *sg = cmd->tvc_sgl;
832 unsigned int i; 836 unsigned int sgl_count = 0;
833 u32 sgl_count; 837 int ret, i;
834 struct scatterlist *sg;
835 838
836 /*
837 * Find out how long sglist needs to be
838 */
839 sgl_count = 0;
840 for (i = 0; i < niov; i++) 839 for (i = 0; i < niov; i++)
841 sgl_count += iov_num_pages(&iov[i]); 840 sgl_count += iov_num_pages(&iov[i]);
842 841
843 /* TODO overflow checking */ 842 if (sgl_count > TCM_VHOST_PREALLOC_SGLS) {
843 pr_err("vhost_scsi_map_iov_to_sgl() sgl_count: %u greater than"
844 " preallocated TCM_VHOST_PREALLOC_SGLS: %u\n",
845 sgl_count, TCM_VHOST_PREALLOC_SGLS);
846 return -ENOBUFS;
847 }
844 848
845 sg = cmd->tvc_sgl;
846 pr_debug("%s sg %p sgl_count %u\n", __func__, sg, sgl_count); 849 pr_debug("%s sg %p sgl_count %u\n", __func__, sg, sgl_count);
847 sg_init_table(sg, sgl_count); 850 sg_init_table(sg, sgl_count);
848
849 cmd->tvc_sgl_count = sgl_count; 851 cmd->tvc_sgl_count = sgl_count;
850 852
851 pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count); 853 pr_debug("Mapping iovec %p for %u pages\n", &iov[0], sgl_count);
854
852 for (i = 0; i < niov; i++) { 855 for (i = 0; i < niov; i++) {
853 ret = vhost_scsi_map_to_sgl(cmd, sg, sgl_count, &iov[i], 856 ret = vhost_scsi_map_to_sgl(cmd, sg, sgl_count, &iov[i],
854 write); 857 cmd->tvc_upages, write);
855 if (ret < 0) { 858 if (ret < 0) {
856 for (i = 0; i < cmd->tvc_sgl_count; i++) 859 for (i = 0; i < cmd->tvc_sgl_count; i++)
857 put_page(sg_page(&cmd->tvc_sgl[i])); 860 put_page(sg_page(&cmd->tvc_sgl[i]));
@@ -859,31 +862,70 @@ vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd,
859 cmd->tvc_sgl_count = 0; 862 cmd->tvc_sgl_count = 0;
860 return ret; 863 return ret;
861 } 864 }
862
863 sg += ret; 865 sg += ret;
864 sgl_count -= ret; 866 sgl_count -= ret;
865 } 867 }
866 return 0; 868 return 0;
867} 869}
868 870
871static int
872vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd,
873 struct iovec *iov,
874 int niov,
875 bool write)
876{
877 struct scatterlist *prot_sg = cmd->tvc_prot_sgl;
878 unsigned int prot_sgl_count = 0;
879 int ret, i;
880
881 for (i = 0; i < niov; i++)
882 prot_sgl_count += iov_num_pages(&iov[i]);
883
884 if (prot_sgl_count > TCM_VHOST_PREALLOC_PROT_SGLS) {
885 pr_err("vhost_scsi_map_iov_to_prot() sgl_count: %u greater than"
886 " preallocated TCM_VHOST_PREALLOC_PROT_SGLS: %u\n",
887 prot_sgl_count, TCM_VHOST_PREALLOC_PROT_SGLS);
888 return -ENOBUFS;
889 }
890
891 pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
892 prot_sg, prot_sgl_count);
893 sg_init_table(prot_sg, prot_sgl_count);
894 cmd->tvc_prot_sgl_count = prot_sgl_count;
895
896 for (i = 0; i < niov; i++) {
897 ret = vhost_scsi_map_to_sgl(cmd, prot_sg, prot_sgl_count, &iov[i],
898 cmd->tvc_upages, write);
899 if (ret < 0) {
900 for (i = 0; i < cmd->tvc_prot_sgl_count; i++)
901 put_page(sg_page(&cmd->tvc_prot_sgl[i]));
902
903 cmd->tvc_prot_sgl_count = 0;
904 return ret;
905 }
906 prot_sg += ret;
907 prot_sgl_count -= ret;
908 }
909 return 0;
910}
911
869static void tcm_vhost_submission_work(struct work_struct *work) 912static void tcm_vhost_submission_work(struct work_struct *work)
870{ 913{
871 struct tcm_vhost_cmd *cmd = 914 struct tcm_vhost_cmd *cmd =
872 container_of(work, struct tcm_vhost_cmd, work); 915 container_of(work, struct tcm_vhost_cmd, work);
873 struct tcm_vhost_nexus *tv_nexus; 916 struct tcm_vhost_nexus *tv_nexus;
874 struct se_cmd *se_cmd = &cmd->tvc_se_cmd; 917 struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
875 struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL; 918 struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
876 int rc, sg_no_bidi = 0; 919 int rc;
877 920
921 /* FIXME: BIDI operation */
878 if (cmd->tvc_sgl_count) { 922 if (cmd->tvc_sgl_count) {
879 sg_ptr = cmd->tvc_sgl; 923 sg_ptr = cmd->tvc_sgl;
880/* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */ 924
881#if 0 925 if (cmd->tvc_prot_sgl_count)
882 if (se_cmd->se_cmd_flags & SCF_BIDI) { 926 sg_prot_ptr = cmd->tvc_prot_sgl;
883 sg_bidi_ptr = NULL; 927 else
884 sg_no_bidi = 0; 928 se_cmd->prot_pto = true;
885 }
886#endif
887 } else { 929 } else {
888 sg_ptr = NULL; 930 sg_ptr = NULL;
889 } 931 }
@@ -894,7 +936,7 @@ static void tcm_vhost_submission_work(struct work_struct *work)
894 cmd->tvc_lun, cmd->tvc_exp_data_len, 936 cmd->tvc_lun, cmd->tvc_exp_data_len,
895 cmd->tvc_task_attr, cmd->tvc_data_direction, 937 cmd->tvc_task_attr, cmd->tvc_data_direction,
896 TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count, 938 TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count,
897 sg_bidi_ptr, sg_no_bidi, NULL, 0); 939 NULL, 0, sg_prot_ptr, cmd->tvc_prot_sgl_count);
898 if (rc < 0) { 940 if (rc < 0) {
899 transport_send_check_condition_and_sense(se_cmd, 941 transport_send_check_condition_and_sense(se_cmd,
900 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); 942 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
@@ -926,12 +968,18 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
926{ 968{
927 struct tcm_vhost_tpg **vs_tpg; 969 struct tcm_vhost_tpg **vs_tpg;
928 struct virtio_scsi_cmd_req v_req; 970 struct virtio_scsi_cmd_req v_req;
971 struct virtio_scsi_cmd_req_pi v_req_pi;
929 struct tcm_vhost_tpg *tpg; 972 struct tcm_vhost_tpg *tpg;
930 struct tcm_vhost_cmd *cmd; 973 struct tcm_vhost_cmd *cmd;
931 u32 exp_data_len, data_first, data_num, data_direction; 974 u64 tag;
975 u32 exp_data_len, data_first, data_num, data_direction, prot_first;
932 unsigned out, in, i; 976 unsigned out, in, i;
933 int head, ret; 977 int head, ret, data_niov, prot_niov, prot_bytes;
934 u8 target; 978 size_t req_size;
979 u16 lun;
980 u8 *target, *lunp, task_attr;
981 bool hdr_pi;
982 void *req, *cdb;
935 983
936 mutex_lock(&vq->mutex); 984 mutex_lock(&vq->mutex);
937 /* 985 /*
@@ -962,7 +1010,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
962 break; 1010 break;
963 } 1011 }
964 1012
965/* FIXME: BIDI operation */ 1013 /* FIXME: BIDI operation */
966 if (out == 1 && in == 1) { 1014 if (out == 1 && in == 1) {
967 data_direction = DMA_NONE; 1015 data_direction = DMA_NONE;
968 data_first = 0; 1016 data_first = 0;
@@ -992,29 +1040,38 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
992 break; 1040 break;
993 } 1041 }
994 1042
995 if (unlikely(vq->iov[0].iov_len != sizeof(v_req))) { 1043 if (vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI)) {
996 vq_err(vq, "Expecting virtio_scsi_cmd_req, got %zu" 1044 req = &v_req_pi;
997 " bytes\n", vq->iov[0].iov_len); 1045 lunp = &v_req_pi.lun[0];
1046 target = &v_req_pi.lun[1];
1047 req_size = sizeof(v_req_pi);
1048 hdr_pi = true;
1049 } else {
1050 req = &v_req;
1051 lunp = &v_req.lun[0];
1052 target = &v_req.lun[1];
1053 req_size = sizeof(v_req);
1054 hdr_pi = false;
1055 }
1056
1057 if (unlikely(vq->iov[0].iov_len < req_size)) {
1058 pr_err("Expecting virtio-scsi header: %zu, got %zu\n",
1059 req_size, vq->iov[0].iov_len);
998 break; 1060 break;
999 } 1061 }
1000 pr_debug("Calling __copy_from_user: vq->iov[0].iov_base: %p," 1062 ret = memcpy_fromiovecend(req, &vq->iov[0], 0, req_size);
1001 " len: %zu\n", vq->iov[0].iov_base, sizeof(v_req));
1002 ret = __copy_from_user(&v_req, vq->iov[0].iov_base,
1003 sizeof(v_req));
1004 if (unlikely(ret)) { 1063 if (unlikely(ret)) {
1005 vq_err(vq, "Faulted on virtio_scsi_cmd_req\n"); 1064 vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
1006 break; 1065 break;
1007 } 1066 }
1008 1067
1009 /* virtio-scsi spec requires byte 0 of the lun to be 1 */ 1068 /* virtio-scsi spec requires byte 0 of the lun to be 1 */
1010 if (unlikely(v_req.lun[0] != 1)) { 1069 if (unlikely(*lunp != 1)) {
1011 vhost_scsi_send_bad_target(vs, vq, head, out); 1070 vhost_scsi_send_bad_target(vs, vq, head, out);
1012 continue; 1071 continue;
1013 } 1072 }
1014 1073
1015 /* Extract the tpgt */ 1074 tpg = ACCESS_ONCE(vs_tpg[*target]);
1016 target = v_req.lun[1];
1017 tpg = ACCESS_ONCE(vs_tpg[target]);
1018 1075
1019 /* Target does not exist, fail the request */ 1076 /* Target does not exist, fail the request */
1020 if (unlikely(!tpg)) { 1077 if (unlikely(!tpg)) {
@@ -1022,17 +1079,79 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1022 continue; 1079 continue;
1023 } 1080 }
1024 1081
1082 data_niov = data_num;
1083 prot_niov = prot_first = prot_bytes = 0;
1084 /*
 1085 * Determine if any protection information iovecs precede
1086 * the actual data payload, and adjust data_first + data_niov
1087 * values accordingly for vhost_scsi_map_iov_to_sgl() below.
1088 *
1089 * Also extract virtio_scsi header bits for vhost_scsi_get_tag()
1090 */
1091 if (hdr_pi) {
1092 if (v_req_pi.pi_bytesout) {
1093 if (data_direction != DMA_TO_DEVICE) {
 1094 vq_err(vq, "Received non-zero pi_bytesout"
1095 ", but wrong data_direction\n");
1096 goto err_cmd;
1097 }
1098 prot_bytes = v_req_pi.pi_bytesout;
1099 } else if (v_req_pi.pi_bytesin) {
1100 if (data_direction != DMA_FROM_DEVICE) {
 1101 vq_err(vq, "Received non-zero pi_bytesin"
1102 ", but wrong data_direction\n");
1103 goto err_cmd;
1104 }
1105 prot_bytes = v_req_pi.pi_bytesin;
1106 }
1107 if (prot_bytes) {
1108 int tmp = 0;
1109
1110 for (i = 0; i < data_num; i++) {
1111 tmp += vq->iov[data_first + i].iov_len;
1112 prot_niov++;
1113 if (tmp >= prot_bytes)
1114 break;
1115 }
1116 prot_first = data_first;
1117 data_first += prot_niov;
1118 data_niov = data_num - prot_niov;
1119 }
1120 tag = v_req_pi.tag;
1121 task_attr = v_req_pi.task_attr;
1122 cdb = &v_req_pi.cdb[0];
1123 lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF;
1124 } else {
1125 tag = v_req.tag;
1126 task_attr = v_req.task_attr;
1127 cdb = &v_req.cdb[0];
1128 lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
1129 }
1025 exp_data_len = 0; 1130 exp_data_len = 0;
1026 for (i = 0; i < data_num; i++) 1131 for (i = 0; i < data_niov; i++)
1027 exp_data_len += vq->iov[data_first + i].iov_len; 1132 exp_data_len += vq->iov[data_first + i].iov_len;
1133 /*
 1134 * Check that the received CDB size does not exceed our
1135 * hardcoded max for vhost-scsi
1136 *
1137 * TODO what if cdb was too small for varlen cdb header?
1138 */
1139 if (unlikely(scsi_command_size(cdb) > TCM_VHOST_MAX_CDB_SIZE)) {
1140 vq_err(vq, "Received SCSI CDB with command_size: %d that"
1141 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1142 scsi_command_size(cdb), TCM_VHOST_MAX_CDB_SIZE);
1143 goto err_cmd;
1144 }
1028 1145
1029 cmd = vhost_scsi_get_tag(vq, tpg, &v_req, 1146 cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
1030 exp_data_len, data_direction); 1147 exp_data_len + prot_bytes,
1148 data_direction);
1031 if (IS_ERR(cmd)) { 1149 if (IS_ERR(cmd)) {
1032 vq_err(vq, "vhost_scsi_get_tag failed %ld\n", 1150 vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
1033 PTR_ERR(cmd)); 1151 PTR_ERR(cmd));
1034 goto err_cmd; 1152 goto err_cmd;
1035 } 1153 }
1154
1036 pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction" 1155 pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
1037 ": %d\n", cmd, exp_data_len, data_direction); 1156 ": %d\n", cmd, exp_data_len, data_direction);
1038 1157
@@ -1040,40 +1159,28 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1040 cmd->tvc_vq = vq; 1159 cmd->tvc_vq = vq;
1041 cmd->tvc_resp = vq->iov[out].iov_base; 1160 cmd->tvc_resp = vq->iov[out].iov_base;
1042 1161
1043 /*
1044 * Copy in the recieved CDB descriptor into cmd->tvc_cdb
1045 * that will be used by tcm_vhost_new_cmd_map() and down into
1046 * target_setup_cmd_from_cdb()
1047 */
1048 memcpy(cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE);
1049 /*
1050 * Check that the recieved CDB size does not exceeded our
1051 * hardcoded max for tcm_vhost
1052 */
1053 /* TODO what if cdb was too small for varlen cdb header? */
1054 if (unlikely(scsi_command_size(cmd->tvc_cdb) >
1055 TCM_VHOST_MAX_CDB_SIZE)) {
1056 vq_err(vq, "Received SCSI CDB with command_size: %d that"
1057 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1058 scsi_command_size(cmd->tvc_cdb),
1059 TCM_VHOST_MAX_CDB_SIZE);
1060 goto err_free;
1061 }
1062 cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
1063
1064 pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n", 1162 pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
1065 cmd->tvc_cdb[0], cmd->tvc_lun); 1163 cmd->tvc_cdb[0], cmd->tvc_lun);
1066 1164
1165 if (prot_niov) {
1166 ret = vhost_scsi_map_iov_to_prot(cmd,
1167 &vq->iov[prot_first], prot_niov,
1168 data_direction == DMA_FROM_DEVICE);
1169 if (unlikely(ret)) {
1170 vq_err(vq, "Failed to map iov to"
1171 " prot_sgl\n");
1172 goto err_free;
1173 }
1174 }
1067 if (data_direction != DMA_NONE) { 1175 if (data_direction != DMA_NONE) {
1068 ret = vhost_scsi_map_iov_to_sgl(cmd, 1176 ret = vhost_scsi_map_iov_to_sgl(cmd,
1069 &vq->iov[data_first], data_num, 1177 &vq->iov[data_first], data_niov,
1070 data_direction == DMA_FROM_DEVICE); 1178 data_direction == DMA_FROM_DEVICE);
1071 if (unlikely(ret)) { 1179 if (unlikely(ret)) {
1072 vq_err(vq, "Failed to map iov to sgl\n"); 1180 vq_err(vq, "Failed to map iov to sgl\n");
1073 goto err_free; 1181 goto err_free;
1074 } 1182 }
1075 } 1183 }
1076
1077 /* 1184 /*
1078 * Save the descriptor from vhost_get_vq_desc() to be used to 1185 * Save the descriptor from vhost_get_vq_desc() to be used to
1079 * complete the virtio-scsi request in TCM callback context via 1186 * complete the virtio-scsi request in TCM callback context via
@@ -1716,6 +1823,7 @@ static void tcm_vhost_free_cmd_map_res(struct tcm_vhost_nexus *nexus,
1716 tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i]; 1823 tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i];
1717 1824
1718 kfree(tv_cmd->tvc_sgl); 1825 kfree(tv_cmd->tvc_sgl);
1826 kfree(tv_cmd->tvc_prot_sgl);
1719 kfree(tv_cmd->tvc_upages); 1827 kfree(tv_cmd->tvc_upages);
1720 } 1828 }
1721} 1829}
@@ -1750,7 +1858,7 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
1750 tv_nexus->tvn_se_sess = transport_init_session_tags( 1858 tv_nexus->tvn_se_sess = transport_init_session_tags(
1751 TCM_VHOST_DEFAULT_TAGS, 1859 TCM_VHOST_DEFAULT_TAGS,
1752 sizeof(struct tcm_vhost_cmd), 1860 sizeof(struct tcm_vhost_cmd),
1753 TARGET_PROT_NORMAL); 1861 TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
1754 if (IS_ERR(tv_nexus->tvn_se_sess)) { 1862 if (IS_ERR(tv_nexus->tvn_se_sess)) {
1755 mutex_unlock(&tpg->tv_tpg_mutex); 1863 mutex_unlock(&tpg->tv_tpg_mutex);
1756 kfree(tv_nexus); 1864 kfree(tv_nexus);
@@ -1769,12 +1877,20 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
1769 } 1877 }
1770 1878
1771 tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) * 1879 tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
1772 TCM_VHOST_PREALLOC_PAGES, GFP_KERNEL); 1880 TCM_VHOST_PREALLOC_UPAGES, GFP_KERNEL);
1773 if (!tv_cmd->tvc_upages) { 1881 if (!tv_cmd->tvc_upages) {
1774 mutex_unlock(&tpg->tv_tpg_mutex); 1882 mutex_unlock(&tpg->tv_tpg_mutex);
1775 pr_err("Unable to allocate tv_cmd->tvc_upages\n"); 1883 pr_err("Unable to allocate tv_cmd->tvc_upages\n");
1776 goto out; 1884 goto out;
1777 } 1885 }
1886
1887 tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) *
1888 TCM_VHOST_PREALLOC_PROT_SGLS, GFP_KERNEL);
1889 if (!tv_cmd->tvc_prot_sgl) {
1890 mutex_unlock(&tpg->tv_tpg_mutex);
1891 pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
1892 goto out;
1893 }
1778 } 1894 }
1779 /* 1895 /*
 1780 * Since we are running in 'demo mode' this call will generate a 1896
diff --git a/include/linux/virtio_scsi.h b/include/linux/virtio_scsi.h
index 4195b97a3def..de429d1f4357 100644
--- a/include/linux/virtio_scsi.h
+++ b/include/linux/virtio_scsi.h
@@ -35,11 +35,23 @@ struct virtio_scsi_cmd_req {
35 u8 lun[8]; /* Logical Unit Number */ 35 u8 lun[8]; /* Logical Unit Number */
36 u64 tag; /* Command identifier */ 36 u64 tag; /* Command identifier */
37 u8 task_attr; /* Task attribute */ 37 u8 task_attr; /* Task attribute */
38 u8 prio; 38 u8 prio; /* SAM command priority field */
39 u8 crn; 39 u8 crn;
40 u8 cdb[VIRTIO_SCSI_CDB_SIZE]; 40 u8 cdb[VIRTIO_SCSI_CDB_SIZE];
41} __packed; 41} __packed;
42 42
43/* SCSI command request, followed by protection information */
44struct virtio_scsi_cmd_req_pi {
45 u8 lun[8]; /* Logical Unit Number */
46 u64 tag; /* Command identifier */
47 u8 task_attr; /* Task attribute */
48 u8 prio; /* SAM command priority field */
49 u8 crn;
50 u32 pi_bytesout; /* DataOUT PI Number of bytes */
51 u32 pi_bytesin; /* DataIN PI Number of bytes */
52 u8 cdb[VIRTIO_SCSI_CDB_SIZE];
53} __packed;
54
43/* Response, followed by sense data and data-in */ 55/* Response, followed by sense data and data-in */
44struct virtio_scsi_cmd_resp { 56struct virtio_scsi_cmd_resp {
45 u32 sense_len; /* Sense data length */ 57 u32 sense_len; /* Sense data length */
@@ -97,6 +109,7 @@ struct virtio_scsi_config {
97#define VIRTIO_SCSI_F_INOUT 0 109#define VIRTIO_SCSI_F_INOUT 0
98#define VIRTIO_SCSI_F_HOTPLUG 1 110#define VIRTIO_SCSI_F_HOTPLUG 1
99#define VIRTIO_SCSI_F_CHANGE 2 111#define VIRTIO_SCSI_F_CHANGE 2
112#define VIRTIO_SCSI_F_T10_PI 3
100 113
101/* Response codes */ 114/* Response codes */
102#define VIRTIO_SCSI_S_OK 0 115#define VIRTIO_SCSI_S_OK 0
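
Once VIRTIO_SCSI_F_T10_PI is negotiated, both sides must agree on the larger request header; the two pi_bytes* counters make virtio_scsi_cmd_req_pi exactly 8 bytes bigger. A sketch of the device-side layout selection mirroring the hdr_pi logic in vhost_scsi_handle_vq(), using trimmed local copies of the structs for illustration only:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct req_plain {
		uint8_t  lun[8];
		uint64_t tag;
		uint8_t  task_attr, prio, crn;
		uint8_t  cdb[32];		/* VIRTIO_SCSI_CDB_SIZE */
	} __attribute__((packed));

	struct req_pi {
		uint8_t  lun[8];
		uint64_t tag;
		uint8_t  task_attr, prio, crn;
		uint32_t pi_bytesout, pi_bytesin;
		uint8_t  cdb[32];
	} __attribute__((packed));

	int main(void)
	{
		bool t10_pi_negotiated = true;
		size_t req_size = t10_pi_negotiated ? sizeof(struct req_pi)
						    : sizeof(struct req_plain);

		/* PI header is 8 bytes larger: the two pi_bytes* counters */
		printf("req_size=%zu (plain=%zu)\n",
		       req_size, sizeof(struct req_plain));
		return 0;
	}
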
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index e016e2ac38df..42ed789ebafc 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -7,6 +7,7 @@
7#include <linux/types.h> 7#include <linux/types.h>
8#include <linux/timer.h> 8#include <linux/timer.h>
9#include <linux/scatterlist.h> 9#include <linux/scatterlist.h>
10#include <scsi/scsi_device.h>
10 11
11struct Scsi_Host; 12struct Scsi_Host;
12struct scsi_device; 13struct scsi_device;
@@ -315,4 +316,20 @@ static inline void set_driver_byte(struct scsi_cmnd *cmd, char status)
315 cmd->result = (cmd->result & 0x00ffffff) | (status << 24); 316 cmd->result = (cmd->result & 0x00ffffff) | (status << 24);
316} 317}
317 318
319static inline unsigned scsi_transfer_length(struct scsi_cmnd *scmd)
320{
321 unsigned int xfer_len = blk_rq_bytes(scmd->request);
322 unsigned int prot_op = scsi_get_prot_op(scmd);
323 unsigned int sector_size = scmd->device->sector_size;
324
325 switch (prot_op) {
326 case SCSI_PROT_NORMAL:
327 case SCSI_PROT_WRITE_STRIP:
328 case SCSI_PROT_READ_INSERT:
329 return xfer_len;
330 }
331
332 return xfer_len + (xfer_len >> ilog2(sector_size)) * 8;
333}
334
318#endif /* _SCSI_SCSI_CMND_H */ 335#endif /* _SCSI_SCSI_CMND_H */
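
Worked example of the scsi_transfer_length() arithmetic: with 512-byte sectors and 8 bytes of T10 PI per sector, a 4096-byte transfer becomes 4160 bytes on the wire whenever PI travels with the data, and stays 4096 for NORMAL, WRITE_STRIP and READ_INSERT. The sketch uses a division in place of the kernel's ilog2() shift, which is equivalent for power-of-two sector sizes:

	#include <stdio.h>

	static unsigned int transfer_length(unsigned int xfer_len,
					    unsigned int sector_size,
					    int pi_on_wire)
	{
		if (!pi_on_wire)	/* NORMAL, WRITE_STRIP, READ_INSERT */
			return xfer_len;
		/* one 8-byte PI tuple per sector travels with the data */
		return xfer_len + (xfer_len / sector_size) * 8;
	}

	int main(void)
	{
		/* 4096 data bytes = 8 sectors -> 64 PI bytes on the wire */
		printf("%u\n", transfer_length(4096, 512, 1));	/* 4160 */
		printf("%u\n", transfer_length(4096, 512, 0));	/* 4096 */
		return 0;
	}
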
diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
index 33b487b5da92..daef9daa500c 100644
--- a/include/target/iscsi/iscsi_transport.h
+++ b/include/target/iscsi/iscsi_transport.h
@@ -70,7 +70,8 @@ extern void iscsit_build_nopin_rsp(struct iscsi_cmd *, struct iscsi_conn *,
70extern void iscsit_build_task_mgt_rsp(struct iscsi_cmd *, struct iscsi_conn *, 70extern void iscsit_build_task_mgt_rsp(struct iscsi_cmd *, struct iscsi_conn *,
71 struct iscsi_tm_rsp *); 71 struct iscsi_tm_rsp *);
72extern int iscsit_build_text_rsp(struct iscsi_cmd *, struct iscsi_conn *, 72extern int iscsit_build_text_rsp(struct iscsi_cmd *, struct iscsi_conn *,
73 struct iscsi_text_rsp *); 73 struct iscsi_text_rsp *,
74 enum iscsit_transport_type);
74extern void iscsit_build_reject(struct iscsi_cmd *, struct iscsi_conn *, 75extern void iscsit_build_reject(struct iscsi_cmd *, struct iscsi_conn *,
75 struct iscsi_reject *); 76 struct iscsi_reject *);
76extern int iscsit_build_logout_rsp(struct iscsi_cmd *, struct iscsi_conn *, 77extern int iscsit_build_logout_rsp(struct iscsi_cmd *, struct iscsi_conn *,
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 3a1c1eea1fff..9adc1bca1178 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -59,6 +59,7 @@ int transport_subsystem_register(struct se_subsystem_api *);
59void transport_subsystem_release(struct se_subsystem_api *); 59void transport_subsystem_release(struct se_subsystem_api *);
60 60
61void target_complete_cmd(struct se_cmd *, u8); 61void target_complete_cmd(struct se_cmd *, u8);
62void target_complete_cmd_with_length(struct se_cmd *, u8, int);
62 63
63sense_reason_t spc_parse_cdb(struct se_cmd *cmd, unsigned int *size); 64sense_reason_t spc_parse_cdb(struct se_cmd *cmd, unsigned int *size);
64sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd); 65sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd);