author     Linus Torvalds <torvalds@linux-foundation.org>  2016-03-22 15:41:14 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-03-22 15:41:14 -0400
commit     5266e5b12c8b73587130325f7074d2f49ef9e427 (patch)
tree       79abfbf1b047de05860fa832f66fce5078cc6434
parent     fc739eba99dc8655369a07ead098de9960e48fff (diff)
parent     5e47f1985d7107331c3f64fb3ec83d66fd73577e (diff)
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
Pull SCSI target updates from Nicholas Bellinger:
 "The highlights this round include:

   - Add target_alloc_session() w/ callback helper for doing se_session
     allocation + tag + se_node_acl lookup. (HCH + nab)

   - Tree-wide fabric driver conversion to use target_alloc_session()

   - Convert sbp-target to use percpu_ida tag pre-allocation, and
     TARGET_SCF_ACK_KREF I/O krefs (Chris Boot + nab)

   - Convert usb-gadget to use percpu_ida tag pre-allocation, and
     TARGET_SCF_ACK_KREF I/O krefs (Andrzej Pietrasiewicz + nab)

   - Convert xen-scsiback to use percpu_ida tag pre-allocation, and
     TARGET_SCF_ACK_KREF I/O krefs (Juergen Gross + nab)

   - Convert tcm_fc to use TARGET_SCF_ACK_KREF I/O + TMR krefs

   - Convert ib_srpt to use percpu_ida tag pre-allocation

   - Add DebugFS node for qla2xxx target sess list (Quinn)

   - Rework iser-target connection termination (Jenny + Sagi)

   - Convert iser-target to new CQ API (HCH)

   - Add pass-through WRITE_SAME support for IBLOCK (Mike Christie)

   - Introduce data_bitmap for asynchronous access of data area (Sheng
     Yang + Andy)

   - Fix target_release_cmd_kref shutdown comp leak (Himanshu Madhani)

  Also, there is a separate PULL request coming for cxgb4 NIC driver
  prerequisites for supporting hw iscsi segmentation offload (ISO), that
  will be the base for a number of v4.7 developments involving
  iscsi-target hw offloads"

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (36 commits)
  target: Fix target_release_cmd_kref shutdown comp leak
  target: Avoid DataIN transfers for non-GOOD SAM status
  target/user: Report capability of handling out-of-order completions to userspace
  target/user: Fix size_t format-spec build warning
  target/user: Don't free expired command when time out
  target/user: Introduce data_bitmap, replace data_length/data_head/data_tail
  target/user: Free data ring in unified function
  target/user: Use iovec[] to describe continuous area
  target: Remove enum transport_lunflags_table
  target/iblock: pass WRITE_SAME to device if possible
  iser-target: Kill the ->isert_cmd back pointer in struct iser_tx_desc
  iser-target: Kill struct isert_rdma_wr
  iser-target: Convert to new CQ API
  iser-target: Split and properly type the login buffer
  iser-target: Remove ISER_RECV_DATA_SEG_LEN
  iser-target: Remove impossible condition from isert_wait_conn
  iser-target: Remove redundant wait in release_conn
  iser-target: Rework connection termination
  iser-target: Separate flows for np listeners and connections cma events
  iser-target: Add new state ISER_CONN_BOUND to isert_conn
  ...
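[Editor's note: for readers unfamiliar with the first highlight, here is a minimal sketch of how a fabric driver uses the new target_alloc_session() helper after this series. The core allocates the se_session, optionally pre-allocates tags, looks up the se_node_acl by initiator name, and only then invokes the driver callback to wire up its private nexus. target_alloc_session() and TARGET_PROT_NORMAL are taken from include/target/target_core_fabric.h and target_core_base.h in this tree; every my_* name is hypothetical, and the snippet illustrates the pattern rather than any specific driver's conversion.]

	#include <linux/slab.h>
	#include <linux/err.h>
	#include <target/target_core_base.h>
	#include <target/target_core_fabric.h>

	/* Hypothetical fabric-private structures, for illustration only. */
	struct my_nexus {
		struct se_session *se_sess;
	};

	struct my_tpg {
		struct se_portal_group se_tpg;
		struct my_nexus *nexus;
	};

	/*
	 * Called by target_alloc_session() once se_session allocation and
	 * se_node_acl lookup have succeeded, so the driver links its nexus
	 * only when the core side is fully set up.
	 */
	static int my_alloc_sess_cb(struct se_portal_group *se_tpg,
				    struct se_session *se_sess, void *private)
	{
		struct my_nexus *nexus = private;

		nexus->se_sess = se_sess;
		return 0;
	}

	static int my_make_nexus(struct my_tpg *tpg, const char *initiator_wwn)
	{
		struct my_nexus *nexus;

		nexus = kzalloc(sizeof(*nexus), GFP_KERNEL);
		if (!nexus)
			return -ENOMEM;

		/* 0, 0: no percpu_ida tag pre-allocation in this minimal example. */
		nexus->se_sess = target_alloc_session(&tpg->se_tpg, 0, 0,
						      TARGET_PROT_NORMAL,
						      initiator_wwn, nexus,
						      my_alloc_sess_cb);
		if (IS_ERR(nexus->se_sess)) {
			int ret = PTR_ERR(nexus->se_sess);

			kfree(nexus);
			return ret;
		}

		tpg->nexus = nexus;
		return 0;
	}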
-rw-r--r--  Documentation/target/tcmu-design.txt          |  11
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c       | 810
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.h       |  72
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c         |  76
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.h         |   2
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h                |   1
-rw-r--r--  drivers/scsi/qla2xxx/qla_dfs.c                |  55
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c             |  57
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.h             |   2
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c            |  79
-rw-r--r--  drivers/target/loopback/tcm_loop.c            |  46
-rw-r--r--  drivers/target/sbp/sbp_target.c               |  95
-rw-r--r--  drivers/target/target_core_device.c           |  41
-rw-r--r--  drivers/target/target_core_fabric_configfs.c  |  32
-rw-r--r--  drivers/target/target_core_iblock.c           |  34
-rw-r--r--  drivers/target/target_core_internal.h         |   8
-rw-r--r--  drivers/target/target_core_spc.c              |   3
-rw-r--r--  drivers/target/target_core_tpg.c              |  21
-rw-r--r--  drivers/target/target_core_transport.c        |  66
-rw-r--r--  drivers/target/target_core_user.c             | 267
-rw-r--r--  drivers/target/tcm_fc/tfc_cmd.c               |  20
-rw-r--r--  drivers/target/tcm_fc/tfc_sess.c              |  44
-rw-r--r--  drivers/usb/gadget/function/f_tcm.c           | 193
-rw-r--r--  drivers/usb/gadget/function/tcm.h             |   2
-rw-r--r--  drivers/vhost/scsi.c                          |  99
-rw-r--r--  drivers/xen/xen-scsiback.c                    | 281
-rw-r--r--  include/target/target_core_base.h             |  11
-rw-r--r--  include/target/target_core_fabric.h           |   6
-rw-r--r--  include/uapi/linux/target_core_user.h         |   1
29 files changed, 1240 insertions, 1195 deletions
diff --git a/Documentation/target/tcmu-design.txt b/Documentation/target/tcmu-design.txt
index bef81e42788f..4cebc1ebf99a 100644
--- a/Documentation/target/tcmu-design.txt
+++ b/Documentation/target/tcmu-design.txt
@@ -117,7 +117,9 @@ userspace (respectively) to put commands on the ring, and indicate
 when the commands are completed.
 
 version - 1 (userspace should abort if otherwise)
-flags - none yet defined.
+flags:
+- TCMU_MAILBOX_FLAG_CAP_OOOC: indicates out-of-order completion is
+  supported. See "The Command Ring" for details.
 cmdr_off - The offset of the start of the command ring from the start
 of the memory region, to account for the mailbox size.
 cmdr_size - The size of the command ring. This does *not* need to be a
@@ -162,6 +164,13 @@ rsp.sense_buffer if necessary. Userspace then increments
 mailbox.cmd_tail by entry.hdr.length (mod cmdr_size) and signals the
 kernel via the UIO method, a 4-byte write to the file descriptor.
 
+If TCMU_MAILBOX_FLAG_CAP_OOOC is set for mailbox->flags, kernel is
+capable of handling out-of-order completions. In this case, userspace can
+handle command in different order other than original. Since kernel would
+still process the commands in the same order it appeared in the command
+ring, userspace need to update the cmd->id when completing the
+command(a.k.a steal the original command's entry).
+
 When the opcode is PAD, userspace only updates cmd_tail as above --
 it's a no-op. (The kernel inserts PAD entries to ensure each CMD entry
 is contiguous within the command ring.)
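[Editor's note: a rough userspace sketch of what the new paragraph describes, assuming the struct tcmu_mailbox / struct tcmu_cmd_entry layout, the TCMU_MAILBOX_FLAG_CAP_OOOC bit and the tcmu_hdr_get_len() helper from include/uapi/linux/target_core_user.h in this series. PAD handling, memory barriers and the UIO doorbell write are left out, and the function names are made up for the example.]

	#include <linux/target_core_user.h>
	#include <stdint.h>

	/* New capability bit: only complete out of order when the kernel says so. */
	static int can_complete_ooo(struct tcmu_mailbox *mb)
	{
		return mb->flags & TCMU_MAILBOX_FLAG_CAP_OOOC;
	}

	/*
	 * Complete whichever command finished first by "stealing" the entry
	 * at cmd_tail: overwrite its cmd_id with the id of the finished
	 * command, fill in the response, then advance cmd_tail by that
	 * entry's length (mod cmdr_size).
	 */
	static void complete_one(struct tcmu_mailbox *mb, uint16_t done_cmd_id,
				 uint8_t scsi_status)
	{
		char *cmdr = (char *)mb + mb->cmdr_off;
		struct tcmu_cmd_entry *ent =
			(struct tcmu_cmd_entry *)(cmdr + mb->cmd_tail);

		ent->hdr.cmd_id = done_cmd_id;	/* may differ from the original id */
		ent->rsp.scsi_status = scsi_status;

		mb->cmd_tail = (mb->cmd_tail + tcmu_hdr_get_len(ent->hdr.len_op))
				% mb->cmdr_size;
		/* ...then do the 4-byte write to the UIO fd to signal the kernel. */
	}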
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index f121e6129339..60b30d338a81 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -49,22 +49,25 @@ static struct workqueue_struct *isert_release_wq;
 static void
 isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
 static int
-isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
-	       struct isert_rdma_wr *wr);
+isert_map_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn);
 static void
 isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
 static int
-isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
-	       struct isert_rdma_wr *wr);
+isert_reg_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn);
 static int
 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
 static int
-isert_rdma_post_recvl(struct isert_conn *isert_conn);
+isert_login_post_recv(struct isert_conn *isert_conn);
 static int
 isert_rdma_accept(struct isert_conn *isert_conn);
 struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
 
 static void isert_release_work(struct work_struct *work);
+static void isert_wait4flush(struct isert_conn *isert_conn);
+static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc);
+static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc);
+static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc);
+static void isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc);
 
 static inline bool
 isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
@@ -177,12 +180,6 @@ err:
 	return ret;
 }
 
-static void
-isert_cq_event_callback(struct ib_event *e, void *context)
-{
-	isert_dbg("event: %d\n", e->event);
-}
-
 static int
 isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
 {
@@ -212,6 +209,7 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
 		rx_sg->addr = rx_desc->dma_addr;
 		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
 		rx_sg->lkey = device->pd->local_dma_lkey;
+		rx_desc->rx_cqe.done = isert_recv_done;
 	}
 
 	return 0;
@@ -250,9 +248,6 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn)
 	isert_conn->rx_descs = NULL;
 }
 
-static void isert_cq_work(struct work_struct *);
-static void isert_cq_callback(struct ib_cq *, void *);
-
 static void
 isert_free_comps(struct isert_device *device)
 {
@@ -261,10 +256,8 @@ isert_free_comps(struct isert_device *device)
 	for (i = 0; i < device->comps_used; i++) {
 		struct isert_comp *comp = &device->comps[i];
 
-		if (comp->cq) {
-			cancel_work_sync(&comp->work);
-			ib_destroy_cq(comp->cq);
-		}
+		if (comp->cq)
+			ib_free_cq(comp->cq);
 	}
 	kfree(device->comps);
 }
@@ -293,28 +286,17 @@ isert_alloc_comps(struct isert_device *device)
 	max_cqe = min(ISER_MAX_CQ_LEN, device->ib_device->attrs.max_cqe);
 
 	for (i = 0; i < device->comps_used; i++) {
-		struct ib_cq_init_attr cq_attr = {};
 		struct isert_comp *comp = &device->comps[i];
 
 		comp->device = device;
-		INIT_WORK(&comp->work, isert_cq_work);
-		cq_attr.cqe = max_cqe;
-		cq_attr.comp_vector = i;
-		comp->cq = ib_create_cq(device->ib_device,
-				isert_cq_callback,
-				isert_cq_event_callback,
-				(void *)comp,
-				&cq_attr);
+		comp->cq = ib_alloc_cq(device->ib_device, comp, max_cqe, i,
+				IB_POLL_WORKQUEUE);
 		if (IS_ERR(comp->cq)) {
 			isert_err("Unable to allocate cq\n");
 			ret = PTR_ERR(comp->cq);
 			comp->cq = NULL;
 			goto out_cq;
 		}
-
-		ret = ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
-		if (ret)
-			goto out_cq;
 	}
 
 	return 0;
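[Editor's note: as context for this and the following hunks, a minimal sketch of the new-CQ-API pattern isert is being converted to. A struct ib_cqe embedded in each descriptor carries the completion callback, work requests point at it through wr_cqe instead of an opaque wr_id cookie, and the completion handler recovers the descriptor with container_of(). ib_alloc_cq(), struct ib_cqe and wr_cqe are the real verbs API used in this patch; the my_* names are hypothetical.]

	#include <rdma/ib_verbs.h>

	struct my_rx_desc {
		struct ib_cqe	cqe;	/* embeds the completion callback */
		struct ib_sge	sge;
		/* ... payload buffer, DMA address, etc. ... */
	};

	static void my_recv_done(struct ib_cq *cq, struct ib_wc *wc)
	{
		/*
		 * The core hands back wr_cqe; container_of() recovers the
		 * descriptor, so there is no wr_id casting and no opcode
		 * guessing on flush errors.
		 */
		struct my_rx_desc *desc =
			container_of(wc->wr_cqe, struct my_rx_desc, cqe);

		if (unlikely(wc->status != IB_WC_SUCCESS))
			return;	/* e.g. IB_WC_WR_FLUSH_ERR during teardown */

		/* process desc->... */
	}

	static int my_post_one(struct ib_qp *qp, struct my_rx_desc *desc)
	{
		struct ib_recv_wr wr = {}, *bad_wr;

		desc->cqe.done = my_recv_done;	/* replaces the wr_id cookie */
		wr.wr_cqe = &desc->cqe;
		wr.sg_list = &desc->sge;
		wr.num_sge = 1;

		return ib_post_recv(qp, &wr, &bad_wr);
	}

	/* CQ allocation: the core now owns polling (here via a workqueue). */
	static struct ib_cq *my_alloc_cq(struct ib_device *dev, void *ctx, int nr_cqe)
	{
		return ib_alloc_cq(dev, ctx, nr_cqe, 0 /* comp_vector */,
				   IB_POLL_WORKQUEUE);
	}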
@@ -582,7 +564,6 @@ isert_init_conn(struct isert_conn *isert_conn)
582 INIT_LIST_HEAD(&isert_conn->node); 564 INIT_LIST_HEAD(&isert_conn->node);
583 init_completion(&isert_conn->login_comp); 565 init_completion(&isert_conn->login_comp);
584 init_completion(&isert_conn->login_req_comp); 566 init_completion(&isert_conn->login_req_comp);
585 init_completion(&isert_conn->wait);
586 kref_init(&isert_conn->kref); 567 kref_init(&isert_conn->kref);
587 mutex_init(&isert_conn->mutex); 568 mutex_init(&isert_conn->mutex);
588 spin_lock_init(&isert_conn->pool_lock); 569 spin_lock_init(&isert_conn->pool_lock);
@@ -596,11 +577,13 @@ isert_free_login_buf(struct isert_conn *isert_conn)
596 struct ib_device *ib_dev = isert_conn->device->ib_device; 577 struct ib_device *ib_dev = isert_conn->device->ib_device;
597 578
598 ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma, 579 ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
599 ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE); 580 ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
581 kfree(isert_conn->login_rsp_buf);
582
600 ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma, 583 ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
601 ISCSI_DEF_MAX_RECV_SEG_LEN, 584 ISER_RX_PAYLOAD_SIZE,
602 DMA_FROM_DEVICE); 585 DMA_FROM_DEVICE);
603 kfree(isert_conn->login_buf); 586 kfree(isert_conn->login_req_buf);
604} 587}
605 588
606static int 589static int
@@ -609,50 +592,48 @@ isert_alloc_login_buf(struct isert_conn *isert_conn,
609{ 592{
610 int ret; 593 int ret;
611 594
612 isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN + 595 isert_conn->login_req_buf = kzalloc(sizeof(*isert_conn->login_req_buf),
613 ISER_RX_LOGIN_SIZE, GFP_KERNEL); 596 GFP_KERNEL);
614 if (!isert_conn->login_buf) { 597 if (!isert_conn->login_req_buf) {
615 isert_err("Unable to allocate isert_conn->login_buf\n"); 598 isert_err("Unable to allocate isert_conn->login_buf\n");
616 return -ENOMEM; 599 return -ENOMEM;
617 } 600 }
618 601
619 isert_conn->login_req_buf = isert_conn->login_buf;
620 isert_conn->login_rsp_buf = isert_conn->login_buf +
621 ISCSI_DEF_MAX_RECV_SEG_LEN;
622
623 isert_dbg("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
624 isert_conn->login_buf, isert_conn->login_req_buf,
625 isert_conn->login_rsp_buf);
626
627 isert_conn->login_req_dma = ib_dma_map_single(ib_dev, 602 isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
628 (void *)isert_conn->login_req_buf, 603 isert_conn->login_req_buf,
629 ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE); 604 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
630
631 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma); 605 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
632 if (ret) { 606 if (ret) {
633 isert_err("login_req_dma mapping error: %d\n", ret); 607 isert_err("login_req_dma mapping error: %d\n", ret);
634 isert_conn->login_req_dma = 0; 608 isert_conn->login_req_dma = 0;
635 goto out_login_buf; 609 goto out_free_login_req_buf;
636 } 610 }
637 611
638 isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev, 612 isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL);
639 (void *)isert_conn->login_rsp_buf, 613 if (!isert_conn->login_rsp_buf) {
640 ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE); 614 isert_err("Unable to allocate isert_conn->login_rspbuf\n");
615 goto out_unmap_login_req_buf;
616 }
641 617
618 isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
619 isert_conn->login_rsp_buf,
620 ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
642 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma); 621 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
643 if (ret) { 622 if (ret) {
644 isert_err("login_rsp_dma mapping error: %d\n", ret); 623 isert_err("login_rsp_dma mapping error: %d\n", ret);
645 isert_conn->login_rsp_dma = 0; 624 isert_conn->login_rsp_dma = 0;
646 goto out_req_dma_map; 625 goto out_free_login_rsp_buf;
647 } 626 }
648 627
649 return 0; 628 return 0;
650 629
651out_req_dma_map: 630out_free_login_rsp_buf:
631 kfree(isert_conn->login_rsp_buf);
632out_unmap_login_req_buf:
652 ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma, 633 ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
653 ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE); 634 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
654out_login_buf: 635out_free_login_req_buf:
655 kfree(isert_conn->login_buf); 636 kfree(isert_conn->login_req_buf);
656 return ret; 637 return ret;
657} 638}
658 639
@@ -726,7 +707,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
726 if (ret) 707 if (ret)
727 goto out_conn_dev; 708 goto out_conn_dev;
728 709
729 ret = isert_rdma_post_recvl(isert_conn); 710 ret = isert_login_post_recv(isert_conn);
730 if (ret) 711 if (ret)
731 goto out_conn_dev; 712 goto out_conn_dev;
732 713
@@ -773,7 +754,7 @@ isert_connect_release(struct isert_conn *isert_conn)
773 ib_destroy_qp(isert_conn->qp); 754 ib_destroy_qp(isert_conn->qp);
774 } 755 }
775 756
776 if (isert_conn->login_buf) 757 if (isert_conn->login_req_buf)
777 isert_free_login_buf(isert_conn); 758 isert_free_login_buf(isert_conn);
778 759
779 isert_device_put(device); 760 isert_device_put(device);
@@ -820,12 +801,30 @@ isert_put_conn(struct isert_conn *isert_conn)
820 kref_put(&isert_conn->kref, isert_release_kref); 801 kref_put(&isert_conn->kref, isert_release_kref);
821} 802}
822 803
804static void
805isert_handle_unbound_conn(struct isert_conn *isert_conn)
806{
807 struct isert_np *isert_np = isert_conn->cm_id->context;
808
809 mutex_lock(&isert_np->mutex);
810 if (!list_empty(&isert_conn->node)) {
811 /*
812 * This means iscsi doesn't know this connection
813 * so schedule a cleanup ourselves
814 */
815 list_del_init(&isert_conn->node);
816 isert_put_conn(isert_conn);
817 queue_work(isert_release_wq, &isert_conn->release_work);
818 }
819 mutex_unlock(&isert_np->mutex);
820}
821
823/** 822/**
824 * isert_conn_terminate() - Initiate connection termination 823 * isert_conn_terminate() - Initiate connection termination
825 * @isert_conn: isert connection struct 824 * @isert_conn: isert connection struct
826 * 825 *
827 * Notes: 826 * Notes:
828 * In case the connection state is FULL_FEATURE, move state 827 * In case the connection state is BOUND, move state
829 * to TEMINATING and start teardown sequence (rdma_disconnect). 828 * to TEMINATING and start teardown sequence (rdma_disconnect).
830 * In case the connection state is UP, complete flush as well. 829 * In case the connection state is UP, complete flush as well.
831 * 830 *
@@ -837,23 +836,16 @@ isert_conn_terminate(struct isert_conn *isert_conn)
837{ 836{
838 int err; 837 int err;
839 838
840 switch (isert_conn->state) { 839 if (isert_conn->state >= ISER_CONN_TERMINATING)
841 case ISER_CONN_TERMINATING: 840 return;
842 break; 841
843 case ISER_CONN_UP: 842 isert_info("Terminating conn %p state %d\n",
844 case ISER_CONN_FULL_FEATURE: /* FALLTHRU */ 843 isert_conn, isert_conn->state);
845 isert_info("Terminating conn %p state %d\n", 844 isert_conn->state = ISER_CONN_TERMINATING;
846 isert_conn, isert_conn->state); 845 err = rdma_disconnect(isert_conn->cm_id);
847 isert_conn->state = ISER_CONN_TERMINATING; 846 if (err)
848 err = rdma_disconnect(isert_conn->cm_id); 847 isert_warn("Failed rdma_disconnect isert_conn %p\n",
849 if (err) 848 isert_conn);
850 isert_warn("Failed rdma_disconnect isert_conn %p\n",
851 isert_conn);
852 break;
853 default:
854 isert_warn("conn %p teminating in state %d\n",
855 isert_conn, isert_conn->state);
856 }
857} 849}
858 850
859static int 851static int
@@ -887,35 +879,27 @@ static int
887isert_disconnected_handler(struct rdma_cm_id *cma_id, 879isert_disconnected_handler(struct rdma_cm_id *cma_id,
888 enum rdma_cm_event_type event) 880 enum rdma_cm_event_type event)
889{ 881{
890 struct isert_np *isert_np = cma_id->context; 882 struct isert_conn *isert_conn = cma_id->qp->qp_context;
891 struct isert_conn *isert_conn;
892 bool terminating = false;
893
894 if (isert_np->cm_id == cma_id)
895 return isert_np_cma_handler(cma_id->context, event);
896
897 isert_conn = cma_id->qp->qp_context;
898 883
899 mutex_lock(&isert_conn->mutex); 884 mutex_lock(&isert_conn->mutex);
900 terminating = (isert_conn->state == ISER_CONN_TERMINATING); 885 switch (isert_conn->state) {
901 isert_conn_terminate(isert_conn); 886 case ISER_CONN_TERMINATING:
902 mutex_unlock(&isert_conn->mutex); 887 break;
903 888 case ISER_CONN_UP:
904 isert_info("conn %p completing wait\n", isert_conn); 889 isert_conn_terminate(isert_conn);
905 complete(&isert_conn->wait); 890 isert_wait4flush(isert_conn);
906 891 isert_handle_unbound_conn(isert_conn);
907 if (terminating) 892 break;
908 goto out; 893 case ISER_CONN_BOUND:
909 894 case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
910 mutex_lock(&isert_np->mutex); 895 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
911 if (!list_empty(&isert_conn->node)) { 896 break;
912 list_del_init(&isert_conn->node); 897 default:
913 isert_put_conn(isert_conn); 898 isert_warn("conn %p teminating in state %d\n",
914 queue_work(isert_release_wq, &isert_conn->release_work); 899 isert_conn, isert_conn->state);
915 } 900 }
916 mutex_unlock(&isert_np->mutex); 901 mutex_unlock(&isert_conn->mutex);
917 902
918out:
919 return 0; 903 return 0;
920} 904}
921 905
@@ -934,12 +918,16 @@ isert_connect_error(struct rdma_cm_id *cma_id)
934static int 918static int
935isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) 919isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
936{ 920{
921 struct isert_np *isert_np = cma_id->context;
937 int ret = 0; 922 int ret = 0;
938 923
939 isert_info("%s (%d): status %d id %p np %p\n", 924 isert_info("%s (%d): status %d id %p np %p\n",
940 rdma_event_msg(event->event), event->event, 925 rdma_event_msg(event->event), event->event,
941 event->status, cma_id, cma_id->context); 926 event->status, cma_id, cma_id->context);
942 927
928 if (isert_np->cm_id == cma_id)
929 return isert_np_cma_handler(cma_id->context, event->event);
930
943 switch (event->event) { 931 switch (event->event) {
944 case RDMA_CM_EVENT_CONNECT_REQUEST: 932 case RDMA_CM_EVENT_CONNECT_REQUEST:
945 ret = isert_connect_request(cma_id, event); 933 ret = isert_connect_request(cma_id, event);
@@ -977,7 +965,8 @@ isert_post_recvm(struct isert_conn *isert_conn, u32 count)
977 965
978 for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) { 966 for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
979 rx_desc = &isert_conn->rx_descs[i]; 967 rx_desc = &isert_conn->rx_descs[i];
980 rx_wr->wr_id = (uintptr_t)rx_desc; 968
969 rx_wr->wr_cqe = &rx_desc->rx_cqe;
981 rx_wr->sg_list = &rx_desc->rx_sg; 970 rx_wr->sg_list = &rx_desc->rx_sg;
982 rx_wr->num_sge = 1; 971 rx_wr->num_sge = 1;
983 rx_wr->next = rx_wr + 1; 972 rx_wr->next = rx_wr + 1;
@@ -985,13 +974,10 @@ isert_post_recvm(struct isert_conn *isert_conn, u32 count)
985 rx_wr--; 974 rx_wr--;
986 rx_wr->next = NULL; /* mark end of work requests list */ 975 rx_wr->next = NULL; /* mark end of work requests list */
987 976
988 isert_conn->post_recv_buf_count += count;
989 ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr, 977 ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
990 &rx_wr_failed); 978 &rx_wr_failed);
991 if (ret) { 979 if (ret)
992 isert_err("ib_post_recv() failed with ret: %d\n", ret); 980 isert_err("ib_post_recv() failed with ret: %d\n", ret);
993 isert_conn->post_recv_buf_count -= count;
994 }
995 981
996 return ret; 982 return ret;
997} 983}
@@ -1002,23 +988,20 @@ isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
1002 struct ib_recv_wr *rx_wr_failed, rx_wr; 988 struct ib_recv_wr *rx_wr_failed, rx_wr;
1003 int ret; 989 int ret;
1004 990
1005 rx_wr.wr_id = (uintptr_t)rx_desc; 991 rx_wr.wr_cqe = &rx_desc->rx_cqe;
1006 rx_wr.sg_list = &rx_desc->rx_sg; 992 rx_wr.sg_list = &rx_desc->rx_sg;
1007 rx_wr.num_sge = 1; 993 rx_wr.num_sge = 1;
1008 rx_wr.next = NULL; 994 rx_wr.next = NULL;
1009 995
1010 isert_conn->post_recv_buf_count++;
1011 ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed); 996 ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed);
1012 if (ret) { 997 if (ret)
1013 isert_err("ib_post_recv() failed with ret: %d\n", ret); 998 isert_err("ib_post_recv() failed with ret: %d\n", ret);
1014 isert_conn->post_recv_buf_count--;
1015 }
1016 999
1017 return ret; 1000 return ret;
1018} 1001}
1019 1002
1020static int 1003static int
1021isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc) 1004isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
1022{ 1005{
1023 struct ib_device *ib_dev = isert_conn->cm_id->device; 1006 struct ib_device *ib_dev = isert_conn->cm_id->device;
1024 struct ib_send_wr send_wr, *send_wr_failed; 1007 struct ib_send_wr send_wr, *send_wr_failed;
@@ -1027,8 +1010,10 @@ isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
1027 ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr, 1010 ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
1028 ISER_HEADERS_LEN, DMA_TO_DEVICE); 1011 ISER_HEADERS_LEN, DMA_TO_DEVICE);
1029 1012
1013 tx_desc->tx_cqe.done = isert_login_send_done;
1014
1030 send_wr.next = NULL; 1015 send_wr.next = NULL;
1031 send_wr.wr_id = (uintptr_t)tx_desc; 1016 send_wr.wr_cqe = &tx_desc->tx_cqe;
1032 send_wr.sg_list = tx_desc->tx_sg; 1017 send_wr.sg_list = tx_desc->tx_sg;
1033 send_wr.num_sge = tx_desc->num_sge; 1018 send_wr.num_sge = tx_desc->num_sge;
1034 send_wr.opcode = IB_WR_SEND; 1019 send_wr.opcode = IB_WR_SEND;
@@ -1056,7 +1041,6 @@ isert_create_send_desc(struct isert_conn *isert_conn,
1056 tx_desc->iser_header.flags = ISCSI_CTRL; 1041 tx_desc->iser_header.flags = ISCSI_CTRL;
1057 1042
1058 tx_desc->num_sge = 1; 1043 tx_desc->num_sge = 1;
1059 tx_desc->isert_cmd = isert_cmd;
1060 1044
1061 if (tx_desc->tx_sg[0].lkey != device->pd->local_dma_lkey) { 1045 if (tx_desc->tx_sg[0].lkey != device->pd->local_dma_lkey) {
1062 tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey; 1046 tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
@@ -1097,8 +1081,9 @@ isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1097{ 1081{
1098 struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc; 1082 struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;
1099 1083
1100 isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND; 1084 isert_cmd->iser_ib_op = ISER_IB_SEND;
1101 send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc; 1085 tx_desc->tx_cqe.done = isert_send_done;
1086 send_wr->wr_cqe = &tx_desc->tx_cqe;
1102 1087
1103 if (isert_conn->snd_w_inv && isert_cmd->inv_rkey) { 1088 if (isert_conn->snd_w_inv && isert_cmd->inv_rkey) {
1104 send_wr->opcode = IB_WR_SEND_WITH_INV; 1089 send_wr->opcode = IB_WR_SEND_WITH_INV;
@@ -1113,7 +1098,7 @@ isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1113} 1098}
1114 1099
1115static int 1100static int
1116isert_rdma_post_recvl(struct isert_conn *isert_conn) 1101isert_login_post_recv(struct isert_conn *isert_conn)
1117{ 1102{
1118 struct ib_recv_wr rx_wr, *rx_wr_fail; 1103 struct ib_recv_wr rx_wr, *rx_wr_fail;
1119 struct ib_sge sge; 1104 struct ib_sge sge;
@@ -1121,23 +1106,22 @@ isert_rdma_post_recvl(struct isert_conn *isert_conn)
1121 1106
1122 memset(&sge, 0, sizeof(struct ib_sge)); 1107 memset(&sge, 0, sizeof(struct ib_sge));
1123 sge.addr = isert_conn->login_req_dma; 1108 sge.addr = isert_conn->login_req_dma;
1124 sge.length = ISER_RX_LOGIN_SIZE; 1109 sge.length = ISER_RX_PAYLOAD_SIZE;
1125 sge.lkey = isert_conn->device->pd->local_dma_lkey; 1110 sge.lkey = isert_conn->device->pd->local_dma_lkey;
1126 1111
1127 isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n", 1112 isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
1128 sge.addr, sge.length, sge.lkey); 1113 sge.addr, sge.length, sge.lkey);
1129 1114
1115 isert_conn->login_req_buf->rx_cqe.done = isert_login_recv_done;
1116
1130 memset(&rx_wr, 0, sizeof(struct ib_recv_wr)); 1117 memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
1131 rx_wr.wr_id = (uintptr_t)isert_conn->login_req_buf; 1118 rx_wr.wr_cqe = &isert_conn->login_req_buf->rx_cqe;
1132 rx_wr.sg_list = &sge; 1119 rx_wr.sg_list = &sge;
1133 rx_wr.num_sge = 1; 1120 rx_wr.num_sge = 1;
1134 1121
1135 isert_conn->post_recv_buf_count++;
1136 ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail); 1122 ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail);
1137 if (ret) { 1123 if (ret)
1138 isert_err("ib_post_recv() failed: %d\n", ret); 1124 isert_err("ib_post_recv() failed: %d\n", ret);
1139 isert_conn->post_recv_buf_count--;
1140 }
1141 1125
1142 return ret; 1126 return ret;
1143} 1127}
@@ -1203,12 +1187,12 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
1203 goto post_send; 1187 goto post_send;
1204 } 1188 }
1205 1189
1206 ret = isert_rdma_post_recvl(isert_conn); 1190 ret = isert_login_post_recv(isert_conn);
1207 if (ret) 1191 if (ret)
1208 return ret; 1192 return ret;
1209 } 1193 }
1210post_send: 1194post_send:
1211 ret = isert_post_send(isert_conn, tx_desc); 1195 ret = isert_login_post_send(isert_conn, tx_desc);
1212 if (ret) 1196 if (ret)
1213 return ret; 1197 return ret;
1214 1198
@@ -1218,7 +1202,7 @@ post_send:
1218static void 1202static void
1219isert_rx_login_req(struct isert_conn *isert_conn) 1203isert_rx_login_req(struct isert_conn *isert_conn)
1220{ 1204{
1221 struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf; 1205 struct iser_rx_desc *rx_desc = isert_conn->login_req_buf;
1222 int rx_buflen = isert_conn->login_req_len; 1206 int rx_buflen = isert_conn->login_req_len;
1223 struct iscsi_conn *conn = isert_conn->conn; 1207 struct iscsi_conn *conn = isert_conn->conn;
1224 struct iscsi_login *login = conn->conn_login; 1208 struct iscsi_login *login = conn->conn_login;
@@ -1551,12 +1535,42 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
1551} 1535}
1552 1536
1553static void 1537static void
1554isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn) 1538isert_print_wc(struct ib_wc *wc, const char *type)
1539{
1540 if (wc->status != IB_WC_WR_FLUSH_ERR)
1541 isert_err("%s failure: %s (%d) vend_err %x\n", type,
1542 ib_wc_status_msg(wc->status), wc->status,
1543 wc->vendor_err);
1544 else
1545 isert_dbg("%s failure: %s (%d)\n", type,
1546 ib_wc_status_msg(wc->status), wc->status);
1547}
1548
1549static void
1550isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1555{ 1551{
1552 struct isert_conn *isert_conn = wc->qp->qp_context;
1553 struct ib_device *ib_dev = isert_conn->cm_id->device;
1554 struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe);
1555 struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
1556 struct iser_ctrl *iser_ctrl = &rx_desc->iser_header; 1556 struct iser_ctrl *iser_ctrl = &rx_desc->iser_header;
1557 uint64_t read_va = 0, write_va = 0; 1557 uint64_t read_va = 0, write_va = 0;
1558 uint32_t read_stag = 0, write_stag = 0; 1558 uint32_t read_stag = 0, write_stag = 0;
1559 1559
1560 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1561 isert_print_wc(wc, "recv");
1562 if (wc->status != IB_WC_WR_FLUSH_ERR)
1563 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
1564 return;
1565 }
1566
1567 ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
1568 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
1569
1570 isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
1571 rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags,
1572 (int)(wc->byte_len - ISER_HEADERS_LEN));
1573
1560 switch (iser_ctrl->flags & 0xF0) { 1574 switch (iser_ctrl->flags & 0xF0) {
1561 case ISCSI_CTRL: 1575 case ISCSI_CTRL:
1562 if (iser_ctrl->flags & ISER_RSV) { 1576 if (iser_ctrl->flags & ISER_RSV) {
@@ -1584,56 +1598,40 @@ isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
1584 1598
1585 isert_rx_opcode(isert_conn, rx_desc, 1599 isert_rx_opcode(isert_conn, rx_desc,
1586 read_stag, read_va, write_stag, write_va); 1600 read_stag, read_va, write_stag, write_va);
1601
1602 ib_dma_sync_single_for_device(ib_dev, rx_desc->dma_addr,
1603 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
1587} 1604}
1588 1605
1589static void 1606static void
1590isert_rcv_completion(struct iser_rx_desc *desc, 1607isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1591 struct isert_conn *isert_conn,
1592 u32 xfer_len)
1593{ 1608{
1609 struct isert_conn *isert_conn = wc->qp->qp_context;
1594 struct ib_device *ib_dev = isert_conn->cm_id->device; 1610 struct ib_device *ib_dev = isert_conn->cm_id->device;
1595 struct iscsi_hdr *hdr; 1611
1596 u64 rx_dma; 1612 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1597 int rx_buflen; 1613 isert_print_wc(wc, "login recv");
1598 1614 return;
1599 if ((char *)desc == isert_conn->login_req_buf) {
1600 rx_dma = isert_conn->login_req_dma;
1601 rx_buflen = ISER_RX_LOGIN_SIZE;
1602 isert_dbg("login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
1603 rx_dma, rx_buflen);
1604 } else {
1605 rx_dma = desc->dma_addr;
1606 rx_buflen = ISER_RX_PAYLOAD_SIZE;
1607 isert_dbg("req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
1608 rx_dma, rx_buflen);
1609 } 1615 }
1610 1616
1611 ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE); 1617 ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_req_dma,
1618 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
1612 1619
1613 hdr = &desc->iscsi_header; 1620 isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN;
1614 isert_dbg("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
1615 hdr->opcode, hdr->itt, hdr->flags,
1616 (int)(xfer_len - ISER_HEADERS_LEN));
1617 1621
1618 if ((char *)desc == isert_conn->login_req_buf) { 1622 if (isert_conn->conn) {
1619 isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN; 1623 struct iscsi_login *login = isert_conn->conn->conn_login;
1620 if (isert_conn->conn) {
1621 struct iscsi_login *login = isert_conn->conn->conn_login;
1622 1624
1623 if (login && !login->first_request) 1625 if (login && !login->first_request)
1624 isert_rx_login_req(isert_conn); 1626 isert_rx_login_req(isert_conn);
1625 }
1626 mutex_lock(&isert_conn->mutex);
1627 complete(&isert_conn->login_req_comp);
1628 mutex_unlock(&isert_conn->mutex);
1629 } else {
1630 isert_rx_do_work(desc, isert_conn);
1631 } 1627 }
1632 1628
1633 ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen, 1629 mutex_lock(&isert_conn->mutex);
1634 DMA_FROM_DEVICE); 1630 complete(&isert_conn->login_req_comp);
1631 mutex_unlock(&isert_conn->mutex);
1635 1632
1636 isert_conn->post_recv_buf_count--; 1633 ib_dma_sync_single_for_device(ib_dev, isert_conn->login_req_dma,
1634 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
1637} 1635}
1638 1636
1639static int 1637static int
@@ -1683,54 +1681,50 @@ isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
1683static void 1681static void
1684isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) 1682isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1685{ 1683{
1686 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1687
1688 isert_dbg("Cmd %p\n", isert_cmd); 1684 isert_dbg("Cmd %p\n", isert_cmd);
1689 1685
1690 if (wr->data.sg) { 1686 if (isert_cmd->data.sg) {
1691 isert_dbg("Cmd %p unmap_sg op\n", isert_cmd); 1687 isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
1692 isert_unmap_data_buf(isert_conn, &wr->data); 1688 isert_unmap_data_buf(isert_conn, &isert_cmd->data);
1693 } 1689 }
1694 1690
1695 if (wr->rdma_wr) { 1691 if (isert_cmd->rdma_wr) {
1696 isert_dbg("Cmd %p free send_wr\n", isert_cmd); 1692 isert_dbg("Cmd %p free send_wr\n", isert_cmd);
1697 kfree(wr->rdma_wr); 1693 kfree(isert_cmd->rdma_wr);
1698 wr->rdma_wr = NULL; 1694 isert_cmd->rdma_wr = NULL;
1699 } 1695 }
1700 1696
1701 if (wr->ib_sge) { 1697 if (isert_cmd->ib_sge) {
1702 isert_dbg("Cmd %p free ib_sge\n", isert_cmd); 1698 isert_dbg("Cmd %p free ib_sge\n", isert_cmd);
1703 kfree(wr->ib_sge); 1699 kfree(isert_cmd->ib_sge);
1704 wr->ib_sge = NULL; 1700 isert_cmd->ib_sge = NULL;
1705 } 1701 }
1706} 1702}
1707 1703
1708static void 1704static void
1709isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) 1705isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1710{ 1706{
1711 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1712
1713 isert_dbg("Cmd %p\n", isert_cmd); 1707 isert_dbg("Cmd %p\n", isert_cmd);
1714 1708
1715 if (wr->fr_desc) { 1709 if (isert_cmd->fr_desc) {
1716 isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd, wr->fr_desc); 1710 isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd, isert_cmd->fr_desc);
1717 if (wr->fr_desc->ind & ISERT_PROTECTED) { 1711 if (isert_cmd->fr_desc->ind & ISERT_PROTECTED) {
1718 isert_unmap_data_buf(isert_conn, &wr->prot); 1712 isert_unmap_data_buf(isert_conn, &isert_cmd->prot);
1719 wr->fr_desc->ind &= ~ISERT_PROTECTED; 1713 isert_cmd->fr_desc->ind &= ~ISERT_PROTECTED;
1720 } 1714 }
1721 spin_lock_bh(&isert_conn->pool_lock); 1715 spin_lock_bh(&isert_conn->pool_lock);
1722 list_add_tail(&wr->fr_desc->list, &isert_conn->fr_pool); 1716 list_add_tail(&isert_cmd->fr_desc->list, &isert_conn->fr_pool);
1723 spin_unlock_bh(&isert_conn->pool_lock); 1717 spin_unlock_bh(&isert_conn->pool_lock);
1724 wr->fr_desc = NULL; 1718 isert_cmd->fr_desc = NULL;
1725 } 1719 }
1726 1720
1727 if (wr->data.sg) { 1721 if (isert_cmd->data.sg) {
1728 isert_dbg("Cmd %p unmap_sg op\n", isert_cmd); 1722 isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
1729 isert_unmap_data_buf(isert_conn, &wr->data); 1723 isert_unmap_data_buf(isert_conn, &isert_cmd->data);
1730 } 1724 }
1731 1725
1732 wr->ib_sge = NULL; 1726 isert_cmd->ib_sge = NULL;
1733 wr->rdma_wr = NULL; 1727 isert_cmd->rdma_wr = NULL;
1734} 1728}
1735 1729
1736static void 1730static void
@@ -1882,52 +1876,70 @@ fail_mr_status:
1882} 1876}
1883 1877
1884static void 1878static void
1885isert_completion_rdma_write(struct iser_tx_desc *tx_desc, 1879isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
1886 struct isert_cmd *isert_cmd)
1887{ 1880{
1888 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; 1881 struct isert_conn *isert_conn = wc->qp->qp_context;
1889 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1890 struct se_cmd *se_cmd = &cmd->se_cmd;
1891 struct isert_conn *isert_conn = isert_cmd->conn;
1892 struct isert_device *device = isert_conn->device; 1882 struct isert_device *device = isert_conn->device;
1883 struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
1884 struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
1885 struct se_cmd *cmd = &isert_cmd->iscsi_cmd->se_cmd;
1893 int ret = 0; 1886 int ret = 0;
1894 1887
1895 if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) { 1888 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1896 ret = isert_check_pi_status(se_cmd, 1889 isert_print_wc(wc, "rdma write");
1897 wr->fr_desc->pi_ctx->sig_mr); 1890 if (wc->status != IB_WC_WR_FLUSH_ERR)
1898 wr->fr_desc->ind &= ~ISERT_PROTECTED; 1891 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
1892 isert_completion_put(desc, isert_cmd, device->ib_device, true);
1893 return;
1894 }
1895
1896 isert_dbg("Cmd %p\n", isert_cmd);
1897
1898 if (isert_cmd->fr_desc && isert_cmd->fr_desc->ind & ISERT_PROTECTED) {
1899 ret = isert_check_pi_status(cmd,
1900 isert_cmd->fr_desc->pi_ctx->sig_mr);
1901 isert_cmd->fr_desc->ind &= ~ISERT_PROTECTED;
1899 } 1902 }
1900 1903
1901 device->unreg_rdma_mem(isert_cmd, isert_conn); 1904 device->unreg_rdma_mem(isert_cmd, isert_conn);
1902 wr->rdma_wr_num = 0; 1905 isert_cmd->rdma_wr_num = 0;
1903 if (ret) 1906 if (ret)
1904 transport_send_check_condition_and_sense(se_cmd, 1907 transport_send_check_condition_and_sense(cmd, cmd->pi_err, 0);
1905 se_cmd->pi_err, 0);
1906 else 1908 else
1907 isert_put_response(isert_conn->conn, cmd); 1909 isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd);
1908} 1910}
1909 1911
1910static void 1912static void
1911isert_completion_rdma_read(struct iser_tx_desc *tx_desc, 1913isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
1912 struct isert_cmd *isert_cmd)
1913{ 1914{
1914 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; 1915 struct isert_conn *isert_conn = wc->qp->qp_context;
1916 struct isert_device *device = isert_conn->device;
1917 struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
1918 struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
1915 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1919 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1916 struct se_cmd *se_cmd = &cmd->se_cmd; 1920 struct se_cmd *se_cmd = &cmd->se_cmd;
1917 struct isert_conn *isert_conn = isert_cmd->conn;
1918 struct isert_device *device = isert_conn->device;
1919 int ret = 0; 1921 int ret = 0;
1920 1922
1921 if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) { 1923 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1924 isert_print_wc(wc, "rdma read");
1925 if (wc->status != IB_WC_WR_FLUSH_ERR)
1926 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
1927 isert_completion_put(desc, isert_cmd, device->ib_device, true);
1928 return;
1929 }
1930
1931 isert_dbg("Cmd %p\n", isert_cmd);
1932
1933 if (isert_cmd->fr_desc && isert_cmd->fr_desc->ind & ISERT_PROTECTED) {
1922 ret = isert_check_pi_status(se_cmd, 1934 ret = isert_check_pi_status(se_cmd,
1923 wr->fr_desc->pi_ctx->sig_mr); 1935 isert_cmd->fr_desc->pi_ctx->sig_mr);
1924 wr->fr_desc->ind &= ~ISERT_PROTECTED; 1936 isert_cmd->fr_desc->ind &= ~ISERT_PROTECTED;
1925 } 1937 }
1926 1938
1927 iscsit_stop_dataout_timer(cmd); 1939 iscsit_stop_dataout_timer(cmd);
1928 device->unreg_rdma_mem(isert_cmd, isert_conn); 1940 device->unreg_rdma_mem(isert_cmd, isert_conn);
1929 cmd->write_data_done = wr->data.len; 1941 cmd->write_data_done = isert_cmd->data.len;
1930 wr->rdma_wr_num = 0; 1942 isert_cmd->rdma_wr_num = 0;
1931 1943
1932 isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd); 1944 isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
1933 spin_lock_bh(&cmd->istate_lock); 1945 spin_lock_bh(&cmd->istate_lock);
@@ -1975,170 +1987,56 @@ isert_do_control_comp(struct work_struct *work)
1975} 1987}
1976 1988
1977static void 1989static void
1978isert_response_completion(struct iser_tx_desc *tx_desc, 1990isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc)
1979 struct isert_cmd *isert_cmd,
1980 struct isert_conn *isert_conn,
1981 struct ib_device *ib_dev)
1982{ 1991{
1983 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1992 struct isert_conn *isert_conn = wc->qp->qp_context;
1984 1993 struct ib_device *ib_dev = isert_conn->cm_id->device;
1985 if (cmd->i_state == ISTATE_SEND_TASKMGTRSP || 1994 struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);
1986 cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
1987 cmd->i_state == ISTATE_SEND_REJECT ||
1988 cmd->i_state == ISTATE_SEND_TEXTRSP) {
1989 isert_unmap_tx_desc(tx_desc, ib_dev);
1990 1995
1991 INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp); 1996 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1992 queue_work(isert_comp_wq, &isert_cmd->comp_work); 1997 isert_print_wc(wc, "login send");
1993 return; 1998 if (wc->status != IB_WC_WR_FLUSH_ERR)
1999 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
1994 } 2000 }
1995 2001
1996 cmd->i_state = ISTATE_SENT_STATUS; 2002 isert_unmap_tx_desc(tx_desc, ib_dev);
1997 isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
1998} 2003}
1999 2004
2000static void 2005static void
2001isert_snd_completion(struct iser_tx_desc *tx_desc, 2006isert_send_done(struct ib_cq *cq, struct ib_wc *wc)
2002 struct isert_conn *isert_conn)
2003{ 2007{
2008 struct isert_conn *isert_conn = wc->qp->qp_context;
2004 struct ib_device *ib_dev = isert_conn->cm_id->device; 2009 struct ib_device *ib_dev = isert_conn->cm_id->device;
2005 struct isert_cmd *isert_cmd = tx_desc->isert_cmd; 2010 struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);
2006 struct isert_rdma_wr *wr; 2011 struct isert_cmd *isert_cmd = tx_desc_to_cmd(tx_desc);
2007 2012
2008 if (!isert_cmd) { 2013 if (unlikely(wc->status != IB_WC_SUCCESS)) {
2009 isert_unmap_tx_desc(tx_desc, ib_dev); 2014 isert_print_wc(wc, "send");
2015 if (wc->status != IB_WC_WR_FLUSH_ERR)
2016 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
2017 isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
2010 return; 2018 return;
2011 } 2019 }
2012 wr = &isert_cmd->rdma_wr;
2013 2020
2014 isert_dbg("Cmd %p iser_ib_op %d\n", isert_cmd, wr->iser_ib_op); 2021 isert_dbg("Cmd %p\n", isert_cmd);
2015 2022
2016 switch (wr->iser_ib_op) { 2023 switch (isert_cmd->iscsi_cmd->i_state) {
2017 case ISER_IB_SEND: 2024 case ISTATE_SEND_TASKMGTRSP:
2018 isert_response_completion(tx_desc, isert_cmd, 2025 case ISTATE_SEND_LOGOUTRSP:
2019 isert_conn, ib_dev); 2026 case ISTATE_SEND_REJECT:
2020 break; 2027 case ISTATE_SEND_TEXTRSP:
2021 case ISER_IB_RDMA_WRITE: 2028 isert_unmap_tx_desc(tx_desc, ib_dev);
2022 isert_completion_rdma_write(tx_desc, isert_cmd); 2029
2023 break; 2030 INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
2024 case ISER_IB_RDMA_READ: 2031 queue_work(isert_comp_wq, &isert_cmd->comp_work);
2025 isert_completion_rdma_read(tx_desc, isert_cmd); 2032 return;
2026 break;
2027 default: 2033 default:
2028 isert_err("Unknown wr->iser_ib_op: 0x%x\n", wr->iser_ib_op); 2034 isert_cmd->iscsi_cmd->i_state = ISTATE_SENT_STATUS;
2029 dump_stack(); 2035 isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
2030 break; 2036 break;
2031 } 2037 }
2032} 2038}
2033 2039
2034/**
2035 * is_isert_tx_desc() - Indicate if the completion wr_id
2036 * is a TX descriptor or not.
2037 * @isert_conn: iser connection
2038 * @wr_id: completion WR identifier
2039 *
2040 * Since we cannot rely on wc opcode in FLUSH errors
2041 * we must work around it by checking if the wr_id address
2042 * falls in the iser connection rx_descs buffer. If so
2043 * it is an RX descriptor, otherwize it is a TX.
2044 */
2045static inline bool
2046is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
2047{
2048 void *start = isert_conn->rx_descs;
2049 int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->rx_descs);
2050
2051 if (wr_id >= start && wr_id < start + len)
2052 return false;
2053
2054 return true;
2055}
2056
2057static void
2058isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
2059{
2060 if (wc->wr_id == ISER_BEACON_WRID) {
2061 isert_info("conn %p completing wait_comp_err\n",
2062 isert_conn);
2063 complete(&isert_conn->wait_comp_err);
2064 } else if (is_isert_tx_desc(isert_conn, (void *)(uintptr_t)wc->wr_id)) {
2065 struct ib_device *ib_dev = isert_conn->cm_id->device;
2066 struct isert_cmd *isert_cmd;
2067 struct iser_tx_desc *desc;
2068
2069 desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
2070 isert_cmd = desc->isert_cmd;
2071 if (!isert_cmd)
2072 isert_unmap_tx_desc(desc, ib_dev);
2073 else
2074 isert_completion_put(desc, isert_cmd, ib_dev, true);
2075 } else {
2076 isert_conn->post_recv_buf_count--;
2077 if (!isert_conn->post_recv_buf_count)
2078 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
2079 }
2080}
2081
2082static void
2083isert_handle_wc(struct ib_wc *wc)
2084{
2085 struct isert_conn *isert_conn;
2086 struct iser_tx_desc *tx_desc;
2087 struct iser_rx_desc *rx_desc;
2088
2089 isert_conn = wc->qp->qp_context;
2090 if (likely(wc->status == IB_WC_SUCCESS)) {
2091 if (wc->opcode == IB_WC_RECV) {
2092 rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
2093 isert_rcv_completion(rx_desc, isert_conn, wc->byte_len);
2094 } else {
2095 tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
2096 isert_snd_completion(tx_desc, isert_conn);
2097 }
2098 } else {
2099 if (wc->status != IB_WC_WR_FLUSH_ERR)
2100 isert_err("%s (%d): wr id %llx vend_err %x\n",
2101 ib_wc_status_msg(wc->status), wc->status,
2102 wc->wr_id, wc->vendor_err);
2103 else
2104 isert_dbg("%s (%d): wr id %llx\n",
2105 ib_wc_status_msg(wc->status), wc->status,
2106 wc->wr_id);
2107
2108 if (wc->wr_id != ISER_FASTREG_LI_WRID)
2109 isert_cq_comp_err(isert_conn, wc);
2110 }
2111}
2112
2113static void
2114isert_cq_work(struct work_struct *work)
2115{
2116 enum { isert_poll_budget = 65536 };
2117 struct isert_comp *comp = container_of(work, struct isert_comp,
2118 work);
2119 struct ib_wc *const wcs = comp->wcs;
2120 int i, n, completed = 0;
2121
2122 while ((n = ib_poll_cq(comp->cq, ARRAY_SIZE(comp->wcs), wcs)) > 0) {
2123 for (i = 0; i < n; i++)
2124 isert_handle_wc(&wcs[i]);
2125
2126 completed += n;
2127 if (completed >= isert_poll_budget)
2128 break;
2129 }
2130
2131 ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
2132}
2133
2134static void
2135isert_cq_callback(struct ib_cq *cq, void *context)
2136{
2137 struct isert_comp *comp = context;
2138
2139 queue_work(isert_comp_wq, &comp->work);
2140}
2141
2142static int 2040static int
2143isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd) 2041isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
2144{ 2042{
@@ -2395,7 +2293,8 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
2395 page_off = offset % PAGE_SIZE; 2293 page_off = offset % PAGE_SIZE;
2396 2294
2397 rdma_wr->wr.sg_list = ib_sge; 2295 rdma_wr->wr.sg_list = ib_sge;
2398 rdma_wr->wr.wr_id = (uintptr_t)&isert_cmd->tx_desc; 2296 rdma_wr->wr.wr_cqe = &isert_cmd->tx_desc.tx_cqe;
2297
2399 /* 2298 /*
2400 * Perform mapping of TCM scatterlist memory ib_sge dma_addr. 2299 * Perform mapping of TCM scatterlist memory ib_sge dma_addr.
2401 */ 2300 */
@@ -2428,24 +2327,23 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
2428} 2327}
2429 2328
2430static int 2329static int
2431isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, 2330isert_map_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn)
2432 struct isert_rdma_wr *wr)
2433{ 2331{
2332 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
2434 struct se_cmd *se_cmd = &cmd->se_cmd; 2333 struct se_cmd *se_cmd = &cmd->se_cmd;
2435 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2436 struct isert_conn *isert_conn = conn->context; 2334 struct isert_conn *isert_conn = conn->context;
2437 struct isert_data_buf *data = &wr->data; 2335 struct isert_data_buf *data = &isert_cmd->data;
2438 struct ib_rdma_wr *rdma_wr; 2336 struct ib_rdma_wr *rdma_wr;
2439 struct ib_sge *ib_sge; 2337 struct ib_sge *ib_sge;
2440 u32 offset, data_len, data_left, rdma_write_max, va_offset = 0; 2338 u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
2441 int ret = 0, i, ib_sge_cnt; 2339 int ret = 0, i, ib_sge_cnt;
2442 2340
2443 isert_cmd->tx_desc.isert_cmd = isert_cmd; 2341 offset = isert_cmd->iser_ib_op == ISER_IB_RDMA_READ ?
2444 2342 cmd->write_data_done : 0;
2445 offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
2446 ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg, 2343 ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
2447 se_cmd->t_data_nents, se_cmd->data_length, 2344 se_cmd->t_data_nents, se_cmd->data_length,
2448 offset, wr->iser_ib_op, &wr->data); 2345 offset, isert_cmd->iser_ib_op,
2346 &isert_cmd->data);
2449 if (ret) 2347 if (ret)
2450 return ret; 2348 return ret;
2451 2349
@@ -2458,41 +2356,44 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2458 ret = -ENOMEM; 2356 ret = -ENOMEM;
2459 goto unmap_cmd; 2357 goto unmap_cmd;
2460 } 2358 }
2461 wr->ib_sge = ib_sge; 2359 isert_cmd->ib_sge = ib_sge;
2462 2360
2463 wr->rdma_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge); 2361 isert_cmd->rdma_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
2464 wr->rdma_wr = kzalloc(sizeof(struct ib_rdma_wr) * wr->rdma_wr_num, 2362 isert_cmd->rdma_wr = kzalloc(sizeof(struct ib_rdma_wr) *
2465 GFP_KERNEL); 2363 isert_cmd->rdma_wr_num, GFP_KERNEL);
2466 if (!wr->rdma_wr) { 2364 if (!isert_cmd->rdma_wr) {
2467 isert_dbg("Unable to allocate wr->rdma_wr\n"); 2365 isert_dbg("Unable to allocate isert_cmd->rdma_wr\n");
2468 ret = -ENOMEM; 2366 ret = -ENOMEM;
2469 goto unmap_cmd; 2367 goto unmap_cmd;
2470 } 2368 }
2471 2369
2472 wr->isert_cmd = isert_cmd;
2473 rdma_write_max = isert_conn->max_sge * PAGE_SIZE; 2370 rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
2474 2371
2475 for (i = 0; i < wr->rdma_wr_num; i++) { 2372 for (i = 0; i < isert_cmd->rdma_wr_num; i++) {
2476 rdma_wr = &isert_cmd->rdma_wr.rdma_wr[i]; 2373 rdma_wr = &isert_cmd->rdma_wr[i];
2477 data_len = min(data_left, rdma_write_max); 2374 data_len = min(data_left, rdma_write_max);
2478 2375
2479 rdma_wr->wr.send_flags = 0; 2376 rdma_wr->wr.send_flags = 0;
2480 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) { 2377 if (isert_cmd->iser_ib_op == ISER_IB_RDMA_WRITE) {
2378 isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done;
2379
2481 rdma_wr->wr.opcode = IB_WR_RDMA_WRITE; 2380 rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
2482 rdma_wr->remote_addr = isert_cmd->read_va + offset; 2381 rdma_wr->remote_addr = isert_cmd->read_va + offset;
2483 rdma_wr->rkey = isert_cmd->read_stag; 2382 rdma_wr->rkey = isert_cmd->read_stag;
2484 if (i + 1 == wr->rdma_wr_num) 2383 if (i + 1 == isert_cmd->rdma_wr_num)
2485 rdma_wr->wr.next = &isert_cmd->tx_desc.send_wr; 2384 rdma_wr->wr.next = &isert_cmd->tx_desc.send_wr;
2486 else 2385 else
2487 rdma_wr->wr.next = &wr->rdma_wr[i + 1].wr; 2386 rdma_wr->wr.next = &isert_cmd->rdma_wr[i + 1].wr;
2488 } else { 2387 } else {
2388 isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
2389
2489 rdma_wr->wr.opcode = IB_WR_RDMA_READ; 2390 rdma_wr->wr.opcode = IB_WR_RDMA_READ;
2490 rdma_wr->remote_addr = isert_cmd->write_va + va_offset; 2391 rdma_wr->remote_addr = isert_cmd->write_va + va_offset;
2491 rdma_wr->rkey = isert_cmd->write_stag; 2392 rdma_wr->rkey = isert_cmd->write_stag;
2492 if (i + 1 == wr->rdma_wr_num) 2393 if (i + 1 == isert_cmd->rdma_wr_num)
2493 rdma_wr->wr.send_flags = IB_SEND_SIGNALED; 2394 rdma_wr->wr.send_flags = IB_SEND_SIGNALED;
2494 else 2395 else
2495 rdma_wr->wr.next = &wr->rdma_wr[i + 1].wr; 2396 rdma_wr->wr.next = &isert_cmd->rdma_wr[i + 1].wr;
2496 } 2397 }
2497 2398
2498 ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge, 2399 ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
@@ -2517,7 +2418,7 @@ isert_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
2517 u32 rkey; 2418 u32 rkey;
2518 2419
2519 memset(inv_wr, 0, sizeof(*inv_wr)); 2420 memset(inv_wr, 0, sizeof(*inv_wr));
2520 inv_wr->wr_id = ISER_FASTREG_LI_WRID; 2421 inv_wr->wr_cqe = NULL;
2521 inv_wr->opcode = IB_WR_LOCAL_INV; 2422 inv_wr->opcode = IB_WR_LOCAL_INV;
2522 inv_wr->ex.invalidate_rkey = mr->rkey; 2423 inv_wr->ex.invalidate_rkey = mr->rkey;
2523 2424
@@ -2573,7 +2474,7 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
2573 2474
2574 reg_wr.wr.next = NULL; 2475 reg_wr.wr.next = NULL;
2575 reg_wr.wr.opcode = IB_WR_REG_MR; 2476 reg_wr.wr.opcode = IB_WR_REG_MR;
2576 reg_wr.wr.wr_id = ISER_FASTREG_LI_WRID; 2477 reg_wr.wr.wr_cqe = NULL;
2577 reg_wr.wr.send_flags = 0; 2478 reg_wr.wr.send_flags = 0;
2578 reg_wr.wr.num_sge = 0; 2479 reg_wr.wr.num_sge = 0;
2579 reg_wr.mr = mr; 2480 reg_wr.mr = mr;
@@ -2660,10 +2561,10 @@ isert_set_prot_checks(u8 prot_checks)
2660 2561
2661static int 2562static int
2662isert_reg_sig_mr(struct isert_conn *isert_conn, 2563isert_reg_sig_mr(struct isert_conn *isert_conn,
2663 struct se_cmd *se_cmd, 2564 struct isert_cmd *isert_cmd,
2664 struct isert_rdma_wr *rdma_wr,
2665 struct fast_reg_descriptor *fr_desc) 2565 struct fast_reg_descriptor *fr_desc)
2666{ 2566{
2567 struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
2667 struct ib_sig_handover_wr sig_wr; 2568 struct ib_sig_handover_wr sig_wr;
2668 struct ib_send_wr inv_wr, *bad_wr, *wr = NULL; 2569 struct ib_send_wr inv_wr, *bad_wr, *wr = NULL;
2669 struct pi_context *pi_ctx = fr_desc->pi_ctx; 2570 struct pi_context *pi_ctx = fr_desc->pi_ctx;
@@ -2684,14 +2585,14 @@ isert_reg_sig_mr(struct isert_conn *isert_conn,
2684 2585
2685 memset(&sig_wr, 0, sizeof(sig_wr)); 2586 memset(&sig_wr, 0, sizeof(sig_wr));
2686 sig_wr.wr.opcode = IB_WR_REG_SIG_MR; 2587 sig_wr.wr.opcode = IB_WR_REG_SIG_MR;
2687 sig_wr.wr.wr_id = ISER_FASTREG_LI_WRID; 2588 sig_wr.wr.wr_cqe = NULL;
2688 sig_wr.wr.sg_list = &rdma_wr->ib_sg[DATA]; 2589 sig_wr.wr.sg_list = &isert_cmd->ib_sg[DATA];
2689 sig_wr.wr.num_sge = 1; 2590 sig_wr.wr.num_sge = 1;
2690 sig_wr.access_flags = IB_ACCESS_LOCAL_WRITE; 2591 sig_wr.access_flags = IB_ACCESS_LOCAL_WRITE;
2691 sig_wr.sig_attrs = &sig_attrs; 2592 sig_wr.sig_attrs = &sig_attrs;
2692 sig_wr.sig_mr = pi_ctx->sig_mr; 2593 sig_wr.sig_mr = pi_ctx->sig_mr;
2693 if (se_cmd->t_prot_sg) 2594 if (se_cmd->t_prot_sg)
2694 sig_wr.prot = &rdma_wr->ib_sg[PROT]; 2595 sig_wr.prot = &isert_cmd->ib_sg[PROT];
2695 2596
2696 if (!wr) 2597 if (!wr)
2697 wr = &sig_wr.wr; 2598 wr = &sig_wr.wr;
@@ -2705,35 +2606,34 @@ isert_reg_sig_mr(struct isert_conn *isert_conn,
2705 } 2606 }
2706 fr_desc->ind &= ~ISERT_SIG_KEY_VALID; 2607 fr_desc->ind &= ~ISERT_SIG_KEY_VALID;
2707 2608
2708 rdma_wr->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey; 2609 isert_cmd->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
2709 rdma_wr->ib_sg[SIG].addr = 0; 2610 isert_cmd->ib_sg[SIG].addr = 0;
2710 rdma_wr->ib_sg[SIG].length = se_cmd->data_length; 2611 isert_cmd->ib_sg[SIG].length = se_cmd->data_length;
2711 if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP && 2612 if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
2712 se_cmd->prot_op != TARGET_PROT_DOUT_INSERT) 2613 se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
2713 /* 2614 /*
2714 * We have protection guards on the wire 2615 * We have protection guards on the wire
2715 * so we need to set a larget transfer 2616 * so we need to set a larget transfer
2716 */ 2617 */
2717 rdma_wr->ib_sg[SIG].length += se_cmd->prot_length; 2618 isert_cmd->ib_sg[SIG].length += se_cmd->prot_length;
2718 2619
2719 isert_dbg("sig_sge: addr: 0x%llx length: %u lkey: %x\n", 2620 isert_dbg("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
2720 rdma_wr->ib_sg[SIG].addr, rdma_wr->ib_sg[SIG].length, 2621 isert_cmd->ib_sg[SIG].addr, isert_cmd->ib_sg[SIG].length,
2721 rdma_wr->ib_sg[SIG].lkey); 2622 isert_cmd->ib_sg[SIG].lkey);
2722err: 2623err:
2723 return ret; 2624 return ret;
2724} 2625}
2725 2626
2726static int 2627static int
2727isert_handle_prot_cmd(struct isert_conn *isert_conn, 2628isert_handle_prot_cmd(struct isert_conn *isert_conn,
2728 struct isert_cmd *isert_cmd, 2629 struct isert_cmd *isert_cmd)
2729 struct isert_rdma_wr *wr)
2730{ 2630{
2731 struct isert_device *device = isert_conn->device; 2631 struct isert_device *device = isert_conn->device;
2732 struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd; 2632 struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
2733 int ret; 2633 int ret;
2734 2634
2735 if (!wr->fr_desc->pi_ctx) { 2635 if (!isert_cmd->fr_desc->pi_ctx) {
2736 ret = isert_create_pi_ctx(wr->fr_desc, 2636 ret = isert_create_pi_ctx(isert_cmd->fr_desc,
2737 device->ib_device, 2637 device->ib_device,
2738 device->pd); 2638 device->pd);
2739 if (ret) { 2639 if (ret) {
@@ -2748,16 +2648,20 @@ isert_handle_prot_cmd(struct isert_conn *isert_conn,
2748 se_cmd->t_prot_sg, 2648 se_cmd->t_prot_sg,
2749 se_cmd->t_prot_nents, 2649 se_cmd->t_prot_nents,
2750 se_cmd->prot_length, 2650 se_cmd->prot_length,
2751 0, wr->iser_ib_op, &wr->prot); 2651 0,
2652 isert_cmd->iser_ib_op,
2653 &isert_cmd->prot);
2752 if (ret) { 2654 if (ret) {
2753 isert_err("conn %p failed to map protection buffer\n", 2655 isert_err("conn %p failed to map protection buffer\n",
2754 isert_conn); 2656 isert_conn);
2755 return ret; 2657 return ret;
2756 } 2658 }
2757 2659
2758 memset(&wr->ib_sg[PROT], 0, sizeof(wr->ib_sg[PROT])); 2660 memset(&isert_cmd->ib_sg[PROT], 0, sizeof(isert_cmd->ib_sg[PROT]));
2759 ret = isert_fast_reg_mr(isert_conn, wr->fr_desc, &wr->prot, 2661 ret = isert_fast_reg_mr(isert_conn, isert_cmd->fr_desc,
2760 ISERT_PROT_KEY_VALID, &wr->ib_sg[PROT]); 2662 &isert_cmd->prot,
2663 ISERT_PROT_KEY_VALID,
2664 &isert_cmd->ib_sg[PROT]);
2761 if (ret) { 2665 if (ret) {
2762 isert_err("conn %p failed to fast reg mr\n", 2666 isert_err("conn %p failed to fast reg mr\n",
2763 isert_conn); 2667 isert_conn);
@@ -2765,29 +2669,28 @@ isert_handle_prot_cmd(struct isert_conn *isert_conn,
2765 } 2669 }
2766 } 2670 }
2767 2671
2768 ret = isert_reg_sig_mr(isert_conn, se_cmd, wr, wr->fr_desc); 2672 ret = isert_reg_sig_mr(isert_conn, isert_cmd, isert_cmd->fr_desc);
2769 if (ret) { 2673 if (ret) {
2770 isert_err("conn %p failed to fast reg mr\n", 2674 isert_err("conn %p failed to fast reg mr\n",
2771 isert_conn); 2675 isert_conn);
2772 goto unmap_prot_cmd; 2676 goto unmap_prot_cmd;
2773 } 2677 }
2774 wr->fr_desc->ind |= ISERT_PROTECTED; 2678 isert_cmd->fr_desc->ind |= ISERT_PROTECTED;
2775 2679
2776 return 0; 2680 return 0;
2777 2681
2778unmap_prot_cmd: 2682unmap_prot_cmd:
2779 if (se_cmd->t_prot_sg) 2683 if (se_cmd->t_prot_sg)
2780 isert_unmap_data_buf(isert_conn, &wr->prot); 2684 isert_unmap_data_buf(isert_conn, &isert_cmd->prot);
2781 2685
2782 return ret; 2686 return ret;
2783} 2687}
2784 2688
2785static int 2689static int
2786isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, 2690isert_reg_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn)
2787 struct isert_rdma_wr *wr)
2788{ 2691{
2692 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
2789 struct se_cmd *se_cmd = &cmd->se_cmd; 2693 struct se_cmd *se_cmd = &cmd->se_cmd;
2790 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2791 struct isert_conn *isert_conn = conn->context; 2694 struct isert_conn *isert_conn = conn->context;
2792 struct fast_reg_descriptor *fr_desc = NULL; 2695 struct fast_reg_descriptor *fr_desc = NULL;
2793 struct ib_rdma_wr *rdma_wr; 2696 struct ib_rdma_wr *rdma_wr;
@@ -2796,57 +2699,61 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2796 int ret = 0; 2699 int ret = 0;
2797 unsigned long flags; 2700 unsigned long flags;
2798 2701
2799 isert_cmd->tx_desc.isert_cmd = isert_cmd; 2702 offset = isert_cmd->iser_ib_op == ISER_IB_RDMA_READ ?
2800 2703 cmd->write_data_done : 0;
2801 offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
2802 ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg, 2704 ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
2803 se_cmd->t_data_nents, se_cmd->data_length, 2705 se_cmd->t_data_nents, se_cmd->data_length,
2804 offset, wr->iser_ib_op, &wr->data); 2706 offset, isert_cmd->iser_ib_op,
2707 &isert_cmd->data);
2805 if (ret) 2708 if (ret)
2806 return ret; 2709 return ret;
2807 2710
2808 if (wr->data.dma_nents != 1 || isert_prot_cmd(isert_conn, se_cmd)) { 2711 if (isert_cmd->data.dma_nents != 1 ||
2712 isert_prot_cmd(isert_conn, se_cmd)) {
2809 spin_lock_irqsave(&isert_conn->pool_lock, flags); 2713 spin_lock_irqsave(&isert_conn->pool_lock, flags);
2810 fr_desc = list_first_entry(&isert_conn->fr_pool, 2714 fr_desc = list_first_entry(&isert_conn->fr_pool,
2811 struct fast_reg_descriptor, list); 2715 struct fast_reg_descriptor, list);
2812 list_del(&fr_desc->list); 2716 list_del(&fr_desc->list);
2813 spin_unlock_irqrestore(&isert_conn->pool_lock, flags); 2717 spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
2814 wr->fr_desc = fr_desc; 2718 isert_cmd->fr_desc = fr_desc;
2815 } 2719 }
2816 2720
2817 ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data, 2721 ret = isert_fast_reg_mr(isert_conn, fr_desc, &isert_cmd->data,
2818 ISERT_DATA_KEY_VALID, &wr->ib_sg[DATA]); 2722 ISERT_DATA_KEY_VALID, &isert_cmd->ib_sg[DATA]);
2819 if (ret) 2723 if (ret)
2820 goto unmap_cmd; 2724 goto unmap_cmd;
2821 2725
2822 if (isert_prot_cmd(isert_conn, se_cmd)) { 2726 if (isert_prot_cmd(isert_conn, se_cmd)) {
2823 ret = isert_handle_prot_cmd(isert_conn, isert_cmd, wr); 2727 ret = isert_handle_prot_cmd(isert_conn, isert_cmd);
2824 if (ret) 2728 if (ret)
2825 goto unmap_cmd; 2729 goto unmap_cmd;
2826 2730
2827 ib_sg = &wr->ib_sg[SIG]; 2731 ib_sg = &isert_cmd->ib_sg[SIG];
2828 } else { 2732 } else {
2829 ib_sg = &wr->ib_sg[DATA]; 2733 ib_sg = &isert_cmd->ib_sg[DATA];
2830 } 2734 }
2831 2735
2832 memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg)); 2736 memcpy(&isert_cmd->s_ib_sge, ib_sg, sizeof(*ib_sg));
2833 wr->ib_sge = &wr->s_ib_sge; 2737 isert_cmd->ib_sge = &isert_cmd->s_ib_sge;
2834 wr->rdma_wr_num = 1; 2738 isert_cmd->rdma_wr_num = 1;
2835 memset(&wr->s_rdma_wr, 0, sizeof(wr->s_rdma_wr)); 2739 memset(&isert_cmd->s_rdma_wr, 0, sizeof(isert_cmd->s_rdma_wr));
2836 wr->rdma_wr = &wr->s_rdma_wr; 2740 isert_cmd->rdma_wr = &isert_cmd->s_rdma_wr;
2837 wr->isert_cmd = isert_cmd;
2838 2741
2839 rdma_wr = &isert_cmd->rdma_wr.s_rdma_wr; 2742 rdma_wr = &isert_cmd->s_rdma_wr;
2840 rdma_wr->wr.sg_list = &wr->s_ib_sge; 2743 rdma_wr->wr.sg_list = &isert_cmd->s_ib_sge;
2841 rdma_wr->wr.num_sge = 1; 2744 rdma_wr->wr.num_sge = 1;
2842 rdma_wr->wr.wr_id = (uintptr_t)&isert_cmd->tx_desc; 2745 rdma_wr->wr.wr_cqe = &isert_cmd->tx_desc.tx_cqe;
2843 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) { 2746 if (isert_cmd->iser_ib_op == ISER_IB_RDMA_WRITE) {
2747 isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done;
2748
2844 rdma_wr->wr.opcode = IB_WR_RDMA_WRITE; 2749 rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
2845 rdma_wr->remote_addr = isert_cmd->read_va; 2750 rdma_wr->remote_addr = isert_cmd->read_va;
2846 rdma_wr->rkey = isert_cmd->read_stag; 2751 rdma_wr->rkey = isert_cmd->read_stag;
2847 rdma_wr->wr.send_flags = !isert_prot_cmd(isert_conn, se_cmd) ? 2752 rdma_wr->wr.send_flags = !isert_prot_cmd(isert_conn, se_cmd) ?
2848 0 : IB_SEND_SIGNALED; 2753 0 : IB_SEND_SIGNALED;
2849 } else { 2754 } else {
2755 isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
2756
2850 rdma_wr->wr.opcode = IB_WR_RDMA_READ; 2757 rdma_wr->wr.opcode = IB_WR_RDMA_READ;
2851 rdma_wr->remote_addr = isert_cmd->write_va; 2758 rdma_wr->remote_addr = isert_cmd->write_va;
2852 rdma_wr->rkey = isert_cmd->write_stag; 2759 rdma_wr->rkey = isert_cmd->write_stag;
@@ -2861,7 +2768,7 @@ unmap_cmd:
2861 list_add_tail(&fr_desc->list, &isert_conn->fr_pool); 2768 list_add_tail(&fr_desc->list, &isert_conn->fr_pool);
2862 spin_unlock_irqrestore(&isert_conn->pool_lock, flags); 2769 spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
2863 } 2770 }
2864 isert_unmap_data_buf(isert_conn, &wr->data); 2771 isert_unmap_data_buf(isert_conn, &isert_cmd->data);
2865 2772
2866 return ret; 2773 return ret;
2867} 2774}
@@ -2871,7 +2778,6 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2871{ 2778{
2872 struct se_cmd *se_cmd = &cmd->se_cmd; 2779 struct se_cmd *se_cmd = &cmd->se_cmd;
2873 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2780 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2874 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
2875 struct isert_conn *isert_conn = conn->context; 2781 struct isert_conn *isert_conn = conn->context;
2876 struct isert_device *device = isert_conn->device; 2782 struct isert_device *device = isert_conn->device;
2877 struct ib_send_wr *wr_failed; 2783 struct ib_send_wr *wr_failed;
@@ -2880,8 +2786,8 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2880 isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n", 2786 isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
2881 isert_cmd, se_cmd->data_length); 2787 isert_cmd, se_cmd->data_length);
2882 2788
2883 wr->iser_ib_op = ISER_IB_RDMA_WRITE; 2789 isert_cmd->iser_ib_op = ISER_IB_RDMA_WRITE;
2884 rc = device->reg_rdma_mem(conn, cmd, wr); 2790 rc = device->reg_rdma_mem(isert_cmd, conn);
2885 if (rc) { 2791 if (rc) {
2886 isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd); 2792 isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
2887 return rc; 2793 return rc;
@@ -2898,8 +2804,8 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2898 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2804 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2899 isert_init_send_wr(isert_conn, isert_cmd, 2805 isert_init_send_wr(isert_conn, isert_cmd,
2900 &isert_cmd->tx_desc.send_wr); 2806 &isert_cmd->tx_desc.send_wr);
2901 isert_cmd->rdma_wr.s_rdma_wr.wr.next = &isert_cmd->tx_desc.send_wr; 2807 isert_cmd->s_rdma_wr.wr.next = &isert_cmd->tx_desc.send_wr;
2902 wr->rdma_wr_num += 1; 2808 isert_cmd->rdma_wr_num += 1;
2903 2809
2904 rc = isert_post_recv(isert_conn, isert_cmd->rx_desc); 2810 rc = isert_post_recv(isert_conn, isert_cmd->rx_desc);
2905 if (rc) { 2811 if (rc) {
@@ -2908,7 +2814,7 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2908 } 2814 }
2909 } 2815 }
2910 2816
2911 rc = ib_post_send(isert_conn->qp, &wr->rdma_wr->wr, &wr_failed); 2817 rc = ib_post_send(isert_conn->qp, &isert_cmd->rdma_wr->wr, &wr_failed);
2912 if (rc) 2818 if (rc)
2913 isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n"); 2819 isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
2914 2820
@@ -2927,7 +2833,6 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
2927{ 2833{
2928 struct se_cmd *se_cmd = &cmd->se_cmd; 2834 struct se_cmd *se_cmd = &cmd->se_cmd;
2929 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2835 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2930 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
2931 struct isert_conn *isert_conn = conn->context; 2836 struct isert_conn *isert_conn = conn->context;
2932 struct isert_device *device = isert_conn->device; 2837 struct isert_device *device = isert_conn->device;
2933 struct ib_send_wr *wr_failed; 2838 struct ib_send_wr *wr_failed;
@@ -2935,14 +2840,14 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
2935 2840
2936 isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n", 2841 isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
2937 isert_cmd, se_cmd->data_length, cmd->write_data_done); 2842 isert_cmd, se_cmd->data_length, cmd->write_data_done);
2938 wr->iser_ib_op = ISER_IB_RDMA_READ; 2843 isert_cmd->iser_ib_op = ISER_IB_RDMA_READ;
2939 rc = device->reg_rdma_mem(conn, cmd, wr); 2844 rc = device->reg_rdma_mem(isert_cmd, conn);
2940 if (rc) { 2845 if (rc) {
2941 isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd); 2846 isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
2942 return rc; 2847 return rc;
2943 } 2848 }
2944 2849
2945 rc = ib_post_send(isert_conn->qp, &wr->rdma_wr->wr, &wr_failed); 2850 rc = ib_post_send(isert_conn->qp, &isert_cmd->rdma_wr->wr, &wr_failed);
2946 if (rc) 2851 if (rc)
2947 isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n"); 2852 isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
2948 2853
@@ -3214,6 +3119,7 @@ accept_wait:
3214 3119
3215 conn->context = isert_conn; 3120 conn->context = isert_conn;
3216 isert_conn->conn = conn; 3121 isert_conn->conn = conn;
3122 isert_conn->state = ISER_CONN_BOUND;
3217 3123
3218 isert_set_conn_info(np, conn, isert_conn); 3124 isert_set_conn_info(np, conn, isert_conn);
3219 3125
@@ -3274,8 +3180,6 @@ static void isert_release_work(struct work_struct *work)
3274 3180
3275 isert_info("Starting release conn %p\n", isert_conn); 3181 isert_info("Starting release conn %p\n", isert_conn);
3276 3182
3277 wait_for_completion(&isert_conn->wait);
3278
3279 mutex_lock(&isert_conn->mutex); 3183 mutex_lock(&isert_conn->mutex);
3280 isert_conn->state = ISER_CONN_DOWN; 3184 isert_conn->state = ISER_CONN_DOWN;
3281 mutex_unlock(&isert_conn->mutex); 3185 mutex_unlock(&isert_conn->mutex);
@@ -3310,14 +3214,26 @@ isert_wait4cmds(struct iscsi_conn *conn)
3310} 3214}
3311 3215
3312static void 3216static void
3217isert_beacon_done(struct ib_cq *cq, struct ib_wc *wc)
3218{
3219 struct isert_conn *isert_conn = wc->qp->qp_context;
3220
3221 isert_print_wc(wc, "beacon");
3222
3223 isert_info("conn %p completing wait_comp_err\n", isert_conn);
3224 complete(&isert_conn->wait_comp_err);
3225}
3226
3227static void
3313isert_wait4flush(struct isert_conn *isert_conn) 3228isert_wait4flush(struct isert_conn *isert_conn)
3314{ 3229{
3315 struct ib_recv_wr *bad_wr; 3230 struct ib_recv_wr *bad_wr;
3231 static struct ib_cqe cqe = { .done = isert_beacon_done };
3316 3232
3317 isert_info("conn %p\n", isert_conn); 3233 isert_info("conn %p\n", isert_conn);
3318 3234
3319 init_completion(&isert_conn->wait_comp_err); 3235 init_completion(&isert_conn->wait_comp_err);
3320 isert_conn->beacon.wr_id = ISER_BEACON_WRID; 3236 isert_conn->beacon.wr_cqe = &cqe;
3321 /* post an indication that all flush errors were consumed */ 3237 /* post an indication that all flush errors were consumed */
3322 if (ib_post_recv(isert_conn->qp, &isert_conn->beacon, &bad_wr)) { 3238 if (ib_post_recv(isert_conn->qp, &isert_conn->beacon, &bad_wr)) {
3323 isert_err("conn %p failed to post beacon", isert_conn); 3239 isert_err("conn %p failed to post beacon", isert_conn);
@@ -3369,14 +3285,6 @@ static void isert_wait_conn(struct iscsi_conn *conn)
3369 isert_info("Starting conn %p\n", isert_conn); 3285 isert_info("Starting conn %p\n", isert_conn);
3370 3286
3371 mutex_lock(&isert_conn->mutex); 3287 mutex_lock(&isert_conn->mutex);
3372 /*
3373 * Only wait for wait_comp_err if the isert_conn made it
3374 * into full feature phase..
3375 */
3376 if (isert_conn->state == ISER_CONN_INIT) {
3377 mutex_unlock(&isert_conn->mutex);
3378 return;
3379 }
3380 isert_conn_terminate(isert_conn); 3288 isert_conn_terminate(isert_conn);
3381 mutex_unlock(&isert_conn->mutex); 3289 mutex_unlock(&isert_conn->mutex);
3382 3290
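
The ib_isert.c hunks above all apply one conversion: the opaque wr_id cookies (and the ISER_FASTREG_LI_WRID / ISER_BEACON_WRID magic values) give way to the new CQ API, in which every work request carries a struct ib_cqe and the completion handler is that cqe's .done callback instead of a hand-rolled switch on wr_id. A minimal sketch of the idiom, using a hypothetical my_desc type rather than the isert descriptors:

#include <rdma/ib_verbs.h>

/* Hypothetical descriptor embedding the completion entry. */
struct my_desc {
        struct ib_cqe   cqe;            /* completion routing, replaces wr_id */
        void            *payload;
};

static void my_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
        /* Recover the descriptor from the embedded ib_cqe. */
        struct my_desc *desc = container_of(wc->wr_cqe, struct my_desc, cqe);

        if (wc->status != IB_WC_SUCCESS)
                pr_err("send failed: %s\n", ib_wc_status_msg(wc->status));
        /* ... complete or free desc ... */
}

static int my_post_send(struct ib_qp *qp, struct my_desc *desc,
                        struct ib_sge *sge)
{
        struct ib_send_wr wr = {}, *bad_wr;

        desc->cqe.done = my_send_done;  /* invoked by the CQ polling code */
        wr.wr_cqe = &desc->cqe;         /* instead of wr.wr_id = (uintptr_t)desc */
        wr.sg_list = sge;
        wr.num_sge = 1;
        wr.opcode = IB_WR_SEND;
        wr.send_flags = IB_SEND_SIGNALED;

        return ib_post_send(qp, &wr, &bad_wr);
}

Routing completions through container_of() on the embedded cqe is also what lets the series drop the tx_desc->isert_cmd back pointer.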
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 8d50453eef66..192788a4820c 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -36,9 +36,7 @@
36/* Constant PDU lengths calculations */ 36/* Constant PDU lengths calculations */
37#define ISER_HEADERS_LEN (sizeof(struct iser_ctrl) + \ 37#define ISER_HEADERS_LEN (sizeof(struct iser_ctrl) + \
38 sizeof(struct iscsi_hdr)) 38 sizeof(struct iscsi_hdr))
39#define ISER_RECV_DATA_SEG_LEN 8192 39#define ISER_RX_PAYLOAD_SIZE (ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN)
40#define ISER_RX_PAYLOAD_SIZE (ISER_HEADERS_LEN + ISER_RECV_DATA_SEG_LEN)
41#define ISER_RX_LOGIN_SIZE (ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN)
42 40
43/* QP settings */ 41/* QP settings */
44/* Maximal bounds on received asynchronous PDUs */ 42/* Maximal bounds on received asynchronous PDUs */
@@ -62,12 +60,11 @@
62 ISERT_MAX_TX_MISC_PDUS + \ 60 ISERT_MAX_TX_MISC_PDUS + \
63 ISERT_MAX_RX_MISC_PDUS) 61 ISERT_MAX_RX_MISC_PDUS)
64 62
65#define ISER_RX_PAD_SIZE (ISER_RECV_DATA_SEG_LEN + 4096 - \ 63#define ISER_RX_PAD_SIZE (ISCSI_DEF_MAX_RECV_SEG_LEN + 4096 - \
66 (ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge))) 64 (ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge) + \
65 sizeof(struct ib_cqe)))
67 66
68#define ISCSI_ISER_SG_TABLESIZE 256 67#define ISCSI_ISER_SG_TABLESIZE 256
69#define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL
70#define ISER_BEACON_WRID 0xfffffffffffffffeULL
71 68
72enum isert_desc_type { 69enum isert_desc_type {
73 ISCSI_TX_CONTROL, 70 ISCSI_TX_CONTROL,
@@ -84,6 +81,7 @@ enum iser_ib_op_code {
84enum iser_conn_state { 81enum iser_conn_state {
85 ISER_CONN_INIT, 82 ISER_CONN_INIT,
86 ISER_CONN_UP, 83 ISER_CONN_UP,
84 ISER_CONN_BOUND,
87 ISER_CONN_FULL_FEATURE, 85 ISER_CONN_FULL_FEATURE,
88 ISER_CONN_TERMINATING, 86 ISER_CONN_TERMINATING,
89 ISER_CONN_DOWN, 87 ISER_CONN_DOWN,
@@ -92,23 +90,35 @@ enum iser_conn_state {
92struct iser_rx_desc { 90struct iser_rx_desc {
93 struct iser_ctrl iser_header; 91 struct iser_ctrl iser_header;
94 struct iscsi_hdr iscsi_header; 92 struct iscsi_hdr iscsi_header;
95 char data[ISER_RECV_DATA_SEG_LEN]; 93 char data[ISCSI_DEF_MAX_RECV_SEG_LEN];
96 u64 dma_addr; 94 u64 dma_addr;
97 struct ib_sge rx_sg; 95 struct ib_sge rx_sg;
96 struct ib_cqe rx_cqe;
98 char pad[ISER_RX_PAD_SIZE]; 97 char pad[ISER_RX_PAD_SIZE];
99} __packed; 98} __packed;
100 99
100static inline struct iser_rx_desc *cqe_to_rx_desc(struct ib_cqe *cqe)
101{
102 return container_of(cqe, struct iser_rx_desc, rx_cqe);
103}
104
101struct iser_tx_desc { 105struct iser_tx_desc {
102 struct iser_ctrl iser_header; 106 struct iser_ctrl iser_header;
103 struct iscsi_hdr iscsi_header; 107 struct iscsi_hdr iscsi_header;
104 enum isert_desc_type type; 108 enum isert_desc_type type;
105 u64 dma_addr; 109 u64 dma_addr;
106 struct ib_sge tx_sg[2]; 110 struct ib_sge tx_sg[2];
111 struct ib_cqe tx_cqe;
107 int num_sge; 112 int num_sge;
108 struct isert_cmd *isert_cmd;
109 struct ib_send_wr send_wr; 113 struct ib_send_wr send_wr;
110} __packed; 114} __packed;
111 115
116static inline struct iser_tx_desc *cqe_to_tx_desc(struct ib_cqe *cqe)
117{
118 return container_of(cqe, struct iser_tx_desc, tx_cqe);
119}
120
121
112enum isert_indicator { 122enum isert_indicator {
113 ISERT_PROTECTED = 1 << 0, 123 ISERT_PROTECTED = 1 << 0,
114 ISERT_DATA_KEY_VALID = 1 << 1, 124 ISERT_DATA_KEY_VALID = 1 << 1,
@@ -144,20 +154,6 @@ enum {
144 SIG = 2, 154 SIG = 2,
145}; 155};
146 156
147struct isert_rdma_wr {
148 struct isert_cmd *isert_cmd;
149 enum iser_ib_op_code iser_ib_op;
150 struct ib_sge *ib_sge;
151 struct ib_sge s_ib_sge;
152 int rdma_wr_num;
153 struct ib_rdma_wr *rdma_wr;
154 struct ib_rdma_wr s_rdma_wr;
155 struct ib_sge ib_sg[3];
156 struct isert_data_buf data;
157 struct isert_data_buf prot;
158 struct fast_reg_descriptor *fr_desc;
159};
160
161struct isert_cmd { 157struct isert_cmd {
162 uint32_t read_stag; 158 uint32_t read_stag;
163 uint32_t write_stag; 159 uint32_t write_stag;
@@ -170,22 +166,34 @@ struct isert_cmd {
170 struct iscsi_cmd *iscsi_cmd; 166 struct iscsi_cmd *iscsi_cmd;
171 struct iser_tx_desc tx_desc; 167 struct iser_tx_desc tx_desc;
172 struct iser_rx_desc *rx_desc; 168 struct iser_rx_desc *rx_desc;
173 struct isert_rdma_wr rdma_wr; 169 enum iser_ib_op_code iser_ib_op;
170 struct ib_sge *ib_sge;
171 struct ib_sge s_ib_sge;
172 int rdma_wr_num;
173 struct ib_rdma_wr *rdma_wr;
174 struct ib_rdma_wr s_rdma_wr;
175 struct ib_sge ib_sg[3];
176 struct isert_data_buf data;
177 struct isert_data_buf prot;
178 struct fast_reg_descriptor *fr_desc;
174 struct work_struct comp_work; 179 struct work_struct comp_work;
175 struct scatterlist sg; 180 struct scatterlist sg;
176}; 181};
177 182
183static inline struct isert_cmd *tx_desc_to_cmd(struct iser_tx_desc *desc)
184{
185 return container_of(desc, struct isert_cmd, tx_desc);
186}
187
178struct isert_device; 188struct isert_device;
179 189
180struct isert_conn { 190struct isert_conn {
181 enum iser_conn_state state; 191 enum iser_conn_state state;
182 int post_recv_buf_count;
183 u32 responder_resources; 192 u32 responder_resources;
184 u32 initiator_depth; 193 u32 initiator_depth;
185 bool pi_support; 194 bool pi_support;
186 u32 max_sge; 195 u32 max_sge;
187 char *login_buf; 196 struct iser_rx_desc *login_req_buf;
188 char *login_req_buf;
189 char *login_rsp_buf; 197 char *login_rsp_buf;
190 u64 login_req_dma; 198 u64 login_req_dma;
191 int login_req_len; 199 int login_req_len;
@@ -201,7 +209,6 @@ struct isert_conn {
201 struct ib_qp *qp; 209 struct ib_qp *qp;
202 struct isert_device *device; 210 struct isert_device *device;
203 struct mutex mutex; 211 struct mutex mutex;
204 struct completion wait;
205 struct completion wait_comp_err; 212 struct completion wait_comp_err;
206 struct kref kref; 213 struct kref kref;
207 struct list_head fr_pool; 214 struct list_head fr_pool;
@@ -221,17 +228,13 @@ struct isert_conn {
221 * 228 *
222 * @device: pointer to device handle 229 * @device: pointer to device handle
223 * @cq: completion queue 230 * @cq: completion queue
224 * @wcs: work completion array
225 * @active_qps: Number of active QPs attached 231 * @active_qps: Number of active QPs attached
226 * to completion context 232 * to completion context
227 * @work: completion work handle
228 */ 233 */
229struct isert_comp { 234struct isert_comp {
230 struct isert_device *device; 235 struct isert_device *device;
231 struct ib_cq *cq; 236 struct ib_cq *cq;
232 struct ib_wc wcs[16];
233 int active_qps; 237 int active_qps;
234 struct work_struct work;
235}; 238};
236 239
237struct isert_device { 240struct isert_device {
@@ -243,9 +246,8 @@ struct isert_device {
243 struct isert_comp *comps; 246 struct isert_comp *comps;
244 int comps_used; 247 int comps_used;
245 struct list_head dev_node; 248 struct list_head dev_node;
246 int (*reg_rdma_mem)(struct iscsi_conn *conn, 249 int (*reg_rdma_mem)(struct isert_cmd *isert_cmd,
247 struct iscsi_cmd *cmd, 250 struct iscsi_conn *conn);
248 struct isert_rdma_wr *wr);
249 void (*unreg_rdma_mem)(struct isert_cmd *isert_cmd, 251 void (*unreg_rdma_mem)(struct isert_cmd *isert_cmd,
250 struct isert_conn *isert_conn); 252 struct isert_conn *isert_conn);
251}; 253};
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 25bdaeef2520..1d1309091aba 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1264,40 +1264,26 @@ free_mem:
1264 */ 1264 */
1265static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch) 1265static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
1266{ 1266{
1267 struct se_session *se_sess;
1267 struct srpt_send_ioctx *ioctx; 1268 struct srpt_send_ioctx *ioctx;
1268 unsigned long flags; 1269 int tag;
1269 1270
1270 BUG_ON(!ch); 1271 BUG_ON(!ch);
1272 se_sess = ch->sess;
1271 1273
1272 ioctx = NULL; 1274 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
1273 spin_lock_irqsave(&ch->spinlock, flags); 1275 if (tag < 0) {
1274 if (!list_empty(&ch->free_list)) { 1276 pr_err("Unable to obtain tag for srpt_send_ioctx\n");
1275 ioctx = list_first_entry(&ch->free_list, 1277 return NULL;
1276 struct srpt_send_ioctx, free_list);
1277 list_del(&ioctx->free_list);
1278 } 1278 }
1279 spin_unlock_irqrestore(&ch->spinlock, flags); 1279 ioctx = &((struct srpt_send_ioctx *)se_sess->sess_cmd_map)[tag];
1280 1280 memset(ioctx, 0, sizeof(struct srpt_send_ioctx));
1281 if (!ioctx) 1281 ioctx->ch = ch;
1282 return ioctx;
1283
1284 BUG_ON(ioctx->ch != ch);
1285 spin_lock_init(&ioctx->spinlock); 1282 spin_lock_init(&ioctx->spinlock);
1286 ioctx->state = SRPT_STATE_NEW; 1283 ioctx->state = SRPT_STATE_NEW;
1287 ioctx->n_rbuf = 0;
1288 ioctx->rbufs = NULL;
1289 ioctx->n_rdma = 0;
1290 ioctx->n_rdma_wrs = 0;
1291 ioctx->rdma_wrs = NULL;
1292 ioctx->mapped_sg_count = 0;
1293 init_completion(&ioctx->tx_done); 1284 init_completion(&ioctx->tx_done);
1294 ioctx->queue_status_only = false; 1285
1295 /* 1286 ioctx->cmd.map_tag = tag;
1296 * transport_init_se_cmd() does not initialize all fields, so do it
1297 * here.
1298 */
1299 memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
1300 memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));
1301 1287
1302 return ioctx; 1288 return ioctx;
1303} 1289}
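
The srpt_get_send_ioctx() rewrite above is the percpu_ida conversion repeated across this pull: the driver-private free list and spinlock are replaced by a tag taken from se_sess->sess_tag_pool, which indexes the pre-allocated se_sess->sess_cmd_map array. A rough sketch of the idiom, with my_ioctx standing in for the fabric driver's per-command context:

#include <linux/percpu_ida.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <target/target_core_base.h>

struct my_ioctx {
        struct se_cmd cmd;
        /* ... driver state ... */
};

static struct my_ioctx *my_get_ioctx(struct se_session *se_sess)
{
        struct my_ioctx *ioctx;
        int tag;

        /* TASK_RUNNING: fail instead of sleeping when the pool is empty. */
        tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
        if (tag < 0)
                return NULL;

        ioctx = &((struct my_ioctx *)se_sess->sess_cmd_map)[tag];
        memset(ioctx, 0, sizeof(*ioctx));
        ioctx->cmd.map_tag = tag;       /* remembered for the release path */

        return ioctx;
}

static void my_put_ioctx(struct se_session *se_sess, struct my_ioctx *ioctx)
{
        percpu_ida_free(&se_sess->sess_tag_pool, ioctx->cmd.map_tag);
}

Because TASK_RUNNING makes the allocation non-blocking, srpt_get_send_ioctx() can simply return NULL on a negative tag, as the hunk above does.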
@@ -2034,9 +2020,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2034 struct srp_login_rej *rej; 2020 struct srp_login_rej *rej;
2035 struct ib_cm_rep_param *rep_param; 2021 struct ib_cm_rep_param *rep_param;
2036 struct srpt_rdma_ch *ch, *tmp_ch; 2022 struct srpt_rdma_ch *ch, *tmp_ch;
2037 struct se_node_acl *se_acl;
2038 u32 it_iu_len; 2023 u32 it_iu_len;
2039 int i, ret = 0; 2024 int ret = 0;
2040 unsigned char *p; 2025 unsigned char *p;
2041 2026
2042 WARN_ON_ONCE(irqs_disabled()); 2027 WARN_ON_ONCE(irqs_disabled());
@@ -2158,12 +2143,6 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2158 if (!ch->ioctx_ring) 2143 if (!ch->ioctx_ring)
2159 goto free_ch; 2144 goto free_ch;
2160 2145
2161 INIT_LIST_HEAD(&ch->free_list);
2162 for (i = 0; i < ch->rq_size; i++) {
2163 ch->ioctx_ring[i]->ch = ch;
2164 list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
2165 }
2166
2167 ret = srpt_create_ch_ib(ch); 2146 ret = srpt_create_ch_ib(ch);
2168 if (ret) { 2147 if (ret) {
2169 rej->reason = cpu_to_be32( 2148 rej->reason = cpu_to_be32(
@@ -2193,19 +2172,13 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2193 pr_debug("registering session %s\n", ch->sess_name); 2172 pr_debug("registering session %s\n", ch->sess_name);
2194 p = &ch->sess_name[0]; 2173 p = &ch->sess_name[0];
2195 2174
2196 ch->sess = transport_init_session(TARGET_PROT_NORMAL);
2197 if (IS_ERR(ch->sess)) {
2198 rej->reason = cpu_to_be32(
2199 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2200 pr_debug("Failed to create session\n");
2201 goto destroy_ib;
2202 }
2203
2204try_again: 2175try_again:
2205 se_acl = core_tpg_get_initiator_node_acl(&sport->port_tpg_1, p); 2176 ch->sess = target_alloc_session(&sport->port_tpg_1, ch->rq_size,
2206 if (!se_acl) { 2177 sizeof(struct srpt_send_ioctx),
2178 TARGET_PROT_NORMAL, p, ch, NULL);
2179 if (IS_ERR(ch->sess)) {
2207 pr_info("Rejected login because no ACL has been" 2180 pr_info("Rejected login because no ACL has been"
2208 " configured yet for initiator %s.\n", ch->sess_name); 2181 " configured yet for initiator %s.\n", p);
2209 /* 2182 /*
2210 * XXX: Hack to retry of ch->i_port_id without leading '0x' 2183 * XXX: Hack to retry of ch->i_port_id without leading '0x'
2211 */ 2184 */
@@ -2213,14 +2186,11 @@ try_again:
2213 p += 2; 2186 p += 2;
2214 goto try_again; 2187 goto try_again;
2215 } 2188 }
2216 rej->reason = cpu_to_be32( 2189 rej->reason = cpu_to_be32((PTR_ERR(ch->sess) == -ENOMEM) ?
2190 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES :
2217 SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED); 2191 SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
2218 transport_free_session(ch->sess);
2219 goto destroy_ib; 2192 goto destroy_ib;
2220 } 2193 }
2221 ch->sess->se_node_acl = se_acl;
2222
2223 transport_register_session(&sport->port_tpg_1, se_acl, ch->sess, ch);
2224 2194
2225 pr_debug("Establish connection sess=%p name=%s cm_id=%p\n", ch->sess, 2195 pr_debug("Establish connection sess=%p name=%s cm_id=%p\n", ch->sess,
2226 ch->sess_name, ch->cm_id); 2196 ch->sess_name, ch->cm_id);
@@ -2911,7 +2881,7 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
2911 struct srpt_send_ioctx *ioctx = container_of(se_cmd, 2881 struct srpt_send_ioctx *ioctx = container_of(se_cmd,
2912 struct srpt_send_ioctx, cmd); 2882 struct srpt_send_ioctx, cmd);
2913 struct srpt_rdma_ch *ch = ioctx->ch; 2883 struct srpt_rdma_ch *ch = ioctx->ch;
2914 unsigned long flags; 2884 struct se_session *se_sess = ch->sess;
2915 2885
2916 WARN_ON(ioctx->state != SRPT_STATE_DONE); 2886 WARN_ON(ioctx->state != SRPT_STATE_DONE);
2917 WARN_ON(ioctx->mapped_sg_count != 0); 2887 WARN_ON(ioctx->mapped_sg_count != 0);
@@ -2922,9 +2892,7 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
2922 ioctx->n_rbuf = 0; 2892 ioctx->n_rbuf = 0;
2923 } 2893 }
2924 2894
2925 spin_lock_irqsave(&ch->spinlock, flags); 2895 percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
2926 list_add(&ioctx->free_list, &ch->free_list);
2927 spin_unlock_irqrestore(&ch->spinlock, flags);
2928} 2896}
2929 2897
2930/** 2898/**
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index af9b8b527340..ca288f019315 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -179,7 +179,6 @@ struct srpt_recv_ioctx {
179 * struct srpt_send_ioctx - SRPT send I/O context. 179 * struct srpt_send_ioctx - SRPT send I/O context.
180 * @ioctx: See above. 180 * @ioctx: See above.
181 * @ch: Channel pointer. 181 * @ch: Channel pointer.
182 * @free_list: Node in srpt_rdma_ch.free_list.
183 * @n_rbuf: Number of data buffers in the received SRP command. 182 * @n_rbuf: Number of data buffers in the received SRP command.
184 * @rbufs: Pointer to SRP data buffer array. 183 * @rbufs: Pointer to SRP data buffer array.
185 * @single_rbuf: SRP data buffer if the command has only a single buffer. 184 * @single_rbuf: SRP data buffer if the command has only a single buffer.
@@ -202,7 +201,6 @@ struct srpt_send_ioctx {
202 struct srp_direct_buf *rbufs; 201 struct srp_direct_buf *rbufs;
203 struct srp_direct_buf single_rbuf; 202 struct srp_direct_buf single_rbuf;
204 struct scatterlist *sg; 203 struct scatterlist *sg;
205 struct list_head free_list;
206 spinlock_t spinlock; 204 spinlock_t spinlock;
207 enum srpt_command_state state; 205 enum srpt_command_state state;
208 struct se_cmd cmd; 206 struct se_cmd cmd;
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index ceb452dd143c..47f8b9b49bac 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2963,6 +2963,7 @@ struct qlt_hw_data {
2963 2963
2964 uint8_t tgt_node_name[WWN_SIZE]; 2964 uint8_t tgt_node_name[WWN_SIZE];
2965 2965
2966 struct dentry *dfs_tgt_sess;
2966 struct list_head q_full_list; 2967 struct list_head q_full_list;
2967 uint32_t num_pend_cmds; 2968 uint32_t num_pend_cmds;
2968 uint32_t num_qfull_cmds_alloc; 2969 uint32_t num_qfull_cmds_alloc;
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index cd8b96a4b0dd..34272fde8a5b 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -13,6 +13,47 @@ static struct dentry *qla2x00_dfs_root;
13static atomic_t qla2x00_dfs_root_count; 13static atomic_t qla2x00_dfs_root_count;
14 14
15static int 15static int
16qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused)
17{
18 scsi_qla_host_t *vha = s->private;
19 struct qla_hw_data *ha = vha->hw;
20 unsigned long flags;
21 struct qla_tgt_sess *sess = NULL;
22 struct qla_tgt *tgt= vha->vha_tgt.qla_tgt;
23
24 seq_printf(s, "%s\n",vha->host_str);
25 if (tgt) {
26 seq_printf(s, "Port ID Port Name Handle\n");
27
28 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
29 list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
30 seq_printf(s, "%02x:%02x:%02x %8phC %d\n",
31 sess->s_id.b.domain,sess->s_id.b.area,
32 sess->s_id.b.al_pa, sess->port_name,
33 sess->loop_id);
34 }
35 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
36 }
37
38 return 0;
39}
40
41static int
42qla2x00_dfs_tgt_sess_open(struct inode *inode, struct file *file)
43{
44 scsi_qla_host_t *vha = inode->i_private;
45 return single_open(file, qla2x00_dfs_tgt_sess_show, vha);
46}
47
48
49static const struct file_operations dfs_tgt_sess_ops = {
50 .open = qla2x00_dfs_tgt_sess_open,
51 .read = seq_read,
52 .llseek = seq_lseek,
53 .release = single_release,
54};
55
56static int
16qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused) 57qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
17{ 58{
18 struct scsi_qla_host *vha = s->private; 59 struct scsi_qla_host *vha = s->private;
@@ -248,6 +289,15 @@ create_nodes:
248 "Unable to create debugfs fce node.\n"); 289 "Unable to create debugfs fce node.\n");
249 goto out; 290 goto out;
250 } 291 }
292
293 ha->tgt.dfs_tgt_sess = debugfs_create_file("tgt_sess",
294 S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_sess_ops);
295 if (!ha->tgt.dfs_tgt_sess) {
296 ql_log(ql_log_warn, vha, 0xffff,
297 "Unable to create debugFS tgt_sess node.\n");
298 goto out;
299 }
300
251out: 301out:
252 return 0; 302 return 0;
253} 303}
@@ -257,6 +307,11 @@ qla2x00_dfs_remove(scsi_qla_host_t *vha)
257{ 307{
258 struct qla_hw_data *ha = vha->hw; 308 struct qla_hw_data *ha = vha->hw;
259 309
310 if (ha->tgt.dfs_tgt_sess) {
311 debugfs_remove(ha->tgt.dfs_tgt_sess);
312 ha->tgt.dfs_tgt_sess = NULL;
313 }
314
260 if (ha->dfs_fw_resource_cnt) { 315 if (ha->dfs_fw_resource_cnt) {
261 debugfs_remove(ha->dfs_fw_resource_cnt); 316 debugfs_remove(ha->dfs_fw_resource_cnt);
262 ha->dfs_fw_resource_cnt = NULL; 317 ha->dfs_fw_resource_cnt = NULL;
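
The new qla2xxx tgt_sess node follows the stock read-only debugfs/seq_file recipe: a show routine, single_open() glue, and debugfs_create_file() at setup paired with debugfs_remove() on teardown. A stripped-down sketch of that recipe (my_show, my_open and friends are placeholder names):

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int my_show(struct seq_file *s, void *unused)
{
        void *priv = s->private;        /* data passed to debugfs_create_file() */

        seq_printf(s, "state of %p\n", priv);
        return 0;
}

static int my_open(struct inode *inode, struct file *file)
{
        return single_open(file, my_show, inode->i_private);
}

static const struct file_operations my_fops = {
        .open           = my_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* Setup:    dentry = debugfs_create_file("my_node", S_IRUSR, parent, priv, &my_fops);
 * Teardown: debugfs_remove(dentry);
 */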
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index ee967becd257..985231900aca 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -641,7 +641,8 @@ void qlt_unreg_sess(struct qla_tgt_sess *sess)
641{ 641{
642 struct scsi_qla_host *vha = sess->vha; 642 struct scsi_qla_host *vha = sess->vha;
643 643
644 vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess); 644 if (sess->se_sess)
645 vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
645 646
646 if (!list_empty(&sess->del_list_entry)) 647 if (!list_empty(&sess->del_list_entry))
647 list_del_init(&sess->del_list_entry); 648 list_del_init(&sess->del_list_entry);
@@ -856,8 +857,12 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
856 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004, 857 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
857 "Timeout: sess %p about to be deleted\n", 858 "Timeout: sess %p about to be deleted\n",
858 sess); 859 sess);
859 ha->tgt.tgt_ops->shutdown_sess(sess); 860 if (sess->se_sess) {
860 ha->tgt.tgt_ops->put_sess(sess); 861 ha->tgt.tgt_ops->shutdown_sess(sess);
862 ha->tgt.tgt_ops->put_sess(sess);
863 } else {
864 qlt_unreg_sess(sess);
865 }
861 } else { 866 } else {
862 schedule_delayed_work(&tgt->sess_del_work, 867 schedule_delayed_work(&tgt->sess_del_work,
863 sess->expires - elapsed); 868 sess->expires - elapsed);
@@ -879,7 +884,6 @@ static struct qla_tgt_sess *qlt_create_sess(
879 struct qla_hw_data *ha = vha->hw; 884 struct qla_hw_data *ha = vha->hw;
880 struct qla_tgt_sess *sess; 885 struct qla_tgt_sess *sess;
881 unsigned long flags; 886 unsigned long flags;
882 unsigned char be_sid[3];
883 887
884 /* Check to avoid double sessions */ 888 /* Check to avoid double sessions */
885 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 889 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
@@ -905,6 +909,14 @@ static struct qla_tgt_sess *qlt_create_sess(
905 if (sess->deleted) 909 if (sess->deleted)
906 qlt_undelete_sess(sess); 910 qlt_undelete_sess(sess);
907 911
912 if (!sess->se_sess) {
913 if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
914 &sess->port_name[0], sess) < 0) {
915 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
916 return NULL;
917 }
918 }
919
908 kref_get(&sess->se_sess->sess_kref); 920 kref_get(&sess->se_sess->sess_kref);
909 ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id, 921 ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
910 (fcport->flags & FCF_CONF_COMP_SUPPORTED)); 922 (fcport->flags & FCF_CONF_COMP_SUPPORTED));
@@ -948,26 +960,6 @@ static struct qla_tgt_sess *qlt_create_sess(
948 "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n", 960 "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
949 sess, vha->vha_tgt.qla_tgt); 961 sess, vha->vha_tgt.qla_tgt);
950 962
951 be_sid[0] = sess->s_id.b.domain;
952 be_sid[1] = sess->s_id.b.area;
953 be_sid[2] = sess->s_id.b.al_pa;
954 /*
955 * Determine if this fc_port->port_name is allowed to access
956 * target mode using explict NodeACLs+MappedLUNs, or using
957 * TPG demo mode. If this is successful a target mode FC nexus
958 * is created.
959 */
960 if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
961 &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) {
962 kfree(sess);
963 return NULL;
964 }
965 /*
966 * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
967 * access across ->tgt.sess_lock reaquire.
968 */
969 kref_get(&sess->se_sess->sess_kref);
970
971 sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED); 963 sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED);
972 BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name)); 964 BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
973 memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name)); 965 memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));
@@ -985,6 +977,23 @@ static struct qla_tgt_sess *qlt_create_sess(
985 fcport->loop_id, sess->s_id.b.domain, sess->s_id.b.area, 977 fcport->loop_id, sess->s_id.b.domain, sess->s_id.b.area,
986 sess->s_id.b.al_pa, sess->conf_compl_supported ? "" : "not "); 978 sess->s_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");
987 979
980 /*
981 * Determine if this fc_port->port_name is allowed to access
982 * target mode using explict NodeACLs+MappedLUNs, or using
983 * TPG demo mode. If this is successful a target mode FC nexus
984 * is created.
985 */
986 if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
987 &fcport->port_name[0], sess) < 0) {
988 return NULL;
989 } else {
990 /*
991 * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
992 * access across ->tgt.sess_lock reaquire.
993 */
994 kref_get(&sess->se_sess->sess_kref);
995 }
996
988 return sess; 997 return sess;
989} 998}
990 999
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 22a6a767fe07..d857feeb6514 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -731,7 +731,7 @@ struct qla_tgt_func_tmpl {
731 void (*free_session)(struct qla_tgt_sess *); 731 void (*free_session)(struct qla_tgt_sess *);
732 732
733 int (*check_initiator_node_acl)(struct scsi_qla_host *, unsigned char *, 733 int (*check_initiator_node_acl)(struct scsi_qla_host *, unsigned char *,
734 void *, uint8_t *, uint16_t); 734 struct qla_tgt_sess *);
735 void (*update_sess)(struct qla_tgt_sess *, port_id_t, uint16_t, bool); 735 void (*update_sess)(struct qla_tgt_sess *, port_id_t, uint16_t, bool);
736 struct qla_tgt_sess *(*find_sess_by_loop_id)(struct scsi_qla_host *, 736 struct qla_tgt_sess *(*find_sess_by_loop_id)(struct scsi_qla_host *,
737 const uint16_t); 737 const uint16_t);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 1808a01cfb7e..c1461d225f08 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -1406,6 +1406,39 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
1406 transport_deregister_session(sess->se_sess); 1406 transport_deregister_session(sess->se_sess);
1407} 1407}
1408 1408
1409static int tcm_qla2xxx_session_cb(struct se_portal_group *se_tpg,
1410 struct se_session *se_sess, void *p)
1411{
1412 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
1413 struct tcm_qla2xxx_tpg, se_tpg);
1414 struct tcm_qla2xxx_lport *lport = tpg->lport;
1415 struct qla_hw_data *ha = lport->qla_vha->hw;
1416 struct se_node_acl *se_nacl = se_sess->se_node_acl;
1417 struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
1418 struct tcm_qla2xxx_nacl, se_node_acl);
1419 struct qla_tgt_sess *qlat_sess = p;
1420 uint16_t loop_id = qlat_sess->loop_id;
1421 unsigned long flags;
1422 unsigned char be_sid[3];
1423
1424 be_sid[0] = qlat_sess->s_id.b.domain;
1425 be_sid[1] = qlat_sess->s_id.b.area;
1426 be_sid[2] = qlat_sess->s_id.b.al_pa;
1427
1428 /*
1429 * And now setup se_nacl and session pointers into HW lport internal
1430 * mappings for fabric S_ID and LOOP_ID.
1431 */
1432 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1433 tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl,
1434 se_sess, qlat_sess, be_sid);
1435 tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl,
1436 se_sess, qlat_sess, loop_id);
1437 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1438
1439 return 0;
1440}
1441
1409/* 1442/*
1410 * Called via qlt_create_sess():ha->qla2x_tmpl->check_initiator_node_acl() 1443 * Called via qlt_create_sess():ha->qla2x_tmpl->check_initiator_node_acl()
1411 * to locate struct se_node_acl 1444 * to locate struct se_node_acl
@@ -1413,20 +1446,13 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
1413static int tcm_qla2xxx_check_initiator_node_acl( 1446static int tcm_qla2xxx_check_initiator_node_acl(
1414 scsi_qla_host_t *vha, 1447 scsi_qla_host_t *vha,
1415 unsigned char *fc_wwpn, 1448 unsigned char *fc_wwpn,
1416 void *qla_tgt_sess, 1449 struct qla_tgt_sess *qlat_sess)
1417 uint8_t *s_id,
1418 uint16_t loop_id)
1419{ 1450{
1420 struct qla_hw_data *ha = vha->hw; 1451 struct qla_hw_data *ha = vha->hw;
1421 struct tcm_qla2xxx_lport *lport; 1452 struct tcm_qla2xxx_lport *lport;
1422 struct tcm_qla2xxx_tpg *tpg; 1453 struct tcm_qla2xxx_tpg *tpg;
1423 struct tcm_qla2xxx_nacl *nacl;
1424 struct se_portal_group *se_tpg;
1425 struct se_node_acl *se_nacl;
1426 struct se_session *se_sess; 1454 struct se_session *se_sess;
1427 struct qla_tgt_sess *sess = qla_tgt_sess;
1428 unsigned char port_name[36]; 1455 unsigned char port_name[36];
1429 unsigned long flags;
1430 int num_tags = (ha->cur_fw_xcb_count) ? ha->cur_fw_xcb_count : 1456 int num_tags = (ha->cur_fw_xcb_count) ? ha->cur_fw_xcb_count :
1431 TCM_QLA2XXX_DEFAULT_TAGS; 1457 TCM_QLA2XXX_DEFAULT_TAGS;
1432 1458
@@ -1444,15 +1470,6 @@ static int tcm_qla2xxx_check_initiator_node_acl(
1444 pr_err("Unable to lcoate struct tcm_qla2xxx_lport->tpg_1\n"); 1470 pr_err("Unable to lcoate struct tcm_qla2xxx_lport->tpg_1\n");
1445 return -EINVAL; 1471 return -EINVAL;
1446 } 1472 }
1447 se_tpg = &tpg->se_tpg;
1448
1449 se_sess = transport_init_session_tags(num_tags,
1450 sizeof(struct qla_tgt_cmd),
1451 TARGET_PROT_ALL);
1452 if (IS_ERR(se_sess)) {
1453 pr_err("Unable to initialize struct se_session\n");
1454 return PTR_ERR(se_sess);
1455 }
1456 /* 1473 /*
1457 * Format the FCP Initiator port_name into colon seperated values to 1474 * Format the FCP Initiator port_name into colon seperated values to
1458 * match the format by tcm_qla2xxx explict ConfigFS NodeACLs. 1475 * match the format by tcm_qla2xxx explict ConfigFS NodeACLs.
@@ -1463,28 +1480,12 @@ static int tcm_qla2xxx_check_initiator_node_acl(
1463 * Locate our struct se_node_acl either from an explict NodeACL created 1480 * Locate our struct se_node_acl either from an explict NodeACL created
1464 * via ConfigFS, or via running in TPG demo mode. 1481 * via ConfigFS, or via running in TPG demo mode.
1465 */ 1482 */
1466 se_sess->se_node_acl = core_tpg_check_initiator_node_acl(se_tpg, 1483 se_sess = target_alloc_session(&tpg->se_tpg, num_tags,
1467 port_name); 1484 sizeof(struct qla_tgt_cmd),
1468 if (!se_sess->se_node_acl) { 1485 TARGET_PROT_ALL, port_name,
1469 transport_free_session(se_sess); 1486 qlat_sess, tcm_qla2xxx_session_cb);
1470 return -EINVAL; 1487 if (IS_ERR(se_sess))
1471 } 1488 return PTR_ERR(se_sess);
1472 se_nacl = se_sess->se_node_acl;
1473 nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
1474 /*
1475 * And now setup the new se_nacl and session pointers into our HW lport
1476 * mappings for fabric S_ID and LOOP_ID.
1477 */
1478 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1479 tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess,
1480 qla_tgt_sess, s_id);
1481 tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl, se_sess,
1482 qla_tgt_sess, loop_id);
1483 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1484 /*
1485 * Finally register the new FC Nexus with TCM
1486 */
1487 transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess);
1488 1489
1489 return 0; 1490 return 0;
1490} 1491}
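
Across the srpt, qla2xxx and (below) tcm_loop and sbp hunks, the same open-coded sequence — transport_init_session(), core_tpg_check_initiator_node_acl(), transport_register_session() plus error unwinding — collapses into one target_alloc_session() call; the optional callback runs after the ACL lookup but before registration, which is where tcm_qla2xxx now installs its S_ID/LOOP_ID mappings. A hedged sketch of a caller, with the my_fabric_* names invented for illustration:

#include <linux/err.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

struct my_fabric_sess {
        struct se_session *se_sess;
        /* ... fabric-private state ... */
};

static int my_alloc_sess_cb(struct se_portal_group *se_tpg,
                            struct se_session *se_sess, void *p)
{
        struct my_fabric_sess *fsess = p;

        /* Runs with the node ACL already resolved; wire up private state. */
        fsess->se_sess = se_sess;
        return 0;               /* non-zero unwinds the allocation */
}

static int my_make_nexus(struct se_portal_group *se_tpg,
                         struct my_fabric_sess *fsess,
                         const char *initiator_name)
{
        struct se_session *se_sess;

        se_sess = target_alloc_session(se_tpg,
                                       128,                    /* tag pool depth */
                                       sizeof(struct se_cmd),  /* usually the driver's
                                                                * own per-cmd struct */
                                       TARGET_PROT_NORMAL,
                                       initiator_name, fsess,
                                       my_alloc_sess_cb);
        if (IS_ERR(se_sess))
                return PTR_ERR(se_sess);

        return 0;
}

The tag count and size may both be zero when the fabric does not use the session tag pool, which is how the tcm_loop conversion below calls it.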
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index d41a5c300e31..0ad5ac541a7f 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -802,58 +802,48 @@ static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
802 802
803/* Start items for tcm_loop_nexus_cit */ 803/* Start items for tcm_loop_nexus_cit */
804 804
805static int tcm_loop_alloc_sess_cb(struct se_portal_group *se_tpg,
806 struct se_session *se_sess, void *p)
807{
808 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
809 struct tcm_loop_tpg, tl_se_tpg);
810
811 tl_tpg->tl_nexus = p;
812 return 0;
813}
814
805static int tcm_loop_make_nexus( 815static int tcm_loop_make_nexus(
806 struct tcm_loop_tpg *tl_tpg, 816 struct tcm_loop_tpg *tl_tpg,
807 const char *name) 817 const char *name)
808{ 818{
809 struct se_portal_group *se_tpg;
810 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; 819 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
811 struct tcm_loop_nexus *tl_nexus; 820 struct tcm_loop_nexus *tl_nexus;
812 int ret = -ENOMEM; 821 int ret;
813 822
814 if (tl_tpg->tl_nexus) { 823 if (tl_tpg->tl_nexus) {
815 pr_debug("tl_tpg->tl_nexus already exists\n"); 824 pr_debug("tl_tpg->tl_nexus already exists\n");
816 return -EEXIST; 825 return -EEXIST;
817 } 826 }
818 se_tpg = &tl_tpg->tl_se_tpg;
819 827
820 tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL); 828 tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
821 if (!tl_nexus) { 829 if (!tl_nexus) {
822 pr_err("Unable to allocate struct tcm_loop_nexus\n"); 830 pr_err("Unable to allocate struct tcm_loop_nexus\n");
823 return -ENOMEM; 831 return -ENOMEM;
824 } 832 }
825 /* 833
826 * Initialize the struct se_session pointer 834 tl_nexus->se_sess = target_alloc_session(&tl_tpg->tl_se_tpg, 0, 0,
827 */ 835 TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
828 tl_nexus->se_sess = transport_init_session( 836 name, tl_nexus, tcm_loop_alloc_sess_cb);
829 TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
830 if (IS_ERR(tl_nexus->se_sess)) { 837 if (IS_ERR(tl_nexus->se_sess)) {
831 ret = PTR_ERR(tl_nexus->se_sess); 838 ret = PTR_ERR(tl_nexus->se_sess);
832 goto out; 839 kfree(tl_nexus);
833 } 840 return ret;
834 /*
835 * Since we are running in 'demo mode' this call with generate a
836 * struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI
837 * Initiator port name of the passed configfs group 'name'.
838 */
839 tl_nexus->se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
840 se_tpg, (unsigned char *)name);
841 if (!tl_nexus->se_sess->se_node_acl) {
842 transport_free_session(tl_nexus->se_sess);
843 goto out;
844 } 841 }
845 /* Now, register the I_T Nexus as active. */ 842
846 transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
847 tl_nexus->se_sess, tl_nexus);
848 tl_tpg->tl_nexus = tl_nexus;
849 pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated" 843 pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
850 " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba), 844 " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
851 name); 845 name);
852 return 0; 846 return 0;
853
854out:
855 kfree(tl_nexus);
856 return ret;
857} 847}
858 848
859static int tcm_loop_drop_nexus( 849static int tcm_loop_drop_nexus(
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 3072f1aca8ec..c57e7884973d 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -196,45 +196,30 @@ static struct sbp_session *sbp_session_create(
196 struct sbp_session *sess; 196 struct sbp_session *sess;
197 int ret; 197 int ret;
198 char guid_str[17]; 198 char guid_str[17];
199 struct se_node_acl *se_nacl; 199
200 snprintf(guid_str, sizeof(guid_str), "%016llx", guid);
200 201
201 sess = kmalloc(sizeof(*sess), GFP_KERNEL); 202 sess = kmalloc(sizeof(*sess), GFP_KERNEL);
202 if (!sess) { 203 if (!sess) {
203 pr_err("failed to allocate session descriptor\n"); 204 pr_err("failed to allocate session descriptor\n");
204 return ERR_PTR(-ENOMEM); 205 return ERR_PTR(-ENOMEM);
205 } 206 }
207 spin_lock_init(&sess->lock);
208 INIT_LIST_HEAD(&sess->login_list);
209 INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
210 sess->guid = guid;
206 211
207 sess->se_sess = transport_init_session(TARGET_PROT_NORMAL); 212 sess->se_sess = target_alloc_session(&tpg->se_tpg, 128,
213 sizeof(struct sbp_target_request),
214 TARGET_PROT_NORMAL, guid_str,
215 sess, NULL);
208 if (IS_ERR(sess->se_sess)) { 216 if (IS_ERR(sess->se_sess)) {
209 pr_err("failed to init se_session\n"); 217 pr_err("failed to init se_session\n");
210
211 ret = PTR_ERR(sess->se_sess); 218 ret = PTR_ERR(sess->se_sess);
212 kfree(sess); 219 kfree(sess);
213 return ERR_PTR(ret); 220 return ERR_PTR(ret);
214 } 221 }
215 222
216 snprintf(guid_str, sizeof(guid_str), "%016llx", guid);
217
218 se_nacl = core_tpg_check_initiator_node_acl(&tpg->se_tpg, guid_str);
219 if (!se_nacl) {
220 pr_warn("Node ACL not found for %s\n", guid_str);
221
222 transport_free_session(sess->se_sess);
223 kfree(sess);
224
225 return ERR_PTR(-EPERM);
226 }
227
228 sess->se_sess->se_node_acl = se_nacl;
229
230 spin_lock_init(&sess->lock);
231 INIT_LIST_HEAD(&sess->login_list);
232 INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
233
234 sess->guid = guid;
235
236 transport_register_session(&tpg->se_tpg, se_nacl, sess->se_sess, sess);
237
238 return sess; 223 return sess;
239} 224}
240 225
@@ -908,7 +893,6 @@ static void tgt_agent_process_work(struct work_struct *work)
908 STATUS_BLOCK_SBP_STATUS( 893 STATUS_BLOCK_SBP_STATUS(
909 SBP_STATUS_REQ_TYPE_NOTSUPP)); 894 SBP_STATUS_REQ_TYPE_NOTSUPP));
910 sbp_send_status(req); 895 sbp_send_status(req);
911 sbp_free_request(req);
912 return; 896 return;
913 case 3: /* Dummy ORB */ 897 case 3: /* Dummy ORB */
914 req->status.status |= cpu_to_be32( 898 req->status.status |= cpu_to_be32(
@@ -919,7 +903,6 @@ static void tgt_agent_process_work(struct work_struct *work)
919 STATUS_BLOCK_SBP_STATUS( 903 STATUS_BLOCK_SBP_STATUS(
920 SBP_STATUS_DUMMY_ORB_COMPLETE)); 904 SBP_STATUS_DUMMY_ORB_COMPLETE));
921 sbp_send_status(req); 905 sbp_send_status(req);
922 sbp_free_request(req);
923 return; 906 return;
924 default: 907 default:
925 BUG(); 908 BUG();
@@ -938,6 +921,25 @@ static inline bool tgt_agent_check_active(struct sbp_target_agent *agent)
938 return active; 921 return active;
939} 922}
940 923
924static struct sbp_target_request *sbp_mgt_get_req(struct sbp_session *sess,
925 struct fw_card *card, u64 next_orb)
926{
927 struct se_session *se_sess = sess->se_sess;
928 struct sbp_target_request *req;
929 int tag;
930
931 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
932 if (tag < 0)
933 return ERR_PTR(-ENOMEM);
934
935 req = &((struct sbp_target_request *)se_sess->sess_cmd_map)[tag];
936 memset(req, 0, sizeof(*req));
937 req->se_cmd.map_tag = tag;
938 req->se_cmd.tag = next_orb;
939
940 return req;
941}
942
941static void tgt_agent_fetch_work(struct work_struct *work) 943static void tgt_agent_fetch_work(struct work_struct *work)
942{ 944{
943 struct sbp_target_agent *agent = 945 struct sbp_target_agent *agent =
@@ -949,8 +951,8 @@ static void tgt_agent_fetch_work(struct work_struct *work)
949 u64 next_orb = agent->orb_pointer; 951 u64 next_orb = agent->orb_pointer;
950 952
951 while (next_orb && tgt_agent_check_active(agent)) { 953 while (next_orb && tgt_agent_check_active(agent)) {
952 req = kzalloc(sizeof(*req), GFP_KERNEL); 954 req = sbp_mgt_get_req(sess, sess->card, next_orb);
953 if (!req) { 955 if (IS_ERR(req)) {
954 spin_lock_bh(&agent->lock); 956 spin_lock_bh(&agent->lock);
955 agent->state = AGENT_STATE_DEAD; 957 agent->state = AGENT_STATE_DEAD;
956 spin_unlock_bh(&agent->lock); 958 spin_unlock_bh(&agent->lock);
@@ -985,7 +987,6 @@ static void tgt_agent_fetch_work(struct work_struct *work)
985 spin_unlock_bh(&agent->lock); 987 spin_unlock_bh(&agent->lock);
986 988
987 sbp_send_status(req); 989 sbp_send_status(req);
988 sbp_free_request(req);
989 return; 990 return;
990 } 991 }
991 992
@@ -1232,7 +1233,7 @@ static void sbp_handle_command(struct sbp_target_request *req)
1232 req->se_cmd.tag = req->orb_pointer; 1233 req->se_cmd.tag = req->orb_pointer;
1233 if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf, 1234 if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
1234 req->sense_buf, unpacked_lun, data_length, 1235 req->sense_buf, unpacked_lun, data_length,
1235 TCM_SIMPLE_TAG, data_dir, 0)) 1236 TCM_SIMPLE_TAG, data_dir, TARGET_SCF_ACK_KREF))
1236 goto err; 1237 goto err;
1237 1238
1238 return; 1239 return;
@@ -1244,7 +1245,6 @@ err:
1244 STATUS_BLOCK_LEN(1) | 1245 STATUS_BLOCK_LEN(1) |
1245 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR)); 1246 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
1246 sbp_send_status(req); 1247 sbp_send_status(req);
1247 sbp_free_request(req);
1248} 1248}
1249 1249
1250/* 1250/*
@@ -1343,22 +1343,29 @@ static int sbp_rw_data(struct sbp_target_request *req)
1343 1343
1344static int sbp_send_status(struct sbp_target_request *req) 1344static int sbp_send_status(struct sbp_target_request *req)
1345{ 1345{
1346 int ret, length; 1346 int rc, ret = 0, length;
1347 struct sbp_login_descriptor *login = req->login; 1347 struct sbp_login_descriptor *login = req->login;
1348 1348
1349 length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4; 1349 length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;
1350 1350
1351 ret = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST, 1351 rc = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
1352 login->status_fifo_addr, &req->status, length); 1352 login->status_fifo_addr, &req->status, length);
1353 if (ret != RCODE_COMPLETE) { 1353 if (rc != RCODE_COMPLETE) {
1354 pr_debug("sbp_send_status: write failed: 0x%x\n", ret); 1354 pr_debug("sbp_send_status: write failed: 0x%x\n", rc);
1355 return -EIO; 1355 ret = -EIO;
1356 goto put_ref;
1356 } 1357 }
1357 1358
1358 pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n", 1359 pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n",
1359 req->orb_pointer); 1360 req->orb_pointer);
1360 1361 /*
1361 return 0; 1362 * Drop the extra ACK_KREF reference taken by target_submit_cmd()
1363 * ahead of sbp_check_stop_free() -> transport_generic_free_cmd()
1364 * final se_cmd->cmd_kref put.
1365 */
1366put_ref:
1367 target_put_sess_cmd(&req->se_cmd);
1368 return ret;
1362} 1369}
1363 1370
1364static void sbp_sense_mangle(struct sbp_target_request *req) 1371static void sbp_sense_mangle(struct sbp_target_request *req)
@@ -1447,9 +1454,13 @@ static int sbp_send_sense(struct sbp_target_request *req)
1447 1454
1448static void sbp_free_request(struct sbp_target_request *req) 1455static void sbp_free_request(struct sbp_target_request *req)
1449{ 1456{
1457 struct se_cmd *se_cmd = &req->se_cmd;
1458 struct se_session *se_sess = se_cmd->se_sess;
1459
1450 kfree(req->pg_tbl); 1460 kfree(req->pg_tbl);
1451 kfree(req->cmd_buf); 1461 kfree(req->cmd_buf);
1452 kfree(req); 1462
1463 percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
1453} 1464}
1454 1465
1455static void sbp_mgt_agent_process(struct work_struct *work) 1466static void sbp_mgt_agent_process(struct work_struct *work)
@@ -1609,7 +1620,6 @@ static void sbp_mgt_agent_rw(struct fw_card *card,
1609 rcode = RCODE_CONFLICT_ERROR; 1620 rcode = RCODE_CONFLICT_ERROR;
1610 goto out; 1621 goto out;
1611 } 1622 }
1612
1613 req = kzalloc(sizeof(*req), GFP_ATOMIC); 1623 req = kzalloc(sizeof(*req), GFP_ATOMIC);
1614 if (!req) { 1624 if (!req) {
1615 rcode = RCODE_CONFLICT_ERROR; 1625 rcode = RCODE_CONFLICT_ERROR;
@@ -1815,8 +1825,7 @@ static int sbp_check_stop_free(struct se_cmd *se_cmd)
1815 struct sbp_target_request *req = container_of(se_cmd, 1825 struct sbp_target_request *req = container_of(se_cmd,
1816 struct sbp_target_request, se_cmd); 1826 struct sbp_target_request, se_cmd);
1817 1827
1818 transport_generic_free_cmd(&req->se_cmd, 0); 1828 return transport_generic_free_cmd(&req->se_cmd, 0);
1819 return 1;
1820} 1829}
1821 1830
1822static int sbp_count_se_tpg_luns(struct se_portal_group *tpg) 1831static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)
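Note on the sbp-target hunks above: the conversion follows the TARGET_SCF_ACK_KREF convention. target_submit_cmd() now takes an extra reference on se_cmd->cmd_kref, sbp_send_status() drops that reference with target_put_sess_cmd() once the status block has been written, and the final release happens through sbp_check_stop_free() -> transport_generic_free_cmd() instead of an explicit sbp_free_request() call on the send path. A minimal userspace sketch of that two-reference lifetime (all names below are illustrative, not kernel symbols):

#include <stdio.h>
#include <stdlib.h>

/* Toy model of a reference-counted command: one reference held by the
 * core and one extra ACK_KREF reference held by the fabric, dropped in
 * either order; the last put frees the command. */
struct toy_cmd {
	int refs;
	char *payload;
};

static struct toy_cmd *cmd_submit(void)
{
	struct toy_cmd *cmd = malloc(sizeof(*cmd));

	cmd->payload = malloc(64);
	cmd->refs = 2;		/* core reference + fabric ACK_KREF reference */
	return cmd;
}

static void cmd_put(struct toy_cmd *cmd, const char *who)
{
	if (--cmd->refs == 0) {
		printf("%s dropped the last reference, freeing\n", who);
		free(cmd->payload);
		free(cmd);
	} else {
		printf("%s dropped a reference, %d left\n", who, cmd->refs);
	}
}

int main(void)
{
	struct toy_cmd *cmd = cmd_submit();

	cmd_put(cmd, "send_status");	/* fabric drops its ACK_KREF ref */
	cmd_put(cmd, "check_stop_free");/* core completion drops the last ref */
	return 0;
}

The practical effect is that a late status write can never race with command teardown: the memory stays alive until both the fabric and the core have dropped their references.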
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index da457e25717a..a4046ca6e60d 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -86,7 +86,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
86 se_cmd->lun_ref_active = true; 86 se_cmd->lun_ref_active = true;
87 87
88 if ((se_cmd->data_direction == DMA_TO_DEVICE) && 88 if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
89 (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) { 89 deve->lun_access_ro) {
90 pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" 90 pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
91 " Access for 0x%08llx\n", 91 " Access for 0x%08llx\n",
92 se_cmd->se_tfo->get_fabric_name(), 92 se_cmd->se_tfo->get_fabric_name(),
@@ -199,7 +199,7 @@ bool target_lun_is_rdonly(struct se_cmd *cmd)
199 199
200 rcu_read_lock(); 200 rcu_read_lock();
201 deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun); 201 deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
202 ret = (deve && deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY); 202 ret = deve && deve->lun_access_ro;
203 rcu_read_unlock(); 203 rcu_read_unlock();
204 204
205 return ret; 205 return ret;
@@ -258,22 +258,15 @@ void core_free_device_list_for_node(
258 258
259void core_update_device_list_access( 259void core_update_device_list_access(
260 u64 mapped_lun, 260 u64 mapped_lun,
261 u32 lun_access, 261 bool lun_access_ro,
262 struct se_node_acl *nacl) 262 struct se_node_acl *nacl)
263{ 263{
264 struct se_dev_entry *deve; 264 struct se_dev_entry *deve;
265 265
266 mutex_lock(&nacl->lun_entry_mutex); 266 mutex_lock(&nacl->lun_entry_mutex);
267 deve = target_nacl_find_deve(nacl, mapped_lun); 267 deve = target_nacl_find_deve(nacl, mapped_lun);
268 if (deve) { 268 if (deve)
269 if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) { 269 deve->lun_access_ro = lun_access_ro;
270 deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
271 deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
272 } else {
273 deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
274 deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
275 }
276 }
277 mutex_unlock(&nacl->lun_entry_mutex); 270 mutex_unlock(&nacl->lun_entry_mutex);
278} 271}
279 272
@@ -319,7 +312,7 @@ int core_enable_device_list_for_node(
319 struct se_lun *lun, 312 struct se_lun *lun,
320 struct se_lun_acl *lun_acl, 313 struct se_lun_acl *lun_acl,
321 u64 mapped_lun, 314 u64 mapped_lun,
322 u32 lun_access, 315 bool lun_access_ro,
323 struct se_node_acl *nacl, 316 struct se_node_acl *nacl,
324 struct se_portal_group *tpg) 317 struct se_portal_group *tpg)
325{ 318{
@@ -340,11 +333,7 @@ int core_enable_device_list_for_node(
340 kref_init(&new->pr_kref); 333 kref_init(&new->pr_kref);
341 init_completion(&new->pr_comp); 334 init_completion(&new->pr_comp);
342 335
343 if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) 336 new->lun_access_ro = lun_access_ro;
344 new->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
345 else
346 new->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
347
348 new->creation_time = get_jiffies_64(); 337 new->creation_time = get_jiffies_64();
349 new->attach_count++; 338 new->attach_count++;
350 339
@@ -433,7 +422,7 @@ void core_disable_device_list_for_node(
433 422
434 hlist_del_rcu(&orig->link); 423 hlist_del_rcu(&orig->link);
435 clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags); 424 clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
436 orig->lun_flags = 0; 425 orig->lun_access_ro = false;
437 orig->creation_time = 0; 426 orig->creation_time = 0;
438 orig->attach_count--; 427 orig->attach_count--;
439 /* 428 /*
@@ -558,8 +547,7 @@ int core_dev_add_lun(
558{ 547{
559 int rc; 548 int rc;
560 549
561 rc = core_tpg_add_lun(tpg, lun, 550 rc = core_tpg_add_lun(tpg, lun, false, dev);
562 TRANSPORT_LUNFLAGS_READ_WRITE, dev);
563 if (rc < 0) 551 if (rc < 0)
564 return rc; 552 return rc;
565 553
@@ -635,7 +623,7 @@ int core_dev_add_initiator_node_lun_acl(
635 struct se_portal_group *tpg, 623 struct se_portal_group *tpg,
636 struct se_lun_acl *lacl, 624 struct se_lun_acl *lacl,
637 struct se_lun *lun, 625 struct se_lun *lun,
638 u32 lun_access) 626 bool lun_access_ro)
639{ 627{
640 struct se_node_acl *nacl = lacl->se_lun_nacl; 628 struct se_node_acl *nacl = lacl->se_lun_nacl;
641 /* 629 /*
@@ -647,20 +635,19 @@ int core_dev_add_initiator_node_lun_acl(
647 if (!nacl) 635 if (!nacl)
648 return -EINVAL; 636 return -EINVAL;
649 637
650 if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) && 638 if (lun->lun_access_ro)
651 (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE)) 639 lun_access_ro = true;
652 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
653 640
654 lacl->se_lun = lun; 641 lacl->se_lun = lun;
655 642
656 if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun, 643 if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
657 lun_access, nacl, tpg) < 0) 644 lun_access_ro, nacl, tpg) < 0)
658 return -EINVAL; 645 return -EINVAL;
659 646
660 pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for " 647 pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
661 " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(), 648 " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
662 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun, 649 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
663 (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO", 650 lun_access_ro ? "RO" : "RW",
664 nacl->initiatorname); 651 nacl->initiatorname);
665 /* 652 /*
666 * Check to see if there are any existing persistent reservation APTPL 653 * Check to see if there are any existing persistent reservation APTPL
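Note on the target_core_device.c hunks: the TRANSPORT_LUNFLAGS_READ_ONLY / READ_WRITE bitmask is collapsed into a single lun_access_ro bool, and core_dev_add_initiator_node_lun_acl() forces a mapped LUN read-only whenever the backing LUN itself is read-only. A tiny sketch of that effective-access rule (function and variable names here are illustrative only):

#include <stdbool.h>
#include <stdio.h>

/* Simplified model of the check in core_dev_add_initiator_node_lun_acl():
 * a LUN-level read-only setting always wins over a per-ACL RW request. */
static bool effective_access_ro(bool lun_ro, bool acl_requested_ro)
{
	return lun_ro ? true : acl_requested_ro;
}

int main(void)
{
	printf("lun RW, acl RW -> %s\n", effective_access_ro(false, false) ? "RO" : "RW");
	printf("lun RO, acl RW -> %s\n", effective_access_ro(true, false) ? "RO" : "RW");
	printf("lun RW, acl RO -> %s\n", effective_access_ro(false, true) ? "RO" : "RW");
	return 0;
}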
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 8caef31da415..1bd5c72b663e 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -78,7 +78,7 @@ static int target_fabric_mappedlun_link(
78 struct se_lun_acl, se_lun_group); 78 struct se_lun_acl, se_lun_group);
79 struct se_portal_group *se_tpg; 79 struct se_portal_group *se_tpg;
80 struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s; 80 struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s;
81 int lun_access; 81 bool lun_access_ro;
82 82
83 if (lun->lun_link_magic != SE_LUN_LINK_MAGIC) { 83 if (lun->lun_link_magic != SE_LUN_LINK_MAGIC) {
84 pr_err("Bad lun->lun_link_magic, not a valid lun_ci pointer:" 84 pr_err("Bad lun->lun_link_magic, not a valid lun_ci pointer:"
@@ -115,19 +115,18 @@ static int target_fabric_mappedlun_link(
115 } 115 }
116 /* 116 /*
117 * If this struct se_node_acl was dynamically generated with 117 * If this struct se_node_acl was dynamically generated with
118 * tpg_1/attrib/generate_node_acls=1, use the existing deve->lun_flags, 118 * tpg_1/attrib/generate_node_acls=1, use the existing
119 * which be will write protected (READ-ONLY) when 119 * deve->lun_access_ro value, which will be true when
120 * tpg_1/attrib/demo_mode_write_protect=1 120 * tpg_1/attrib/demo_mode_write_protect=1
121 */ 121 */
122 rcu_read_lock(); 122 rcu_read_lock();
123 deve = target_nacl_find_deve(lacl->se_lun_nacl, lacl->mapped_lun); 123 deve = target_nacl_find_deve(lacl->se_lun_nacl, lacl->mapped_lun);
124 if (deve) 124 if (deve)
125 lun_access = deve->lun_flags; 125 lun_access_ro = deve->lun_access_ro;
126 else 126 else
127 lun_access = 127 lun_access_ro =
128 (se_tpg->se_tpg_tfo->tpg_check_prod_mode_write_protect( 128 (se_tpg->se_tpg_tfo->tpg_check_prod_mode_write_protect(
129 se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY : 129 se_tpg)) ? true : false;
130 TRANSPORT_LUNFLAGS_READ_WRITE;
131 rcu_read_unlock(); 130 rcu_read_unlock();
132 /* 131 /*
133 * Determine the actual mapped LUN value user wants.. 132 * Determine the actual mapped LUN value user wants..
@@ -135,7 +134,7 @@ static int target_fabric_mappedlun_link(
135 * This value is what the SCSI Initiator actually sees the 134 * This value is what the SCSI Initiator actually sees the
136 * $FABRIC/$WWPN/$TPGT/lun/lun_* as on their SCSI Initiator Ports. 135 * $FABRIC/$WWPN/$TPGT/lun/lun_* as on their SCSI Initiator Ports.
137 */ 136 */
138 return core_dev_add_initiator_node_lun_acl(se_tpg, lacl, lun, lun_access); 137 return core_dev_add_initiator_node_lun_acl(se_tpg, lacl, lun, lun_access_ro);
139} 138}
140 139
141static int target_fabric_mappedlun_unlink( 140static int target_fabric_mappedlun_unlink(
@@ -167,8 +166,7 @@ static ssize_t target_fabric_mappedlun_write_protect_show(
167 rcu_read_lock(); 166 rcu_read_lock();
168 deve = target_nacl_find_deve(se_nacl, lacl->mapped_lun); 167 deve = target_nacl_find_deve(se_nacl, lacl->mapped_lun);
169 if (deve) { 168 if (deve) {
170 len = sprintf(page, "%d\n", 169 len = sprintf(page, "%d\n", deve->lun_access_ro);
171 (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) ? 1 : 0);
172 } 170 }
173 rcu_read_unlock(); 171 rcu_read_unlock();
174 172
@@ -181,25 +179,23 @@ static ssize_t target_fabric_mappedlun_write_protect_store(
181 struct se_lun_acl *lacl = item_to_lun_acl(item); 179 struct se_lun_acl *lacl = item_to_lun_acl(item);
182 struct se_node_acl *se_nacl = lacl->se_lun_nacl; 180 struct se_node_acl *se_nacl = lacl->se_lun_nacl;
183 struct se_portal_group *se_tpg = se_nacl->se_tpg; 181 struct se_portal_group *se_tpg = se_nacl->se_tpg;
184 unsigned long op; 182 unsigned long wp;
185 int ret; 183 int ret;
186 184
187 ret = kstrtoul(page, 0, &op); 185 ret = kstrtoul(page, 0, &wp);
188 if (ret) 186 if (ret)
189 return ret; 187 return ret;
190 188
191 if ((op != 1) && (op != 0)) 189 if ((wp != 1) && (wp != 0))
192 return -EINVAL; 190 return -EINVAL;
193 191
194 core_update_device_list_access(lacl->mapped_lun, (op) ? 192 /* wp=1 means lun_access_ro=true */
195 TRANSPORT_LUNFLAGS_READ_ONLY : 193 core_update_device_list_access(lacl->mapped_lun, wp, lacl->se_lun_nacl);
196 TRANSPORT_LUNFLAGS_READ_WRITE,
197 lacl->se_lun_nacl);
198 194
199 pr_debug("%s_ConfigFS: Changed Initiator ACL: %s" 195 pr_debug("%s_ConfigFS: Changed Initiator ACL: %s"
200 " Mapped LUN: %llu Write Protect bit to %s\n", 196 " Mapped LUN: %llu Write Protect bit to %s\n",
201 se_tpg->se_tpg_tfo->get_fabric_name(), 197 se_tpg->se_tpg_tfo->get_fabric_name(),
202 se_nacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF"); 198 se_nacl->initiatorname, lacl->mapped_lun, (wp) ? "ON" : "OFF");
203 199
204 return count; 200 return count;
205 201
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index abe4eb997a84..026a758e5778 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -413,8 +413,39 @@ iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
413} 413}
414 414
415static sense_reason_t 415static sense_reason_t
416iblock_execute_write_same_direct(struct block_device *bdev, struct se_cmd *cmd)
417{
418 struct se_device *dev = cmd->se_dev;
419 struct scatterlist *sg = &cmd->t_data_sg[0];
420 struct page *page = NULL;
421 int ret;
422
423 if (sg->offset) {
424 page = alloc_page(GFP_KERNEL);
425 if (!page)
426 return TCM_OUT_OF_RESOURCES;
427 sg_copy_to_buffer(sg, cmd->t_data_nents, page_address(page),
428 dev->dev_attrib.block_size);
429 }
430
431 ret = blkdev_issue_write_same(bdev,
432 target_to_linux_sector(dev, cmd->t_task_lba),
433 target_to_linux_sector(dev,
434 sbc_get_write_same_sectors(cmd)),
435 GFP_KERNEL, page ? page : sg_page(sg));
436 if (page)
437 __free_page(page);
438 if (ret)
439 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
440
441 target_complete_cmd(cmd, GOOD);
442 return 0;
443}
444
445static sense_reason_t
416iblock_execute_write_same(struct se_cmd *cmd) 446iblock_execute_write_same(struct se_cmd *cmd)
417{ 447{
448 struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
418 struct iblock_req *ibr; 449 struct iblock_req *ibr;
419 struct scatterlist *sg; 450 struct scatterlist *sg;
420 struct bio *bio; 451 struct bio *bio;
@@ -439,6 +470,9 @@ iblock_execute_write_same(struct se_cmd *cmd)
439 return TCM_INVALID_CDB_FIELD; 470 return TCM_INVALID_CDB_FIELD;
440 } 471 }
441 472
473 if (bdev_write_same(bdev))
474 return iblock_execute_write_same_direct(bdev, cmd);
475
442 ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); 476 ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
443 if (!ibr) 477 if (!ibr)
444 goto fail; 478 goto fail;
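Note on the iblock hunks: when bdev_write_same() reports hardware WRITE SAME support, the new iblock_execute_write_same_direct() hands the whole range to blkdev_issue_write_same() as a single request instead of building one bio per backend block, using target_to_linux_sector() to convert the backend-block-sized LBA and length into 512-byte Linux sectors. A small sketch of that unit conversion, assuming a power-of-two 4096-byte backend block size (the struct se_device plumbing is omitted):

#include <stdint.h>
#include <stdio.h>

#define LINUX_SECTOR_SIZE 512ULL

/* Model of the LBA rescaling done before blkdev_issue_write_same():
 * target-side LBAs and lengths are in backend-block units and must be
 * expressed in 512-byte sectors for the block layer. */
static uint64_t to_linux_sector(uint32_t block_size, uint64_t lba)
{
	return lba * (block_size / LINUX_SECTOR_SIZE);
}

int main(void)
{
	uint32_t block_size = 4096;	/* assumed dev_attrib.block_size */
	uint64_t t_task_lba = 100;	/* WRITE_SAME starting LBA */
	uint64_t nr_blocks = 16;	/* sbc_get_write_same_sectors() result */

	printf("issue WRITE SAME at sector %llu for %llu sectors\n",
	       (unsigned long long)to_linux_sector(block_size, t_task_lba),
	       (unsigned long long)to_linux_sector(block_size, nr_blocks));
	return 0;
}

The page bounce in iblock_execute_write_same_direct() only exists for the sg->offset case, so the common aligned path issues the offload with zero data copies.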
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 4a7cf499cdfa..86b4a8375628 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -59,10 +59,10 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
59void target_pr_kref_release(struct kref *); 59void target_pr_kref_release(struct kref *);
60void core_free_device_list_for_node(struct se_node_acl *, 60void core_free_device_list_for_node(struct se_node_acl *,
61 struct se_portal_group *); 61 struct se_portal_group *);
62void core_update_device_list_access(u64, u32, struct se_node_acl *); 62void core_update_device_list_access(u64, bool, struct se_node_acl *);
63struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *, u64); 63struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *, u64);
64int core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *, 64int core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *,
65 u64, u32, struct se_node_acl *, struct se_portal_group *); 65 u64, bool, struct se_node_acl *, struct se_portal_group *);
66void core_disable_device_list_for_node(struct se_lun *, struct se_dev_entry *, 66void core_disable_device_list_for_node(struct se_lun *, struct se_dev_entry *,
67 struct se_node_acl *, struct se_portal_group *); 67 struct se_node_acl *, struct se_portal_group *);
68void core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *); 68void core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *);
@@ -72,7 +72,7 @@ void core_dev_del_lun(struct se_portal_group *, struct se_lun *);
72struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *, 72struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
73 struct se_node_acl *, u64, int *); 73 struct se_node_acl *, u64, int *);
74int core_dev_add_initiator_node_lun_acl(struct se_portal_group *, 74int core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
75 struct se_lun_acl *, struct se_lun *lun, u32); 75 struct se_lun_acl *, struct se_lun *lun, bool);
76int core_dev_del_initiator_node_lun_acl(struct se_lun *, 76int core_dev_del_initiator_node_lun_acl(struct se_lun *,
77 struct se_lun_acl *); 77 struct se_lun_acl *);
78void core_dev_free_initiator_node_lun_acl(struct se_portal_group *, 78void core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
@@ -118,7 +118,7 @@ void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *,
118void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *); 118void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
119struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u64); 119struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u64);
120int core_tpg_add_lun(struct se_portal_group *, struct se_lun *, 120int core_tpg_add_lun(struct se_portal_group *, struct se_lun *,
121 u32, struct se_device *); 121 bool, struct se_device *);
122void core_tpg_remove_lun(struct se_portal_group *, struct se_lun *); 122void core_tpg_remove_lun(struct se_portal_group *, struct se_lun *);
123struct se_node_acl *core_tpg_add_initiator_node_acl(struct se_portal_group *tpg, 123struct se_node_acl *core_tpg_add_initiator_node_acl(struct se_portal_group *tpg,
124 const char *initiatorname); 124 const char *initiatorname);
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 0aa47babd16c..2a91ed3ef380 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -997,7 +997,6 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
997 int length = 0; 997 int length = 0;
998 int ret; 998 int ret;
999 int i; 999 int i;
1000 bool read_only = target_lun_is_rdonly(cmd);;
1001 1000
1002 memset(buf, 0, SE_MODE_PAGE_BUF); 1001 memset(buf, 0, SE_MODE_PAGE_BUF);
1003 1002
@@ -1008,7 +1007,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
1008 length = ten ? 3 : 2; 1007 length = ten ? 3 : 2;
1009 1008
1010 /* DEVICE-SPECIFIC PARAMETER */ 1009 /* DEVICE-SPECIFIC PARAMETER */
1011 if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || read_only) 1010 if (cmd->se_lun->lun_access_ro || target_lun_is_rdonly(cmd))
1012 spc_modesense_write_protect(&buf[length], type); 1011 spc_modesense_write_protect(&buf[length], type);
1013 1012
1014 /* 1013 /*
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 3608b1b5ecf7..ddf046080dc3 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -121,7 +121,7 @@ void core_tpg_add_node_to_devs(
121 struct se_portal_group *tpg, 121 struct se_portal_group *tpg,
122 struct se_lun *lun_orig) 122 struct se_lun *lun_orig)
123{ 123{
124 u32 lun_access = 0; 124 bool lun_access_ro = true;
125 struct se_lun *lun; 125 struct se_lun *lun;
126 struct se_device *dev; 126 struct se_device *dev;
127 127
@@ -137,27 +137,26 @@ void core_tpg_add_node_to_devs(
137 * demo_mode_write_protect is ON, or READ_ONLY; 137 * demo_mode_write_protect is ON, or READ_ONLY;
138 */ 138 */
139 if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) { 139 if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
140 lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; 140 lun_access_ro = false;
141 } else { 141 } else {
142 /* 142 /*
143 * Allow only optical drives to issue R/W in default RO 143 * Allow only optical drives to issue R/W in default RO
144 * demo mode. 144 * demo mode.
145 */ 145 */
146 if (dev->transport->get_device_type(dev) == TYPE_DISK) 146 if (dev->transport->get_device_type(dev) == TYPE_DISK)
147 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; 147 lun_access_ro = true;
148 else 148 else
149 lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; 149 lun_access_ro = false;
150 } 150 }
151 151
152 pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s" 152 pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
153 " access for LUN in Demo Mode\n", 153 " access for LUN in Demo Mode\n",
154 tpg->se_tpg_tfo->get_fabric_name(), 154 tpg->se_tpg_tfo->get_fabric_name(),
155 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, 155 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
156 (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ? 156 lun_access_ro ? "READ-ONLY" : "READ-WRITE");
157 "READ-WRITE" : "READ-ONLY");
158 157
159 core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun, 158 core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
160 lun_access, acl, tpg); 159 lun_access_ro, acl, tpg);
161 /* 160 /*
162 * Check to see if there are any existing persistent reservation 161 * Check to see if there are any existing persistent reservation
163 * APTPL pre-registrations that need to be enabled for this dynamic 162 * APTPL pre-registrations that need to be enabled for this dynamic
@@ -522,7 +521,7 @@ int core_tpg_register(
522 return PTR_ERR(se_tpg->tpg_virt_lun0); 521 return PTR_ERR(se_tpg->tpg_virt_lun0);
523 522
524 ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0, 523 ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
525 TRANSPORT_LUNFLAGS_READ_ONLY, g_lun0_dev); 524 true, g_lun0_dev);
526 if (ret < 0) { 525 if (ret < 0) {
527 kfree(se_tpg->tpg_virt_lun0); 526 kfree(se_tpg->tpg_virt_lun0);
528 return ret; 527 return ret;
@@ -616,7 +615,7 @@ struct se_lun *core_tpg_alloc_lun(
616int core_tpg_add_lun( 615int core_tpg_add_lun(
617 struct se_portal_group *tpg, 616 struct se_portal_group *tpg,
618 struct se_lun *lun, 617 struct se_lun *lun,
619 u32 lun_access, 618 bool lun_access_ro,
620 struct se_device *dev) 619 struct se_device *dev)
621{ 620{
622 int ret; 621 int ret;
@@ -644,9 +643,9 @@ int core_tpg_add_lun(
644 spin_unlock(&dev->se_port_lock); 643 spin_unlock(&dev->se_port_lock);
645 644
646 if (dev->dev_flags & DF_READ_ONLY) 645 if (dev->dev_flags & DF_READ_ONLY)
647 lun->lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; 646 lun->lun_access_ro = true;
648 else 647 else
649 lun->lun_access = lun_access; 648 lun->lun_access_ro = lun_access_ro;
650 if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) 649 if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
651 hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist); 650 hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
652 mutex_unlock(&tpg->tpg_lun_mutex); 651 mutex_unlock(&tpg->tpg_lun_mutex);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 867bc6d0a68a..ab2bf12975e1 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -281,6 +281,17 @@ struct se_session *transport_init_session_tags(unsigned int tag_num,
281 struct se_session *se_sess; 281 struct se_session *se_sess;
282 int rc; 282 int rc;
283 283
284 if (tag_num != 0 && !tag_size) {
285 pr_err("init_session_tags called with percpu-ida tag_num:"
286 " %u, but zero tag_size\n", tag_num);
287 return ERR_PTR(-EINVAL);
288 }
289 if (!tag_num && tag_size) {
290 pr_err("init_session_tags called with percpu-ida tag_size:"
291 " %u, but zero tag_num\n", tag_size);
292 return ERR_PTR(-EINVAL);
293 }
294
284 se_sess = transport_init_session(sup_prot_ops); 295 se_sess = transport_init_session(sup_prot_ops);
285 if (IS_ERR(se_sess)) 296 if (IS_ERR(se_sess))
286 return se_sess; 297 return se_sess;
@@ -374,6 +385,51 @@ void transport_register_session(
374} 385}
375EXPORT_SYMBOL(transport_register_session); 386EXPORT_SYMBOL(transport_register_session);
376 387
388struct se_session *
389target_alloc_session(struct se_portal_group *tpg,
390 unsigned int tag_num, unsigned int tag_size,
391 enum target_prot_op prot_op,
392 const char *initiatorname, void *private,
393 int (*callback)(struct se_portal_group *,
394 struct se_session *, void *))
395{
396 struct se_session *sess;
397
398 /*
399 * If the fabric driver is using percpu-ida based pre allocation
400 * of I/O descriptor tags, go ahead and perform that setup now..
401 */
402 if (tag_num != 0)
403 sess = transport_init_session_tags(tag_num, tag_size, prot_op);
404 else
405 sess = transport_init_session(prot_op);
406
407 if (IS_ERR(sess))
408 return sess;
409
410 sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg,
411 (unsigned char *)initiatorname);
412 if (!sess->se_node_acl) {
413 transport_free_session(sess);
414 return ERR_PTR(-EACCES);
415 }
416 /*
417 * Go ahead and perform any remaining fabric setup that is
418 * required before transport_register_session().
419 */
420 if (callback != NULL) {
421 int rc = callback(tpg, sess, private);
422 if (rc) {
423 transport_free_session(sess);
424 return ERR_PTR(rc);
425 }
426 }
427
428 transport_register_session(tpg, sess->se_node_acl, sess, private);
429 return sess;
430}
431EXPORT_SYMBOL(target_alloc_session);
432
377static void target_release_session(struct kref *kref) 433static void target_release_session(struct kref *kref)
378{ 434{
379 struct se_session *se_sess = container_of(kref, 435 struct se_session *se_sess = container_of(kref,
@@ -1941,6 +1997,9 @@ static void transport_complete_qf(struct se_cmd *cmd)
1941 1997
1942 switch (cmd->data_direction) { 1998 switch (cmd->data_direction) {
1943 case DMA_FROM_DEVICE: 1999 case DMA_FROM_DEVICE:
2000 if (cmd->scsi_status)
2001 goto queue_status;
2002
1944 trace_target_cmd_complete(cmd); 2003 trace_target_cmd_complete(cmd);
1945 ret = cmd->se_tfo->queue_data_in(cmd); 2004 ret = cmd->se_tfo->queue_data_in(cmd);
1946 break; 2005 break;
@@ -1951,6 +2010,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
1951 } 2010 }
1952 /* Fall through for DMA_TO_DEVICE */ 2011 /* Fall through for DMA_TO_DEVICE */
1953 case DMA_NONE: 2012 case DMA_NONE:
2013queue_status:
1954 trace_target_cmd_complete(cmd); 2014 trace_target_cmd_complete(cmd);
1955 ret = cmd->se_tfo->queue_status(cmd); 2015 ret = cmd->se_tfo->queue_status(cmd);
1956 break; 2016 break;
@@ -2072,6 +2132,9 @@ static void target_complete_ok_work(struct work_struct *work)
2072queue_rsp: 2132queue_rsp:
2073 switch (cmd->data_direction) { 2133 switch (cmd->data_direction) {
2074 case DMA_FROM_DEVICE: 2134 case DMA_FROM_DEVICE:
2135 if (cmd->scsi_status)
2136 goto queue_status;
2137
2075 atomic_long_add(cmd->data_length, 2138 atomic_long_add(cmd->data_length,
2076 &cmd->se_lun->lun_stats.tx_data_octets); 2139 &cmd->se_lun->lun_stats.tx_data_octets);
2077 /* 2140 /*
@@ -2111,6 +2174,7 @@ queue_rsp:
2111 } 2174 }
2112 /* Fall through for DMA_TO_DEVICE */ 2175 /* Fall through for DMA_TO_DEVICE */
2113 case DMA_NONE: 2176 case DMA_NONE:
2177queue_status:
2114 trace_target_cmd_complete(cmd); 2178 trace_target_cmd_complete(cmd);
2115 ret = cmd->se_tfo->queue_status(cmd); 2179 ret = cmd->se_tfo->queue_status(cmd);
2116 if (ret == -EAGAIN || ret == -ENOMEM) 2180 if (ret == -EAGAIN || ret == -ENOMEM)
@@ -2596,8 +2660,6 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
2596 2660
2597 list_for_each_entry_safe(se_cmd, tmp_cmd, 2661 list_for_each_entry_safe(se_cmd, tmp_cmd,
2598 &se_sess->sess_wait_list, se_cmd_list) { 2662 &se_sess->sess_wait_list, se_cmd_list) {
2599 list_del_init(&se_cmd->se_cmd_list);
2600
2601 pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" 2663 pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
2602 " %d\n", se_cmd, se_cmd->t_state, 2664 " %d\n", se_cmd, se_cmd->t_state,
2603 se_cmd->se_tfo->get_cmd_state(se_cmd)); 2665 se_cmd->se_tfo->get_cmd_state(se_cmd));
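Note on the target_core_transport.c hunks: target_alloc_session() folds the boilerplate every fabric driver repeated into one helper with a fixed ordering: initialise the session (with or without a percpu_ida tag pool), resolve the node ACL from the initiator name, run an optional fabric callback, then register the session; any failure before registration unwinds with transport_free_session(). A toy userspace model of that ordering (the struct and function names below are stand-ins, not the kernel structures):

#include <stdio.h>
#include <stdlib.h>

struct toy_session {
	const char *initiator;
	void *fabric_private;
};

/* Toy model of the target_alloc_session() flow: allocate, resolve the
 * ACL, run the fabric callback, and only then "register". */
static struct toy_session *
alloc_session(const char *initiator, void *private,
	      int (*callback)(struct toy_session *, void *))
{
	struct toy_session *sess = calloc(1, sizeof(*sess));

	if (!sess)
		return NULL;
	sess->initiator = initiator;		/* stands in for the ACL lookup */

	if (callback && callback(sess, private)) {
		free(sess);			/* mirrors transport_free_session() */
		return NULL;
	}
	sess->fabric_private = private;		/* mirrors transport_register_session() */
	printf("registered session for %s\n", initiator);
	return sess;
}

static int fabric_setup(struct toy_session *sess, void *private)
{
	/* e.g. tcm_fc hashes the session into its per-tport table here */
	(void)sess; (void)private;
	return 0;
}

int main(void)
{
	struct toy_session *sess = alloc_session("iqn.example:init", NULL, fabric_setup);

	free(sess);
	return 0;
}

Running fabric setup via the callback, before registration, is what lets the tree-wide conversions below delete their open-coded error unwinding.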
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 94f5154ac788..62bf4fe5704a 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -26,6 +26,7 @@
26#include <linux/vmalloc.h> 26#include <linux/vmalloc.h>
27#include <linux/uio_driver.h> 27#include <linux/uio_driver.h>
28#include <linux/stringify.h> 28#include <linux/stringify.h>
29#include <linux/bitops.h>
29#include <net/genetlink.h> 30#include <net/genetlink.h>
30#include <scsi/scsi_common.h> 31#include <scsi/scsi_common.h>
31#include <scsi/scsi_proto.h> 32#include <scsi/scsi_proto.h>
@@ -63,8 +64,11 @@
63 64
64#define TCMU_TIME_OUT (30 * MSEC_PER_SEC) 65#define TCMU_TIME_OUT (30 * MSEC_PER_SEC)
65 66
67#define DATA_BLOCK_BITS 256
68#define DATA_BLOCK_SIZE 4096
69
66#define CMDR_SIZE (16 * 4096) 70#define CMDR_SIZE (16 * 4096)
67#define DATA_SIZE (257 * 4096) 71#define DATA_SIZE (DATA_BLOCK_BITS * DATA_BLOCK_SIZE)
68 72
69#define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE) 73#define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE)
70 74
@@ -93,12 +97,11 @@ struct tcmu_dev {
93 u32 cmdr_size; 97 u32 cmdr_size;
94 u32 cmdr_last_cleaned; 98 u32 cmdr_last_cleaned;
95 /* Offset of data ring from start of mb */ 99 /* Offset of data ring from start of mb */
100 /* Must add data_off and mb_addr to get the address */
96 size_t data_off; 101 size_t data_off;
97 size_t data_size; 102 size_t data_size;
98 /* Ring head + tail values. */ 103
99 /* Must add data_off and mb_addr to get the address */ 104 DECLARE_BITMAP(data_bitmap, DATA_BLOCK_BITS);
100 size_t data_head;
101 size_t data_tail;
102 105
103 wait_queue_head_t wait_cmdr; 106 wait_queue_head_t wait_cmdr;
104 /* TODO should this be a mutex? */ 107 /* TODO should this be a mutex? */
@@ -122,9 +125,9 @@ struct tcmu_cmd {
122 125
123 uint16_t cmd_id; 126 uint16_t cmd_id;
124 127
125 /* Can't use se_cmd->data_length when cleaning up expired cmds, because if 128 /* Can't use se_cmd when cleaning up expired cmds, because if
126 cmd has been completed then accessing se_cmd is off limits */ 129 cmd has been completed then accessing se_cmd is off limits */
127 size_t data_length; 130 DECLARE_BITMAP(data_bitmap, DATA_BLOCK_BITS);
128 131
129 unsigned long deadline; 132 unsigned long deadline;
130 133
@@ -168,13 +171,6 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
168 171
169 tcmu_cmd->se_cmd = se_cmd; 172 tcmu_cmd->se_cmd = se_cmd;
170 tcmu_cmd->tcmu_dev = udev; 173 tcmu_cmd->tcmu_dev = udev;
171 tcmu_cmd->data_length = se_cmd->data_length;
172
173 if (se_cmd->se_cmd_flags & SCF_BIDI) {
174 BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
175 tcmu_cmd->data_length += se_cmd->t_bidi_data_sg->length;
176 }
177
178 tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT); 174 tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT);
179 175
180 idr_preload(GFP_KERNEL); 176 idr_preload(GFP_KERNEL);
@@ -231,105 +227,126 @@ static inline size_t head_to_end(size_t head, size_t size)
231 return size - head; 227 return size - head;
232} 228}
233 229
230static inline void new_iov(struct iovec **iov, int *iov_cnt,
231 struct tcmu_dev *udev)
232{
233 struct iovec *iovec;
234
235 if (*iov_cnt != 0)
236 (*iov)++;
237 (*iov_cnt)++;
238
239 iovec = *iov;
240 memset(iovec, 0, sizeof(struct iovec));
241}
242
234#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size) 243#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
235 244
245/* offset is relative to mb_addr */
246static inline size_t get_block_offset(struct tcmu_dev *dev,
247 int block, int remaining)
248{
249 return dev->data_off + block * DATA_BLOCK_SIZE +
250 DATA_BLOCK_SIZE - remaining;
251}
252
253static inline size_t iov_tail(struct tcmu_dev *udev, struct iovec *iov)
254{
255 return (size_t)iov->iov_base + iov->iov_len;
256}
257
236static void alloc_and_scatter_data_area(struct tcmu_dev *udev, 258static void alloc_and_scatter_data_area(struct tcmu_dev *udev,
237 struct scatterlist *data_sg, unsigned int data_nents, 259 struct scatterlist *data_sg, unsigned int data_nents,
238 struct iovec **iov, int *iov_cnt, bool copy_data) 260 struct iovec **iov, int *iov_cnt, bool copy_data)
239{ 261{
240 int i; 262 int i, block;
263 int block_remaining = 0;
241 void *from, *to; 264 void *from, *to;
242 size_t copy_bytes; 265 size_t copy_bytes, to_offset;
243 struct scatterlist *sg; 266 struct scatterlist *sg;
244 267
245 for_each_sg(data_sg, sg, data_nents, i) { 268 for_each_sg(data_sg, sg, data_nents, i) {
246 copy_bytes = min_t(size_t, sg->length, 269 int sg_remaining = sg->length;
247 head_to_end(udev->data_head, udev->data_size));
248 from = kmap_atomic(sg_page(sg)) + sg->offset; 270 from = kmap_atomic(sg_page(sg)) + sg->offset;
249 to = (void *) udev->mb_addr + udev->data_off + udev->data_head; 271 while (sg_remaining > 0) {
250 272 if (block_remaining == 0) {
251 if (copy_data) { 273 block = find_first_zero_bit(udev->data_bitmap,
252 memcpy(to, from, copy_bytes); 274 DATA_BLOCK_BITS);
253 tcmu_flush_dcache_range(to, copy_bytes); 275 block_remaining = DATA_BLOCK_SIZE;
254 } 276 set_bit(block, udev->data_bitmap);
255 277 }
256 /* Even iov_base is relative to mb_addr */ 278 copy_bytes = min_t(size_t, sg_remaining,
257 (*iov)->iov_len = copy_bytes; 279 block_remaining);
258 (*iov)->iov_base = (void __user *) udev->data_off + 280 to_offset = get_block_offset(udev, block,
259 udev->data_head; 281 block_remaining);
260 (*iov_cnt)++; 282 to = (void *)udev->mb_addr + to_offset;
261 (*iov)++; 283 if (*iov_cnt != 0 &&
262 284 to_offset == iov_tail(udev, *iov)) {
263 UPDATE_HEAD(udev->data_head, copy_bytes, udev->data_size); 285 (*iov)->iov_len += copy_bytes;
264 286 } else {
265 /* Uh oh, we wrapped the buffer. Must split sg across 2 iovs. */ 287 new_iov(iov, iov_cnt, udev);
266 if (sg->length != copy_bytes) { 288 (*iov)->iov_base = (void __user *) to_offset;
267 void *from_skip = from + copy_bytes; 289 (*iov)->iov_len = copy_bytes;
268 290 }
269 copy_bytes = sg->length - copy_bytes;
270
271 (*iov)->iov_len = copy_bytes;
272 (*iov)->iov_base = (void __user *) udev->data_off +
273 udev->data_head;
274
275 if (copy_data) { 291 if (copy_data) {
276 to = (void *) udev->mb_addr + 292 memcpy(to, from + sg->length - sg_remaining,
277 udev->data_off + udev->data_head; 293 copy_bytes);
278 memcpy(to, from_skip, copy_bytes);
279 tcmu_flush_dcache_range(to, copy_bytes); 294 tcmu_flush_dcache_range(to, copy_bytes);
280 } 295 }
281 296 sg_remaining -= copy_bytes;
282 (*iov_cnt)++; 297 block_remaining -= copy_bytes;
283 (*iov)++;
284
285 UPDATE_HEAD(udev->data_head,
286 copy_bytes, udev->data_size);
287 } 298 }
288
289 kunmap_atomic(from - sg->offset); 299 kunmap_atomic(from - sg->offset);
290 } 300 }
291} 301}
292 302
293static void gather_and_free_data_area(struct tcmu_dev *udev, 303static void free_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd)
294 struct scatterlist *data_sg, unsigned int data_nents)
295{ 304{
296 int i; 305 bitmap_xor(udev->data_bitmap, udev->data_bitmap, cmd->data_bitmap,
306 DATA_BLOCK_BITS);
307}
308
309static void gather_data_area(struct tcmu_dev *udev, unsigned long *cmd_bitmap,
310 struct scatterlist *data_sg, unsigned int data_nents)
311{
312 int i, block;
313 int block_remaining = 0;
297 void *from, *to; 314 void *from, *to;
298 size_t copy_bytes; 315 size_t copy_bytes, from_offset;
299 struct scatterlist *sg; 316 struct scatterlist *sg;
300 317
301 /* It'd be easier to look at entry's iovec again, but UAM */
302 for_each_sg(data_sg, sg, data_nents, i) { 318 for_each_sg(data_sg, sg, data_nents, i) {
303 copy_bytes = min_t(size_t, sg->length, 319 int sg_remaining = sg->length;
304 head_to_end(udev->data_tail, udev->data_size));
305
306 to = kmap_atomic(sg_page(sg)) + sg->offset; 320 to = kmap_atomic(sg_page(sg)) + sg->offset;
307 WARN_ON(sg->length + sg->offset > PAGE_SIZE); 321 while (sg_remaining > 0) {
308 from = (void *) udev->mb_addr + 322 if (block_remaining == 0) {
309 udev->data_off + udev->data_tail; 323 block = find_first_bit(cmd_bitmap,
310 tcmu_flush_dcache_range(from, copy_bytes); 324 DATA_BLOCK_BITS);
311 memcpy(to, from, copy_bytes); 325 block_remaining = DATA_BLOCK_SIZE;
312 326 clear_bit(block, cmd_bitmap);
313 UPDATE_HEAD(udev->data_tail, copy_bytes, udev->data_size); 327 }
314 328 copy_bytes = min_t(size_t, sg_remaining,
315 /* Uh oh, wrapped the data buffer for this sg's data */ 329 block_remaining);
316 if (sg->length != copy_bytes) { 330 from_offset = get_block_offset(udev, block,
317 void *to_skip = to + copy_bytes; 331 block_remaining);
318 332 from = (void *) udev->mb_addr + from_offset;
319 from = (void *) udev->mb_addr +
320 udev->data_off + udev->data_tail;
321 WARN_ON(udev->data_tail);
322 copy_bytes = sg->length - copy_bytes;
323 tcmu_flush_dcache_range(from, copy_bytes); 333 tcmu_flush_dcache_range(from, copy_bytes);
324 memcpy(to_skip, from, copy_bytes); 334 memcpy(to + sg->length - sg_remaining, from,
335 copy_bytes);
325 336
326 UPDATE_HEAD(udev->data_tail, 337 sg_remaining -= copy_bytes;
327 copy_bytes, udev->data_size); 338 block_remaining -= copy_bytes;
328 } 339 }
329 kunmap_atomic(to - sg->offset); 340 kunmap_atomic(to - sg->offset);
330 } 341 }
331} 342}
332 343
344static inline size_t spc_bitmap_free(unsigned long *bitmap)
345{
346 return DATA_BLOCK_SIZE * (DATA_BLOCK_BITS -
347 bitmap_weight(bitmap, DATA_BLOCK_BITS));
348}
349
333/* 350/*
334 * We can't queue a command until we have space available on the cmd ring *and* 351 * We can't queue a command until we have space available on the cmd ring *and*
335 * space available on the data ring. 352 * space available on the data ring.
@@ -339,9 +356,8 @@ static void gather_and_free_data_area(struct tcmu_dev *udev,
339static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t data_needed) 356static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t data_needed)
340{ 357{
341 struct tcmu_mailbox *mb = udev->mb_addr; 358 struct tcmu_mailbox *mb = udev->mb_addr;
342 size_t space; 359 size_t space, cmd_needed;
343 u32 cmd_head; 360 u32 cmd_head;
344 size_t cmd_needed;
345 361
346 tcmu_flush_dcache_range(mb, sizeof(*mb)); 362 tcmu_flush_dcache_range(mb, sizeof(*mb));
347 363
@@ -363,10 +379,10 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t d
363 return false; 379 return false;
364 } 380 }
365 381
366 space = spc_free(udev->data_head, udev->data_tail, udev->data_size); 382 space = spc_bitmap_free(udev->data_bitmap);
367 if (space < data_needed) { 383 if (space < data_needed) {
368 pr_debug("no data space: %zu %zu %zu\n", udev->data_head, 384 pr_debug("no data space: only %zu available, but ask for %zu\n",
369 udev->data_tail, udev->data_size); 385 space, data_needed);
370 return false; 386 return false;
371 } 387 }
372 388
@@ -385,6 +401,8 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
385 uint32_t cmd_head; 401 uint32_t cmd_head;
386 uint64_t cdb_off; 402 uint64_t cdb_off;
387 bool copy_to_data_area; 403 bool copy_to_data_area;
404 size_t data_length;
405 DECLARE_BITMAP(old_bitmap, DATA_BLOCK_BITS);
388 406
389 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) 407 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
390 return -EINVAL; 408 return -EINVAL;
@@ -393,12 +411,12 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
393 * Must be a certain minimum size for response sense info, but 411 * Must be a certain minimum size for response sense info, but
394 * also may be larger if the iov array is large. 412 * also may be larger if the iov array is large.
395 * 413 *
396 * iovs = sgl_nents+1, for end-of-ring case, plus another 1 414 * We prepare way too many iovs for potential uses here, because it's
397 * b/c size == offsetof one-past-element. 415 * expensive to tell how many regions are freed in the bitmap
398 */ 416 */
399 base_command_size = max(offsetof(struct tcmu_cmd_entry, 417 base_command_size = max(offsetof(struct tcmu_cmd_entry,
400 req.iov[se_cmd->t_bidi_data_nents + 418 req.iov[se_cmd->t_bidi_data_nents +
401 se_cmd->t_data_nents + 2]), 419 se_cmd->t_data_nents]),
402 sizeof(struct tcmu_cmd_entry)); 420 sizeof(struct tcmu_cmd_entry));
403 command_size = base_command_size 421 command_size = base_command_size
404 + round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE); 422 + round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE);
@@ -409,13 +427,18 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
409 427
410 mb = udev->mb_addr; 428 mb = udev->mb_addr;
411 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ 429 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
430 data_length = se_cmd->data_length;
431 if (se_cmd->se_cmd_flags & SCF_BIDI) {
432 BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
433 data_length += se_cmd->t_bidi_data_sg->length;
434 }
412 if ((command_size > (udev->cmdr_size / 2)) 435 if ((command_size > (udev->cmdr_size / 2))
413 || tcmu_cmd->data_length > (udev->data_size - 1)) 436 || data_length > udev->data_size)
414 pr_warn("TCMU: Request of size %zu/%zu may be too big for %u/%zu " 437 pr_warn("TCMU: Request of size %zu/%zu may be too big for %u/%zu "
415 "cmd/data ring buffers\n", command_size, tcmu_cmd->data_length, 438 "cmd/data ring buffers\n", command_size, data_length,
416 udev->cmdr_size, udev->data_size); 439 udev->cmdr_size, udev->data_size);
417 440
418 while (!is_ring_space_avail(udev, command_size, tcmu_cmd->data_length)) { 441 while (!is_ring_space_avail(udev, command_size, data_length)) {
419 int ret; 442 int ret;
420 DEFINE_WAIT(__wait); 443 DEFINE_WAIT(__wait);
421 444
@@ -462,6 +485,8 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
462 entry->hdr.kflags = 0; 485 entry->hdr.kflags = 0;
463 entry->hdr.uflags = 0; 486 entry->hdr.uflags = 0;
464 487
488 bitmap_copy(old_bitmap, udev->data_bitmap, DATA_BLOCK_BITS);
489
465 /* 490 /*
466 * Fix up iovecs, and handle if allocation in data ring wrapped. 491 * Fix up iovecs, and handle if allocation in data ring wrapped.
467 */ 492 */
@@ -480,6 +505,10 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
480 se_cmd->t_bidi_data_nents, &iov, &iov_cnt, false); 505 se_cmd->t_bidi_data_nents, &iov, &iov_cnt, false);
481 entry->req.iov_bidi_cnt = iov_cnt; 506 entry->req.iov_bidi_cnt = iov_cnt;
482 507
508 /* cmd's data_bitmap is what changed in process */
509 bitmap_xor(tcmu_cmd->data_bitmap, old_bitmap, udev->data_bitmap,
510 DATA_BLOCK_BITS);
511
483 /* All offsets relative to mb_addr, not start of entry! */ 512 /* All offsets relative to mb_addr, not start of entry! */
484 cdb_off = CMDR_OFF + cmd_head + base_command_size; 513 cdb_off = CMDR_OFF + cmd_head + base_command_size;
484 memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb)); 513 memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
@@ -530,35 +559,42 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
530 struct tcmu_dev *udev = cmd->tcmu_dev; 559 struct tcmu_dev *udev = cmd->tcmu_dev;
531 560
532 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { 561 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
533 /* cmd has been completed already from timeout, just reclaim data 562 /*
534 ring space */ 563 * cmd has been completed already from timeout, just reclaim
535 UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size); 564 * data ring space and free cmd
565 */
566 free_data_area(udev, cmd);
567
568 kmem_cache_free(tcmu_cmd_cache, cmd);
536 return; 569 return;
537 } 570 }
538 571
539 if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) { 572 if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
540 UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size); 573 free_data_area(udev, cmd);
541 pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n", 574 pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
542 cmd->se_cmd); 575 cmd->se_cmd);
543 entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION; 576 entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
544 } else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) { 577 } else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
545 memcpy(se_cmd->sense_buffer, entry->rsp.sense_buffer, 578 memcpy(se_cmd->sense_buffer, entry->rsp.sense_buffer,
546 se_cmd->scsi_sense_length); 579 se_cmd->scsi_sense_length);
547 580 free_data_area(udev, cmd);
548 UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
549 } else if (se_cmd->se_cmd_flags & SCF_BIDI) { 581 } else if (se_cmd->se_cmd_flags & SCF_BIDI) {
550 /* Discard data_out buffer */ 582 DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
551 UPDATE_HEAD(udev->data_tail,
552 (size_t)se_cmd->t_data_sg->length, udev->data_size);
553 583
554 /* Get Data-In buffer */ 584 /* Get Data-In buffer before clean up */
555 gather_and_free_data_area(udev, 585 bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
586 gather_data_area(udev, bitmap,
556 se_cmd->t_bidi_data_sg, se_cmd->t_bidi_data_nents); 587 se_cmd->t_bidi_data_sg, se_cmd->t_bidi_data_nents);
588 free_data_area(udev, cmd);
557 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { 589 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
558 gather_and_free_data_area(udev, 590 DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
591
592 bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
593 gather_data_area(udev, bitmap,
559 se_cmd->t_data_sg, se_cmd->t_data_nents); 594 se_cmd->t_data_sg, se_cmd->t_data_nents);
595 free_data_area(udev, cmd);
560 } else if (se_cmd->data_direction == DMA_TO_DEVICE) { 596 } else if (se_cmd->data_direction == DMA_TO_DEVICE) {
561 UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size); 597 free_data_area(udev, cmd);
562 } else if (se_cmd->data_direction != DMA_NONE) { 598 } else if (se_cmd->data_direction != DMA_NONE) {
563 pr_warn("TCMU: data direction was %d!\n", 599 pr_warn("TCMU: data direction was %d!\n",
564 se_cmd->data_direction); 600 se_cmd->data_direction);
@@ -894,11 +930,13 @@ static int tcmu_configure_device(struct se_device *dev)
894 930
895 mb = udev->mb_addr; 931 mb = udev->mb_addr;
896 mb->version = TCMU_MAILBOX_VERSION; 932 mb->version = TCMU_MAILBOX_VERSION;
933 mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC;
897 mb->cmdr_off = CMDR_OFF; 934 mb->cmdr_off = CMDR_OFF;
898 mb->cmdr_size = udev->cmdr_size; 935 mb->cmdr_size = udev->cmdr_size;
899 936
900 WARN_ON(!PAGE_ALIGNED(udev->data_off)); 937 WARN_ON(!PAGE_ALIGNED(udev->data_off));
901 WARN_ON(udev->data_size % PAGE_SIZE); 938 WARN_ON(udev->data_size % PAGE_SIZE);
939 WARN_ON(udev->data_size % DATA_BLOCK_SIZE);
902 940
903 info->version = __stringify(TCMU_MAILBOX_VERSION); 941 info->version = __stringify(TCMU_MAILBOX_VERSION);
904 942
@@ -942,12 +980,12 @@ err_vzalloc:
942 return ret; 980 return ret;
943} 981}
944 982
945static int tcmu_check_pending_cmd(int id, void *p, void *data) 983static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
946{ 984{
947 struct tcmu_cmd *cmd = p; 985 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
948 986 kmem_cache_free(tcmu_cmd_cache, cmd);
949 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
950 return 0; 987 return 0;
988 }
951 return -EINVAL; 989 return -EINVAL;
952} 990}
953 991
@@ -962,6 +1000,8 @@ static void tcmu_dev_call_rcu(struct rcu_head *p)
962static void tcmu_free_device(struct se_device *dev) 1000static void tcmu_free_device(struct se_device *dev)
963{ 1001{
964 struct tcmu_dev *udev = TCMU_DEV(dev); 1002 struct tcmu_dev *udev = TCMU_DEV(dev);
1003 struct tcmu_cmd *cmd;
1004 bool all_expired = true;
965 int i; 1005 int i;
966 1006
967 del_timer_sync(&udev->timeout); 1007 del_timer_sync(&udev->timeout);
@@ -970,10 +1010,13 @@ static void tcmu_free_device(struct se_device *dev)
970 1010
971 /* Upper layer should drain all requests before calling this */ 1011 /* Upper layer should drain all requests before calling this */
972 spin_lock_irq(&udev->commands_lock); 1012 spin_lock_irq(&udev->commands_lock);
973 i = idr_for_each(&udev->commands, tcmu_check_pending_cmd, NULL); 1013 idr_for_each_entry(&udev->commands, cmd, i) {
1014 if (tcmu_check_and_free_pending_cmd(cmd) != 0)
1015 all_expired = false;
1016 }
974 idr_destroy(&udev->commands); 1017 idr_destroy(&udev->commands);
975 spin_unlock_irq(&udev->commands_lock); 1018 spin_unlock_irq(&udev->commands_lock);
976 WARN_ON(i); 1019 WARN_ON(!all_expired);
977 1020
978 /* Device was configured */ 1021 /* Device was configured */
979 if (udev->uio_info.uio_dev) { 1022 if (udev->uio_info.uio_dev) {
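Note on the target_core_user.c hunks: the data ring's head/tail pair is replaced by a bitmap of fixed 4 KiB blocks. Allocation is find_first_zero_bit() + set_bit(), the per-command bitmap is derived by XOR-ing the device bitmap before and after setup, and free_data_area() returns the blocks with a single bitmap_xor(). A compact userspace model of that allocator, shrunk to a 64-block area so one uint64_t can stand in for the bitmap (helper names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define DATA_BLOCK_SIZE 4096
#define NBLOCKS 64		/* kernel side uses DATA_BLOCK_BITS = 256 */

/* find_first_zero_bit()/set_bit() stand-in for a 64-bit map. */
static int alloc_block(uint64_t *map)
{
	for (int i = 0; i < NBLOCKS; i++) {
		if (!(*map & (1ULL << i))) {
			*map |= 1ULL << i;
			return i;
		}
	}
	return -1;
}

int main(void)
{
	uint64_t dev_map = 0, old_map, cmd_map;

	old_map = dev_map;
	/* A command needing two data blocks takes the first free ones. */
	int b0 = alloc_block(&dev_map);
	int b1 = alloc_block(&dev_map);

	/* Per-command bitmap = what this command changed (bitmap_xor). */
	cmd_map = old_map ^ dev_map;
	printf("blocks %d,%d at offsets %d,%d; cmd map %#llx\n",
	       b0, b1, b0 * DATA_BLOCK_SIZE, b1 * DATA_BLOCK_SIZE,
	       (unsigned long long)cmd_map);

	/* free_data_area(): xor the command's blocks back out. */
	dev_map ^= cmd_map;
	printf("device map after free: %#llx\n", (unsigned long long)dev_map);
	return 0;
}

Because each command records exactly which blocks it owns, completions can free their space in any order, which is why the mailbox can now advertise TCMU_MAILBOX_FLAG_CAP_OOOC to userspace.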
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 064d6dfb5b6d..216e18cc9133 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -107,8 +107,7 @@ void ft_release_cmd(struct se_cmd *se_cmd)
107 107
108int ft_check_stop_free(struct se_cmd *se_cmd) 108int ft_check_stop_free(struct se_cmd *se_cmd)
109{ 109{
110 transport_generic_free_cmd(se_cmd, 0); 110 return transport_generic_free_cmd(se_cmd, 0);
111 return 1;
112} 111}
113 112
114/* 113/*
@@ -179,6 +178,12 @@ int ft_queue_status(struct se_cmd *se_cmd)
179 return -ENOMEM; 178 return -ENOMEM;
180 } 179 }
181 lport->tt.exch_done(cmd->seq); 180 lport->tt.exch_done(cmd->seq);
181 /*
182 * Drop the extra ACK_KREF reference taken by target_submit_cmd()
183 * ahead of ft_check_stop_free() -> transport_generic_free_cmd()
184 * final se_cmd->cmd_kref put.
185 */
186 target_put_sess_cmd(&cmd->se_cmd);
182 return 0; 187 return 0;
183} 188}
184 189
@@ -387,7 +392,7 @@ static void ft_send_tm(struct ft_cmd *cmd)
387 /* FIXME: Add referenced task tag for ABORT_TASK */ 392 /* FIXME: Add referenced task tag for ABORT_TASK */
388 rc = target_submit_tmr(&cmd->se_cmd, cmd->sess->se_sess, 393 rc = target_submit_tmr(&cmd->se_cmd, cmd->sess->se_sess,
389 &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun), 394 &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
390 cmd, tm_func, GFP_KERNEL, 0, 0); 395 cmd, tm_func, GFP_KERNEL, 0, TARGET_SCF_ACK_KREF);
391 if (rc < 0) 396 if (rc < 0)
392 ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED); 397 ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED);
393} 398}
@@ -422,6 +427,12 @@ void ft_queue_tm_resp(struct se_cmd *se_cmd)
422 pr_debug("tmr fn %d resp %d fcp code %d\n", 427 pr_debug("tmr fn %d resp %d fcp code %d\n",
423 tmr->function, tmr->response, code); 428 tmr->function, tmr->response, code);
424 ft_send_resp_code(cmd, code); 429 ft_send_resp_code(cmd, code);
430 /*
431 * Drop the extra ACK_KREF reference taken by target_submit_tmr()
432 * ahead of ft_check_stop_free() -> transport_generic_free_cmd()
433 * final se_cmd->cmd_kref put.
434 */
435 target_put_sess_cmd(&cmd->se_cmd);
425} 436}
426 437
427void ft_aborted_task(struct se_cmd *se_cmd) 438void ft_aborted_task(struct se_cmd *se_cmd)
@@ -560,7 +571,8 @@ static void ft_send_work(struct work_struct *work)
560 */ 571 */
561 if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb, 572 if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb,
562 &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun), 573 &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
563 ntohl(fcp->fc_dl), task_attr, data_dir, 0)) 574 ntohl(fcp->fc_dl), task_attr, data_dir,
575 TARGET_SCF_ACK_KREF))
564 goto err; 576 goto err;
565 577
566 pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl); 578 pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl);
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index e19f4c58c6fa..d0c3e1894c61 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -186,6 +186,20 @@ out:
186 return NULL; 186 return NULL;
187} 187}
188 188
189static int ft_sess_alloc_cb(struct se_portal_group *se_tpg,
190 struct se_session *se_sess, void *p)
191{
192 struct ft_sess *sess = p;
193 struct ft_tport *tport = sess->tport;
194 struct hlist_head *head = &tport->hash[ft_sess_hash(sess->port_id)];
195
196 pr_debug("port_id %x sess %p\n", sess->port_id, sess);
197 hlist_add_head_rcu(&sess->hash, head);
198 tport->sess_count++;
199
200 return 0;
201}
202
189/* 203/*
190 * Allocate session and enter it in the hash for the local port. 204 * Allocate session and enter it in the hash for the local port.
191 * Caller holds ft_lport_lock. 205 * Caller holds ft_lport_lock.
@@ -194,7 +208,6 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
194 struct fc_rport_priv *rdata) 208 struct fc_rport_priv *rdata)
195{ 209{
196 struct se_portal_group *se_tpg = &tport->tpg->se_tpg; 210 struct se_portal_group *se_tpg = &tport->tpg->se_tpg;
197 struct se_node_acl *se_acl;
198 struct ft_sess *sess; 211 struct ft_sess *sess;
199 struct hlist_head *head; 212 struct hlist_head *head;
200 unsigned char initiatorname[TRANSPORT_IQN_LEN]; 213 unsigned char initiatorname[TRANSPORT_IQN_LEN];
@@ -210,31 +223,18 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
210 if (!sess) 223 if (!sess)
211 return NULL; 224 return NULL;
212 225
213 sess->se_sess = transport_init_session_tags(TCM_FC_DEFAULT_TAGS, 226 kref_init(&sess->kref); /* ref for table entry */
214 sizeof(struct ft_cmd), 227 sess->tport = tport;
215 TARGET_PROT_NORMAL); 228 sess->port_id = port_id;
216 if (IS_ERR(sess->se_sess)) {
217 kfree(sess);
218 return NULL;
219 }
220 229
221 se_acl = core_tpg_get_initiator_node_acl(se_tpg, &initiatorname[0]); 230 sess->se_sess = target_alloc_session(se_tpg, TCM_FC_DEFAULT_TAGS,
222 if (!se_acl) { 231 sizeof(struct ft_cmd),
223 transport_free_session(sess->se_sess); 232 TARGET_PROT_NORMAL, &initiatorname[0],
233 sess, ft_sess_alloc_cb);
234 if (IS_ERR(sess->se_sess)) {
224 kfree(sess); 235 kfree(sess);
225 return NULL; 236 return NULL;
226 } 237 }
227 sess->se_sess->se_node_acl = se_acl;
228 sess->tport = tport;
229 sess->port_id = port_id;
230 kref_init(&sess->kref); /* ref for table entry */
231 hlist_add_head_rcu(&sess->hash, head);
232 tport->sess_count++;
233
234 pr_debug("port_id %x sess %p\n", port_id, sess);
235
236 transport_register_session(&tport->tpg->se_tpg, se_acl,
237 sess->se_sess, sess);
238 return sess; 238 return sess;
239} 239}
240 240
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index dfb733047a4c..2ace0295408e 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -41,13 +41,6 @@ static inline struct f_uas *to_f_uas(struct usb_function *f)
41 return container_of(f, struct f_uas, function); 41 return container_of(f, struct f_uas, function);
42} 42}
43 43
44static void usbg_cmd_release(struct kref *);
45
46static inline void usbg_cleanup_cmd(struct usbg_cmd *cmd)
47{
48 kref_put(&cmd->ref, usbg_cmd_release);
49}
50
51/* Start bot.c code */ 44/* Start bot.c code */
52 45
53static int bot_enqueue_cmd_cbw(struct f_uas *fu) 46static int bot_enqueue_cmd_cbw(struct f_uas *fu)
@@ -68,7 +61,7 @@ static void bot_status_complete(struct usb_ep *ep, struct usb_request *req)
68 struct usbg_cmd *cmd = req->context; 61 struct usbg_cmd *cmd = req->context;
69 struct f_uas *fu = cmd->fu; 62 struct f_uas *fu = cmd->fu;
70 63
71 usbg_cleanup_cmd(cmd); 64 transport_generic_free_cmd(&cmd->se_cmd, 0);
72 if (req->status < 0) { 65 if (req->status < 0) {
73 pr_err("ERR %s(%d)\n", __func__, __LINE__); 66 pr_err("ERR %s(%d)\n", __func__, __LINE__);
74 return; 67 return;
@@ -605,7 +598,7 @@ static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req)
605 break; 598 break;
606 599
607 case UASP_QUEUE_COMMAND: 600 case UASP_QUEUE_COMMAND:
608 usbg_cleanup_cmd(cmd); 601 transport_generic_free_cmd(&cmd->se_cmd, 0);
609 usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC); 602 usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
610 break; 603 break;
611 604
@@ -615,7 +608,7 @@ static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req)
615 return; 608 return;
616 609
617cleanup: 610cleanup:
618 usbg_cleanup_cmd(cmd); 611 transport_generic_free_cmd(&cmd->se_cmd, 0);
619} 612}
620 613
621static int uasp_send_status_response(struct usbg_cmd *cmd) 614static int uasp_send_status_response(struct usbg_cmd *cmd)
@@ -977,7 +970,7 @@ static void usbg_data_write_cmpl(struct usb_ep *ep, struct usb_request *req)
977 return; 970 return;
978 971
979cleanup: 972cleanup:
980 usbg_cleanup_cmd(cmd); 973 transport_generic_free_cmd(&cmd->se_cmd, 0);
981} 974}
982 975
983static int usbg_prepare_w_request(struct usbg_cmd *cmd, struct usb_request *req) 976static int usbg_prepare_w_request(struct usbg_cmd *cmd, struct usb_request *req)
@@ -1046,7 +1039,7 @@ static void usbg_cmd_work(struct work_struct *work)
1046 struct se_cmd *se_cmd; 1039 struct se_cmd *se_cmd;
1047 struct tcm_usbg_nexus *tv_nexus; 1040 struct tcm_usbg_nexus *tv_nexus;
1048 struct usbg_tpg *tpg; 1041 struct usbg_tpg *tpg;
1049 int dir; 1042 int dir, flags = (TARGET_SCF_UNKNOWN_SIZE | TARGET_SCF_ACK_KREF);
1050 1043
1051 se_cmd = &cmd->se_cmd; 1044 se_cmd = &cmd->se_cmd;
1052 tpg = cmd->fu->tpg; 1045 tpg = cmd->fu->tpg;
@@ -1060,9 +1053,9 @@ static void usbg_cmd_work(struct work_struct *work)
1060 goto out; 1053 goto out;
1061 } 1054 }
1062 1055
1063 if (target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess, 1056 if (target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess, cmd->cmd_buf,
1064 cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun, 1057 cmd->sense_iu.sense, cmd->unpacked_lun, 0,
1065 0, cmd->prio_attr, dir, TARGET_SCF_UNKNOWN_SIZE) < 0) 1058 cmd->prio_attr, dir, flags) < 0)
1066 goto out; 1059 goto out;
1067 1060
1068 return; 1061 return;
@@ -1070,42 +1063,64 @@ static void usbg_cmd_work(struct work_struct *work)
1070out: 1063out:
1071 transport_send_check_condition_and_sense(se_cmd, 1064 transport_send_check_condition_and_sense(se_cmd,
1072 TCM_UNSUPPORTED_SCSI_OPCODE, 1); 1065 TCM_UNSUPPORTED_SCSI_OPCODE, 1);
1073 usbg_cleanup_cmd(cmd); 1066 transport_generic_free_cmd(&cmd->se_cmd, 0);
1074} 1067}
1075 1068
1069static struct usbg_cmd *usbg_get_cmd(struct f_uas *fu,
1070 struct tcm_usbg_nexus *tv_nexus, u32 scsi_tag)
1071{
1072 struct se_session *se_sess = tv_nexus->tvn_se_sess;
1073 struct usbg_cmd *cmd;
1074 int tag;
1075
1076 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
1077 if (tag < 0)
1078 return ERR_PTR(-ENOMEM);
1079
1080 cmd = &((struct usbg_cmd *)se_sess->sess_cmd_map)[tag];
1081 memset(cmd, 0, sizeof(*cmd));
1082 cmd->se_cmd.map_tag = tag;
1083 cmd->se_cmd.tag = cmd->tag = scsi_tag;
1084 cmd->fu = fu;
1085
1086 return cmd;
1087}
1088
1089static void usbg_release_cmd(struct se_cmd *);
1090
1076static int usbg_submit_command(struct f_uas *fu, 1091static int usbg_submit_command(struct f_uas *fu,
1077 void *cmdbuf, unsigned int len) 1092 void *cmdbuf, unsigned int len)
1078{ 1093{
1079 struct command_iu *cmd_iu = cmdbuf; 1094 struct command_iu *cmd_iu = cmdbuf;
1080 struct usbg_cmd *cmd; 1095 struct usbg_cmd *cmd;
1081 struct usbg_tpg *tpg; 1096 struct usbg_tpg *tpg = fu->tpg;
1082 struct tcm_usbg_nexus *tv_nexus; 1097 struct tcm_usbg_nexus *tv_nexus = tpg->tpg_nexus;
1083 u32 cmd_len; 1098 u32 cmd_len;
1099 u16 scsi_tag;
1084 1100
1085 if (cmd_iu->iu_id != IU_ID_COMMAND) { 1101 if (cmd_iu->iu_id != IU_ID_COMMAND) {
1086 pr_err("Unsupported type %d\n", cmd_iu->iu_id); 1102 pr_err("Unsupported type %d\n", cmd_iu->iu_id);
1087 return -EINVAL; 1103 return -EINVAL;
1088 } 1104 }
1089 1105
1090 cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); 1106 tv_nexus = tpg->tpg_nexus;
1091 if (!cmd) 1107 if (!tv_nexus) {
1092 return -ENOMEM; 1108 pr_err("Missing nexus, ignoring command\n");
1093 1109 return -EINVAL;
1094 cmd->fu = fu; 1110 }
1095
1096 /* XXX until I figure out why I can't free in on complete */
1097 kref_init(&cmd->ref);
1098 kref_get(&cmd->ref);
1099 1111
1100 tpg = fu->tpg;
1101 cmd_len = (cmd_iu->len & ~0x3) + 16; 1112 cmd_len = (cmd_iu->len & ~0x3) + 16;
1102 if (cmd_len > USBG_MAX_CMD) 1113 if (cmd_len > USBG_MAX_CMD)
1103 goto err; 1114 return -EINVAL;
1104 1115
1116 scsi_tag = be16_to_cpup(&cmd_iu->tag);
1117 cmd = usbg_get_cmd(fu, tv_nexus, scsi_tag);
1118 if (IS_ERR(cmd)) {
1119 pr_err("usbg_get_cmd failed\n");
1120 return -ENOMEM;
1121 }
1105 memcpy(cmd->cmd_buf, cmd_iu->cdb, cmd_len); 1122 memcpy(cmd->cmd_buf, cmd_iu->cdb, cmd_len);
1106 1123
1107 cmd->tag = be16_to_cpup(&cmd_iu->tag);
1108 cmd->se_cmd.tag = cmd->tag;
1109 if (fu->flags & USBG_USE_STREAMS) { 1124 if (fu->flags & USBG_USE_STREAMS) {
1110 if (cmd->tag > UASP_SS_EP_COMP_NUM_STREAMS) 1125 if (cmd->tag > UASP_SS_EP_COMP_NUM_STREAMS)
1111 goto err; 1126 goto err;
@@ -1117,12 +1132,6 @@ static int usbg_submit_command(struct f_uas *fu,
1117 cmd->stream = &fu->stream[0]; 1132 cmd->stream = &fu->stream[0];
1118 } 1133 }
1119 1134
1120 tv_nexus = tpg->tpg_nexus;
1121 if (!tv_nexus) {
1122 pr_err("Missing nexus, ignoring command\n");
1123 goto err;
1124 }
1125
1126 switch (cmd_iu->prio_attr & 0x7) { 1135 switch (cmd_iu->prio_attr & 0x7) {
1127 case UAS_HEAD_TAG: 1136 case UAS_HEAD_TAG:
1128 cmd->prio_attr = TCM_HEAD_TAG; 1137 cmd->prio_attr = TCM_HEAD_TAG;
@@ -1148,7 +1157,7 @@ static int usbg_submit_command(struct f_uas *fu,
1148 1157
1149 return 0; 1158 return 0;
1150err: 1159err:
1151 kfree(cmd); 1160 usbg_release_cmd(&cmd->se_cmd);
1152 return -EINVAL; 1161 return -EINVAL;
1153} 1162}
1154 1163
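
The usbg_get_cmd()/usbg_release_cmd() pairing above is the same percpu_ida pattern this series rolls out across the fabric drivers: command descriptors live in the session's pre-allocated sess_cmd_map and are claimed by tag instead of being kzalloc()'d per I/O. A minimal sketch of the pattern, with hypothetical my_cmd/my_get_cmd/my_release_cmd names (the real struct usbg_cmd carries much more state), might look like:

    #include <linux/err.h>
    #include <linux/percpu_ida.h>
    #include <linux/string.h>
    #include <target/target_core_base.h>

    struct my_cmd {
            struct se_cmd se_cmd;           /* must embed the core descriptor */
    };

    static struct my_cmd *my_get_cmd(struct se_session *se_sess)
    {
            struct my_cmd *cmd;
            int tag;

            /* Non-blocking tag allocation, mirroring usbg_get_cmd() above. */
            tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
            if (tag < 0)
                    return ERR_PTR(-ENOMEM);

            /* Tags index the flat sess_cmd_map sized at session allocation. */
            cmd = &((struct my_cmd *)se_sess->sess_cmd_map)[tag];
            memset(cmd, 0, sizeof(*cmd));
            cmd->se_cmd.map_tag = tag;
            return cmd;
    }

    /* The matching ->release_cmd() only has to return the tag to the pool. */
    static void my_release_cmd(struct se_cmd *se_cmd)
    {
            percpu_ida_free(&se_cmd->se_sess->sess_tag_pool, se_cmd->map_tag);
    }

Freeing by tag rather than by pointer is also what lets usbg_submit_command()'s error path above funnel through usbg_release_cmd() instead of kfree().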
@@ -1182,7 +1191,7 @@ static void bot_cmd_work(struct work_struct *work)
1182out: 1191out:
1183 transport_send_check_condition_and_sense(se_cmd, 1192 transport_send_check_condition_and_sense(se_cmd,
1184 TCM_UNSUPPORTED_SCSI_OPCODE, 1); 1193 TCM_UNSUPPORTED_SCSI_OPCODE, 1);
1185 usbg_cleanup_cmd(cmd); 1194 transport_generic_free_cmd(&cmd->se_cmd, 0);
1186} 1195}
1187 1196
1188static int bot_submit_command(struct f_uas *fu, 1197static int bot_submit_command(struct f_uas *fu,
@@ -1190,7 +1199,7 @@ static int bot_submit_command(struct f_uas *fu,
1190{ 1199{
1191 struct bulk_cb_wrap *cbw = cmdbuf; 1200 struct bulk_cb_wrap *cbw = cmdbuf;
1192 struct usbg_cmd *cmd; 1201 struct usbg_cmd *cmd;
1193 struct usbg_tpg *tpg; 1202 struct usbg_tpg *tpg = fu->tpg;
1194 struct tcm_usbg_nexus *tv_nexus; 1203 struct tcm_usbg_nexus *tv_nexus;
1195 u32 cmd_len; 1204 u32 cmd_len;
1196 1205
@@ -1207,28 +1216,20 @@ static int bot_submit_command(struct f_uas *fu,
1207 if (cmd_len < 1 || cmd_len > 16) 1216 if (cmd_len < 1 || cmd_len > 16)
1208 return -EINVAL; 1217 return -EINVAL;
1209 1218
1210 cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
1211 if (!cmd)
1212 return -ENOMEM;
1213
1214 cmd->fu = fu;
1215
1216 /* XXX until I figure out why I can't free in on complete */
1217 kref_init(&cmd->ref);
1218 kref_get(&cmd->ref);
1219
1220 tpg = fu->tpg;
1221
1222 memcpy(cmd->cmd_buf, cbw->CDB, cmd_len);
1223
1224 cmd->bot_tag = cbw->Tag;
1225
1226 tv_nexus = tpg->tpg_nexus; 1219 tv_nexus = tpg->tpg_nexus;
1227 if (!tv_nexus) { 1220 if (!tv_nexus) {
1228 pr_err("Missing nexus, ignoring command\n"); 1221 pr_err("Missing nexus, ignoring command\n");
1229 goto err; 1222 return -ENODEV;
1230 } 1223 }
1231 1224
1225 cmd = usbg_get_cmd(fu, tv_nexus, cbw->Tag);
1226 if (IS_ERR(cmd)) {
1227 pr_err("usbg_get_cmd failed\n");
1228 return -ENOMEM;
1229 }
1230 memcpy(cmd->cmd_buf, cbw->CDB, cmd_len);
1231
1232 cmd->bot_tag = cbw->Tag;
1232 cmd->prio_attr = TCM_SIMPLE_TAG; 1233 cmd->prio_attr = TCM_SIMPLE_TAG;
1233 cmd->unpacked_lun = cbw->Lun; 1234 cmd->unpacked_lun = cbw->Lun;
1234 cmd->is_read = cbw->Flags & US_BULK_FLAG_IN ? 1 : 0; 1235 cmd->is_read = cbw->Flags & US_BULK_FLAG_IN ? 1 : 0;
@@ -1239,9 +1240,6 @@ static int bot_submit_command(struct f_uas *fu,
1239 queue_work(tpg->workqueue, &cmd->work); 1240 queue_work(tpg->workqueue, &cmd->work);
1240 1241
1241 return 0; 1242 return 0;
1242err:
1243 kfree(cmd);
1244 return -EINVAL;
1245} 1243}
1246 1244
1247/* Start fabric.c code */ 1245/* Start fabric.c code */
@@ -1282,20 +1280,14 @@ static u32 usbg_tpg_get_inst_index(struct se_portal_group *se_tpg)
1282 return 1; 1280 return 1;
1283} 1281}
1284 1282
1285static void usbg_cmd_release(struct kref *ref)
1286{
1287 struct usbg_cmd *cmd = container_of(ref, struct usbg_cmd,
1288 ref);
1289
1290 transport_generic_free_cmd(&cmd->se_cmd, 0);
1291}
1292
1293static void usbg_release_cmd(struct se_cmd *se_cmd) 1283static void usbg_release_cmd(struct se_cmd *se_cmd)
1294{ 1284{
1295 struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd, 1285 struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
1296 se_cmd); 1286 se_cmd);
1287 struct se_session *se_sess = se_cmd->se_sess;
1288
1297 kfree(cmd->data_buf); 1289 kfree(cmd->data_buf);
1298 kfree(cmd); 1290 percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
1299} 1291}
1300 1292
1301static int usbg_shutdown_session(struct se_session *se_sess) 1293static int usbg_shutdown_session(struct se_session *se_sess)
@@ -1579,55 +1571,48 @@ out:
1579 return ret; 1571 return ret;
1580} 1572}
1581 1573
1574static int usbg_alloc_sess_cb(struct se_portal_group *se_tpg,
1575 struct se_session *se_sess, void *p)
1576{
1577 struct usbg_tpg *tpg = container_of(se_tpg,
1578 struct usbg_tpg, se_tpg);
1579
1580 tpg->tpg_nexus = p;
1581 return 0;
1582}
1583
1582static int tcm_usbg_make_nexus(struct usbg_tpg *tpg, char *name) 1584static int tcm_usbg_make_nexus(struct usbg_tpg *tpg, char *name)
1583{ 1585{
1584 struct se_portal_group *se_tpg;
1585 struct tcm_usbg_nexus *tv_nexus; 1586 struct tcm_usbg_nexus *tv_nexus;
1586 int ret; 1587 int ret = 0;
1587 1588
1588 mutex_lock(&tpg->tpg_mutex); 1589 mutex_lock(&tpg->tpg_mutex);
1589 if (tpg->tpg_nexus) { 1590 if (tpg->tpg_nexus) {
1590 ret = -EEXIST; 1591 ret = -EEXIST;
1591 pr_debug("tpg->tpg_nexus already exists\n"); 1592 pr_debug("tpg->tpg_nexus already exists\n");
1592 goto err_unlock; 1593 goto out_unlock;
1593 } 1594 }
1594 se_tpg = &tpg->se_tpg;
1595 1595
1596 ret = -ENOMEM;
1597 tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL); 1596 tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
1598 if (!tv_nexus) 1597 if (!tv_nexus) {
1599 goto err_unlock; 1598 ret = -ENOMEM;
1600 tv_nexus->tvn_se_sess = transport_init_session(TARGET_PROT_NORMAL); 1599 goto out_unlock;
1601 if (IS_ERR(tv_nexus->tvn_se_sess)) 1600 }
1602 goto err_free;
1603 1601
1604 /* 1602 tv_nexus->tvn_se_sess = target_alloc_session(&tpg->se_tpg,
1605 * Since we are running in 'demo mode' this call with generate a 1603 USB_G_DEFAULT_SESSION_TAGS,
1606 * struct se_node_acl for the tcm_vhost struct se_portal_group with 1604 sizeof(struct usbg_cmd),
1607 * the SCSI Initiator port name of the passed configfs group 'name'. 1605 TARGET_PROT_NORMAL, name,
1608 */ 1606 tv_nexus, usbg_alloc_sess_cb);
1609 tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl( 1607 if (IS_ERR(tv_nexus->tvn_se_sess)) {
1610 se_tpg, name);
1611 if (!tv_nexus->tvn_se_sess->se_node_acl) {
1612#define MAKE_NEXUS_MSG "core_tpg_check_initiator_node_acl() failed for %s\n" 1608#define MAKE_NEXUS_MSG "core_tpg_check_initiator_node_acl() failed for %s\n"
1613 pr_debug(MAKE_NEXUS_MSG, name); 1609 pr_debug(MAKE_NEXUS_MSG, name);
1614#undef MAKE_NEXUS_MSG 1610#undef MAKE_NEXUS_MSG
1615 goto err_session; 1611 ret = PTR_ERR(tv_nexus->tvn_se_sess);
1612 kfree(tv_nexus);
1616 } 1613 }
1617 /*
1618 * Now register the TCM vHost virtual I_T Nexus as active.
1619 */
1620 transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1621 tv_nexus->tvn_se_sess, tv_nexus);
1622 tpg->tpg_nexus = tv_nexus;
1623 mutex_unlock(&tpg->tpg_mutex);
1624 return 0;
1625 1614
1626err_session: 1615out_unlock:
1627 transport_free_session(tv_nexus->tvn_se_sess);
1628err_free:
1629 kfree(tv_nexus);
1630err_unlock:
1631 mutex_unlock(&tpg->tpg_mutex); 1616 mutex_unlock(&tpg->tpg_mutex);
1632 return ret; 1617 return ret;
1633} 1618}
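
target_alloc_session() is the helper the rest of this series converts every fabric driver to; the hunk above shrinks tcm_usbg_make_nexus() because session-tag setup, the demo-mode node ACL lookup and session registration now happen inside the helper, with the fabric callback run in between. The following is only a sketch of the open-coded sequence being removed here, not the in-tree implementation, which also covers the tag_num == 0 case and chooses its own error codes:

    #include <linux/err.h>
    #include <target/target_core_base.h>
    #include <target/target_core_fabric.h>

    static struct se_session *sketch_alloc_session(struct se_portal_group *tpg,
                    unsigned int tag_num, unsigned int tag_size,
                    enum target_prot_op prot_op, const char *initiatorname,
                    void *private,
                    int (*callback)(struct se_portal_group *,
                                    struct se_session *, void *))
    {
            struct se_session *sess;
            int rc;

            /* Allocate the session plus sess_cmd_map/sess_tag_pool for tag_num tags. */
            sess = transport_init_session_tags(tag_num, tag_size, prot_op);
            if (IS_ERR(sess))
                    return sess;

            /* Demo-mode node ACL lookup for the passed initiator name. */
            sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg,
                            (unsigned char *)initiatorname);
            if (!sess->se_node_acl) {
                    transport_free_session(sess);
                    return ERR_PTR(-EACCES);        /* error code illustrative */
            }

            /* Fabric hook: tag pool exists, session is not yet registered. */
            if (callback) {
                    rc = callback(tpg, sess, private);
                    if (rc) {
                            transport_free_session(sess);
                            return ERR_PTR(rc);
                    }
            }

            transport_register_session(tpg, sess->se_node_acl, sess, private);
            return sess;
    }

With registration folded into the helper, the only cleanup left to tcm_usbg_make_nexus() on failure is freeing its own tv_nexus, which is exactly what the new out_unlock path does.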
@@ -1735,11 +1720,7 @@ static void usbg_port_unlink(struct se_portal_group *se_tpg,
1735 1720
1736static int usbg_check_stop_free(struct se_cmd *se_cmd) 1721static int usbg_check_stop_free(struct se_cmd *se_cmd)
1737{ 1722{
1738 struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd, 1723 return target_put_sess_cmd(se_cmd);
1739 se_cmd);
1740
1741 kref_put(&cmd->ref, usbg_cmd_release);
1742 return 1;
1743} 1724}
1744 1725
1745static const struct target_core_fabric_ops usbg_ops = { 1726static const struct target_core_fabric_ops usbg_ops = {
diff --git a/drivers/usb/gadget/function/tcm.h b/drivers/usb/gadget/function/tcm.h
index b75c6f3e1980..a27e6e34db0b 100644
--- a/drivers/usb/gadget/function/tcm.h
+++ b/drivers/usb/gadget/function/tcm.h
@@ -23,6 +23,8 @@ enum {
23#define USB_G_ALT_INT_BBB 0 23#define USB_G_ALT_INT_BBB 0
24#define USB_G_ALT_INT_UAS 1 24#define USB_G_ALT_INT_UAS 1
25 25
26#define USB_G_DEFAULT_SESSION_TAGS 128
27
26struct tcm_usbg_nexus { 28struct tcm_usbg_nexus {
27 struct se_session *tvn_se_sess; 29 struct se_session *tvn_se_sess;
28}; 30};
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index f898686cdd93..0e6fd556c982 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1664,8 +1664,7 @@ static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
1664 mutex_unlock(&vhost_scsi_mutex); 1664 mutex_unlock(&vhost_scsi_mutex);
1665} 1665}
1666 1666
1667static void vhost_scsi_free_cmd_map_res(struct vhost_scsi_nexus *nexus, 1667static void vhost_scsi_free_cmd_map_res(struct se_session *se_sess)
1668 struct se_session *se_sess)
1669{ 1668{
1670 struct vhost_scsi_cmd *tv_cmd; 1669 struct vhost_scsi_cmd *tv_cmd;
1671 unsigned int i; 1670 unsigned int i;
@@ -1721,98 +1720,82 @@ static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
1721 NULL, 1720 NULL,
1722}; 1721};
1723 1722
1724static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg, 1723static int vhost_scsi_nexus_cb(struct se_portal_group *se_tpg,
1725 const char *name) 1724 struct se_session *se_sess, void *p)
1726{ 1725{
1727 struct se_portal_group *se_tpg;
1728 struct se_session *se_sess;
1729 struct vhost_scsi_nexus *tv_nexus;
1730 struct vhost_scsi_cmd *tv_cmd; 1726 struct vhost_scsi_cmd *tv_cmd;
1731 unsigned int i; 1727 unsigned int i;
1732 1728
1733 mutex_lock(&tpg->tv_tpg_mutex);
1734 if (tpg->tpg_nexus) {
1735 mutex_unlock(&tpg->tv_tpg_mutex);
1736 pr_debug("tpg->tpg_nexus already exists\n");
1737 return -EEXIST;
1738 }
1739 se_tpg = &tpg->se_tpg;
1740
1741 tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL);
1742 if (!tv_nexus) {
1743 mutex_unlock(&tpg->tv_tpg_mutex);
1744 pr_err("Unable to allocate struct vhost_scsi_nexus\n");
1745 return -ENOMEM;
1746 }
1747 /*
1748 * Initialize the struct se_session pointer and setup tagpool
1749 * for struct vhost_scsi_cmd descriptors
1750 */
1751 tv_nexus->tvn_se_sess = transport_init_session_tags(
1752 VHOST_SCSI_DEFAULT_TAGS,
1753 sizeof(struct vhost_scsi_cmd),
1754 TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
1755 if (IS_ERR(tv_nexus->tvn_se_sess)) {
1756 mutex_unlock(&tpg->tv_tpg_mutex);
1757 kfree(tv_nexus);
1758 return -ENOMEM;
1759 }
1760 se_sess = tv_nexus->tvn_se_sess;
1761 for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) { 1729 for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
1762 tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i]; 1730 tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
1763 1731
1764 tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) * 1732 tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) *
1765 VHOST_SCSI_PREALLOC_SGLS, GFP_KERNEL); 1733 VHOST_SCSI_PREALLOC_SGLS, GFP_KERNEL);
1766 if (!tv_cmd->tvc_sgl) { 1734 if (!tv_cmd->tvc_sgl) {
1767 mutex_unlock(&tpg->tv_tpg_mutex);
1768 pr_err("Unable to allocate tv_cmd->tvc_sgl\n"); 1735 pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
1769 goto out; 1736 goto out;
1770 } 1737 }
1771 1738
1772 tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) * 1739 tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
1773 VHOST_SCSI_PREALLOC_UPAGES, GFP_KERNEL); 1740 VHOST_SCSI_PREALLOC_UPAGES, GFP_KERNEL);
1774 if (!tv_cmd->tvc_upages) { 1741 if (!tv_cmd->tvc_upages) {
1775 mutex_unlock(&tpg->tv_tpg_mutex);
1776 pr_err("Unable to allocate tv_cmd->tvc_upages\n"); 1742 pr_err("Unable to allocate tv_cmd->tvc_upages\n");
1777 goto out; 1743 goto out;
1778 } 1744 }
1779 1745
1780 tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) * 1746 tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) *
1781 VHOST_SCSI_PREALLOC_PROT_SGLS, GFP_KERNEL); 1747 VHOST_SCSI_PREALLOC_PROT_SGLS, GFP_KERNEL);
1782 if (!tv_cmd->tvc_prot_sgl) { 1748 if (!tv_cmd->tvc_prot_sgl) {
1783 mutex_unlock(&tpg->tv_tpg_mutex);
1784 pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n"); 1749 pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
1785 goto out; 1750 goto out;
1786 } 1751 }
1787 } 1752 }
1753 return 0;
1754out:
1755 vhost_scsi_free_cmd_map_res(se_sess);
1756 return -ENOMEM;
1757}
1758
1759static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
1760 const char *name)
1761{
1762 struct se_portal_group *se_tpg;
1763 struct vhost_scsi_nexus *tv_nexus;
1764
1765 mutex_lock(&tpg->tv_tpg_mutex);
1766 if (tpg->tpg_nexus) {
1767 mutex_unlock(&tpg->tv_tpg_mutex);
1768 pr_debug("tpg->tpg_nexus already exists\n");
1769 return -EEXIST;
1770 }
1771 se_tpg = &tpg->se_tpg;
1772
1773 tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL);
1774 if (!tv_nexus) {
1775 mutex_unlock(&tpg->tv_tpg_mutex);
1776 pr_err("Unable to allocate struct vhost_scsi_nexus\n");
1777 return -ENOMEM;
1778 }
1788 /* 1779 /*
1789 * Since we are running in 'demo mode' this call with generate a 1780 * Since we are running in 'demo mode' this call with generate a
1790 * struct se_node_acl for the vhost_scsi struct se_portal_group with 1781 * struct se_node_acl for the vhost_scsi struct se_portal_group with
1791 * the SCSI Initiator port name of the passed configfs group 'name'. 1782 * the SCSI Initiator port name of the passed configfs group 'name'.
1792 */ 1783 */
1793 tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl( 1784 tv_nexus->tvn_se_sess = target_alloc_session(&tpg->se_tpg,
1794 se_tpg, (unsigned char *)name); 1785 VHOST_SCSI_DEFAULT_TAGS,
1795 if (!tv_nexus->tvn_se_sess->se_node_acl) { 1786 sizeof(struct vhost_scsi_cmd),
1787 TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
1788 (unsigned char *)name, tv_nexus,
1789 vhost_scsi_nexus_cb);
1790 if (IS_ERR(tv_nexus->tvn_se_sess)) {
1796 mutex_unlock(&tpg->tv_tpg_mutex); 1791 mutex_unlock(&tpg->tv_tpg_mutex);
1797 pr_debug("core_tpg_check_initiator_node_acl() failed" 1792 kfree(tv_nexus);
1798 " for %s\n", name); 1793 return -ENOMEM;
1799 goto out;
1800 } 1794 }
1801 /*
1802 * Now register the TCM vhost virtual I_T Nexus as active.
1803 */
1804 transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1805 tv_nexus->tvn_se_sess, tv_nexus);
1806 tpg->tpg_nexus = tv_nexus; 1795 tpg->tpg_nexus = tv_nexus;
1807 1796
1808 mutex_unlock(&tpg->tv_tpg_mutex); 1797 mutex_unlock(&tpg->tv_tpg_mutex);
1809 return 0; 1798 return 0;
1810
1811out:
1812 vhost_scsi_free_cmd_map_res(tv_nexus, se_sess);
1813 transport_free_session(se_sess);
1814 kfree(tv_nexus);
1815 return -ENOMEM;
1816} 1799}
1817 1800
1818static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg) 1801static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
@@ -1853,7 +1836,7 @@ static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
1853 " %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport), 1836 " %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
1854 tv_nexus->tvn_se_sess->se_node_acl->initiatorname); 1837 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1855 1838
1856 vhost_scsi_free_cmd_map_res(tv_nexus, se_sess); 1839 vhost_scsi_free_cmd_map_res(se_sess);
1857 /* 1840 /*
1858 * Release the SCSI I_T Nexus to the emulated vhost Target Port 1841 * Release the SCSI I_T Nexus to the emulated vhost Target Port
1859 */ 1842 */
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index c46ee189466f..ff932624eaad 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -141,6 +141,8 @@ struct scsiback_tmr {
141 wait_queue_head_t tmr_wait; 141 wait_queue_head_t tmr_wait;
142}; 142};
143 143
144#define VSCSI_DEFAULT_SESSION_TAGS 128
145
144struct scsiback_nexus { 146struct scsiback_nexus {
145 /* Pointer to TCM session for I_T Nexus */ 147 /* Pointer to TCM session for I_T Nexus */
146 struct se_session *tvn_se_sess; 148 struct se_session *tvn_se_sess;
@@ -190,7 +192,6 @@ module_param_named(max_buffer_pages, scsiback_max_buffer_pages, int, 0644);
190MODULE_PARM_DESC(max_buffer_pages, 192MODULE_PARM_DESC(max_buffer_pages,
191"Maximum number of free pages to keep in backend buffer"); 193"Maximum number of free pages to keep in backend buffer");
192 194
193static struct kmem_cache *scsiback_cachep;
194static DEFINE_SPINLOCK(free_pages_lock); 195static DEFINE_SPINLOCK(free_pages_lock);
195static int free_pages_num; 196static int free_pages_num;
196static LIST_HEAD(scsiback_free_pages); 197static LIST_HEAD(scsiback_free_pages);
@@ -321,11 +322,11 @@ static void scsiback_free_translation_entry(struct kref *kref)
321 kfree(entry); 322 kfree(entry);
322} 323}
323 324
324static void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result, 325static void scsiback_send_response(struct vscsibk_info *info,
325 uint32_t resid, struct vscsibk_pend *pending_req) 326 char *sense_buffer, int32_t result, uint32_t resid,
327 uint16_t rqid)
326{ 328{
327 struct vscsiif_response *ring_res; 329 struct vscsiif_response *ring_res;
328 struct vscsibk_info *info = pending_req->info;
329 int notify; 330 int notify;
330 struct scsi_sense_hdr sshdr; 331 struct scsi_sense_hdr sshdr;
331 unsigned long flags; 332 unsigned long flags;
@@ -337,7 +338,7 @@ static void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
337 info->ring.rsp_prod_pvt++; 338 info->ring.rsp_prod_pvt++;
338 339
339 ring_res->rslt = result; 340 ring_res->rslt = result;
340 ring_res->rqid = pending_req->rqid; 341 ring_res->rqid = rqid;
341 342
342 if (sense_buffer != NULL && 343 if (sense_buffer != NULL &&
343 scsi_normalize_sense(sense_buffer, VSCSIIF_SENSE_BUFFERSIZE, 344 scsi_normalize_sense(sense_buffer, VSCSIIF_SENSE_BUFFERSIZE,
@@ -357,6 +358,13 @@ static void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
357 358
358 if (notify) 359 if (notify)
359 notify_remote_via_irq(info->irq); 360 notify_remote_via_irq(info->irq);
361}
362
363static void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
364 uint32_t resid, struct vscsibk_pend *pending_req)
365{
366 scsiback_send_response(pending_req->info, sense_buffer, result,
367 resid, pending_req->rqid);
360 368
361 if (pending_req->v2p) 369 if (pending_req->v2p)
362 kref_put(&pending_req->v2p->kref, 370 kref_put(&pending_req->v2p->kref,
@@ -380,6 +388,12 @@ static void scsiback_cmd_done(struct vscsibk_pend *pending_req)
380 scsiback_fast_flush_area(pending_req); 388 scsiback_fast_flush_area(pending_req);
381 scsiback_do_resp_with_sense(sense_buffer, errors, resid, pending_req); 389 scsiback_do_resp_with_sense(sense_buffer, errors, resid, pending_req);
382 scsiback_put(info); 390 scsiback_put(info);
391 /*
392 * Drop the extra KREF_ACK reference taken by target_submit_cmd_map_sgls()
393 * ahead of scsiback_check_stop_free() -> transport_generic_free_cmd()
394 * final se_cmd->cmd_kref put.
395 */
396 target_put_sess_cmd(&pending_req->se_cmd);
383} 397}
384 398
385static void scsiback_cmd_exec(struct vscsibk_pend *pending_req) 399static void scsiback_cmd_exec(struct vscsibk_pend *pending_req)
@@ -388,16 +402,12 @@ static void scsiback_cmd_exec(struct vscsibk_pend *pending_req)
388 struct se_session *sess = pending_req->v2p->tpg->tpg_nexus->tvn_se_sess; 402 struct se_session *sess = pending_req->v2p->tpg->tpg_nexus->tvn_se_sess;
389 int rc; 403 int rc;
390 404
391 memset(pending_req->sense_buffer, 0, VSCSIIF_SENSE_BUFFERSIZE);
392
393 memset(se_cmd, 0, sizeof(*se_cmd));
394
395 scsiback_get(pending_req->info); 405 scsiback_get(pending_req->info);
396 se_cmd->tag = pending_req->rqid; 406 se_cmd->tag = pending_req->rqid;
397 rc = target_submit_cmd_map_sgls(se_cmd, sess, pending_req->cmnd, 407 rc = target_submit_cmd_map_sgls(se_cmd, sess, pending_req->cmnd,
398 pending_req->sense_buffer, pending_req->v2p->lun, 408 pending_req->sense_buffer, pending_req->v2p->lun,
399 pending_req->data_len, 0, 409 pending_req->data_len, 0,
400 pending_req->sc_data_direction, 0, 410 pending_req->sc_data_direction, TARGET_SCF_ACK_KREF,
401 pending_req->sgl, pending_req->n_sg, 411 pending_req->sgl, pending_req->n_sg,
402 NULL, 0, NULL, 0); 412 NULL, 0, NULL, 0);
403 if (rc < 0) { 413 if (rc < 0) {
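
TARGET_SCF_ACK_KREF is what lets scsiback drop its private kmem_cache lifetime: target_submit_cmd_map_sgls() takes an extra reference on se_cmd->cmd_kref on the fabric's behalf, so the descriptor cannot be released until both the core and the response path have let go. A sketch of the two puts as they land in the hunks above and below, using hypothetical my_* wrappers (the final put ends up in ->release_cmd(), which returns the percpu_ida tag):

    #include <target/target_core_base.h>
    #include <target/target_core_fabric.h>

    /* Response side: runs once the reply has gone back to the frontend. */
    static void my_cmd_done(struct se_cmd *se_cmd)
    {
            /* Drop the extra reference taken by TARGET_SCF_ACK_KREF at submit. */
            target_put_sess_cmd(se_cmd);
    }

    /* Core side: ->check_stop_free(), called when the target core is finished. */
    static int my_check_stop_free(struct se_cmd *se_cmd)
    {
            /* Drops the base reference; whichever put is last triggers release. */
            return transport_generic_free_cmd(se_cmd, 0);
    }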
@@ -586,45 +596,40 @@ static void scsiback_disconnect(struct vscsibk_info *info)
586static void scsiback_device_action(struct vscsibk_pend *pending_req, 596static void scsiback_device_action(struct vscsibk_pend *pending_req,
587 enum tcm_tmreq_table act, int tag) 597 enum tcm_tmreq_table act, int tag)
588{ 598{
589 int rc, err = FAILED;
590 struct scsiback_tpg *tpg = pending_req->v2p->tpg; 599 struct scsiback_tpg *tpg = pending_req->v2p->tpg;
600 struct scsiback_nexus *nexus = tpg->tpg_nexus;
591 struct se_cmd *se_cmd = &pending_req->se_cmd; 601 struct se_cmd *se_cmd = &pending_req->se_cmd;
592 struct scsiback_tmr *tmr; 602 struct scsiback_tmr *tmr;
603 u64 unpacked_lun = pending_req->v2p->lun;
604 int rc, err = FAILED;
593 605
594 tmr = kzalloc(sizeof(struct scsiback_tmr), GFP_KERNEL); 606 tmr = kzalloc(sizeof(struct scsiback_tmr), GFP_KERNEL);
595 if (!tmr) 607 if (!tmr) {
596 goto out; 608 target_put_sess_cmd(se_cmd);
609 goto err;
610 }
597 611
598 init_waitqueue_head(&tmr->tmr_wait); 612 init_waitqueue_head(&tmr->tmr_wait);
599 613
600 transport_init_se_cmd(se_cmd, tpg->se_tpg.se_tpg_tfo, 614 rc = target_submit_tmr(&pending_req->se_cmd, nexus->tvn_se_sess,
601 tpg->tpg_nexus->tvn_se_sess, 0, DMA_NONE, TCM_SIMPLE_TAG, 615 &pending_req->sense_buffer[0],
602 &pending_req->sense_buffer[0]); 616 unpacked_lun, tmr, act, GFP_KERNEL,
603 617 tag, TARGET_SCF_ACK_KREF);
604 rc = core_tmr_alloc_req(se_cmd, tmr, act, GFP_KERNEL); 618 if (rc)
605 if (rc < 0) 619 goto err;
606 goto out;
607
608 se_cmd->se_tmr_req->ref_task_tag = tag;
609
610 if (transport_lookup_tmr_lun(se_cmd, pending_req->v2p->lun) < 0)
611 goto out;
612 620
613 transport_generic_handle_tmr(se_cmd);
614 wait_event(tmr->tmr_wait, atomic_read(&tmr->tmr_complete)); 621 wait_event(tmr->tmr_wait, atomic_read(&tmr->tmr_complete));
615 622
616 err = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ? 623 err = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ?
617 SUCCESS : FAILED; 624 SUCCESS : FAILED;
618 625
619out: 626 scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
620 if (tmr) { 627 transport_generic_free_cmd(&pending_req->se_cmd, 1);
621 transport_generic_free_cmd(&pending_req->se_cmd, 1); 628 return;
629err:
630 if (tmr)
622 kfree(tmr); 631 kfree(tmr);
623 }
624
625 scsiback_do_resp_with_sense(NULL, err, 0, pending_req); 632 scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
626
627 kmem_cache_free(scsiback_cachep, pending_req);
628} 633}
629 634
630/* 635/*
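
scsiback_device_action() previously open-coded the TMR setup (transport_init_se_cmd(), core_tmr_alloc_req(), setting ref_task_tag, transport_lookup_tmr_lun() and transport_generic_handle_tmr()); target_submit_tmr() collapses that into a single call, and with TARGET_SCF_ACK_KREF the same two-put teardown applies as for regular commands. The call from the hunk above, annotated as a reading aid (the argument comments are editorial, not from the source):

    rc = target_submit_tmr(&pending_req->se_cmd,    /* embedded se_cmd */
                    nexus->tvn_se_sess,             /* I_T nexus session */
                    &pending_req->sense_buffer[0],  /* fabric sense buffer */
                    unpacked_lun,                   /* LUN the TMR acts on */
                    tmr,                            /* fabric_tmr_ptr, freed in ->release_cmd() */
                    act,                            /* enum tcm_tmreq_table, e.g. abort/LUN reset */
                    GFP_KERNEL,
                    tag,                            /* referenced task tag */
                    TARGET_SCF_ACK_KREF);           /* extra cmd_kref for the fabric */

The waiter is still woken from the fabric's queue_tm_rsp callback (not shown in this hunk) once the core posts the TMR response.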
@@ -653,15 +658,53 @@ out:
653 return entry; 658 return entry;
654} 659}
655 660
656static int prepare_pending_reqs(struct vscsibk_info *info, 661static struct vscsibk_pend *scsiback_get_pend_req(struct vscsiif_back_ring *ring,
657 struct vscsiif_request *ring_req, 662 struct v2p_entry *v2p)
658 struct vscsibk_pend *pending_req) 663{
664 struct scsiback_tpg *tpg = v2p->tpg;
665 struct scsiback_nexus *nexus = tpg->tpg_nexus;
666 struct se_session *se_sess = nexus->tvn_se_sess;
667 struct vscsibk_pend *req;
668 int tag, i;
669
670 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
671 if (tag < 0) {
672 pr_err("Unable to obtain tag for vscsiif_request\n");
673 return ERR_PTR(-ENOMEM);
674 }
675
676 req = &((struct vscsibk_pend *)se_sess->sess_cmd_map)[tag];
677 memset(req, 0, sizeof(*req));
678 req->se_cmd.map_tag = tag;
679
680 for (i = 0; i < VSCSI_MAX_GRANTS; i++)
681 req->grant_handles[i] = SCSIBACK_INVALID_HANDLE;
682
683 return req;
684}
685
686static struct vscsibk_pend *prepare_pending_reqs(struct vscsibk_info *info,
687 struct vscsiif_back_ring *ring,
688 struct vscsiif_request *ring_req)
659{ 689{
690 struct vscsibk_pend *pending_req;
660 struct v2p_entry *v2p; 691 struct v2p_entry *v2p;
661 struct ids_tuple vir; 692 struct ids_tuple vir;
662 693
663 pending_req->rqid = ring_req->rqid; 694 /* request range check from frontend */
664 pending_req->info = info; 695 if ((ring_req->sc_data_direction != DMA_BIDIRECTIONAL) &&
696 (ring_req->sc_data_direction != DMA_TO_DEVICE) &&
697 (ring_req->sc_data_direction != DMA_FROM_DEVICE) &&
698 (ring_req->sc_data_direction != DMA_NONE)) {
699 pr_debug("invalid parameter data_dir = %d\n",
700 ring_req->sc_data_direction);
701 return ERR_PTR(-EINVAL);
702 }
703 if (ring_req->cmd_len > VSCSIIF_MAX_COMMAND_SIZE) {
704 pr_debug("invalid parameter cmd_len = %d\n",
705 ring_req->cmd_len);
706 return ERR_PTR(-EINVAL);
707 }
665 708
666 vir.chn = ring_req->channel; 709 vir.chn = ring_req->channel;
667 vir.tgt = ring_req->id; 710 vir.tgt = ring_req->id;
@@ -669,33 +712,24 @@ static int prepare_pending_reqs(struct vscsibk_info *info,
669 712
670 v2p = scsiback_do_translation(info, &vir); 713 v2p = scsiback_do_translation(info, &vir);
671 if (!v2p) { 714 if (!v2p) {
672 pending_req->v2p = NULL;
673 pr_debug("the v2p of (chn:%d, tgt:%d, lun:%d) doesn't exist.\n", 715 pr_debug("the v2p of (chn:%d, tgt:%d, lun:%d) doesn't exist.\n",
674 vir.chn, vir.tgt, vir.lun); 716 vir.chn, vir.tgt, vir.lun);
675 return -ENODEV; 717 return ERR_PTR(-ENODEV);
676 } 718 }
677 pending_req->v2p = v2p;
678 719
679 /* request range check from frontend */ 720 pending_req = scsiback_get_pend_req(ring, v2p);
680 pending_req->sc_data_direction = ring_req->sc_data_direction; 721 if (IS_ERR(pending_req)) {
681 if ((pending_req->sc_data_direction != DMA_BIDIRECTIONAL) && 722 kref_put(&v2p->kref, scsiback_free_translation_entry);
682 (pending_req->sc_data_direction != DMA_TO_DEVICE) && 723 return ERR_PTR(-ENOMEM);
683 (pending_req->sc_data_direction != DMA_FROM_DEVICE) &&
684 (pending_req->sc_data_direction != DMA_NONE)) {
685 pr_debug("invalid parameter data_dir = %d\n",
686 pending_req->sc_data_direction);
687 return -EINVAL;
688 } 724 }
689 725 pending_req->rqid = ring_req->rqid;
726 pending_req->info = info;
727 pending_req->v2p = v2p;
728 pending_req->sc_data_direction = ring_req->sc_data_direction;
690 pending_req->cmd_len = ring_req->cmd_len; 729 pending_req->cmd_len = ring_req->cmd_len;
691 if (pending_req->cmd_len > VSCSIIF_MAX_COMMAND_SIZE) {
692 pr_debug("invalid parameter cmd_len = %d\n",
693 pending_req->cmd_len);
694 return -EINVAL;
695 }
696 memcpy(pending_req->cmnd, ring_req->cmnd, pending_req->cmd_len); 730 memcpy(pending_req->cmnd, ring_req->cmnd, pending_req->cmd_len);
697 731
698 return 0; 732 return pending_req;
699} 733}
700 734
701static int scsiback_do_cmd_fn(struct vscsibk_info *info) 735static int scsiback_do_cmd_fn(struct vscsibk_info *info)
@@ -704,7 +738,7 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
704 struct vscsiif_request ring_req; 738 struct vscsiif_request ring_req;
705 struct vscsibk_pend *pending_req; 739 struct vscsibk_pend *pending_req;
706 RING_IDX rc, rp; 740 RING_IDX rc, rp;
707 int err, more_to_do; 741 int more_to_do;
708 uint32_t result; 742 uint32_t result;
709 743
710 rc = ring->req_cons; 744 rc = ring->req_cons;
@@ -722,16 +756,13 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
722 while ((rc != rp)) { 756 while ((rc != rp)) {
723 if (RING_REQUEST_CONS_OVERFLOW(ring, rc)) 757 if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
724 break; 758 break;
725 pending_req = kmem_cache_alloc(scsiback_cachep, GFP_KERNEL);
726 if (!pending_req)
727 return 1;
728 759
729 RING_COPY_REQUEST(ring, rc, &ring_req); 760 RING_COPY_REQUEST(ring, rc, &ring_req);
730 ring->req_cons = ++rc; 761 ring->req_cons = ++rc;
731 762
732 err = prepare_pending_reqs(info, &ring_req, pending_req); 763 pending_req = prepare_pending_reqs(info, ring, &ring_req);
733 if (err) { 764 if (IS_ERR(pending_req)) {
734 switch (err) { 765 switch (PTR_ERR(pending_req)) {
735 case -ENODEV: 766 case -ENODEV:
736 result = DID_NO_CONNECT; 767 result = DID_NO_CONNECT;
737 break; 768 break;
@@ -739,9 +770,8 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
739 result = DRIVER_ERROR; 770 result = DRIVER_ERROR;
740 break; 771 break;
741 } 772 }
742 scsiback_do_resp_with_sense(NULL, result << 24, 0, 773 scsiback_send_response(info, NULL, result << 24, 0,
743 pending_req); 774 ring_req.rqid);
744 kmem_cache_free(scsiback_cachep, pending_req);
745 return 1; 775 return 1;
746 } 776 }
747 777
@@ -750,8 +780,8 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
750 if (scsiback_gnttab_data_map(&ring_req, pending_req)) { 780 if (scsiback_gnttab_data_map(&ring_req, pending_req)) {
751 scsiback_fast_flush_area(pending_req); 781 scsiback_fast_flush_area(pending_req);
752 scsiback_do_resp_with_sense(NULL, 782 scsiback_do_resp_with_sense(NULL,
753 DRIVER_ERROR << 24, 0, pending_req); 783 DRIVER_ERROR << 24, 0, pending_req);
754 kmem_cache_free(scsiback_cachep, pending_req); 784 transport_generic_free_cmd(&pending_req->se_cmd, 0);
755 } else { 785 } else {
756 scsiback_cmd_exec(pending_req); 786 scsiback_cmd_exec(pending_req);
757 } 787 }
@@ -765,9 +795,9 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
765 break; 795 break;
766 default: 796 default:
767 pr_err_ratelimited("invalid request\n"); 797 pr_err_ratelimited("invalid request\n");
768 scsiback_do_resp_with_sense(NULL, DRIVER_ERROR << 24, 798 scsiback_do_resp_with_sense(NULL, DRIVER_ERROR << 24, 0,
769 0, pending_req); 799 pending_req);
770 kmem_cache_free(scsiback_cachep, pending_req); 800 transport_generic_free_cmd(&pending_req->se_cmd, 0);
771 break; 801 break;
772 } 802 }
773 803
@@ -1353,24 +1383,20 @@ static u32 scsiback_tpg_get_inst_index(struct se_portal_group *se_tpg)
1353 1383
1354static int scsiback_check_stop_free(struct se_cmd *se_cmd) 1384static int scsiback_check_stop_free(struct se_cmd *se_cmd)
1355{ 1385{
1356 /* 1386 return transport_generic_free_cmd(se_cmd, 0);
1357 * Do not release struct se_cmd's containing a valid TMR pointer.
1358 * These will be released directly in scsiback_device_action()
1359 * with transport_generic_free_cmd().
1360 */
1361 if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
1362 return 0;
1363
1364 transport_generic_free_cmd(se_cmd, 0);
1365 return 1;
1366} 1387}
1367 1388
1368static void scsiback_release_cmd(struct se_cmd *se_cmd) 1389static void scsiback_release_cmd(struct se_cmd *se_cmd)
1369{ 1390{
1370 struct vscsibk_pend *pending_req = container_of(se_cmd, 1391 struct se_session *se_sess = se_cmd->se_sess;
1371 struct vscsibk_pend, se_cmd); 1392 struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
1393
1394 if (se_tmr && se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
1395 struct scsiback_tmr *tmr = se_tmr->fabric_tmr_ptr;
1396 kfree(tmr);
1397 }
1372 1398
1373 kmem_cache_free(scsiback_cachep, pending_req); 1399 percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
1374} 1400}
1375 1401
1376static int scsiback_shutdown_session(struct se_session *se_sess) 1402static int scsiback_shutdown_session(struct se_session *se_sess)
@@ -1494,61 +1520,49 @@ static struct configfs_attribute *scsiback_param_attrs[] = {
1494 NULL, 1520 NULL,
1495}; 1521};
1496 1522
1523static int scsiback_alloc_sess_cb(struct se_portal_group *se_tpg,
1524 struct se_session *se_sess, void *p)
1525{
1526 struct scsiback_tpg *tpg = container_of(se_tpg,
1527 struct scsiback_tpg, se_tpg);
1528
1529 tpg->tpg_nexus = p;
1530 return 0;
1531}
1532
1497static int scsiback_make_nexus(struct scsiback_tpg *tpg, 1533static int scsiback_make_nexus(struct scsiback_tpg *tpg,
1498 const char *name) 1534 const char *name)
1499{ 1535{
1500 struct se_portal_group *se_tpg;
1501 struct se_session *se_sess;
1502 struct scsiback_nexus *tv_nexus; 1536 struct scsiback_nexus *tv_nexus;
1537 int ret = 0;
1503 1538
1504 mutex_lock(&tpg->tv_tpg_mutex); 1539 mutex_lock(&tpg->tv_tpg_mutex);
1505 if (tpg->tpg_nexus) { 1540 if (tpg->tpg_nexus) {
1506 mutex_unlock(&tpg->tv_tpg_mutex);
1507 pr_debug("tpg->tpg_nexus already exists\n"); 1541 pr_debug("tpg->tpg_nexus already exists\n");
1508 return -EEXIST; 1542 ret = -EEXIST;
1543 goto out_unlock;
1509 } 1544 }
1510 se_tpg = &tpg->se_tpg;
1511 1545
1512 tv_nexus = kzalloc(sizeof(struct scsiback_nexus), GFP_KERNEL); 1546 tv_nexus = kzalloc(sizeof(struct scsiback_nexus), GFP_KERNEL);
1513 if (!tv_nexus) { 1547 if (!tv_nexus) {
1514 mutex_unlock(&tpg->tv_tpg_mutex); 1548 ret = -ENOMEM;
1515 return -ENOMEM; 1549 goto out_unlock;
1516 } 1550 }
1517 /* 1551
1518 * Initialize the struct se_session pointer 1552 tv_nexus->tvn_se_sess = target_alloc_session(&tpg->se_tpg,
1519 */ 1553 VSCSI_DEFAULT_SESSION_TAGS,
1520 tv_nexus->tvn_se_sess = transport_init_session(TARGET_PROT_NORMAL); 1554 sizeof(struct vscsibk_pend),
1555 TARGET_PROT_NORMAL, name,
1556 tv_nexus, scsiback_alloc_sess_cb);
1521 if (IS_ERR(tv_nexus->tvn_se_sess)) { 1557 if (IS_ERR(tv_nexus->tvn_se_sess)) {
1522 mutex_unlock(&tpg->tv_tpg_mutex);
1523 kfree(tv_nexus); 1558 kfree(tv_nexus);
1524 return -ENOMEM; 1559 ret = -ENOMEM;
1560 goto out_unlock;
1525 } 1561 }
1526 se_sess = tv_nexus->tvn_se_sess;
1527 /*
1528 * Since we are running in 'demo mode' this call with generate a
1529 * struct se_node_acl for the scsiback struct se_portal_group with
1530 * the SCSI Initiator port name of the passed configfs group 'name'.
1531 */
1532 tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
1533 se_tpg, (unsigned char *)name);
1534 if (!tv_nexus->tvn_se_sess->se_node_acl) {
1535 mutex_unlock(&tpg->tv_tpg_mutex);
1536 pr_debug("core_tpg_check_initiator_node_acl() failed for %s\n",
1537 name);
1538 goto out;
1539 }
1540 /* Now register the TCM pvscsi virtual I_T Nexus as active. */
1541 transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1542 tv_nexus->tvn_se_sess, tv_nexus);
1543 tpg->tpg_nexus = tv_nexus;
1544 1562
1563out_unlock:
1545 mutex_unlock(&tpg->tv_tpg_mutex); 1564 mutex_unlock(&tpg->tv_tpg_mutex);
1546 return 0; 1565 return ret;
1547
1548out:
1549 transport_free_session(se_sess);
1550 kfree(tv_nexus);
1551 return -ENOMEM;
1552} 1566}
1553 1567
1554static int scsiback_drop_nexus(struct scsiback_tpg *tpg) 1568static int scsiback_drop_nexus(struct scsiback_tpg *tpg)
@@ -1866,16 +1880,6 @@ static struct xenbus_driver scsiback_driver = {
1866 .otherend_changed = scsiback_frontend_changed 1880 .otherend_changed = scsiback_frontend_changed
1867}; 1881};
1868 1882
1869static void scsiback_init_pend(void *p)
1870{
1871 struct vscsibk_pend *pend = p;
1872 int i;
1873
1874 memset(pend, 0, sizeof(*pend));
1875 for (i = 0; i < VSCSI_MAX_GRANTS; i++)
1876 pend->grant_handles[i] = SCSIBACK_INVALID_HANDLE;
1877}
1878
1879static int __init scsiback_init(void) 1883static int __init scsiback_init(void)
1880{ 1884{
1881 int ret; 1885 int ret;
@@ -1886,14 +1890,9 @@ static int __init scsiback_init(void)
1886 pr_debug("xen-pvscsi: fabric module %s on %s/%s on "UTS_RELEASE"\n", 1890 pr_debug("xen-pvscsi: fabric module %s on %s/%s on "UTS_RELEASE"\n",
1887 VSCSI_VERSION, utsname()->sysname, utsname()->machine); 1891 VSCSI_VERSION, utsname()->sysname, utsname()->machine);
1888 1892
1889 scsiback_cachep = kmem_cache_create("vscsiif_cache",
1890 sizeof(struct vscsibk_pend), 0, 0, scsiback_init_pend);
1891 if (!scsiback_cachep)
1892 return -ENOMEM;
1893
1894 ret = xenbus_register_backend(&scsiback_driver); 1893 ret = xenbus_register_backend(&scsiback_driver);
1895 if (ret) 1894 if (ret)
1896 goto out_cache_destroy; 1895 goto out;
1897 1896
1898 ret = target_register_template(&scsiback_ops); 1897 ret = target_register_template(&scsiback_ops);
1899 if (ret) 1898 if (ret)
@@ -1903,8 +1902,7 @@ static int __init scsiback_init(void)
1903 1902
1904out_unregister_xenbus: 1903out_unregister_xenbus:
1905 xenbus_unregister_driver(&scsiback_driver); 1904 xenbus_unregister_driver(&scsiback_driver);
1906out_cache_destroy: 1905out:
1907 kmem_cache_destroy(scsiback_cachep);
1908 pr_err("%s: error %d\n", __func__, ret); 1906 pr_err("%s: error %d\n", __func__, ret);
1909 return ret; 1907 return ret;
1910} 1908}
@@ -1920,7 +1918,6 @@ static void __exit scsiback_exit(void)
1920 } 1918 }
1921 target_unregister_template(&scsiback_ops); 1919 target_unregister_template(&scsiback_ops);
1922 xenbus_unregister_driver(&scsiback_driver); 1920 xenbus_unregister_driver(&scsiback_driver);
1923 kmem_cache_destroy(scsiback_cachep);
1924} 1921}
1925 1922
1926module_init(scsiback_init); 1923module_init(scsiback_init);
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 1b09cac06508..3e0dd86360a2 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -144,12 +144,6 @@ enum se_cmd_flags_table {
144 SCF_USE_CPUID = 0x00800000, 144 SCF_USE_CPUID = 0x00800000,
145}; 145};
146 146
147/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
148enum transport_lunflags_table {
149 TRANSPORT_LUNFLAGS_READ_ONLY = 0x01,
150 TRANSPORT_LUNFLAGS_READ_WRITE = 0x02,
151};
152
153/* 147/*
154 * Used by transport_send_check_condition_and_sense() 148 * Used by transport_send_check_condition_and_sense()
155 * to signal which ASC/ASCQ sense payload should be built. 149 * to signal which ASC/ASCQ sense payload should be built.
@@ -633,11 +627,10 @@ struct se_lun_acl {
633}; 627};
634 628
635struct se_dev_entry { 629struct se_dev_entry {
636 /* See transport_lunflags_table */
637 u64 mapped_lun; 630 u64 mapped_lun;
638 u64 pr_res_key; 631 u64 pr_res_key;
639 u64 creation_time; 632 u64 creation_time;
640 u32 lun_flags; 633 bool lun_access_ro;
641 u32 attach_count; 634 u32 attach_count;
642 atomic_long_t total_cmds; 635 atomic_long_t total_cmds;
643 atomic_long_t read_bytes; 636 atomic_long_t read_bytes;
@@ -711,7 +704,7 @@ struct se_lun {
711 u64 unpacked_lun; 704 u64 unpacked_lun;
712#define SE_LUN_LINK_MAGIC 0xffff7771 705#define SE_LUN_LINK_MAGIC 0xffff7771
713 u32 lun_link_magic; 706 u32 lun_link_magic;
714 u32 lun_access; 707 bool lun_access_ro;
715 u32 lun_index; 708 u32 lun_index;
716 709
717 /* RELATIVE TARGET PORT IDENTIFER */ 710 /* RELATIVE TARGET PORT IDENTIFER */
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 56653408f53b..685a51aa98cc 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -108,6 +108,12 @@ void target_unregister_template(const struct target_core_fabric_ops *fo);
108int target_depend_item(struct config_item *item); 108int target_depend_item(struct config_item *item);
109void target_undepend_item(struct config_item *item); 109void target_undepend_item(struct config_item *item);
110 110
111struct se_session *target_alloc_session(struct se_portal_group *,
112 unsigned int, unsigned int, enum target_prot_op prot_op,
113 const char *, void *,
114 int (*callback)(struct se_portal_group *,
115 struct se_session *, void *));
116
111struct se_session *transport_init_session(enum target_prot_op); 117struct se_session *transport_init_session(enum target_prot_op);
112int transport_alloc_session_tags(struct se_session *, unsigned int, 118int transport_alloc_session_tags(struct se_session *, unsigned int,
113 unsigned int); 119 unsigned int);
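
The declaration above names only prot_op and the callback; spelled out with parameter names (the names and annotations here are editorial, not part of the header), it reads:

    struct se_session *target_alloc_session(struct se_portal_group *tpg,
                    unsigned int tag_num,      /* tags to pre-allocate; 0 for drivers without session tags */
                    unsigned int tag_size,     /* per-tag fabric command descriptor size */
                    enum target_prot_op prot_op,
                    const char *initiatorname, /* demo-mode se_node_acl lookup key */
                    void *private,             /* fabric nexus, passed to the callback and registered with the session */
                    int (*callback)(struct se_portal_group *,
                                    struct se_session *, void *));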
diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h
index 95c6521d8a95..c506cddb8165 100644
--- a/include/uapi/linux/target_core_user.h
+++ b/include/uapi/linux/target_core_user.h
@@ -41,6 +41,7 @@
41 41
42#define TCMU_MAILBOX_VERSION 2 42#define TCMU_MAILBOX_VERSION 2
43#define ALIGN_SIZE 64 /* Should be enough for most CPUs */ 43#define ALIGN_SIZE 64 /* Should be enough for most CPUs */
44#define TCMU_MAILBOX_FLAG_CAP_OOOC (1 << 0) /* Out-of-order completions */
44 45
45struct tcmu_mailbox { 46struct tcmu_mailbox {
46 __u16 version; 47 __u16 version;