author		Linus Torvalds <torvalds@linux-foundation.org>	2013-09-12 19:11:45 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-09-12 19:11:45 -0400
commit		48efe453e6b29561f78a1df55c7f58375259cb8c (patch)
tree		53d6ac1f2010b102c15b264b13fc4c98ba634d48
parent		ac4de9543aca59f2b763746647577302fbedd57e (diff)
parent		2999ee7fda3f670effbfa746164c525f9d1be4b8 (diff)
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
Pull SCSI target updates from Nicholas Bellinger:
 "Lots of activity again this round for I/O performance optimizations
  (per-cpu IDA pre-allocation for vhost + iscsi/target), and the
  addition of new fabric independent features to target-core
  (COMPARE_AND_WRITE + EXTENDED_COPY).

  The main highlights include:

   - Support for iscsi-target login multiplexing across individual
     network portals
   - Generic Per-cpu IDA logic (kent + akpm + clameter)
   - Conversion of vhost to use per-cpu IDA pre-allocation for
     descriptors, SGLs and userspace page pointer list
   - Conversion of iscsi-target + iser-target to use per-cpu IDA
     pre-allocation for descriptors
   - Add support for generic COMPARE_AND_WRITE (AtomicTestandSet)
     emulation for virtual backend drivers
   - Add support for generic EXTENDED_COPY (CopyOffload) emulation for
     virtual backend drivers
   - Add support for fast memory registration mode to iser-target (Vu)

  The patches to add COMPARE_AND_WRITE and EXTENDED_COPY support are of
  particular significance, which make us the first and only open source
  target to support the full set of VAAI primitives.

  Currently Linux clients are lacking upstream support to actually
  utilize these primitives.  However, with server side support now in
  place for folks like MKP + ZAB working on the client, this logic once
  reserved for the highest end of storage arrays, can now be run in VMs
  on their laptops"

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (50 commits)
  target/iscsi: Bump versions to v4.1.0
  target: Update copyright ownership/year information to 2013
  iscsi-target: Bump default TCP listen backlog to 256
  target: Fix >= v3.9+ regression in PR APTPL + ALUA metadata write-out
  iscsi-target: Bump default CmdSN Depth to 64
  iscsi-target: Remove unnecessary wait_for_completion in iscsi_get_thread_set
  iscsi-target: Add thread_set->ts_activate_sem + use common deallocate
  iscsi-target: Fix race with thread_pre_handler flush_signals + ISCSI_THREAD_SET_DIE
  target: remove unused including <linux/version.h>
  iser-target: introduce fast memory registration mode (FRWR)
  iser-target: generalize rdma memory registration and cleanup
  iser-target: move rdma wr processing to a shared function
  target: Enable global EXTENDED_COPY setup/release
  target: Add Third Party Copy (3PC) bit in INQUIRY response
  target: Enable EXTENDED_COPY setup in spc_parse_cdb
  target: Add support for EXTENDED_COPY copy offload emulation
  target: Avoid non-existent tg_pt_gp_mem in target_alua_state_check
  target: Add global device list for EXTENDED_COPY
  target: Make helpers non static for EXTENDED_COPY command setup
  target: Make spc_parse_naa_6h_vendor_specific non static
  ...
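The generic per-cpu IDA allocator called out above enters the tree in this merge as include/linux/percpu_ida.h and lib/percpu_ida.c (see the diffstat below). A minimal sketch of the pre-allocation pattern the fabric conversions use, assuming the v3.12-era API in which percpu_ida_alloc() still takes a gfp_t; the my_pool wrapper and its field names are illustrative, not code from this merge:

#include <linux/percpu_ida.h>
#include <linux/slab.h>

/* Illustrative pool: one pre-allocated descriptor per tag, so the
 * I/O fast path never calls kmalloc(). */
struct my_pool {
	struct percpu_ida tags;
	void **descs;
};

static int my_pool_init(struct my_pool *p, unsigned int nr_tags)
{
	p->descs = kcalloc(nr_tags, sizeof(void *), GFP_KERNEL);
	if (!p->descs)
		return -ENOMEM;
	/* Builds per-cpu tag freelists covering tags 0..nr_tags-1. */
	return percpu_ida_init(&p->tags, nr_tags);
}

static void *my_pool_get(struct my_pool *p)
{
	/* Fast path pops a tag from this CPU's cache; with GFP_KERNEL
	 * the caller may sleep until percpu_ida_free() returns one. */
	int tag = percpu_ida_alloc(&p->tags, GFP_KERNEL);

	return (tag < 0) ? NULL : p->descs[tag];
}

The iscsi-target and vhost conversions in this pull follow roughly this shape: the tag indexes a per-session array of pre-built commands, which is what the iscsit_priv_cmd() lookups in the isert diff below rely on.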
-rw-r--r--	drivers/infiniband/ulp/isert/ib_isert.c	747
-rw-r--r--	drivers/infiniband/ulp/isert/ib_isert.h	26
-rw-r--r--	drivers/scsi/qla2xxx/qla_target.c	2
-rw-r--r--	drivers/scsi/qla2xxx/tcm_qla2xxx.c	59
-rw-r--r--	drivers/scsi/qla2xxx/tcm_qla2xxx.h	1
-rw-r--r--	drivers/target/Makefile	3
-rw-r--r--	drivers/target/iscsi/iscsi_target.c	83
-rw-r--r--	drivers/target/iscsi/iscsi_target.h	7
-rw-r--r--	drivers/target/iscsi/iscsi_target_auth.c	4
-rw-r--r--	drivers/target/iscsi/iscsi_target_configfs.c	16
-rw-r--r--	drivers/target/iscsi/iscsi_target_core.h	26
-rw-r--r--	drivers/target/iscsi/iscsi_target_datain_values.c	4
-rw-r--r--	drivers/target/iscsi/iscsi_target_device.c	4
-rw-r--r--	drivers/target/iscsi/iscsi_target_erl0.c	4
-rw-r--r--	drivers/target/iscsi/iscsi_target_erl1.c	4
-rw-r--r--	drivers/target/iscsi/iscsi_target_erl2.c	4
-rw-r--r--	drivers/target/iscsi/iscsi_target_login.c	186
-rw-r--r--	drivers/target/iscsi/iscsi_target_login.h	3
-rw-r--r--	drivers/target/iscsi/iscsi_target_nego.c	367
-rw-r--r--	drivers/target/iscsi/iscsi_target_nodeattrib.c	4
-rw-r--r--	drivers/target/iscsi/iscsi_target_parameters.c	6
-rw-r--r--	drivers/target/iscsi/iscsi_target_seq_pdu_list.c	4
-rw-r--r--	drivers/target/iscsi/iscsi_target_stat.c	14
-rw-r--r--	drivers/target/iscsi/iscsi_target_tmr.c	4
-rw-r--r--	drivers/target/iscsi/iscsi_target_tpg.c	29
-rw-r--r--	drivers/target/iscsi/iscsi_target_tpg.h	4
-rw-r--r--	drivers/target/iscsi/iscsi_target_tq.c	167
-rw-r--r--	drivers/target/iscsi/iscsi_target_tq.h	5
-rw-r--r--	drivers/target/iscsi/iscsi_target_util.c	45
-rw-r--r--	drivers/target/loopback/tcm_loop.c	2
-rw-r--r--	drivers/target/target_core_alua.c	39
-rw-r--r--	drivers/target/target_core_configfs.c	60
-rw-r--r--	drivers/target/target_core_device.c	44
-rw-r--r--	drivers/target/target_core_fabric_configfs.c	18
-rw-r--r--	drivers/target/target_core_fabric_lib.c	2
-rw-r--r--	drivers/target/target_core_file.c	12
-rw-r--r--	drivers/target/target_core_hba.c	2
-rw-r--r--	drivers/target/target_core_iblock.c	12
-rw-r--r--	drivers/target/target_core_internal.h	2
-rw-r--r--	drivers/target/target_core_pr.c	4
-rw-r--r--	drivers/target/target_core_pscsi.c	7
-rw-r--r--	drivers/target/target_core_rd.c	8
-rw-r--r--	drivers/target/target_core_sbc.c	257
-rw-r--r--	drivers/target/target_core_spc.c	27
-rw-r--r--	drivers/target/target_core_stat.c	2
-rw-r--r--	drivers/target/target_core_tmr.c	2
-rw-r--r--	drivers/target/target_core_tpg.c	2
-rw-r--r--	drivers/target/target_core_transport.c	170
-rw-r--r--	drivers/target/target_core_ua.c	2
-rw-r--r--	drivers/target/target_core_xcopy.c	1081
-rw-r--r--	drivers/target/target_core_xcopy.h	62
-rw-r--r--	drivers/target/tcm_fc/tfc_conf.c	6
-rw-r--r--	drivers/vhost/scsi.c	136
-rw-r--r--	include/linux/percpu_ida.h	60
-rw-r--r--	include/scsi/scsi.h	1
-rw-r--r--	include/target/iscsi/iscsi_transport.h	8
-rw-r--r--	include/target/target_core_backend.h	7
-rw-r--r--	include/target/target_core_base.h	26
-rw-r--r--	include/target/target_core_fabric.h	30
-rw-r--r--	lib/Makefile	5
-rw-r--r--	lib/percpu_ida.c	335
61 files changed, 3487 insertions, 776 deletions
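The largest change below is ib_isert.c growing a fast registration (FRWR) path alongside the existing dma_map_sg() path. The strategy is chosen once per ib_device in isert_create_device_ib_res() and stored as the reg_rdma_mem/unreg_rdma_mem function pointers, so isert_put_datain() and isert_get_dataout() stay strategy-agnostic. A self-contained userspace sketch of that dispatch pattern (type and function names are stand-ins, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

struct rdma_ops {
	int  (*reg_rdma_mem)(void *conn, void *cmd);
	void (*unreg_rdma_mem)(void *cmd, void *conn);
};

static int  frwr_reg(void *conn, void *cmd)   { puts("reg via FRWR");    return 0; }
static void frwr_unreg(void *cmd, void *conn) { puts("unreg via FRWR"); }
static int  map_reg(void *conn, void *cmd)    { puts("reg via map_sg");  return 0; }
static void map_unreg(void *cmd, void *conn)  { puts("unreg via map_sg"); }

/* Mirrors the IB_DEVICE_MEM_MGT_EXTENSIONS capability check below:
 * decide once at device setup instead of branching on every I/O. */
static void pick_rdma_ops(bool mem_mgt_extensions, struct rdma_ops *ops)
{
	if (mem_mgt_extensions) {
		ops->reg_rdma_mem = frwr_reg;
		ops->unreg_rdma_mem = frwr_unreg;
	} else {
		ops->reg_rdma_mem = map_reg;
		ops->unreg_rdma_mem = map_unreg;
	}
}

int main(void)
{
	struct rdma_ops ops;

	pick_rdma_ops(true, &ops);
	ops.reg_rdma_mem(NULL, NULL);	/* prints "reg via FRWR" */
	ops.unreg_rdma_mem(NULL, NULL);
	return 0;
}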
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 3f62041222f2..3591855cc5b5 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
  * This file contains iSCSI extentions for RDMA (iSER) Verbs
  *
- * (c) Copyright 2013 RisingTide Systems LLC.
+ * (c) Copyright 2013 Datera, Inc.
  *
  * Nicholas A. Bellinger <nab@linux-iscsi.org>
  *
@@ -39,7 +39,17 @@ static DEFINE_MUTEX(device_list_mutex);
 static LIST_HEAD(device_list);
 static struct workqueue_struct *isert_rx_wq;
 static struct workqueue_struct *isert_comp_wq;
-static struct kmem_cache *isert_cmd_cache;
+
+static void
+isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
+static int
+isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+	       struct isert_rdma_wr *wr);
+static void
+isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
+static int
+isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+		    struct isert_rdma_wr *wr);
 
 static void
 isert_qp_event_callback(struct ib_event *e, void *context)
@@ -80,14 +90,8 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
 {
 	struct isert_device *device = isert_conn->conn_device;
 	struct ib_qp_init_attr attr;
-	struct ib_device_attr devattr;
 	int ret, index, min_index = 0;
 
-	memset(&devattr, 0, sizeof(struct ib_device_attr));
-	ret = isert_query_device(cma_id->device, &devattr);
-	if (ret)
-		return ret;
-
 	mutex_lock(&device_list_mutex);
 	for (index = 0; index < device->cqs_used; index++)
 		if (device->cq_active_qps[index] <
@@ -108,7 +112,7 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
 	 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
 	 * work-around for RDMA_READ..
 	 */
-	attr.cap.max_send_sge = devattr.max_sge - 2;
+	attr.cap.max_send_sge = device->dev_attr.max_sge - 2;
 	isert_conn->max_sge = attr.cap.max_send_sge;
 
 	attr.cap.max_recv_sge = 1;
@@ -210,14 +214,31 @@ isert_create_device_ib_res(struct isert_device *device)
 {
 	struct ib_device *ib_dev = device->ib_device;
 	struct isert_cq_desc *cq_desc;
+	struct ib_device_attr *dev_attr;
 	int ret = 0, i, j;
 
+	dev_attr = &device->dev_attr;
+	ret = isert_query_device(ib_dev, dev_attr);
+	if (ret)
+		return ret;
+
+	/* asign function handlers */
+	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
+		device->use_frwr = 1;
+		device->reg_rdma_mem = isert_reg_rdma_frwr;
+		device->unreg_rdma_mem = isert_unreg_rdma_frwr;
+	} else {
+		device->use_frwr = 0;
+		device->reg_rdma_mem = isert_map_rdma;
+		device->unreg_rdma_mem = isert_unmap_cmd;
+	}
+
 	device->cqs_used = min_t(int, num_online_cpus(),
 				 device->ib_device->num_comp_vectors);
 	device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
-	pr_debug("Using %d CQs, device %s supports %d vectors\n",
+	pr_debug("Using %d CQs, device %s supports %d vectors support FRWR %d\n",
 		 device->cqs_used, device->ib_device->name,
-		 device->ib_device->num_comp_vectors);
+		 device->ib_device->num_comp_vectors, device->use_frwr);
 	device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
 				  device->cqs_used, GFP_KERNEL);
 	if (!device->cq_desc) {
@@ -363,6 +384,85 @@ isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
 	return device;
 }
 
+static void
+isert_conn_free_frwr_pool(struct isert_conn *isert_conn)
+{
+	struct fast_reg_descriptor *fr_desc, *tmp;
+	int i = 0;
+
+	if (list_empty(&isert_conn->conn_frwr_pool))
+		return;
+
+	pr_debug("Freeing conn %p frwr pool", isert_conn);
+
+	list_for_each_entry_safe(fr_desc, tmp,
+				 &isert_conn->conn_frwr_pool, list) {
+		list_del(&fr_desc->list);
+		ib_free_fast_reg_page_list(fr_desc->data_frpl);
+		ib_dereg_mr(fr_desc->data_mr);
+		kfree(fr_desc);
+		++i;
+	}
+
+	if (i < isert_conn->conn_frwr_pool_size)
+		pr_warn("Pool still has %d regions registered\n",
+			isert_conn->conn_frwr_pool_size - i);
+}
+
+static int
+isert_conn_create_frwr_pool(struct isert_conn *isert_conn)
+{
+	struct fast_reg_descriptor *fr_desc;
+	struct isert_device *device = isert_conn->conn_device;
+	int i, ret;
+
+	INIT_LIST_HEAD(&isert_conn->conn_frwr_pool);
+	isert_conn->conn_frwr_pool_size = 0;
+	for (i = 0; i < ISCSI_DEF_XMIT_CMDS_MAX; i++) {
+		fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
+		if (!fr_desc) {
+			pr_err("Failed to allocate fast_reg descriptor\n");
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		fr_desc->data_frpl =
+			ib_alloc_fast_reg_page_list(device->ib_device,
+						    ISCSI_ISER_SG_TABLESIZE);
+		if (IS_ERR(fr_desc->data_frpl)) {
+			pr_err("Failed to allocate fr_pg_list err=%ld\n",
+			       PTR_ERR(fr_desc->data_frpl));
+			ret = PTR_ERR(fr_desc->data_frpl);
+			goto err;
+		}
+
+		fr_desc->data_mr = ib_alloc_fast_reg_mr(device->dev_pd,
+							ISCSI_ISER_SG_TABLESIZE);
+		if (IS_ERR(fr_desc->data_mr)) {
+			pr_err("Failed to allocate frmr err=%ld\n",
+			       PTR_ERR(fr_desc->data_mr));
+			ret = PTR_ERR(fr_desc->data_mr);
+			ib_free_fast_reg_page_list(fr_desc->data_frpl);
+			goto err;
+		}
+		pr_debug("Create fr_desc %p page_list %p\n",
+			 fr_desc, fr_desc->data_frpl->page_list);
+
+		fr_desc->valid = true;
+		list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
+		isert_conn->conn_frwr_pool_size++;
+	}
+
+	pr_debug("Creating conn %p frwr pool size=%d",
+		 isert_conn, isert_conn->conn_frwr_pool_size);
+
+	return 0;
+
+err:
+	isert_conn_free_frwr_pool(isert_conn);
+	return ret;
+}
+
 static int
 isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 {
@@ -389,6 +489,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	kref_init(&isert_conn->conn_kref);
 	kref_get(&isert_conn->conn_kref);
 	mutex_init(&isert_conn->conn_mutex);
+	spin_lock_init(&isert_conn->conn_lock);
 
 	cma_id->context = isert_conn;
 	isert_conn->conn_cm_id = cma_id;
@@ -446,6 +547,14 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	isert_conn->conn_pd = device->dev_pd;
 	isert_conn->conn_mr = device->dev_mr;
 
+	if (device->use_frwr) {
+		ret = isert_conn_create_frwr_pool(isert_conn);
+		if (ret) {
+			pr_err("Conn: %p failed to create frwr_pool\n", isert_conn);
+			goto out_frwr;
+		}
+	}
+
 	ret = isert_conn_setup_qp(isert_conn, cma_id);
 	if (ret)
 		goto out_conn_dev;
@@ -459,6 +568,9 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	return 0;
 
 out_conn_dev:
+	if (device->use_frwr)
+		isert_conn_free_frwr_pool(isert_conn);
+out_frwr:
 	isert_device_try_release(device);
 out_rsp_dma_map:
 	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
@@ -482,6 +594,9 @@ isert_connect_release(struct isert_conn *isert_conn)
 
 	pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
 
+	if (device->use_frwr)
+		isert_conn_free_frwr_pool(isert_conn);
+
 	if (isert_conn->conn_qp) {
 		cq_index = ((struct isert_cq_desc *)
 			isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
@@ -869,46 +984,37 @@ isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
 		size, rx_buflen, MAX_KEY_VALUE_PAIRS);
 	memcpy(login->req_buf, &rx_desc->data[0], size);
 
-	complete(&isert_conn->conn_login_comp);
-}
-
-static void
-isert_release_cmd(struct iscsi_cmd *cmd)
-{
-	struct isert_cmd *isert_cmd = container_of(cmd, struct isert_cmd,
-						   iscsi_cmd);
-
-	pr_debug("Entering isert_release_cmd %p >>>>>>>>>>>>>>>.\n", isert_cmd);
-
-	kfree(cmd->buf_ptr);
-	kfree(cmd->tmr_req);
-
-	kmem_cache_free(isert_cmd_cache, isert_cmd);
+	if (login->first_request) {
+		complete(&isert_conn->conn_login_comp);
+		return;
+	}
+	schedule_delayed_work(&conn->login_work, 0);
 }
 
 static struct iscsi_cmd
-*isert_alloc_cmd(struct iscsi_conn *conn, gfp_t gfp)
+*isert_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp)
 {
 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
 	struct isert_cmd *isert_cmd;
+	struct iscsi_cmd *cmd;
 
-	isert_cmd = kmem_cache_zalloc(isert_cmd_cache, gfp);
-	if (!isert_cmd) {
-		pr_err("Unable to allocate isert_cmd\n");
+	cmd = iscsit_allocate_cmd(conn, gfp);
+	if (!cmd) {
+		pr_err("Unable to allocate iscsi_cmd + isert_cmd\n");
 		return NULL;
 	}
+	isert_cmd = iscsit_priv_cmd(cmd);
 	isert_cmd->conn = isert_conn;
-	isert_cmd->iscsi_cmd.release_cmd = &isert_release_cmd;
+	isert_cmd->iscsi_cmd = cmd;
 
-	return &isert_cmd->iscsi_cmd;
+	return cmd;
 }
 
 static int
 isert_handle_scsi_cmd(struct isert_conn *isert_conn,
-		      struct isert_cmd *isert_cmd, struct iser_rx_desc *rx_desc,
-		      unsigned char *buf)
+		      struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
+		      struct iser_rx_desc *rx_desc, unsigned char *buf)
 {
-	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
 	struct iscsi_conn *conn = isert_conn->conn;
 	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
 	struct scatterlist *sg;
@@ -1015,9 +1121,9 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
 
 static int
 isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
-		     struct iser_rx_desc *rx_desc, unsigned char *buf)
+		     struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
+		     unsigned char *buf)
 {
-	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
 	struct iscsi_conn *conn = isert_conn->conn;
 	struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
 	int rc;
@@ -1034,9 +1140,9 @@ isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
 
 static int
 isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
-		      struct iser_rx_desc *rx_desc, struct iscsi_text *hdr)
+		      struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
+		      struct iscsi_text *hdr)
 {
-	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
 	struct iscsi_conn *conn = isert_conn->conn;
 	u32 payload_length = ntoh24(hdr->dlength);
 	int rc;
@@ -1081,26 +1187,26 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
 
 	switch (opcode) {
 	case ISCSI_OP_SCSI_CMD:
-		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
 		if (!cmd)
 			break;
 
-		isert_cmd = container_of(cmd, struct isert_cmd, iscsi_cmd);
+		isert_cmd = iscsit_priv_cmd(cmd);
 		isert_cmd->read_stag = read_stag;
 		isert_cmd->read_va = read_va;
 		isert_cmd->write_stag = write_stag;
 		isert_cmd->write_va = write_va;
 
-		ret = isert_handle_scsi_cmd(isert_conn, isert_cmd,
+		ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
 					rx_desc, (unsigned char *)hdr);
 		break;
 	case ISCSI_OP_NOOP_OUT:
-		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
 		if (!cmd)
 			break;
 
-		isert_cmd = container_of(cmd, struct isert_cmd, iscsi_cmd);
-		ret = isert_handle_nop_out(isert_conn, isert_cmd,
+		isert_cmd = iscsit_priv_cmd(cmd);
+		ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
 					rx_desc, (unsigned char *)hdr);
 		break;
 	case ISCSI_OP_SCSI_DATA_OUT:
@@ -1108,7 +1214,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
 						(unsigned char *)hdr);
 		break;
 	case ISCSI_OP_SCSI_TMFUNC:
-		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
 		if (!cmd)
 			break;
 
@@ -1116,7 +1222,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
 						(unsigned char *)hdr);
 		break;
 	case ISCSI_OP_LOGOUT:
-		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
 		if (!cmd)
 			break;
 
@@ -1127,12 +1233,12 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
 						    HZ);
 		break;
 	case ISCSI_OP_TEXT:
-		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
 		if (!cmd)
 			break;
 
-		isert_cmd = container_of(cmd, struct isert_cmd, iscsi_cmd);
-		ret = isert_handle_text_cmd(isert_conn, isert_cmd,
+		isert_cmd = iscsit_priv_cmd(cmd);
+		ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
 					rx_desc, (struct iscsi_text *)hdr);
 		break;
 	default:
@@ -1243,26 +1349,65 @@ isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
 
-	pr_debug("isert_unmap_cmd >>>>>>>>>>>>>>>>>>>>>>>\n");
+	pr_debug("isert_unmap_cmd: %p\n", isert_cmd);
+	if (wr->sge) {
+		pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd);
+		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
+				(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
+				DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		wr->sge = NULL;
+	}
+
+	if (wr->send_wr) {
+		pr_debug("isert_unmap_cmd: %p free send_wr\n", isert_cmd);
+		kfree(wr->send_wr);
+		wr->send_wr = NULL;
+	}
+
+	if (wr->ib_sge) {
+		pr_debug("isert_unmap_cmd: %p free ib_sge\n", isert_cmd);
+		kfree(wr->ib_sge);
+		wr->ib_sge = NULL;
+	}
+}
+
+static void
+isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
+{
+	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
+	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	LIST_HEAD(unmap_list);
+
+	pr_debug("unreg_frwr_cmd: %p\n", isert_cmd);
+
+	if (wr->fr_desc) {
+		pr_debug("unreg_frwr_cmd: %p free fr_desc %p\n",
+			 isert_cmd, wr->fr_desc);
+		spin_lock_bh(&isert_conn->conn_lock);
+		list_add_tail(&wr->fr_desc->list, &isert_conn->conn_frwr_pool);
+		spin_unlock_bh(&isert_conn->conn_lock);
+		wr->fr_desc = NULL;
+	}
 
 	if (wr->sge) {
-		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, DMA_TO_DEVICE);
+		pr_debug("unreg_frwr_cmd: %p unmap_sg op\n", isert_cmd);
+		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
+				(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
+				DMA_TO_DEVICE : DMA_FROM_DEVICE);
 		wr->sge = NULL;
 	}
 
-	kfree(wr->send_wr);
+	wr->ib_sge = NULL;
 	wr->send_wr = NULL;
-
-	kfree(isert_cmd->ib_sge);
-	isert_cmd->ib_sge = NULL;
 }
 
 static void
 isert_put_cmd(struct isert_cmd *isert_cmd)
 {
-	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
+	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
 	struct isert_conn *isert_conn = isert_cmd->conn;
 	struct iscsi_conn *conn = isert_conn->conn;
+	struct isert_device *device = isert_conn->conn_device;
 
 	pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);
 
@@ -1276,7 +1421,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
 		if (cmd->data_direction == DMA_TO_DEVICE)
 			iscsit_stop_dataout_timer(cmd);
 
-		isert_unmap_cmd(isert_cmd, isert_conn);
+		device->unreg_rdma_mem(isert_cmd, isert_conn);
 		transport_generic_free_cmd(&cmd->se_cmd, 0);
 		break;
 	case ISCSI_OP_SCSI_TMFUNC:
@@ -1311,7 +1456,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
 	 * Fall-through
 	 */
 	default:
-		isert_release_cmd(cmd);
+		iscsit_release_cmd(cmd);
 		break;
 	}
 }
@@ -1347,27 +1492,16 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
 			   struct isert_cmd *isert_cmd)
 {
 	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
-	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
+	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
 	struct se_cmd *se_cmd = &cmd->se_cmd;
-	struct ib_device *ib_dev = isert_cmd->conn->conn_cm_id->device;
+	struct isert_conn *isert_conn = isert_cmd->conn;
+	struct isert_device *device = isert_conn->conn_device;
 
 	iscsit_stop_dataout_timer(cmd);
+	device->unreg_rdma_mem(isert_cmd, isert_conn);
+	cmd->write_data_done = wr->cur_rdma_length;
 
-	if (wr->sge) {
-		pr_debug("isert_do_rdma_read_comp: Unmapping wr->sge from t_data_sg\n");
-		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, DMA_TO_DEVICE);
-		wr->sge = NULL;
-	}
-
-	if (isert_cmd->ib_sge) {
-		pr_debug("isert_do_rdma_read_comp: Freeing isert_cmd->ib_sge\n");
-		kfree(isert_cmd->ib_sge);
-		isert_cmd->ib_sge = NULL;
-	}
-
-	cmd->write_data_done = se_cmd->data_length;
-
-	pr_debug("isert_do_rdma_read_comp, calling target_execute_cmd\n");
+	pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
 	spin_lock_bh(&cmd->istate_lock);
 	cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
 	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
@@ -1383,7 +1517,7 @@ isert_do_control_comp(struct work_struct *work)
 					struct isert_cmd, comp_work);
 	struct isert_conn *isert_conn = isert_cmd->conn;
 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
-	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
+	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
 
 	switch (cmd->i_state) {
 	case ISTATE_SEND_TASKMGTRSP:
@@ -1429,7 +1563,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
 			  struct isert_conn *isert_conn,
 			  struct ib_device *ib_dev)
 {
-	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
+	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
 
 	if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
 	    cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
@@ -1621,8 +1755,7 @@ isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
 static int
 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 {
-	struct isert_cmd *isert_cmd = container_of(cmd,
-				struct isert_cmd, iscsi_cmd);
+	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
 	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
 	struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
@@ -1671,8 +1804,7 @@ static int
 isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
 		bool nopout_response)
 {
-	struct isert_cmd *isert_cmd = container_of(cmd,
-				struct isert_cmd, iscsi_cmd);
+	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
 	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
 
@@ -1691,8 +1823,7 @@ isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
 static int
 isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 {
-	struct isert_cmd *isert_cmd = container_of(cmd,
-				struct isert_cmd, iscsi_cmd);
+	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
 	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
 
@@ -1710,8 +1841,7 @@ isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 static int
 isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 {
-	struct isert_cmd *isert_cmd = container_of(cmd,
-				struct isert_cmd, iscsi_cmd);
+	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
 	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
 
@@ -1729,8 +1859,7 @@ isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 static int
 isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 {
-	struct isert_cmd *isert_cmd = container_of(cmd,
-				struct isert_cmd, iscsi_cmd);
+	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
 	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
@@ -1762,8 +1891,7 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 static int
 isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 {
-	struct isert_cmd *isert_cmd = container_of(cmd,
-				struct isert_cmd, iscsi_cmd);
+	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
 	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
 	struct iscsi_text_rsp *hdr =
@@ -1805,7 +1933,7 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
 		    struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
 		    u32 data_left, u32 offset)
 {
-	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
+	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
 	struct scatterlist *sg_start, *tmp_sg;
 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
 	u32 sg_off, page_off;
@@ -1832,8 +1960,8 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
 			  ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
 		ib_sge->lkey = isert_conn->conn_mr->lkey;
 
-		pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u\n",
-			 ib_sge->addr, ib_sge->length);
+		pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n",
+			 ib_sge->addr, ib_sge->length, ib_sge->lkey);
 		page_off = 0;
 		data_left -= ib_sge->length;
 		ib_sge++;
@@ -1847,200 +1975,373 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
 }
 
 static int
-isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+	       struct isert_rdma_wr *wr)
 {
 	struct se_cmd *se_cmd = &cmd->se_cmd;
-	struct isert_cmd *isert_cmd = container_of(cmd,
-				struct isert_cmd, iscsi_cmd);
-	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
+	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
-	struct ib_send_wr *wr_failed, *send_wr;
 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	struct ib_send_wr *send_wr;
 	struct ib_sge *ib_sge;
-	struct scatterlist *sg;
-	u32 offset = 0, data_len, data_left, rdma_write_max;
-	int rc, ret = 0, count, sg_nents, i, ib_sge_cnt;
-
-	pr_debug("RDMA_WRITE: data_length: %u\n", se_cmd->data_length);
+	struct scatterlist *sg_start;
+	u32 sg_off = 0, sg_nents;
+	u32 offset = 0, data_len, data_left, rdma_write_max, va_offset = 0;
+	int ret = 0, count, i, ib_sge_cnt;
+
+	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
+		data_left = se_cmd->data_length;
+		iscsit_increment_maxcmdsn(cmd, conn->sess);
+		cmd->stat_sn = conn->stat_sn++;
+	} else {
+		sg_off = cmd->write_data_done / PAGE_SIZE;
+		data_left = se_cmd->data_length - cmd->write_data_done;
+		offset = cmd->write_data_done;
+		isert_cmd->tx_desc.isert_cmd = isert_cmd;
+	}
 
-	sg = &se_cmd->t_data_sg[0];
-	sg_nents = se_cmd->t_data_nents;
+	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
+	sg_nents = se_cmd->t_data_nents - sg_off;
 
-	count = ib_dma_map_sg(ib_dev, sg, sg_nents, DMA_TO_DEVICE);
+	count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
+			      (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
+			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
 	if (unlikely(!count)) {
-		pr_err("Unable to map put_datain SGs\n");
+		pr_err("Cmd: %p unrable to map SGs\n", isert_cmd);
 		return -EINVAL;
 	}
-	wr->sge = sg;
+	wr->sge = sg_start;
 	wr->num_sge = sg_nents;
-	pr_debug("Mapped IB count: %u sg: %p sg_nents: %u for RDMA_WRITE\n",
-		 count, sg, sg_nents);
+	wr->cur_rdma_length = data_left;
+	pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
+		 isert_cmd, count, sg_start, sg_nents, data_left);
 
 	ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
 	if (!ib_sge) {
-		pr_warn("Unable to allocate datain ib_sge\n");
+		pr_warn("Unable to allocate ib_sge\n");
 		ret = -ENOMEM;
 		goto unmap_sg;
 	}
-	isert_cmd->ib_sge = ib_sge;
-
-	pr_debug("Allocated ib_sge: %p from t_data_ents: %d for RDMA_WRITE\n",
-		 ib_sge, se_cmd->t_data_nents);
+	wr->ib_sge = ib_sge;
 
 	wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
 	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
 				GFP_KERNEL);
 	if (!wr->send_wr) {
-		pr_err("Unable to allocate wr->send_wr\n");
+		pr_debug("Unable to allocate wr->send_wr\n");
 		ret = -ENOMEM;
 		goto unmap_sg;
 	}
-	pr_debug("Allocated wr->send_wr: %p wr->send_wr_num: %u\n",
-		 wr->send_wr, wr->send_wr_num);
-
-	iscsit_increment_maxcmdsn(cmd, conn->sess);
-	cmd->stat_sn = conn->stat_sn++;
 
 	wr->isert_cmd = isert_cmd;
 	rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
-	data_left = se_cmd->data_length;
 
 	for (i = 0; i < wr->send_wr_num; i++) {
 		send_wr = &isert_cmd->rdma_wr.send_wr[i];
 		data_len = min(data_left, rdma_write_max);
 
-		send_wr->opcode = IB_WR_RDMA_WRITE;
 		send_wr->send_flags = 0;
-		send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
-		send_wr->wr.rdma.rkey = isert_cmd->read_stag;
+		if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
+			send_wr->opcode = IB_WR_RDMA_WRITE;
+			send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
+			send_wr->wr.rdma.rkey = isert_cmd->read_stag;
+			if (i + 1 == wr->send_wr_num)
+				send_wr->next = &isert_cmd->tx_desc.send_wr;
+			else
+				send_wr->next = &wr->send_wr[i + 1];
+		} else {
+			send_wr->opcode = IB_WR_RDMA_READ;
+			send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
+			send_wr->wr.rdma.rkey = isert_cmd->write_stag;
+			if (i + 1 == wr->send_wr_num)
+				send_wr->send_flags = IB_SEND_SIGNALED;
+			else
+				send_wr->next = &wr->send_wr[i + 1];
+		}
 
 		ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
 					send_wr, data_len, offset);
 		ib_sge += ib_sge_cnt;
 
-		if (i + 1 == wr->send_wr_num)
-			send_wr->next = &isert_cmd->tx_desc.send_wr;
-		else
-			send_wr->next = &wr->send_wr[i + 1];
-
 		offset += data_len;
+		va_offset += data_len;
 		data_left -= data_len;
 	}
-	/*
-	 * Build isert_conn->tx_desc for iSCSI response PDU and attach
-	 */
-	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
-	iscsit_build_rsp_pdu(cmd, conn, false, (struct iscsi_scsi_rsp *)
-			&isert_cmd->tx_desc.iscsi_header);
-	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
-	isert_init_send_wr(isert_cmd, &isert_cmd->tx_desc.send_wr);
 
-	atomic_inc(&isert_conn->post_send_buf_count);
+	return 0;
+unmap_sg:
+	ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
+			(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
+			DMA_TO_DEVICE : DMA_FROM_DEVICE);
+	return ret;
+}
 
-	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
-	if (rc) {
-		pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
-		atomic_dec(&isert_conn->post_send_buf_count);
+static int
+isert_map_fr_pagelist(struct ib_device *ib_dev,
+		      struct scatterlist *sg_start, int sg_nents, u64 *fr_pl)
+{
+	u64 start_addr, end_addr, page, chunk_start = 0;
+	struct scatterlist *tmp_sg;
+	int i = 0, new_chunk, last_ent, n_pages;
+
+	n_pages = 0;
+	new_chunk = 1;
+	last_ent = sg_nents - 1;
+	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
+		start_addr = ib_sg_dma_address(ib_dev, tmp_sg);
+		if (new_chunk)
+			chunk_start = start_addr;
+		end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);
+
+		pr_debug("SGL[%d] dma_addr: 0x%16llx len: %u\n",
+			 i, (unsigned long long)tmp_sg->dma_address,
+			 tmp_sg->length);
+
+		if ((end_addr & ~PAGE_MASK) && i < last_ent) {
+			new_chunk = 0;
+			continue;
+		}
+		new_chunk = 1;
+
+		page = chunk_start & PAGE_MASK;
+		do {
+			fr_pl[n_pages++] = page;
+			pr_debug("Mapped page_list[%d] page_addr: 0x%16llx\n",
+				 n_pages - 1, page);
+			page += PAGE_SIZE;
+		} while (page < end_addr);
 	}
-	pr_debug("Posted RDMA_WRITE + Response for iSER Data READ\n");
-	return 1;
 
-unmap_sg:
-	ib_dma_unmap_sg(ib_dev, sg, sg_nents, DMA_TO_DEVICE);
+	return n_pages;
+}
+
+static int
+isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
+		  struct isert_cmd *isert_cmd, struct isert_conn *isert_conn,
+		  struct ib_sge *ib_sge, u32 offset, unsigned int data_len)
+{
+	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
+	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	struct scatterlist *sg_start;
+	u32 sg_off, page_off;
+	struct ib_send_wr fr_wr, inv_wr;
+	struct ib_send_wr *bad_wr, *wr = NULL;
+	u8 key;
+	int ret, sg_nents, pagelist_len;
+
+	sg_off = offset / PAGE_SIZE;
+	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
+	sg_nents = min_t(unsigned int, cmd->se_cmd.t_data_nents - sg_off,
+			 ISCSI_ISER_SG_TABLESIZE);
+	page_off = offset % PAGE_SIZE;
+
+	pr_debug("Cmd: %p use fr_desc %p sg_nents %d sg_off %d offset %u\n",
+		 isert_cmd, fr_desc, sg_nents, sg_off, offset);
+
+	pagelist_len = isert_map_fr_pagelist(ib_dev, sg_start, sg_nents,
+					     &fr_desc->data_frpl->page_list[0]);
+
+	if (!fr_desc->valid) {
+		memset(&inv_wr, 0, sizeof(inv_wr));
+		inv_wr.opcode = IB_WR_LOCAL_INV;
+		inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey;
+		wr = &inv_wr;
+		/* Bump the key */
+		key = (u8)(fr_desc->data_mr->rkey & 0x000000FF);
+		ib_update_fast_reg_key(fr_desc->data_mr, ++key);
+	}
+
+	/* Prepare FASTREG WR */
+	memset(&fr_wr, 0, sizeof(fr_wr));
+	fr_wr.opcode = IB_WR_FAST_REG_MR;
+	fr_wr.wr.fast_reg.iova_start =
+		fr_desc->data_frpl->page_list[0] + page_off;
+	fr_wr.wr.fast_reg.page_list = fr_desc->data_frpl;
+	fr_wr.wr.fast_reg.page_list_len = pagelist_len;
+	fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
+	fr_wr.wr.fast_reg.length = data_len;
+	fr_wr.wr.fast_reg.rkey = fr_desc->data_mr->rkey;
+	fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;
+
+	if (!wr)
+		wr = &fr_wr;
+	else
+		wr->next = &fr_wr;
+
+	ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
+	if (ret) {
+		pr_err("fast registration failed, ret:%d\n", ret);
+		return ret;
+	}
+	fr_desc->valid = false;
+
+	ib_sge->lkey = fr_desc->data_mr->lkey;
+	ib_sge->addr = fr_desc->data_frpl->page_list[0] + page_off;
+	ib_sge->length = data_len;
+
+	pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n",
+		 ib_sge->addr, ib_sge->length, ib_sge->lkey);
+
 	return ret;
 }
 
 static int
-isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
+isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+		    struct isert_rdma_wr *wr)
 {
 	struct se_cmd *se_cmd = &cmd->se_cmd;
-	struct isert_cmd *isert_cmd = container_of(cmd,
-				struct isert_cmd, iscsi_cmd);
-	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
+	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
-	struct ib_send_wr *wr_failed, *send_wr;
-	struct ib_sge *ib_sge;
 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	struct ib_send_wr *send_wr;
+	struct ib_sge *ib_sge;
 	struct scatterlist *sg_start;
-	u32 sg_off, sg_nents, page_off, va_offset = 0;
+	struct fast_reg_descriptor *fr_desc;
+	u32 sg_off = 0, sg_nents;
 	u32 offset = 0, data_len, data_left, rdma_write_max;
-	int rc, ret = 0, count, i, ib_sge_cnt;
-
-	pr_debug("RDMA_READ: data_length: %u write_data_done: %u\n",
-		 se_cmd->data_length, cmd->write_data_done);
+	int ret = 0, count;
+	unsigned long flags;
+
+	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
+		data_left = se_cmd->data_length;
+		iscsit_increment_maxcmdsn(cmd, conn->sess);
+		cmd->stat_sn = conn->stat_sn++;
+	} else {
+		sg_off = cmd->write_data_done / PAGE_SIZE;
+		data_left = se_cmd->data_length - cmd->write_data_done;
+		offset = cmd->write_data_done;
+		isert_cmd->tx_desc.isert_cmd = isert_cmd;
+	}
 
-	sg_off = cmd->write_data_done / PAGE_SIZE;
 	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
-	page_off = cmd->write_data_done % PAGE_SIZE;
-
-	pr_debug("RDMA_READ: sg_off: %d, sg_start: %p page_off: %d\n",
-		 sg_off, sg_start, page_off);
-
-	data_left = se_cmd->data_length - cmd->write_data_done;
 	sg_nents = se_cmd->t_data_nents - sg_off;
 
-	pr_debug("RDMA_READ: data_left: %d, sg_nents: %d\n",
-		 data_left, sg_nents);
-
-	count = ib_dma_map_sg(ib_dev, sg_start, sg_nents, DMA_FROM_DEVICE);
+	count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
+			      (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
+			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
 	if (unlikely(!count)) {
-		pr_err("Unable to map get_dataout SGs\n");
+		pr_err("Cmd: %p unrable to map SGs\n", isert_cmd);
 		return -EINVAL;
 	}
 	wr->sge = sg_start;
 	wr->num_sge = sg_nents;
-	pr_debug("Mapped IB count: %u sg_start: %p sg_nents: %u for RDMA_READ\n",
-		 count, sg_start, sg_nents);
-
-	ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
-	if (!ib_sge) {
-		pr_warn("Unable to allocate dataout ib_sge\n");
-		ret = -ENOMEM;
-		goto unmap_sg;
+	pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
+		 isert_cmd, count, sg_start, sg_nents, data_left);
+
+	memset(&wr->s_ib_sge, 0, sizeof(*ib_sge));
+	ib_sge = &wr->s_ib_sge;
+	wr->ib_sge = ib_sge;
+
+	wr->send_wr_num = 1;
+	memset(&wr->s_send_wr, 0, sizeof(*send_wr));
+	wr->send_wr = &wr->s_send_wr;
+
+	wr->isert_cmd = isert_cmd;
+	rdma_write_max = ISCSI_ISER_SG_TABLESIZE * PAGE_SIZE;
+
+	send_wr = &isert_cmd->rdma_wr.s_send_wr;
+	send_wr->sg_list = ib_sge;
+	send_wr->num_sge = 1;
+	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
+	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
+		send_wr->opcode = IB_WR_RDMA_WRITE;
+		send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
+		send_wr->wr.rdma.rkey = isert_cmd->read_stag;
+		send_wr->send_flags = 0;
+		send_wr->next = &isert_cmd->tx_desc.send_wr;
+	} else {
+		send_wr->opcode = IB_WR_RDMA_READ;
+		send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
+		send_wr->wr.rdma.rkey = isert_cmd->write_stag;
+		send_wr->send_flags = IB_SEND_SIGNALED;
 	}
-	isert_cmd->ib_sge = ib_sge;
 
-	pr_debug("Using ib_sge: %p from sg_ents: %d for RDMA_READ\n",
-		 ib_sge, sg_nents);
+	data_len = min(data_left, rdma_write_max);
+	wr->cur_rdma_length = data_len;
 
-	wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
-	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
-			GFP_KERNEL);
-	if (!wr->send_wr) {
-		pr_debug("Unable to allocate wr->send_wr\n");
-		ret = -ENOMEM;
+	spin_lock_irqsave(&isert_conn->conn_lock, flags);
+	fr_desc = list_first_entry(&isert_conn->conn_frwr_pool,
+				   struct fast_reg_descriptor, list);
+	list_del(&fr_desc->list);
+	spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
+	wr->fr_desc = fr_desc;
+
+	ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn,
+				ib_sge, offset, data_len);
+	if (ret) {
+		list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
 		goto unmap_sg;
 	}
-	pr_debug("Allocated wr->send_wr: %p wr->send_wr_num: %u\n",
-		 wr->send_wr, wr->send_wr_num);
 
-	isert_cmd->tx_desc.isert_cmd = isert_cmd;
+	return 0;
 
-	wr->iser_ib_op = ISER_IB_RDMA_READ;
-	wr->isert_cmd = isert_cmd;
-	rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
-	offset = cmd->write_data_done;
+unmap_sg:
+	ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
+			(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
+			DMA_TO_DEVICE : DMA_FROM_DEVICE);
+	return ret;
+}
 
-	for (i = 0; i < wr->send_wr_num; i++) {
-		send_wr = &isert_cmd->rdma_wr.send_wr[i];
-		data_len = min(data_left, rdma_write_max);
+static int
+isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+{
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
+	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+	struct isert_device *device = isert_conn->conn_device;
+	struct ib_send_wr *wr_failed;
+	int rc;
 
-		send_wr->opcode = IB_WR_RDMA_READ;
-		send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
-		send_wr->wr.rdma.rkey = isert_cmd->write_stag;
+	pr_debug("Cmd: %p RDMA_WRITE data_length: %u\n",
+		 isert_cmd, se_cmd->data_length);
+	wr->iser_ib_op = ISER_IB_RDMA_WRITE;
+	rc = device->reg_rdma_mem(conn, cmd, wr);
+	if (rc) {
+		pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
+		return rc;
+	}
 
-		ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
-					send_wr, data_len, offset);
-		ib_sge += ib_sge_cnt;
+	/*
+	 * Build isert_conn->tx_desc for iSCSI response PDU and attach
	 */
+	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
+	iscsit_build_rsp_pdu(cmd, conn, false, (struct iscsi_scsi_rsp *)
+			     &isert_cmd->tx_desc.iscsi_header);
+	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
+	isert_init_send_wr(isert_cmd, &isert_cmd->tx_desc.send_wr);
 
-		if (i + 1 == wr->send_wr_num)
-			send_wr->send_flags = IB_SEND_SIGNALED;
-		else
-			send_wr->next = &wr->send_wr[i + 1];
+	atomic_inc(&isert_conn->post_send_buf_count);
 
-		offset += data_len;
-		va_offset += data_len;
-		data_left -= data_len;
+	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
+	if (rc) {
+		pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
+		atomic_dec(&isert_conn->post_send_buf_count);
+	}
+	pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n",
+		 isert_cmd);
+
+	return 1;
+}
+
+static int
+isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
+{
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
+	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+	struct isert_device *device = isert_conn->conn_device;
+	struct ib_send_wr *wr_failed;
+	int rc;
+
+	pr_debug("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
+		 isert_cmd, se_cmd->data_length, cmd->write_data_done);
+	wr->iser_ib_op = ISER_IB_RDMA_READ;
+	rc = device->reg_rdma_mem(conn, cmd, wr);
+	if (rc) {
+		pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
+		return rc;
 	}
 
 	atomic_inc(&isert_conn->post_send_buf_count);
@@ -2050,12 +2351,10 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
 		pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
 		atomic_dec(&isert_conn->post_send_buf_count);
 	}
-	pr_debug("Posted RDMA_READ memory for ISER Data WRITE\n");
-	return 0;
+	pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
+		 isert_cmd);
 
-unmap_sg:
-	ib_dma_unmap_sg(ib_dev, sg_start, sg_nents, DMA_FROM_DEVICE);
-	return ret;
+	return 0;
 }
 
 static int
@@ -2224,6 +2523,14 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
 	int ret;
 
 	pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);
+	/*
+	 * For login requests after the first PDU, isert_rx_login_req() will
+	 * kick schedule_delayed_work(&conn->login_work) as the packet is
+	 * received, which turns this callback from iscsi_target_do_login_rx()
+	 * into a NOP.
+	 */
+	if (!login->first_request)
+		return 0;
 
 	ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
 	if (ret)
@@ -2393,12 +2700,12 @@ static void isert_free_conn(struct iscsi_conn *conn)
 static struct iscsit_transport iser_target_transport = {
 	.name			= "IB/iSER",
 	.transport_type		= ISCSI_INFINIBAND,
+	.priv_size		= sizeof(struct isert_cmd),
 	.owner			= THIS_MODULE,
 	.iscsit_setup_np	= isert_setup_np,
 	.iscsit_accept_np	= isert_accept_np,
 	.iscsit_free_np		= isert_free_np,
 	.iscsit_free_conn	= isert_free_conn,
-	.iscsit_alloc_cmd	= isert_alloc_cmd,
 	.iscsit_get_login_rx	= isert_get_login_rx,
 	.iscsit_put_login_tx	= isert_put_login_tx,
 	.iscsit_immediate_queue	= isert_immediate_queue,
@@ -2425,21 +2732,10 @@ static int __init isert_init(void)
2425 goto destroy_rx_wq; 2732 goto destroy_rx_wq;
2426 } 2733 }
2427 2734
2428 isert_cmd_cache = kmem_cache_create("isert_cmd_cache",
2429 sizeof(struct isert_cmd), __alignof__(struct isert_cmd),
2430 0, NULL);
2431 if (!isert_cmd_cache) {
2432 pr_err("Unable to create isert_cmd_cache\n");
2433 ret = -ENOMEM;
2434 goto destroy_tx_cq;
2435 }
2436
2437 iscsit_register_transport(&iser_target_transport); 2735 iscsit_register_transport(&iser_target_transport);
2438 pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n"); 2736 pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
2439 return 0; 2737 return 0;
2440 2738
2441destroy_tx_cq:
2442 destroy_workqueue(isert_comp_wq);
2443destroy_rx_wq: 2739destroy_rx_wq:
2444 destroy_workqueue(isert_rx_wq); 2740 destroy_workqueue(isert_rx_wq);
2445 return ret; 2741 return ret;
@@ -2447,7 +2743,6 @@ destroy_rx_wq:
2447 2743
2448static void __exit isert_exit(void) 2744static void __exit isert_exit(void)
2449{ 2745{
2450 kmem_cache_destroy(isert_cmd_cache);
2451 destroy_workqueue(isert_comp_wq); 2746 destroy_workqueue(isert_comp_wq);
2452 destroy_workqueue(isert_rx_wq); 2747 destroy_workqueue(isert_rx_wq);
2453 iscsit_unregister_transport(&iser_target_transport); 2748 iscsit_unregister_transport(&iser_target_transport);
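
The isert_cmd_cache removal above is the flip side of the new .priv_size = sizeof(struct isert_cmd) entry in iser_target_transport: rather than every transport keeping its own kmem_cache, iscsi-target now reserves one contiguous per-tag region, and the driver recovers its private portion through iscsit_priv_cmd(), as used in isert_get_dataout() earlier in this diff. A minimal sketch of the assumed accessor follows; the real definition lives in the shared iscsi-target headers, not in this hunk:

	/* Hedged sketch: the transport-private area (priv_size bytes) is
	 * assumed to sit immediately after the generic descriptor. */
	static inline void *iscsit_priv_cmd(struct iscsi_cmd *cmd)
	{
		return (void *)(cmd + 1);
	}

Under that assumption each tag owns sizeof(struct iscsi_cmd) + priv_size bytes, which is why neither lio_cmd_cache nor isert_cmd_cache is needed any longer.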
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 191117b5b508..631f2090f0b8 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -5,6 +5,7 @@
5#include <rdma/rdma_cm.h> 5#include <rdma/rdma_cm.h>
6 6
7#define ISERT_RDMA_LISTEN_BACKLOG 10 7#define ISERT_RDMA_LISTEN_BACKLOG 10
8#define ISCSI_ISER_SG_TABLESIZE 256
8 9
9enum isert_desc_type { 10enum isert_desc_type {
10 ISCSI_TX_CONTROL, 11 ISCSI_TX_CONTROL,
@@ -45,15 +46,26 @@ struct iser_tx_desc {
45 struct ib_send_wr send_wr; 46 struct ib_send_wr send_wr;
46} __packed; 47} __packed;
47 48
49struct fast_reg_descriptor {
50 struct list_head list;
51 struct ib_mr *data_mr;
52 struct ib_fast_reg_page_list *data_frpl;
53 bool valid;
54};
55
48struct isert_rdma_wr { 56struct isert_rdma_wr {
49 struct list_head wr_list; 57 struct list_head wr_list;
50 struct isert_cmd *isert_cmd; 58 struct isert_cmd *isert_cmd;
51 enum iser_ib_op_code iser_ib_op; 59 enum iser_ib_op_code iser_ib_op;
52 struct ib_sge *ib_sge; 60 struct ib_sge *ib_sge;
61 struct ib_sge s_ib_sge;
53 int num_sge; 62 int num_sge;
54 struct scatterlist *sge; 63 struct scatterlist *sge;
55 int send_wr_num; 64 int send_wr_num;
56 struct ib_send_wr *send_wr; 65 struct ib_send_wr *send_wr;
66 struct ib_send_wr s_send_wr;
67 u32 cur_rdma_length;
68 struct fast_reg_descriptor *fr_desc;
57}; 69};
58 70
59struct isert_cmd { 71struct isert_cmd {
@@ -67,8 +79,7 @@ struct isert_cmd {
67 u32 write_va_off; 79 u32 write_va_off;
68 u32 rdma_wr_num; 80 u32 rdma_wr_num;
69 struct isert_conn *conn; 81 struct isert_conn *conn;
70 struct iscsi_cmd iscsi_cmd; 82 struct iscsi_cmd *iscsi_cmd;
71 struct ib_sge *ib_sge;
72 struct iser_tx_desc tx_desc; 83 struct iser_tx_desc tx_desc;
73 struct isert_rdma_wr rdma_wr; 84 struct isert_rdma_wr rdma_wr;
74 struct work_struct comp_work; 85 struct work_struct comp_work;
@@ -106,6 +117,10 @@ struct isert_conn {
106 wait_queue_head_t conn_wait; 117 wait_queue_head_t conn_wait;
107 wait_queue_head_t conn_wait_comp_err; 118 wait_queue_head_t conn_wait_comp_err;
108 struct kref conn_kref; 119 struct kref conn_kref;
120 struct list_head conn_frwr_pool;
121 int conn_frwr_pool_size;
122 /* lock to protect frwr_pool */
123 spinlock_t conn_lock;
109}; 124};
110 125
111#define ISERT_MAX_CQ 64 126#define ISERT_MAX_CQ 64
@@ -118,6 +133,7 @@ struct isert_cq_desc {
118}; 133};
119 134
120struct isert_device { 135struct isert_device {
136 int use_frwr;
121 int cqs_used; 137 int cqs_used;
122 int refcount; 138 int refcount;
123 int cq_active_qps[ISERT_MAX_CQ]; 139 int cq_active_qps[ISERT_MAX_CQ];
@@ -128,6 +144,12 @@ struct isert_device {
128 struct ib_cq *dev_tx_cq[ISERT_MAX_CQ]; 144 struct ib_cq *dev_tx_cq[ISERT_MAX_CQ];
129 struct isert_cq_desc *cq_desc; 145 struct isert_cq_desc *cq_desc;
130 struct list_head dev_node; 146 struct list_head dev_node;
147 struct ib_device_attr dev_attr;
148 int (*reg_rdma_mem)(struct iscsi_conn *conn,
149 struct iscsi_cmd *cmd,
150 struct isert_rdma_wr *wr);
151 void (*unreg_rdma_mem)(struct isert_cmd *isert_cmd,
152 struct isert_conn *isert_conn);
131}; 153};
132 154
133struct isert_np { 155struct isert_np {
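
The new reg_rdma_mem/unreg_rdma_mem hooks, together with use_frwr and the cached dev_attr, give isert a per-device dispatch point for memory registration. A hedged sketch of how device setup presumably wires them, keyed off IB_DEVICE_MEM_MGT_EXTENSIONS; the concrete callback names are assumptions, not taken from this hunk:

	static void isert_pick_reg_ops(struct isert_device *device)
	{
		/* Assumption: FRWR is selected when the HCA advertises
		 * fast registration work request support. */
		if (device->dev_attr.device_cap_flags &
		    IB_DEVICE_MEM_MGT_EXTENSIONS) {
			device->use_frwr = 1;
			device->reg_rdma_mem = isert_reg_rdma_frwr;	/* hypothetical */
			device->unreg_rdma_mem = isert_unreg_rdma_frwr;	/* hypothetical */
		} else {
			device->use_frwr = 0;
			device->reg_rdma_mem = isert_map_rdma;		/* hypothetical */
			device->unreg_rdma_mem = isert_unmap_cmd;	/* hypothetical */
		}
	}

Callers such as isert_get_dataout() then stay registration-agnostic and simply invoke device->reg_rdma_mem().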
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index ff12d4677cc4..596480022b0a 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -10,7 +10,7 @@
10 * 10 *
11 * Forward port and refactoring to modern qla2xxx and target/configfs 11 * Forward port and refactoring to modern qla2xxx and target/configfs
12 * 12 *
13 * Copyright (C) 2010-2011 Nicholas A. Bellinger <nab@kernel.org> 13 * Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
14 * 14 *
15 * This program is free software; you can redistribute it and/or 15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License 16 * modify it under the terms of the GNU General Public License
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index a6da313e253b..f85b9e5c1f05 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -2,12 +2,9 @@
2 * This file contains tcm implementation using v4 configfs fabric infrastructure 2 * This file contains tcm implementation using v4 configfs fabric infrastructure
3 * for QLogic target mode HBAs 3 * for QLogic target mode HBAs
4 * 4 *
5 * © Copyright 2010-2011 RisingTide Systems LLC. 5 * (c) Copyright 2010-2013 Datera, Inc.
6 * 6 *
7 * Licensed to the Linux Foundation under the General Public License (GPL) 7 * Author: Nicholas A. Bellinger <nab@daterainc.com>
8 * version 2.
9 *
10 * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
11 * 8 *
12 * tcm_qla2xxx_parse_wwn() and tcm_qla2xxx_format_wwn() contains code from 9 * tcm_qla2xxx_parse_wwn() and tcm_qla2xxx_format_wwn() contains code from
13 * the TCM_FC / Open-FCoE.org fabric module. 10 * the TCM_FC / Open-FCoE.org fabric module.
@@ -360,6 +357,14 @@ static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg)
360 return QLA_TPG_ATTRIB(tpg)->prod_mode_write_protect; 357 return QLA_TPG_ATTRIB(tpg)->prod_mode_write_protect;
361} 358}
362 359
360static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg)
361{
362 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
363 struct tcm_qla2xxx_tpg, se_tpg);
364
365 return QLA_TPG_ATTRIB(tpg)->demo_mode_login_only;
366}
367
363static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl( 368static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl(
364 struct se_portal_group *se_tpg) 369 struct se_portal_group *se_tpg)
365{ 370{
@@ -489,38 +494,13 @@ static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess)
489 return 0; 494 return 0;
490} 495}
491 496
492/*
493 * The LIO target core uses DMA_TO_DEVICE to mean that data is going
494 * to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean
495 * that data is coming from the target (eg handling a READ). However,
496 * this is just the opposite of what we have to tell the DMA mapping
497 * layer -- eg when handling a READ, the HBA will have to DMA the data
498 * out of memory so it can send it to the initiator, which means we
499 * need to use DMA_TO_DEVICE when we map the data.
500 */
501static enum dma_data_direction tcm_qla2xxx_mapping_dir(struct se_cmd *se_cmd)
502{
503 if (se_cmd->se_cmd_flags & SCF_BIDI)
504 return DMA_BIDIRECTIONAL;
505
506 switch (se_cmd->data_direction) {
507 case DMA_TO_DEVICE:
508 return DMA_FROM_DEVICE;
509 case DMA_FROM_DEVICE:
510 return DMA_TO_DEVICE;
511 case DMA_NONE:
512 default:
513 return DMA_NONE;
514 }
515}
516
517static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd) 497static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
518{ 498{
519 struct qla_tgt_cmd *cmd = container_of(se_cmd, 499 struct qla_tgt_cmd *cmd = container_of(se_cmd,
520 struct qla_tgt_cmd, se_cmd); 500 struct qla_tgt_cmd, se_cmd);
521 501
522 cmd->bufflen = se_cmd->data_length; 502 cmd->bufflen = se_cmd->data_length;
523 cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd); 503 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
524 504
525 cmd->sg_cnt = se_cmd->t_data_nents; 505 cmd->sg_cnt = se_cmd->t_data_nents;
526 cmd->sg = se_cmd->t_data_sg; 506 cmd->sg = se_cmd->t_data_sg;
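
The open-coded tcm_qla2xxx_mapping_dir() deleted above moves into target-core as target_reverse_dma_direction(). Presumably the shared helper carries the removed body over unchanged, along these lines (sketch reconstructed from the deleted code, not shown in this diff):

	static inline enum dma_data_direction
	target_reverse_dma_direction(struct se_cmd *se_cmd)
	{
		if (se_cmd->se_cmd_flags & SCF_BIDI)
			return DMA_BIDIRECTIONAL;

		switch (se_cmd->data_direction) {
		case DMA_TO_DEVICE:
			return DMA_FROM_DEVICE;	/* WRITE: HBA DMAs into memory */
		case DMA_FROM_DEVICE:
			return DMA_TO_DEVICE;	/* READ: HBA DMAs out of memory */
		case DMA_NONE:
		default:
			return DMA_NONE;
		}
	}

Hoisting the flip into core code lets any fabric driver that programs its HBA's DMA engine share it instead of re-deriving the inversion.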
@@ -656,7 +636,7 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
656 struct qla_tgt_cmd, se_cmd); 636 struct qla_tgt_cmd, se_cmd);
657 637
658 cmd->bufflen = se_cmd->data_length; 638 cmd->bufflen = se_cmd->data_length;
659 cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd); 639 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
660 cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED); 640 cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
661 641
662 cmd->sg_cnt = se_cmd->t_data_nents; 642 cmd->sg_cnt = se_cmd->t_data_nents;
@@ -680,7 +660,7 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
680 cmd->sg = NULL; 660 cmd->sg = NULL;
681 cmd->sg_cnt = 0; 661 cmd->sg_cnt = 0;
682 cmd->offset = 0; 662 cmd->offset = 0;
683 cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd); 663 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
684 cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED); 664 cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
685 665
686 if (se_cmd->data_direction == DMA_FROM_DEVICE) { 666 if (se_cmd->data_direction == DMA_FROM_DEVICE) {
@@ -939,11 +919,19 @@ DEF_QLA_TPG_ATTR_BOOL(prod_mode_write_protect);
939DEF_QLA_TPG_ATTRIB(prod_mode_write_protect); 919DEF_QLA_TPG_ATTRIB(prod_mode_write_protect);
940QLA_TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR); 920QLA_TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);
941 921
922/*
923 * Define tcm_qla2xxx_tpg_attrib_s_demo_mode_login_only
924 */
925DEF_QLA_TPG_ATTR_BOOL(demo_mode_login_only);
926DEF_QLA_TPG_ATTRIB(demo_mode_login_only);
927QLA_TPG_ATTR(demo_mode_login_only, S_IRUGO | S_IWUSR);
928
942static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = { 929static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = {
943 &tcm_qla2xxx_tpg_attrib_generate_node_acls.attr, 930 &tcm_qla2xxx_tpg_attrib_generate_node_acls.attr,
944 &tcm_qla2xxx_tpg_attrib_cache_dynamic_acls.attr, 931 &tcm_qla2xxx_tpg_attrib_cache_dynamic_acls.attr,
945 &tcm_qla2xxx_tpg_attrib_demo_mode_write_protect.attr, 932 &tcm_qla2xxx_tpg_attrib_demo_mode_write_protect.attr,
946 &tcm_qla2xxx_tpg_attrib_prod_mode_write_protect.attr, 933 &tcm_qla2xxx_tpg_attrib_prod_mode_write_protect.attr,
934 &tcm_qla2xxx_tpg_attrib_demo_mode_login_only.attr,
947 NULL, 935 NULL,
948}; 936};
949 937
@@ -1042,6 +1030,7 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg(
1042 QLA_TPG_ATTRIB(tpg)->generate_node_acls = 1; 1030 QLA_TPG_ATTRIB(tpg)->generate_node_acls = 1;
1043 QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect = 1; 1031 QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect = 1;
1044 QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls = 1; 1032 QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls = 1;
1033 QLA_TPG_ATTRIB(tpg)->demo_mode_login_only = 1;
1045 1034
1046 ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn, 1035 ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn,
1047 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); 1036 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
@@ -1736,7 +1725,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = {
1736 tcm_qla2xxx_check_demo_write_protect, 1725 tcm_qla2xxx_check_demo_write_protect,
1737 .tpg_check_prod_mode_write_protect = 1726 .tpg_check_prod_mode_write_protect =
1738 tcm_qla2xxx_check_prod_write_protect, 1727 tcm_qla2xxx_check_prod_write_protect,
1739 .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true, 1728 .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only,
1740 .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl, 1729 .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl,
1741 .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl, 1730 .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl,
1742 .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, 1731 .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
@@ -1784,7 +1773,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
1784 .tpg_check_demo_mode_cache = tcm_qla2xxx_check_true, 1773 .tpg_check_demo_mode_cache = tcm_qla2xxx_check_true,
1785 .tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_true, 1774 .tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_true,
1786 .tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_false, 1775 .tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_false,
1787 .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true, 1776 .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only,
1788 .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl, 1777 .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl,
1789 .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl, 1778 .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl,
1790 .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, 1779 .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index 9ba075fe9781..329327528a55 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -29,6 +29,7 @@ struct tcm_qla2xxx_tpg_attrib {
29 int cache_dynamic_acls; 29 int cache_dynamic_acls;
30 int demo_mode_write_protect; 30 int demo_mode_write_protect;
31 int prod_mode_write_protect; 31 int prod_mode_write_protect;
32 int demo_mode_login_only;
32}; 33};
33 34
34struct tcm_qla2xxx_tpg { 35struct tcm_qla2xxx_tpg {
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index 9fdcb561422f..85b012d2f89b 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -13,7 +13,8 @@ target_core_mod-y := target_core_configfs.o \
13 target_core_spc.o \ 13 target_core_spc.o \
14 target_core_ua.o \ 14 target_core_ua.o \
15 target_core_rd.o \ 15 target_core_rd.o \
16 target_core_stat.o 16 target_core_stat.o \
17 target_core_xcopy.o
17 18
18obj-$(CONFIG_TARGET_CORE) += target_core_mod.o 19obj-$(CONFIG_TARGET_CORE) += target_core_mod.o
19 20
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 3a179302b904..35b61f7d6c63 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1,9 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * This file contains main functions related to the iSCSI Target Core Driver. 2 * This file contains main functions related to the iSCSI Target Core Driver.
3 * 3 *
4 * © Copyright 2007-2011 RisingTide Systems LLC. 4 * (c) Copyright 2007-2013 Datera, Inc.
5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 * 5 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 6 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 * 7 *
@@ -63,7 +61,6 @@ spinlock_t sess_idr_lock;
63 61
64struct iscsit_global *iscsit_global; 62struct iscsit_global *iscsit_global;
65 63
66struct kmem_cache *lio_cmd_cache;
67struct kmem_cache *lio_qr_cache; 64struct kmem_cache *lio_qr_cache;
68struct kmem_cache *lio_dr_cache; 65struct kmem_cache *lio_dr_cache;
69struct kmem_cache *lio_ooo_cache; 66struct kmem_cache *lio_ooo_cache;
@@ -220,11 +217,6 @@ int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
220 spin_unlock_bh(&np->np_thread_lock); 217 spin_unlock_bh(&np->np_thread_lock);
221 return -1; 218 return -1;
222 } 219 }
223 if (np->np_login_tpg) {
224 pr_err("np->np_login_tpg() is not NULL!\n");
225 spin_unlock_bh(&np->np_thread_lock);
226 return -1;
227 }
228 spin_unlock_bh(&np->np_thread_lock); 220 spin_unlock_bh(&np->np_thread_lock);
229 /* 221 /*
230 * Determine if the portal group is accepting storage traffic. 222 * Determine if the portal group is accepting storage traffic.
@@ -239,26 +231,38 @@ int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
239 /* 231 /*
240 * Here we serialize access across the TIQN+TPG Tuple. 232 * Here we serialize access across the TIQN+TPG Tuple.
241 */ 233 */
242 ret = mutex_lock_interruptible(&tpg->np_login_lock); 234 ret = down_interruptible(&tpg->np_login_sem);
243 if ((ret != 0) || signal_pending(current)) 235 if ((ret != 0) || signal_pending(current))
244 return -1; 236 return -1;
245 237
246 spin_lock_bh(&np->np_thread_lock); 238 spin_lock_bh(&tpg->tpg_state_lock);
247 np->np_login_tpg = tpg; 239 if (tpg->tpg_state != TPG_STATE_ACTIVE) {
248 spin_unlock_bh(&np->np_thread_lock); 240 spin_unlock_bh(&tpg->tpg_state_lock);
241 up(&tpg->np_login_sem);
242 return -1;
243 }
244 spin_unlock_bh(&tpg->tpg_state_lock);
249 245
250 return 0; 246 return 0;
251} 247}
252 248
253int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg) 249void iscsit_login_kref_put(struct kref *kref)
250{
251 struct iscsi_tpg_np *tpg_np = container_of(kref,
252 struct iscsi_tpg_np, tpg_np_kref);
253
254 complete(&tpg_np->tpg_np_comp);
255}
256
257int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg,
258 struct iscsi_tpg_np *tpg_np)
254{ 259{
255 struct iscsi_tiqn *tiqn = tpg->tpg_tiqn; 260 struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
256 261
257 spin_lock_bh(&np->np_thread_lock); 262 up(&tpg->np_login_sem);
258 np->np_login_tpg = NULL;
259 spin_unlock_bh(&np->np_thread_lock);
260 263
261 mutex_unlock(&tpg->np_login_lock); 264 if (tpg_np)
265 kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
262 266
263 if (tiqn) 267 if (tiqn)
264 iscsit_put_tiqn_for_login(tiqn); 268 iscsit_put_tiqn_for_login(tiqn);
@@ -410,20 +414,10 @@ struct iscsi_np *iscsit_add_np(
410int iscsit_reset_np_thread( 414int iscsit_reset_np_thread(
411 struct iscsi_np *np, 415 struct iscsi_np *np,
412 struct iscsi_tpg_np *tpg_np, 416 struct iscsi_tpg_np *tpg_np,
413 struct iscsi_portal_group *tpg) 417 struct iscsi_portal_group *tpg,
418 bool shutdown)
414{ 419{
415 spin_lock_bh(&np->np_thread_lock); 420 spin_lock_bh(&np->np_thread_lock);
416 if (tpg && tpg_np) {
417 /*
418 * The reset operation need only be performed when the
419 * passed struct iscsi_portal_group has a login in progress
420 * to one of the network portals.
421 */
422 if (tpg_np->tpg_np->np_login_tpg != tpg) {
423 spin_unlock_bh(&np->np_thread_lock);
424 return 0;
425 }
426 }
427 if (np->np_thread_state == ISCSI_NP_THREAD_INACTIVE) { 421 if (np->np_thread_state == ISCSI_NP_THREAD_INACTIVE) {
428 spin_unlock_bh(&np->np_thread_lock); 422 spin_unlock_bh(&np->np_thread_lock);
429 return 0; 423 return 0;
@@ -438,6 +432,12 @@ int iscsit_reset_np_thread(
438 } 432 }
439 spin_unlock_bh(&np->np_thread_lock); 433 spin_unlock_bh(&np->np_thread_lock);
440 434
435 if (tpg_np && shutdown) {
436 kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
437
438 wait_for_completion(&tpg_np->tpg_np_comp);
439 }
440
441 return 0; 441 return 0;
442} 442}
443 443
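
The reference handling above establishes the new shutdown contract: an in-flight login pins its struct iscsi_tpg_np, the final kref_put() fires tpg_np_comp via iscsit_login_kref_put(), and iscsit_reset_np_thread() drops its own reference and then blocks until the login side has let go. A minimal self-contained sketch of this kref-plus-completion idiom, with generic names rather than the driver's:

	#include <linux/kernel.h>
	#include <linux/kref.h>
	#include <linux/completion.h>

	struct login_ref {
		struct kref		kref;
		struct completion	done;
	};

	static void login_ref_release(struct kref *kref)
	{
		struct login_ref *ref = container_of(kref, struct login_ref, kref);

		complete(&ref->done);		/* last put wakes the waiter */
	}

	/* shutdown side: drop our reference, then wait out any other holder */
	static void login_ref_shutdown(struct login_ref *ref)
	{
		kref_put(&ref->kref, login_ref_release);
		wait_for_completion(&ref->done);
	}

The completion only fires once every holder has called kref_put(), so the waiter cannot race a login that is still using the portal.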
@@ -497,7 +497,6 @@ static struct iscsit_transport iscsi_target_transport = {
497 .iscsit_setup_np = iscsit_setup_np, 497 .iscsit_setup_np = iscsit_setup_np,
498 .iscsit_accept_np = iscsit_accept_np, 498 .iscsit_accept_np = iscsit_accept_np,
499 .iscsit_free_np = iscsit_free_np, 499 .iscsit_free_np = iscsit_free_np,
500 .iscsit_alloc_cmd = iscsit_alloc_cmd,
501 .iscsit_get_login_rx = iscsit_get_login_rx, 500 .iscsit_get_login_rx = iscsit_get_login_rx,
502 .iscsit_put_login_tx = iscsit_put_login_tx, 501 .iscsit_put_login_tx = iscsit_put_login_tx,
503 .iscsit_get_dataout = iscsit_build_r2ts_for_cmd, 502 .iscsit_get_dataout = iscsit_build_r2ts_for_cmd,
@@ -538,22 +537,13 @@ static int __init iscsi_target_init_module(void)
538 goto ts_out1; 537 goto ts_out1;
539 } 538 }
540 539
541 lio_cmd_cache = kmem_cache_create("lio_cmd_cache",
542 sizeof(struct iscsi_cmd), __alignof__(struct iscsi_cmd),
543 0, NULL);
544 if (!lio_cmd_cache) {
545 pr_err("Unable to kmem_cache_create() for"
546 " lio_cmd_cache\n");
547 goto ts_out2;
548 }
549
550 lio_qr_cache = kmem_cache_create("lio_qr_cache", 540 lio_qr_cache = kmem_cache_create("lio_qr_cache",
551 sizeof(struct iscsi_queue_req), 541 sizeof(struct iscsi_queue_req),
552 __alignof__(struct iscsi_queue_req), 0, NULL); 542 __alignof__(struct iscsi_queue_req), 0, NULL);
553 if (!lio_qr_cache) { 543 if (!lio_qr_cache) {
554 pr_err("nable to kmem_cache_create() for" 544 pr_err("nable to kmem_cache_create() for"
555 " lio_qr_cache\n"); 545 " lio_qr_cache\n");
556 goto cmd_out; 546 goto ts_out2;
557 } 547 }
558 548
559 lio_dr_cache = kmem_cache_create("lio_dr_cache", 549 lio_dr_cache = kmem_cache_create("lio_dr_cache",
@@ -597,8 +587,6 @@ dr_out:
597 kmem_cache_destroy(lio_dr_cache); 587 kmem_cache_destroy(lio_dr_cache);
598qr_out: 588qr_out:
599 kmem_cache_destroy(lio_qr_cache); 589 kmem_cache_destroy(lio_qr_cache);
600cmd_out:
601 kmem_cache_destroy(lio_cmd_cache);
602ts_out2: 590ts_out2:
603 iscsi_deallocate_thread_sets(); 591 iscsi_deallocate_thread_sets();
604ts_out1: 592ts_out1:
@@ -616,7 +604,6 @@ static void __exit iscsi_target_cleanup_module(void)
616 iscsi_thread_set_free(); 604 iscsi_thread_set_free();
617 iscsit_release_discovery_tpg(); 605 iscsit_release_discovery_tpg();
618 iscsit_unregister_transport(&iscsi_target_transport); 606 iscsit_unregister_transport(&iscsi_target_transport);
619 kmem_cache_destroy(lio_cmd_cache);
620 kmem_cache_destroy(lio_qr_cache); 607 kmem_cache_destroy(lio_qr_cache);
621 kmem_cache_destroy(lio_dr_cache); 608 kmem_cache_destroy(lio_dr_cache);
622 kmem_cache_destroy(lio_ooo_cache); 609 kmem_cache_destroy(lio_ooo_cache);
@@ -3447,12 +3434,10 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
3447 bool inaddr_any = iscsit_check_inaddr_any(np); 3434 bool inaddr_any = iscsit_check_inaddr_any(np);
3448 3435
3449 len = sprintf(buf, "TargetAddress=" 3436 len = sprintf(buf, "TargetAddress="
3450 "%s%s%s:%hu,%hu", 3437 "%s:%hu,%hu",
3451 (np->np_sockaddr.ss_family == AF_INET6) ? 3438 (inaddr_any == false) ?
3452 "[" : "", (inaddr_any == false) ?
3453 np->np_ip : conn->local_ip, 3439 np->np_ip : conn->local_ip,
3454 (np->np_sockaddr.ss_family == AF_INET6) ? 3440 (inaddr_any == false) ?
3455 "]" : "", (inaddr_any == false) ?
3456 np->np_port : conn->local_port, 3441 np->np_port : conn->local_port,
3457 tpg->tpgt); 3442 tpg->tpgt);
3458 len += 1; 3443 len += 1;
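
The "[" / "]" wrapping dropped here is not lost: with the iscsit_accept_np() change later in this series, conn->local_ip (and, by the same logic, np->np_ip) are assumed to arrive pre-bracketed for IPv6, so SendTargets can use a single plain format string. An equivalent sketch of the simplified formatting, with field types assumed from the surrounding code:

	static int format_target_address(char *buf, struct iscsi_np *np,
					 struct iscsi_conn *conn, u16 tpgt,
					 bool inaddr_any)
	{
		/* addresses are assumed to carry their own "[...]" when IPv6 */
		return sprintf(buf, "TargetAddress=%s:%hu,%hu",
			       inaddr_any ? conn->local_ip : np->np_ip,
			       inaddr_any ? conn->local_port : np->np_port,
			       tpgt);
	}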
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
index 2c437cb8ca00..e936d56fb523 100644
--- a/drivers/target/iscsi/iscsi_target.h
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -7,13 +7,15 @@ extern void iscsit_put_tiqn_for_login(struct iscsi_tiqn *);
7extern struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *); 7extern struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *);
8extern void iscsit_del_tiqn(struct iscsi_tiqn *); 8extern void iscsit_del_tiqn(struct iscsi_tiqn *);
9extern int iscsit_access_np(struct iscsi_np *, struct iscsi_portal_group *); 9extern int iscsit_access_np(struct iscsi_np *, struct iscsi_portal_group *);
10extern int iscsit_deaccess_np(struct iscsi_np *, struct iscsi_portal_group *); 10extern void iscsit_login_kref_put(struct kref *);
11extern int iscsit_deaccess_np(struct iscsi_np *, struct iscsi_portal_group *,
12 struct iscsi_tpg_np *);
11extern bool iscsit_check_np_match(struct __kernel_sockaddr_storage *, 13extern bool iscsit_check_np_match(struct __kernel_sockaddr_storage *,
12 struct iscsi_np *, int); 14 struct iscsi_np *, int);
13extern struct iscsi_np *iscsit_add_np(struct __kernel_sockaddr_storage *, 15extern struct iscsi_np *iscsit_add_np(struct __kernel_sockaddr_storage *,
14 char *, int); 16 char *, int);
15extern int iscsit_reset_np_thread(struct iscsi_np *, struct iscsi_tpg_np *, 17extern int iscsit_reset_np_thread(struct iscsi_np *, struct iscsi_tpg_np *,
16 struct iscsi_portal_group *); 18 struct iscsi_portal_group *, bool);
17extern int iscsit_del_np(struct iscsi_np *); 19extern int iscsit_del_np(struct iscsi_np *);
18extern int iscsit_reject_cmd(struct iscsi_cmd *cmd, u8, unsigned char *); 20extern int iscsit_reject_cmd(struct iscsi_cmd *cmd, u8, unsigned char *);
19extern void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *); 21extern void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *);
@@ -37,7 +39,6 @@ extern struct target_fabric_configfs *lio_target_fabric_configfs;
37 39
38extern struct kmem_cache *lio_dr_cache; 40extern struct kmem_cache *lio_dr_cache;
39extern struct kmem_cache *lio_ooo_cache; 41extern struct kmem_cache *lio_ooo_cache;
40extern struct kmem_cache *lio_cmd_cache;
41extern struct kmem_cache *lio_qr_cache; 42extern struct kmem_cache *lio_qr_cache;
42extern struct kmem_cache *lio_r2t_cache; 43extern struct kmem_cache *lio_r2t_cache;
43 44
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index cee17543278c..7505fddca15f 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -1,9 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * This file houses the main functions for the iSCSI CHAP support 2 * This file houses the main functions for the iSCSI CHAP support
3 * 3 *
4 * © Copyright 2007-2011 RisingTide Systems LLC. 4 * (c) Copyright 2007-2013 Datera, Inc.
5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 * 5 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 6 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 * 7 *
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index bbfd28893164..fd145259361d 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -2,9 +2,7 @@
2 * This file contains the configfs implementation for iSCSI Target mode 2 * This file contains the configfs implementation for iSCSI Target mode
3 * from the LIO-Target Project. 3 * from the LIO-Target Project.
4 * 4 *
5 * © Copyright 2007-2011 RisingTide Systems LLC. 5 * (c) Copyright 2007-2013 Datera, Inc.
6 *
7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8 * 6 *
9 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 7 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
10 * 8 *
@@ -265,9 +263,9 @@ static struct se_tpg_np *lio_target_call_addnptotpg(
265 *port_str = '\0'; /* Terminate string for IP */ 263 *port_str = '\0'; /* Terminate string for IP */
266 port_str++; /* Skip over ":" */ 264 port_str++; /* Skip over ":" */
267 265
268 ret = strict_strtoul(port_str, 0, &port); 266 ret = kstrtoul(port_str, 0, &port);
269 if (ret < 0) { 267 if (ret < 0) {
270 pr_err("strict_strtoul() failed for port_str: %d\n", ret); 268 pr_err("kstrtoul() failed for port_str: %d\n", ret);
271 return ERR_PTR(ret); 269 return ERR_PTR(ret);
272 } 270 }
273 sock_in6 = (struct sockaddr_in6 *)&sockaddr; 271 sock_in6 = (struct sockaddr_in6 *)&sockaddr;
@@ -290,9 +288,9 @@ static struct se_tpg_np *lio_target_call_addnptotpg(
290 *port_str = '\0'; /* Terminate string for IP */ 288 *port_str = '\0'; /* Terminate string for IP */
291 port_str++; /* Skip over ":" */ 289 port_str++; /* Skip over ":" */
292 290
293 ret = strict_strtoul(port_str, 0, &port); 291 ret = kstrtoul(port_str, 0, &port);
294 if (ret < 0) { 292 if (ret < 0) {
295 pr_err("strict_strtoul() failed for port_str: %d\n", ret); 293 pr_err("kstrtoul() failed for port_str: %d\n", ret);
296 return ERR_PTR(ret); 294 return ERR_PTR(ret);
297 } 295 }
298 sock_in = (struct sockaddr_in *)&sockaddr; 296 sock_in = (struct sockaddr_in *)&sockaddr;
@@ -1481,7 +1479,7 @@ static ssize_t lio_target_wwn_show_attr_lio_version(
1481 struct target_fabric_configfs *tf, 1479 struct target_fabric_configfs *tf,
1482 char *page) 1480 char *page)
1483{ 1481{
1484 return sprintf(page, "RisingTide Systems Linux-iSCSI Target "ISCSIT_VERSION"\n"); 1482 return sprintf(page, "Datera Inc. iSCSI Target "ISCSIT_VERSION"\n");
1485} 1483}
1486 1484
1487TF_WWN_ATTR_RO(lio_target, lio_version); 1485TF_WWN_ATTR_RO(lio_target, lio_version);
@@ -1925,7 +1923,7 @@ static void lio_release_cmd(struct se_cmd *se_cmd)
1925 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); 1923 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1926 1924
1927 pr_debug("Entering lio_release_cmd for se_cmd: %p\n", se_cmd); 1925 pr_debug("Entering lio_release_cmd for se_cmd: %p\n", se_cmd);
1928 cmd->release_cmd(cmd); 1926 iscsit_release_cmd(cmd);
1929} 1927}
1930 1928
1931/* End functions for target_core_fabric_ops */ 1929/* End functions for target_core_fabric_ops */
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 4f77a78edef9..9a5721b8ff96 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -9,7 +9,7 @@
9#include <scsi/iscsi_proto.h> 9#include <scsi/iscsi_proto.h>
10#include <target/target_core_base.h> 10#include <target/target_core_base.h>
11 11
12#define ISCSIT_VERSION "v4.1.0-rc2" 12#define ISCSIT_VERSION "v4.1.0"
13#define ISCSI_MAX_DATASN_MISSING_COUNT 16 13#define ISCSI_MAX_DATASN_MISSING_COUNT 16
14#define ISCSI_TX_THREAD_TCP_TIMEOUT 2 14#define ISCSI_TX_THREAD_TCP_TIMEOUT 2
15#define ISCSI_RX_THREAD_TCP_TIMEOUT 2 15#define ISCSI_RX_THREAD_TCP_TIMEOUT 2
@@ -17,6 +17,9 @@
17#define SECONDS_FOR_ASYNC_TEXT 10 17#define SECONDS_FOR_ASYNC_TEXT 10
18#define SECONDS_FOR_LOGOUT_COMP 15 18#define SECONDS_FOR_LOGOUT_COMP 15
19#define WHITE_SPACE " \t\v\f\n\r" 19#define WHITE_SPACE " \t\v\f\n\r"
20#define ISCSIT_MIN_TAGS 16
21#define ISCSIT_EXTRA_TAGS 8
22#define ISCSIT_TCP_BACKLOG 256
20 23
21/* struct iscsi_node_attrib sanity values */ 24/* struct iscsi_node_attrib sanity values */
22#define NA_DATAOUT_TIMEOUT 3 25#define NA_DATAOUT_TIMEOUT 3
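
ISCSIT_MIN_TAGS and ISCSIT_EXTRA_TAGS feed the per-cpu IDA pre-allocation called out in this pull: tags and their iscsi_cmd + priv_size slots are carved out once per session instead of per command. A hedged sketch of the presumed sizing and fast-path lookup; the helpers exist in the shared target code, but their exact usage here is reconstructed, not quoted:

	#include <linux/kernel.h>
	#include <linux/percpu_ida.h>
	#include <target/target_core_base.h>
	#include <target/target_core_fabric.h>

	static int iscsi_session_alloc_tags(struct se_session *se_sess,
					    u32 cmdsn_depth, u32 priv_size)
	{
		u32 tag_num = max_t(u32, ISCSIT_MIN_TAGS, cmdsn_depth);

		/* assumed headroom for immediate + unsolicited PDUs */
		tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;

		return transport_alloc_session_tags(se_sess, tag_num,
				sizeof(struct iscsi_cmd) + priv_size);
	}

	static struct iscsi_cmd *iscsi_tag_to_cmd(struct se_session *se_sess,
						  gfp_t gfp_mask, u32 size)
	{
		int tag = percpu_ida_alloc(&se_sess->sess_tag_pool, gfp_mask);

		if (tag < 0)
			return NULL;
		/* each tag owns one fixed-size slot in sess_cmd_map */
		return (struct iscsi_cmd *)(se_sess->sess_cmd_map + tag * size);
	}

Allocation then costs an O(1) per-cpu tag grab plus pointer arithmetic, with no kmem_cache churn on the I/O path.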
@@ -47,7 +50,7 @@
47#define TA_NETIF_TIMEOUT_MAX 15 50#define TA_NETIF_TIMEOUT_MAX 15
48#define TA_NETIF_TIMEOUT_MIN 2 51#define TA_NETIF_TIMEOUT_MIN 2
49#define TA_GENERATE_NODE_ACLS 0 52#define TA_GENERATE_NODE_ACLS 0
50#define TA_DEFAULT_CMDSN_DEPTH 16 53#define TA_DEFAULT_CMDSN_DEPTH 64
51#define TA_DEFAULT_CMDSN_DEPTH_MAX 512 54#define TA_DEFAULT_CMDSN_DEPTH_MAX 512
52#define TA_DEFAULT_CMDSN_DEPTH_MIN 1 55#define TA_DEFAULT_CMDSN_DEPTH_MIN 1
53#define TA_CACHE_DYNAMIC_ACLS 0 56#define TA_CACHE_DYNAMIC_ACLS 0
@@ -489,7 +492,6 @@ struct iscsi_cmd {
489 u32 first_data_sg_off; 492 u32 first_data_sg_off;
490 u32 kmapped_nents; 493 u32 kmapped_nents;
491 sense_reason_t sense_reason; 494 sense_reason_t sense_reason;
492 void (*release_cmd)(struct iscsi_cmd *);
493} ____cacheline_aligned; 495} ____cacheline_aligned;
494 496
495struct iscsi_tmr_req { 497struct iscsi_tmr_req {
@@ -554,9 +556,19 @@ struct iscsi_conn {
554 struct completion rx_half_close_comp; 556 struct completion rx_half_close_comp;
555 /* socket used by this connection */ 557 /* socket used by this connection */
556 struct socket *sock; 558 struct socket *sock;
559 void (*orig_data_ready)(struct sock *, int);
560 void (*orig_state_change)(struct sock *);
561#define LOGIN_FLAGS_READ_ACTIVE 1
562#define LOGIN_FLAGS_CLOSED 2
563#define LOGIN_FLAGS_READY 4
564 unsigned long login_flags;
565 struct delayed_work login_work;
566 struct delayed_work login_cleanup_work;
567 struct iscsi_login *login;
557 struct timer_list nopin_timer; 568 struct timer_list nopin_timer;
558 struct timer_list nopin_response_timer; 569 struct timer_list nopin_response_timer;
559 struct timer_list transport_timer; 570 struct timer_list transport_timer;
571 struct task_struct *login_kworker;
560 /* Spinlock used for add/deleting cmd's from conn_cmd_list */ 572 /* Spinlock used for add/deleting cmd's from conn_cmd_list */
561 spinlock_t cmd_lock; 573 spinlock_t cmd_lock;
562 spinlock_t conn_usage_lock; 574 spinlock_t conn_usage_lock;
@@ -584,6 +596,7 @@ struct iscsi_conn {
584 void *context; 596 void *context;
585 struct iscsi_login_thread_s *login_thread; 597 struct iscsi_login_thread_s *login_thread;
586 struct iscsi_portal_group *tpg; 598 struct iscsi_portal_group *tpg;
599 struct iscsi_tpg_np *tpg_np;
587 /* Pointer to parent session */ 600 /* Pointer to parent session */
588 struct iscsi_session *sess; 601 struct iscsi_session *sess;
589 /* Pointer to thread_set in use for this conn's threads */ 602 /* Pointer to thread_set in use for this conn's threads */
@@ -682,6 +695,7 @@ struct iscsi_login {
682 u8 version_max; 695 u8 version_max;
683 u8 login_complete; 696 u8 login_complete;
684 u8 login_failed; 697 u8 login_failed;
698 bool zero_tsih;
685 char isid[6]; 699 char isid[6];
686 u32 cmd_sn; 700 u32 cmd_sn;
687 itt_t init_task_tag; 701 itt_t init_task_tag;
@@ -694,6 +708,7 @@ struct iscsi_login {
694 char *req_buf; 708 char *req_buf;
695 char *rsp_buf; 709 char *rsp_buf;
696 struct iscsi_conn *conn; 710 struct iscsi_conn *conn;
711 struct iscsi_np *np;
697} ____cacheline_aligned; 712} ____cacheline_aligned;
698 713
699struct iscsi_node_attrib { 714struct iscsi_node_attrib {
@@ -773,7 +788,6 @@ struct iscsi_np {
773 struct __kernel_sockaddr_storage np_sockaddr; 788 struct __kernel_sockaddr_storage np_sockaddr;
774 struct task_struct *np_thread; 789 struct task_struct *np_thread;
775 struct timer_list np_login_timer; 790 struct timer_list np_login_timer;
776 struct iscsi_portal_group *np_login_tpg;
777 void *np_context; 791 void *np_context;
778 struct iscsit_transport *np_transport; 792 struct iscsit_transport *np_transport;
779 struct list_head np_list; 793 struct list_head np_list;
@@ -788,6 +802,8 @@ struct iscsi_tpg_np {
788 struct list_head tpg_np_parent_list; 802 struct list_head tpg_np_parent_list;
789 struct se_tpg_np se_tpg_np; 803 struct se_tpg_np se_tpg_np;
790 spinlock_t tpg_np_parent_lock; 804 spinlock_t tpg_np_parent_lock;
805 struct completion tpg_np_comp;
806 struct kref tpg_np_kref;
791}; 807};
792 808
793struct iscsi_portal_group { 809struct iscsi_portal_group {
@@ -809,7 +825,7 @@ struct iscsi_portal_group {
809 spinlock_t tpg_state_lock; 825 spinlock_t tpg_state_lock;
810 struct se_portal_group tpg_se_tpg; 826 struct se_portal_group tpg_se_tpg;
811 struct mutex tpg_access_lock; 827 struct mutex tpg_access_lock;
812 struct mutex np_login_lock; 828 struct semaphore np_login_sem;
813 struct iscsi_tpg_attrib tpg_attrib; 829 struct iscsi_tpg_attrib tpg_attrib;
814 struct iscsi_node_auth tpg_demo_auth; 830 struct iscsi_node_auth tpg_demo_auth;
815 /* Pointer to default list of iSCSI parameters for TPG */ 831 /* Pointer to default list of iSCSI parameters for TPG */
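
np_login_lock becoming np_login_sem is more than a rename. Once login RX is driven from conn->login_work, the up() in iscsit_deaccess_np() can execute in a different task than the down_interruptible() in iscsit_access_np(); a mutex must be unlocked by its owner, while a semaphore is ownerless, which is presumably why the lock type changed. A tiny sketch of the idiom this relies on:

	#include <linux/semaphore.h>

	static DEFINE_SEMAPHORE(np_login_sem);	/* binary, count = 1 */

	static int take_login_slot(void)	/* np login thread */
	{
		return down_interruptible(&np_login_sem);
	}

	static void release_login_slot(void)	/* may run from a login_work kworker */
	{
		up(&np_login_sem);	/* legal across tasks, unlike mutex_unlock() */
	}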
diff --git a/drivers/target/iscsi/iscsi_target_datain_values.c b/drivers/target/iscsi/iscsi_target_datain_values.c
index 848fee768948..e93d5a7a3f81 100644
--- a/drivers/target/iscsi/iscsi_target_datain_values.c
+++ b/drivers/target/iscsi/iscsi_target_datain_values.c
@@ -1,9 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * This file contains the iSCSI Target DataIN value generation functions. 2 * This file contains the iSCSI Target DataIN value generation functions.
3 * 3 *
4 * © Copyright 2007-2011 RisingTide Systems LLC. 4 * (c) Copyright 2007-2013 Datera, Inc.
5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 * 5 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 6 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 * 7 *
diff --git a/drivers/target/iscsi/iscsi_target_device.c b/drivers/target/iscsi/iscsi_target_device.c
index 1b74033510a0..6c7a5104a4cd 100644
--- a/drivers/target/iscsi/iscsi_target_device.c
+++ b/drivers/target/iscsi/iscsi_target_device.c
@@ -2,9 +2,7 @@
2 * This file contains the iSCSI Virtual Device and Disk Transport 2 * This file contains the iSCSI Virtual Device and Disk Transport
3 * agnostic related functions. 3 * agnostic related functions.
4 * 4 *
5 © Copyright 2007-2011 RisingTide Systems LLC. 5 * (c) Copyright 2007-2013 Datera, Inc.
6 *
7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8 * 6 *
9 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 7 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
10 * 8 *
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 08bd87833321..41052e512d92 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -2,9 +2,7 @@
2 * This file contains error recovery level zero functions used by 2 * This file contains error recovery level zero functions used by
3 * the iSCSI Target driver. 3 * the iSCSI Target driver.
4 * 4 *
5 * © Copyright 2007-2011 RisingTide Systems LLC. 5 * (c) Copyright 2007-2013 Datera, Inc.
6 *
7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8 * 6 *
9 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 7 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
10 * 8 *
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 586c268679a4..e048d6439f4a 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -1,9 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * This file contains error recovery level one used by the iSCSI Target driver. 2 * This file contains error recovery level one used by the iSCSI Target driver.
3 * 3 *
4 * © Copyright 2007-2011 RisingTide Systems LLC. 4 * (c) Copyright 2007-2013 Datera, Inc.
5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 * 5 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 6 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 * 7 *
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index 45a5afd5ea13..33be1fb1df32 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -2,9 +2,7 @@
2 * This file contains error recovery level two functions used by 2 * This file contains error recovery level two functions used by
3 * the iSCSI Target driver. 3 * the iSCSI Target driver.
4 * 4 *
5 * © Copyright 2007-2011 RisingTide Systems LLC. 5 * (c) Copyright 2007-2013 Datera, Inc.
6 *
7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8 * 6 *
9 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 7 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
10 * 8 *
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index bc788c52b6cc..1794c753954a 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1,9 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * This file contains the login functions used by the iSCSI Target driver. 2 * This file contains the login functions used by the iSCSI Target driver.
3 * 3 *
4 * © Copyright 2007-2011 RisingTide Systems LLC. 4 * (c) Copyright 2007-2013 Datera, Inc.
5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 * 5 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 6 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 * 7 *
@@ -50,6 +48,7 @@ static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn)
50 pr_err("Unable to allocate memory for struct iscsi_login.\n"); 48 pr_err("Unable to allocate memory for struct iscsi_login.\n");
51 return NULL; 49 return NULL;
52 } 50 }
51 conn->login = login;
53 login->conn = conn; 52 login->conn = conn;
54 login->first_request = 1; 53 login->first_request = 1;
55 54
@@ -428,7 +427,7 @@ static int iscsi_login_zero_tsih_s2(
428 ISCSI_LOGIN_STATUS_NO_RESOURCES); 427 ISCSI_LOGIN_STATUS_NO_RESOURCES);
429 return -1; 428 return -1;
430 } 429 }
431 rc = strict_strtoul(param->value, 0, &mrdsl); 430 rc = kstrtoul(param->value, 0, &mrdsl);
432 if (rc < 0) { 431 if (rc < 0) {
433 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 432 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
434 ISCSI_LOGIN_STATUS_NO_RESOURCES); 433 ISCSI_LOGIN_STATUS_NO_RESOURCES);
@@ -684,7 +683,7 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
684 iscsit_start_nopin_timer(conn); 683 iscsit_start_nopin_timer(conn);
685} 684}
686 685
687static int iscsi_post_login_handler( 686int iscsi_post_login_handler(
688 struct iscsi_np *np, 687 struct iscsi_np *np,
689 struct iscsi_conn *conn, 688 struct iscsi_conn *conn,
690 u8 zero_tsih) 689 u8 zero_tsih)
@@ -872,7 +871,7 @@ int iscsit_setup_np(
872 struct __kernel_sockaddr_storage *sockaddr) 871 struct __kernel_sockaddr_storage *sockaddr)
873{ 872{
874 struct socket *sock = NULL; 873 struct socket *sock = NULL;
875 int backlog = 5, ret, opt = 0, len; 874 int backlog = ISCSIT_TCP_BACKLOG, ret, opt = 0, len;
876 875
877 switch (np->np_network_transport) { 876 switch (np->np_network_transport) {
878 case ISCSI_TCP: 877 case ISCSI_TCP:
@@ -1007,16 +1006,24 @@ int iscsit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
1007 rc = conn->sock->ops->getname(conn->sock, 1006 rc = conn->sock->ops->getname(conn->sock,
1008 (struct sockaddr *)&sock_in6, &err, 1); 1007 (struct sockaddr *)&sock_in6, &err, 1);
1009 if (!rc) { 1008 if (!rc) {
1010 snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c", 1009 if (!ipv6_addr_v4mapped(&sock_in6.sin6_addr))
1011 &sock_in6.sin6_addr.in6_u); 1010 snprintf(conn->login_ip, sizeof(conn->login_ip), "[%pI6c]",
1011 &sock_in6.sin6_addr.in6_u);
1012 else
1013 snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI4",
1014 &sock_in6.sin6_addr.s6_addr32[3]);
1012 conn->login_port = ntohs(sock_in6.sin6_port); 1015 conn->login_port = ntohs(sock_in6.sin6_port);
1013 } 1016 }
1014 1017
1015 rc = conn->sock->ops->getname(conn->sock, 1018 rc = conn->sock->ops->getname(conn->sock,
1016 (struct sockaddr *)&sock_in6, &err, 0); 1019 (struct sockaddr *)&sock_in6, &err, 0);
1017 if (!rc) { 1020 if (!rc) {
1018 snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c", 1021 if (!ipv6_addr_v4mapped(&sock_in6.sin6_addr))
1019 &sock_in6.sin6_addr.in6_u); 1022 snprintf(conn->local_ip, sizeof(conn->local_ip), "[%pI6c]",
1023 &sock_in6.sin6_addr.in6_u);
1024 else
1025 snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI4",
1026 &sock_in6.sin6_addr.s6_addr32[3]);
1020 conn->local_port = ntohs(sock_in6.sin6_port); 1027 conn->local_port = ntohs(sock_in6.sin6_port);
1021 } 1028 }
1022 } else { 1029 } else {
@@ -1116,6 +1123,77 @@ iscsit_conn_set_transport(struct iscsi_conn *conn, struct iscsit_transport *t)
1116 return 0; 1123 return 0;
1117} 1124}
1118 1125
1126void iscsi_target_login_sess_out(struct iscsi_conn *conn,
1127 struct iscsi_np *np, bool zero_tsih, bool new_sess)
1128{
1129 if (new_sess == false)
1130 goto old_sess_out;
1131
1132 pr_err("iSCSI Login negotiation failed.\n");
1133 iscsit_collect_login_stats(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
1134 ISCSI_LOGIN_STATUS_INIT_ERR);
1135 if (!zero_tsih || !conn->sess)
1136 goto old_sess_out;
1137 if (conn->sess->se_sess)
1138 transport_free_session(conn->sess->se_sess);
1139 if (conn->sess->session_index != 0) {
1140 spin_lock_bh(&sess_idr_lock);
1141 idr_remove(&sess_idr, conn->sess->session_index);
1142 spin_unlock_bh(&sess_idr_lock);
1143 }
1144 kfree(conn->sess->sess_ops);
1145 kfree(conn->sess);
1146
1147old_sess_out:
1148 iscsi_stop_login_thread_timer(np);
1149 /*
1150 * If login negotiation fails check if the Time2Retain timer
1151 * needs to be restarted.
1152 */
1153 if (!zero_tsih && conn->sess) {
1154 spin_lock_bh(&conn->sess->conn_lock);
1155 if (conn->sess->session_state == TARG_SESS_STATE_FAILED) {
1156 struct se_portal_group *se_tpg =
1157 &ISCSI_TPG_C(conn)->tpg_se_tpg;
1158
1159 atomic_set(&conn->sess->session_continuation, 0);
1160 spin_unlock_bh(&conn->sess->conn_lock);
1161 spin_lock_bh(&se_tpg->session_lock);
1162 iscsit_start_time2retain_handler(conn->sess);
1163 spin_unlock_bh(&se_tpg->session_lock);
1164 } else
1165 spin_unlock_bh(&conn->sess->conn_lock);
1166 iscsit_dec_session_usage_count(conn->sess);
1167 }
1168
1169 if (!IS_ERR(conn->conn_rx_hash.tfm))
1170 crypto_free_hash(conn->conn_rx_hash.tfm);
1171 if (!IS_ERR(conn->conn_tx_hash.tfm))
1172 crypto_free_hash(conn->conn_tx_hash.tfm);
1173
1174 if (conn->conn_cpumask)
1175 free_cpumask_var(conn->conn_cpumask);
1176
1177 kfree(conn->conn_ops);
1178
1179 if (conn->param_list) {
1180 iscsi_release_param_list(conn->param_list);
1181 conn->param_list = NULL;
1182 }
1183 iscsi_target_nego_release(conn);
1184
1185 if (conn->sock) {
1186 sock_release(conn->sock);
1187 conn->sock = NULL;
1188 }
1189
1190 if (conn->conn_transport->iscsit_free_conn)
1191 conn->conn_transport->iscsit_free_conn(conn);
1192
1193 iscsit_put_transport(conn->conn_transport);
1194 kfree(conn);
1195}
1196
1119static int __iscsi_target_login_thread(struct iscsi_np *np) 1197static int __iscsi_target_login_thread(struct iscsi_np *np)
1120{ 1198{
1121 u8 *buffer, zero_tsih = 0; 1199 u8 *buffer, zero_tsih = 0;
@@ -1124,6 +1202,8 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
1124 struct iscsi_login *login; 1202 struct iscsi_login *login;
1125 struct iscsi_portal_group *tpg = NULL; 1203 struct iscsi_portal_group *tpg = NULL;
1126 struct iscsi_login_req *pdu; 1204 struct iscsi_login_req *pdu;
1205 struct iscsi_tpg_np *tpg_np;
1206 bool new_sess = false;
1127 1207
1128 flush_signals(current); 1208 flush_signals(current);
1129 1209
@@ -1264,6 +1344,7 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
1264 tpg = conn->tpg; 1344 tpg = conn->tpg;
1265 goto new_sess_out; 1345 goto new_sess_out;
1266 } 1346 }
1347 login->zero_tsih = zero_tsih;
1267 1348
1268 tpg = conn->tpg; 1349 tpg = conn->tpg;
1269 if (!tpg) { 1350 if (!tpg) {
@@ -1279,7 +1360,8 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
1279 goto old_sess_out; 1360 goto old_sess_out;
1280 } 1361 }
1281 1362
1282 if (iscsi_target_start_negotiation(login, conn) < 0) 1363 ret = iscsi_target_start_negotiation(login, conn);
1364 if (ret < 0)
1283 goto new_sess_out; 1365 goto new_sess_out;
1284 1366
1285 if (!conn->sess) { 1367 if (!conn->sess) {
@@ -1292,84 +1374,32 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
1292 if (signal_pending(current)) 1374 if (signal_pending(current))
1293 goto new_sess_out; 1375 goto new_sess_out;
1294 1376
1295 ret = iscsi_post_login_handler(np, conn, zero_tsih); 1377 if (ret == 1) {
1378 tpg_np = conn->tpg_np;
1296 1379
1297 if (ret < 0) 1380 ret = iscsi_post_login_handler(np, conn, zero_tsih);
1298 goto new_sess_out; 1381 if (ret < 0)
1382 goto new_sess_out;
1383
1384 iscsit_deaccess_np(np, tpg, tpg_np);
1385 }
1299 1386
1300 iscsit_deaccess_np(np, tpg);
1301 tpg = NULL; 1387 tpg = NULL;
1388 tpg_np = NULL;
1302 /* Get another socket */ 1389 /* Get another socket */
1303 return 1; 1390 return 1;
1304 1391
1305new_sess_out: 1392new_sess_out:
1306 pr_err("iSCSI Login negotiation failed.\n"); 1393 new_sess = true;
1307 iscsit_collect_login_stats(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
1308 ISCSI_LOGIN_STATUS_INIT_ERR);
1309 if (!zero_tsih || !conn->sess)
1310 goto old_sess_out;
1311 if (conn->sess->se_sess)
1312 transport_free_session(conn->sess->se_sess);
1313 if (conn->sess->session_index != 0) {
1314 spin_lock_bh(&sess_idr_lock);
1315 idr_remove(&sess_idr, conn->sess->session_index);
1316 spin_unlock_bh(&sess_idr_lock);
1317 }
1318 kfree(conn->sess->sess_ops);
1319 kfree(conn->sess);
1320old_sess_out: 1394old_sess_out:
1321 iscsi_stop_login_thread_timer(np); 1395 tpg_np = conn->tpg_np;
1322 /* 1396 iscsi_target_login_sess_out(conn, np, zero_tsih, new_sess);
1323 * If login negotiation fails check if the Time2Retain timer 1397 new_sess = false;
1324 * needs to be restarted.
1325 */
1326 if (!zero_tsih && conn->sess) {
1327 spin_lock_bh(&conn->sess->conn_lock);
1328 if (conn->sess->session_state == TARG_SESS_STATE_FAILED) {
1329 struct se_portal_group *se_tpg =
1330 &ISCSI_TPG_C(conn)->tpg_se_tpg;
1331
1332 atomic_set(&conn->sess->session_continuation, 0);
1333 spin_unlock_bh(&conn->sess->conn_lock);
1334 spin_lock_bh(&se_tpg->session_lock);
1335 iscsit_start_time2retain_handler(conn->sess);
1336 spin_unlock_bh(&se_tpg->session_lock);
1337 } else
1338 spin_unlock_bh(&conn->sess->conn_lock);
1339 iscsit_dec_session_usage_count(conn->sess);
1340 }
1341
1342 if (!IS_ERR(conn->conn_rx_hash.tfm))
1343 crypto_free_hash(conn->conn_rx_hash.tfm);
1344 if (!IS_ERR(conn->conn_tx_hash.tfm))
1345 crypto_free_hash(conn->conn_tx_hash.tfm);
1346
1347 if (conn->conn_cpumask)
1348 free_cpumask_var(conn->conn_cpumask);
1349
1350 kfree(conn->conn_ops);
1351
1352 if (conn->param_list) {
1353 iscsi_release_param_list(conn->param_list);
1354 conn->param_list = NULL;
1355 }
1356 iscsi_target_nego_release(conn);
1357
1358 if (conn->sock) {
1359 sock_release(conn->sock);
1360 conn->sock = NULL;
1361 }
1362
1363 if (conn->conn_transport->iscsit_free_conn)
1364 conn->conn_transport->iscsit_free_conn(conn);
1365
1366 iscsit_put_transport(conn->conn_transport);
1367
1368 kfree(conn);
1369 1398
1370 if (tpg) { 1399 if (tpg) {
1371 iscsit_deaccess_np(np, tpg); 1400 iscsit_deaccess_np(np, tpg, tpg_np);
1372 tpg = NULL; 1401 tpg = NULL;
1402 tpg_np = NULL;
1373 } 1403 }
1374 1404
1375out: 1405out:
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
index 63efd2878451..29d098324b7f 100644
--- a/drivers/target/iscsi/iscsi_target_login.h
+++ b/drivers/target/iscsi/iscsi_target_login.h
@@ -12,6 +12,9 @@ extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *);
12extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *); 12extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
13extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32); 13extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
14extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *); 14extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *);
15extern int iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
16extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *,
17 bool, bool);
15extern int iscsi_target_login_thread(void *); 18extern int iscsi_target_login_thread(void *);
16extern int iscsi_login_disable_FIM_keys(struct iscsi_param_list *, struct iscsi_conn *); 19extern int iscsi_login_disable_FIM_keys(struct iscsi_param_list *, struct iscsi_conn *);
17 20
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index c4675b4ceb49..14d1aed5af1d 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -1,9 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * This file contains main functions related to iSCSI Parameter negotiation. 2 * This file contains main functions related to iSCSI Parameter negotiation.
3 * 3 *
4 * © Copyright 2007-2011 RisingTide Systems LLC. 4 * (c) Copyright 2007-2013 Datera, Inc.
5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 * 5 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 6 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 * 7 *
@@ -377,15 +375,284 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
377 return 0; 375 return 0;
378} 376}
379 377
380static int iscsi_target_do_login_io(struct iscsi_conn *conn, struct iscsi_login *login) 378static void iscsi_target_sk_data_ready(struct sock *sk, int count)
381{ 379{
382 if (iscsi_target_do_tx_login_io(conn, login) < 0) 380 struct iscsi_conn *conn = sk->sk_user_data;
383 return -1; 381 bool rc;
384 382
385 if (conn->conn_transport->iscsit_get_login_rx(conn, login) < 0) 383 pr_debug("Entering iscsi_target_sk_data_ready: conn: %p\n", conn);
386 return -1;
387 384
388 return 0; 385 write_lock_bh(&sk->sk_callback_lock);
386 if (!sk->sk_user_data) {
387 write_unlock_bh(&sk->sk_callback_lock);
388 return;
389 }
390 if (!test_bit(LOGIN_FLAGS_READY, &conn->login_flags)) {
391 write_unlock_bh(&sk->sk_callback_lock);
392 pr_debug("Got LOGIN_FLAGS_READY=0, conn: %p >>>>\n", conn);
393 return;
394 }
395 if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
396 write_unlock_bh(&sk->sk_callback_lock);
397 pr_debug("Got LOGIN_FLAGS_CLOSED=1, conn: %p >>>>\n", conn);
398 return;
399 }
400 if (test_and_set_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) {
401 write_unlock_bh(&sk->sk_callback_lock);
402 pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1, conn: %p >>>>\n", conn);
403 return;
404 }
405
406 rc = schedule_delayed_work(&conn->login_work, 0);
407 if (rc == false) {
408 pr_debug("iscsi_target_sk_data_ready, schedule_delayed_work"
409 " got false\n");
410 }
411 write_unlock_bh(&sk->sk_callback_lock);
412}
413
414static void iscsi_target_sk_state_change(struct sock *);
415
416static void iscsi_target_set_sock_callbacks(struct iscsi_conn *conn)
417{
418 struct sock *sk;
419
420 if (!conn->sock)
421 return;
422
423 sk = conn->sock->sk;
424 pr_debug("Entering iscsi_target_set_sock_callbacks: conn: %p\n", conn);
425
426 write_lock_bh(&sk->sk_callback_lock);
427 sk->sk_user_data = conn;
428 conn->orig_data_ready = sk->sk_data_ready;
429 conn->orig_state_change = sk->sk_state_change;
430 sk->sk_data_ready = iscsi_target_sk_data_ready;
431 sk->sk_state_change = iscsi_target_sk_state_change;
432 write_unlock_bh(&sk->sk_callback_lock);
433
434 sk->sk_sndtimeo = TA_LOGIN_TIMEOUT * HZ;
435 sk->sk_rcvtimeo = TA_LOGIN_TIMEOUT * HZ;
436}
437
438static void iscsi_target_restore_sock_callbacks(struct iscsi_conn *conn)
439{
440 struct sock *sk;
441
442 if (!conn->sock)
443 return;
444
445 sk = conn->sock->sk;
446 pr_debug("Entering iscsi_target_restore_sock_callbacks: conn: %p\n", conn);
447
448 write_lock_bh(&sk->sk_callback_lock);
449 if (!sk->sk_user_data) {
450 write_unlock_bh(&sk->sk_callback_lock);
451 return;
452 }
453 sk->sk_user_data = NULL;
454 sk->sk_data_ready = conn->orig_data_ready;
455 sk->sk_state_change = conn->orig_state_change;
456 write_unlock_bh(&sk->sk_callback_lock);
457
458 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
459 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
460}
461
462static int iscsi_target_do_login(struct iscsi_conn *, struct iscsi_login *);
463
464static bool iscsi_target_sk_state_check(struct sock *sk)
465{
466 if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) {
467 pr_debug("iscsi_target_sk_state_check: TCP_CLOSE_WAIT|TCP_CLOSE,"
468 "returning FALSE\n");
469 return false;
470 }
471 return true;
472}
473
474static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login)
475{
476 struct iscsi_np *np = login->np;
477 bool zero_tsih = login->zero_tsih;
478
479 iscsi_remove_failed_auth_entry(conn);
480 iscsi_target_nego_release(conn);
481 iscsi_target_login_sess_out(conn, np, zero_tsih, true);
482}
483
484static void iscsi_target_login_timeout(unsigned long data)
485{
486 struct iscsi_conn *conn = (struct iscsi_conn *)data;
487
488 pr_debug("Entering iscsi_target_login_timeout >>>>>>>>>>>>>>>>>>>\n");
489
490 if (conn->login_kworker) {
491 pr_debug("Sending SIGINT to conn->login_kworker %s/%d\n",
492 conn->login_kworker->comm, conn->login_kworker->pid);
493 send_sig(SIGINT, conn->login_kworker, 1);
494 }
495}
496
497static void iscsi_target_do_login_rx(struct work_struct *work)
498{
499 struct iscsi_conn *conn = container_of(work,
500 struct iscsi_conn, login_work.work);
501 struct iscsi_login *login = conn->login;
502 struct iscsi_np *np = login->np;
503 struct iscsi_portal_group *tpg = conn->tpg;
504 struct iscsi_tpg_np *tpg_np = conn->tpg_np;
505 struct timer_list login_timer;
506 int rc, zero_tsih = login->zero_tsih;
507 bool state;
508
509 pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n",
510 conn, current->comm, current->pid);
511
512 spin_lock(&tpg->tpg_state_lock);
513 state = (tpg->tpg_state == TPG_STATE_ACTIVE);
514 spin_unlock(&tpg->tpg_state_lock);
515
516 if (state == false) {
517 pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n");
518 iscsi_target_restore_sock_callbacks(conn);
519 iscsi_target_login_drop(conn, login);
520 iscsit_deaccess_np(np, tpg, tpg_np);
521 return;
522 }
523
524 if (conn->sock) {
525 struct sock *sk = conn->sock->sk;
526
527 read_lock_bh(&sk->sk_callback_lock);
528 state = iscsi_target_sk_state_check(sk);
529 read_unlock_bh(&sk->sk_callback_lock);
530
531 if (state == false) {
532 pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
533 iscsi_target_restore_sock_callbacks(conn);
534 iscsi_target_login_drop(conn, login);
535 iscsit_deaccess_np(np, tpg, tpg_np);
536 return;
537 }
538 }
539
540 conn->login_kworker = current;
541 allow_signal(SIGINT);
542
543 init_timer(&login_timer);
544 login_timer.expires = (get_jiffies_64() + TA_LOGIN_TIMEOUT * HZ);
545 login_timer.data = (unsigned long)conn;
546 login_timer.function = iscsi_target_login_timeout;
547 add_timer(&login_timer);
548 pr_debug("Starting login_timer for %s/%d\n", current->comm, current->pid);
549
550 rc = conn->conn_transport->iscsit_get_login_rx(conn, login);
551 del_timer_sync(&login_timer);
552 flush_signals(current);
553 conn->login_kworker = NULL;
554
555 if (rc < 0) {
556 iscsi_target_restore_sock_callbacks(conn);
557 iscsi_target_login_drop(conn, login);
558 iscsit_deaccess_np(np, tpg, tpg_np);
559 return;
560 }
561
562 pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n",
563 conn, current->comm, current->pid);
564
565 rc = iscsi_target_do_login(conn, login);
566 if (rc < 0) {
567 iscsi_target_restore_sock_callbacks(conn);
568 iscsi_target_login_drop(conn, login);
569 iscsit_deaccess_np(np, tpg, tpg_np);
570 } else if (!rc) {
571 if (conn->sock) {
572 struct sock *sk = conn->sock->sk;
573
574 write_lock_bh(&sk->sk_callback_lock);
575 clear_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags);
576 write_unlock_bh(&sk->sk_callback_lock);
577 }
578 } else if (rc == 1) {
579 iscsi_target_nego_release(conn);
580 iscsi_post_login_handler(np, conn, zero_tsih);
581 iscsit_deaccess_np(np, tpg, tpg_np);
582 }
583}
584
585static void iscsi_target_do_cleanup(struct work_struct *work)
586{
587 struct iscsi_conn *conn = container_of(work,
588 struct iscsi_conn, login_cleanup_work.work);
589 struct sock *sk = conn->sock->sk;
590 struct iscsi_login *login = conn->login;
591 struct iscsi_np *np = login->np;
592 struct iscsi_portal_group *tpg = conn->tpg;
593 struct iscsi_tpg_np *tpg_np = conn->tpg_np;
594
595 pr_debug("Entering iscsi_target_do_cleanup\n");
596
597 cancel_delayed_work_sync(&conn->login_work);
598 conn->orig_state_change(sk);
599
600 iscsi_target_restore_sock_callbacks(conn);
601 iscsi_target_login_drop(conn, login);
602 iscsit_deaccess_np(np, tpg, tpg_np);
603
604 pr_debug("iscsi_target_do_cleanup done()\n");
605}
606
607static void iscsi_target_sk_state_change(struct sock *sk)
608{
609 struct iscsi_conn *conn;
610 void (*orig_state_change)(struct sock *);
611 bool state;
612
613 pr_debug("Entering iscsi_target_sk_state_change\n");
614
615 write_lock_bh(&sk->sk_callback_lock);
616 conn = sk->sk_user_data;
617 if (!conn) {
618 write_unlock_bh(&sk->sk_callback_lock);
619 return;
620 }
621 orig_state_change = conn->orig_state_change;
622
623 if (!test_bit(LOGIN_FLAGS_READY, &conn->login_flags)) {
624 pr_debug("Got LOGIN_FLAGS_READY=0 sk_state_change conn: %p\n",
625 conn);
626 write_unlock_bh(&sk->sk_callback_lock);
627 orig_state_change(sk);
628 return;
629 }
630 if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) {
631 pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1 sk_state_change"
632 " conn: %p\n", conn);
633 write_unlock_bh(&sk->sk_callback_lock);
634 orig_state_change(sk);
635 return;
636 }
637 if (test_and_set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
638 pr_debug("Got LOGIN_FLAGS_CLOSED=1 sk_state_change conn: %p\n",
639 conn);
640 write_unlock_bh(&sk->sk_callback_lock);
641 orig_state_change(sk);
642 return;
643 }
644
645 state = iscsi_target_sk_state_check(sk);
646 write_unlock_bh(&sk->sk_callback_lock);
647
648 pr_debug("iscsi_target_sk_state_change: state: %d\n", state);
649
650 if (!state) {
651 pr_debug("iscsi_target_sk_state_change got failed state\n");
652 schedule_delayed_work(&conn->login_cleanup_work, 0);
653 return;
654 }
655 orig_state_change(sk);
389} 656}
390 657
391/* 658/*
@@ -643,10 +910,11 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
643 if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) { 910 if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
644 login->tsih = conn->sess->tsih; 911 login->tsih = conn->sess->tsih;
645 login->login_complete = 1; 912 login->login_complete = 1;
913 iscsi_target_restore_sock_callbacks(conn);
646 if (iscsi_target_do_tx_login_io(conn, 914 if (iscsi_target_do_tx_login_io(conn,
647 login) < 0) 915 login) < 0)
648 return -1; 916 return -1;
649 return 0; 917 return 1;
650 } 918 }
651 break; 919 break;
652 default: 920 default:
@@ -656,13 +924,29 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
656 break; 924 break;
657 } 925 }
658 926
659 if (iscsi_target_do_login_io(conn, login) < 0) 927 if (iscsi_target_do_tx_login_io(conn, login) < 0)
660 return -1; 928 return -1;
661 929
662 if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) { 930 if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
663 login_rsp->flags &= ~ISCSI_FLAG_LOGIN_TRANSIT; 931 login_rsp->flags &= ~ISCSI_FLAG_LOGIN_TRANSIT;
664 login_rsp->flags &= ~ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK; 932 login_rsp->flags &= ~ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK;
665 } 933 }
934 break;
935 }
936
937 if (conn->sock) {
938 struct sock *sk = conn->sock->sk;
939 bool state;
940
941 read_lock_bh(&sk->sk_callback_lock);
942 state = iscsi_target_sk_state_check(sk);
943 read_unlock_bh(&sk->sk_callback_lock);
944
945 if (!state) {
946 pr_debug("iscsi_target_do_login() failed state for"
947 " conn: %p\n", conn);
948 return -1;
949 }
666 } 950 }
667 951
668 return 0; 952 return 0;
@@ -695,9 +979,17 @@ int iscsi_target_locate_portal(
695 char *tmpbuf, *start = NULL, *end = NULL, *key, *value; 979 char *tmpbuf, *start = NULL, *end = NULL, *key, *value;
696 struct iscsi_session *sess = conn->sess; 980 struct iscsi_session *sess = conn->sess;
697 struct iscsi_tiqn *tiqn; 981 struct iscsi_tiqn *tiqn;
982 struct iscsi_tpg_np *tpg_np = NULL;
698 struct iscsi_login_req *login_req; 983 struct iscsi_login_req *login_req;
699 u32 payload_length; 984 struct se_node_acl *se_nacl;
700 int sessiontype = 0, ret = 0; 985 u32 payload_length, queue_depth = 0;
986 int sessiontype = 0, ret = 0, tag_num, tag_size;
987
988 INIT_DELAYED_WORK(&conn->login_work, iscsi_target_do_login_rx);
989 INIT_DELAYED_WORK(&conn->login_cleanup_work, iscsi_target_do_cleanup);
990 iscsi_target_set_sock_callbacks(conn);
991
992 login->np = np;
701 993
702 login_req = (struct iscsi_login_req *) login->req; 994 login_req = (struct iscsi_login_req *) login->req;
703 payload_length = ntoh24(login_req->dlength); 995 payload_length = ntoh24(login_req->dlength);
@@ -791,7 +1083,7 @@ int iscsi_target_locate_portal(
791 goto out; 1083 goto out;
792 } 1084 }
793 ret = 0; 1085 ret = 0;
794 goto out; 1086 goto alloc_tags;
795 } 1087 }
796 1088
797get_target: 1089get_target:
@@ -822,7 +1114,7 @@ get_target:
822 /* 1114 /*
823 * Locate Target Portal Group from Storage Node. 1115 * Locate Target Portal Group from Storage Node.
824 */ 1116 */
825 conn->tpg = iscsit_get_tpg_from_np(tiqn, np); 1117 conn->tpg = iscsit_get_tpg_from_np(tiqn, np, &tpg_np);
826 if (!conn->tpg) { 1118 if (!conn->tpg) {
827 pr_err("Unable to locate Target Portal Group" 1119 pr_err("Unable to locate Target Portal Group"
828 " on %s\n", tiqn->tiqn); 1120 " on %s\n", tiqn->tiqn);
@@ -832,12 +1124,16 @@ get_target:
832 ret = -1; 1124 ret = -1;
833 goto out; 1125 goto out;
834 } 1126 }
1127 conn->tpg_np = tpg_np;
835 pr_debug("Located Portal Group Object: %hu\n", conn->tpg->tpgt); 1128 pr_debug("Located Portal Group Object: %hu\n", conn->tpg->tpgt);
836 /* 1129 /*
837 * Setup crc32c modules from libcrypto 1130 * Setup crc32c modules from libcrypto
838 */ 1131 */
839 if (iscsi_login_setup_crypto(conn) < 0) { 1132 if (iscsi_login_setup_crypto(conn) < 0) {
840 pr_err("iscsi_login_setup_crypto() failed\n"); 1133 pr_err("iscsi_login_setup_crypto() failed\n");
1134 kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
1135 iscsit_put_tiqn_for_login(tiqn);
1136 conn->tpg = NULL;
841 ret = -1; 1137 ret = -1;
842 goto out; 1138 goto out;
843 } 1139 }
@@ -846,11 +1142,12 @@ get_target:
846 * process login attempt. 1142 * process login attempt.
847 */ 1143 */
848 if (iscsit_access_np(np, conn->tpg) < 0) { 1144 if (iscsit_access_np(np, conn->tpg) < 0) {
1145 kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
849 iscsit_put_tiqn_for_login(tiqn); 1146 iscsit_put_tiqn_for_login(tiqn);
850 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 1147 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
851 ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE); 1148 ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
852 ret = -1;
853 conn->tpg = NULL; 1149 conn->tpg = NULL;
1150 ret = -1;
854 goto out; 1151 goto out;
855 } 1152 }
856 1153
@@ -883,8 +1180,27 @@ get_target:
883 ret = -1; 1180 ret = -1;
884 goto out; 1181 goto out;
885 } 1182 }
1183 se_nacl = sess->se_sess->se_node_acl;
1184 queue_depth = se_nacl->queue_depth;
1185 /*
1186 * Setup pre-allocated tags based upon allowed per NodeACL CmdSN
1187 * depth for non immediate commands, plus extra tags for immediate
1188 * commands.
1189 *
1190 * Also enforce a ISCSIT_MIN_TAGS to prevent unnecessary contention
1191 * in per-cpu-ida tag allocation logic + small queue_depth.
1192 */
1193alloc_tags:
1194 tag_num = max_t(u32, ISCSIT_MIN_TAGS, queue_depth);
1195 tag_num += ISCSIT_EXTRA_TAGS;
1196 tag_size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
886 1197
887 ret = 0; 1198 ret = transport_alloc_session_tags(sess->se_sess, tag_num, tag_size);
1199 if (ret < 0) {
1200 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
1201 ISCSI_LOGIN_STATUS_NO_RESOURCES);
1202 ret = -1;
1203 }
888out: 1204out:
889 kfree(tmpbuf); 1205 kfree(tmpbuf);
890 return ret; 1206 return ret;
@@ -897,10 +1213,23 @@ int iscsi_target_start_negotiation(
897 int ret; 1213 int ret;
898 1214
899 ret = iscsi_target_do_login(conn, login); 1215 ret = iscsi_target_do_login(conn, login);
900 if (ret != 0) 1216 if (!ret) {
1217 if (conn->sock) {
1218 struct sock *sk = conn->sock->sk;
1219
1220 write_lock_bh(&sk->sk_callback_lock);
1221 set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
1222 write_unlock_bh(&sk->sk_callback_lock);
1223 }
1224 } else if (ret < 0) {
1225 cancel_delayed_work_sync(&conn->login_work);
1226 cancel_delayed_work_sync(&conn->login_cleanup_work);
1227 iscsi_target_restore_sock_callbacks(conn);
901 iscsi_remove_failed_auth_entry(conn); 1228 iscsi_remove_failed_auth_entry(conn);
1229 }
1230 if (ret != 0)
1231 iscsi_target_nego_release(conn);
902 1232
903 iscsi_target_nego_release(conn);
904 return ret; 1233 return ret;
905} 1234}
906 1235
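
The iscsi_target_nego.c changes above hinge on one kernel idiom: publish the connection through sk_user_data and swap the socket callback pointers while holding sk->sk_callback_lock, so the network-stack callback, the login worker, and the restore path can never race on ownership. A minimal sketch of that pattern follows; the my_* names are hypothetical, an illustration rather than code from this patch.

#include <net/sock.h>
#include <linux/workqueue.h>

struct my_conn {
	struct socket *sock;
	void (*orig_state_change)(struct sock *);
	struct delayed_work login_work;
};

/* Called by the network stack without sk_callback_lock held, so take
 * it before trusting sk_user_data, then chain to the saved callback. */
static void my_state_change(struct sock *sk)
{
	void (*orig)(struct sock *) = NULL;
	struct my_conn *c;

	read_lock_bh(&sk->sk_callback_lock);
	c = sk->sk_user_data;
	if (c) {
		orig = c->orig_state_change;
		schedule_delayed_work(&c->login_work, 0);
	}
	read_unlock_bh(&sk->sk_callback_lock);

	if (orig)
		orig(sk);
}

static void my_set_callbacks(struct my_conn *c)
{
	struct sock *sk = c->sock->sk;

	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data = c;
	c->orig_state_change = sk->sk_state_change;
	sk->sk_state_change = my_state_change;
	write_unlock_bh(&sk->sk_callback_lock);
}

static void my_restore_callbacks(struct my_conn *c)
{
	struct sock *sk = c->sock->sk;

	write_lock_bh(&sk->sk_callback_lock);
	if (sk->sk_user_data) {
		sk->sk_user_data = NULL;
		sk->sk_state_change = c->orig_state_change;
	}
	write_unlock_bh(&sk->sk_callback_lock);
}

The LOGIN_FLAGS_READY/READ_ACTIVE/CLOSED bits in the patch play the same role as the sk_user_data NULL check here: they decide, under the same lock, which of the data-ready callback, the login worker, and the cleanup worker currently owns the connection.
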
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.c b/drivers/target/iscsi/iscsi_target_nodeattrib.c
index 11dc2936af76..93bdc475eb00 100644
--- a/drivers/target/iscsi/iscsi_target_nodeattrib.c
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.c
@@ -1,9 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * This file contains the main functions related to Initiator Node Attributes. 2 * This file contains the main functions related to Initiator Node Attributes.
3 * 3 *
4 * © Copyright 2007-2011 RisingTide Systems LLC. 4 * (c) Copyright 2007-2013 Datera, Inc.
5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 * 5 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 6 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 * 7 *
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 35fd6439eb01..4d2e23fc76fd 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -1,9 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * This file contains main functions related to iSCSI Parameter negotiation. 2 * This file contains main functions related to iSCSI Parameter negotiation.
3 * 3 *
4 * © Copyright 2007-2011 RisingTide Systems LLC. 4 * (c) Copyright 2007-2013 Datera, Inc.
5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 * 5 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 6 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 * 7 *
@@ -1182,7 +1180,7 @@ static int iscsi_check_acceptor_state(struct iscsi_param *param, char *value,
1182 unsigned long long tmp; 1180 unsigned long long tmp;
1183 int rc; 1181 int rc;
1184 1182
1185 rc = strict_strtoull(param->value, 0, &tmp); 1183 rc = kstrtoull(param->value, 0, &tmp);
1186 if (rc < 0) 1184 if (rc < 0)
1187 return -1; 1185 return -1;
1188 1186
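
The strict_strtoull() call replaced above was already an alias for kstrtoull(); the kstrto*() helpers return 0 on success or a negative errno (-EINVAL for a malformed string, -ERANGE on overflow), which lets callers propagate the code directly. A small illustrative example of the calling convention, with a hypothetical my_parse_value():

#include <linux/kernel.h>

static int my_parse_value(const char *buf, unsigned long long *out)
{
	/* base 0 auto-detects 0x... hex and 0... octal, else decimal */
	int rc = kstrtoull(buf, 0, out);

	if (rc < 0)
		return rc;	/* pass -EINVAL/-ERANGE through verbatim */
	return 0;
}

The same conversion is why several of the configfs store handlers later in this series now return ret instead of a hard-coded -EINVAL.
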
diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
index edb592a368ef..ca41b583f2f6 100644
--- a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
+++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
@@ -2,9 +2,7 @@
2 * This file contains main functions related to iSCSI DataSequenceInOrder=No 2 * This file contains main functions related to iSCSI DataSequenceInOrder=No
3 * and DataPDUInOrder=No. 3 * and DataPDUInOrder=No.
4 * 4 *
5 © Copyright 2007-2011 RisingTide Systems LLC. 5 * (c) Copyright 2007-2013 Datera, Inc.
6 *
7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8 * 6 *
9 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 7 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
10 * 8 *
diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c
index 464b4206a51e..f788e8b5e855 100644
--- a/drivers/target/iscsi/iscsi_target_stat.c
+++ b/drivers/target/iscsi/iscsi_target_stat.c
@@ -2,9 +2,7 @@
2 * Modern ConfigFS group context specific iSCSI statistics based on original 2 * Modern ConfigFS group context specific iSCSI statistics based on original
3 * iscsi_target_mib.c code 3 * iscsi_target_mib.c code
4 * 4 *
5 * Copyright (c) 2011 Rising Tide Systems 5 * Copyright (c) 2011-2013 Datera, Inc.
6 *
7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8 * 6 *
9 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 7 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
10 * 8 *
@@ -177,7 +175,7 @@ ISCSI_STAT_INSTANCE_ATTR_RO(description);
177static ssize_t iscsi_stat_instance_show_attr_vendor( 175static ssize_t iscsi_stat_instance_show_attr_vendor(
178 struct iscsi_wwn_stat_grps *igrps, char *page) 176 struct iscsi_wwn_stat_grps *igrps, char *page)
179{ 177{
180 return snprintf(page, PAGE_SIZE, "RisingTide Systems iSCSI-Target\n"); 178 return snprintf(page, PAGE_SIZE, "Datera, Inc. iSCSI-Target\n");
181} 179}
182ISCSI_STAT_INSTANCE_ATTR_RO(vendor); 180ISCSI_STAT_INSTANCE_ATTR_RO(vendor);
183 181
@@ -432,13 +430,7 @@ static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr(
432 int ret; 430 int ret;
433 431
434 spin_lock(&lstat->lock); 432 spin_lock(&lstat->lock);
435 if (lstat->last_intr_fail_ip_family == AF_INET6) { 433 ret = snprintf(page, PAGE_SIZE, "%s\n", lstat->last_intr_fail_ip_addr);
436 ret = snprintf(page, PAGE_SIZE, "[%s]\n",
437 lstat->last_intr_fail_ip_addr);
438 } else {
439 ret = snprintf(page, PAGE_SIZE, "%s\n",
440 lstat->last_intr_fail_ip_addr);
441 }
442 spin_unlock(&lstat->lock); 434 spin_unlock(&lstat->lock);
443 435
444 return ret; 436 return ret;
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
index b997e5da47d3..78404b1cc0bf 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.c
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -1,9 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * This file contains the iSCSI Target specific Task Management functions. 2 * This file contains the iSCSI Target specific Task Management functions.
3 * 3 *
4 * © Copyright 2007-2011 RisingTide Systems LLC. 4 * (c) Copyright 2007-2013 Datera, Inc.
5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 * 5 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 6 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 * 7 *
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 439260b7d87f..4faeb47fa5e1 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -1,9 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * This file contains iSCSI Target Portal Group related functions. 2 * This file contains iSCSI Target Portal Group related functions.
3 * 3 *
4 * © Copyright 2007-2011 RisingTide Systems LLC. 4 * (c) Copyright 2007-2013 Datera, Inc.
5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 * 5 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 6 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 * 7 *
@@ -49,7 +47,7 @@ struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *tiqn, u1
49 INIT_LIST_HEAD(&tpg->tpg_gnp_list); 47 INIT_LIST_HEAD(&tpg->tpg_gnp_list);
50 INIT_LIST_HEAD(&tpg->tpg_list); 48 INIT_LIST_HEAD(&tpg->tpg_list);
51 mutex_init(&tpg->tpg_access_lock); 49 mutex_init(&tpg->tpg_access_lock);
52 mutex_init(&tpg->np_login_lock); 50 sema_init(&tpg->np_login_sem, 1);
53 spin_lock_init(&tpg->tpg_state_lock); 51 spin_lock_init(&tpg->tpg_state_lock);
54 spin_lock_init(&tpg->tpg_np_lock); 52 spin_lock_init(&tpg->tpg_np_lock);
55 53
@@ -129,7 +127,8 @@ void iscsit_release_discovery_tpg(void)
129 127
130struct iscsi_portal_group *iscsit_get_tpg_from_np( 128struct iscsi_portal_group *iscsit_get_tpg_from_np(
131 struct iscsi_tiqn *tiqn, 129 struct iscsi_tiqn *tiqn,
132 struct iscsi_np *np) 130 struct iscsi_np *np,
131 struct iscsi_tpg_np **tpg_np_out)
133{ 132{
134 struct iscsi_portal_group *tpg = NULL; 133 struct iscsi_portal_group *tpg = NULL;
135 struct iscsi_tpg_np *tpg_np; 134 struct iscsi_tpg_np *tpg_np;
@@ -147,6 +146,8 @@ struct iscsi_portal_group *iscsit_get_tpg_from_np(
147 spin_lock(&tpg->tpg_np_lock); 146 spin_lock(&tpg->tpg_np_lock);
148 list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) { 147 list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) {
149 if (tpg_np->tpg_np == np) { 148 if (tpg_np->tpg_np == np) {
149 *tpg_np_out = tpg_np;
150 kref_get(&tpg_np->tpg_np_kref);
150 spin_unlock(&tpg->tpg_np_lock); 151 spin_unlock(&tpg->tpg_np_lock);
151 spin_unlock(&tiqn->tiqn_tpg_lock); 152 spin_unlock(&tiqn->tiqn_tpg_lock);
152 return tpg; 153 return tpg;
@@ -175,18 +176,20 @@ void iscsit_put_tpg(struct iscsi_portal_group *tpg)
175 176
176static void iscsit_clear_tpg_np_login_thread( 177static void iscsit_clear_tpg_np_login_thread(
177 struct iscsi_tpg_np *tpg_np, 178 struct iscsi_tpg_np *tpg_np,
178 struct iscsi_portal_group *tpg) 179 struct iscsi_portal_group *tpg,
180 bool shutdown)
179{ 181{
180 if (!tpg_np->tpg_np) { 182 if (!tpg_np->tpg_np) {
181 pr_err("struct iscsi_tpg_np->tpg_np is NULL!\n"); 183 pr_err("struct iscsi_tpg_np->tpg_np is NULL!\n");
182 return; 184 return;
183 } 185 }
184 186
185 iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg); 187 iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg, shutdown);
186} 188}
187 189
188void iscsit_clear_tpg_np_login_threads( 190void iscsit_clear_tpg_np_login_threads(
189 struct iscsi_portal_group *tpg) 191 struct iscsi_portal_group *tpg,
192 bool shutdown)
190{ 193{
191 struct iscsi_tpg_np *tpg_np; 194 struct iscsi_tpg_np *tpg_np;
192 195
@@ -197,7 +200,7 @@ void iscsit_clear_tpg_np_login_threads(
197 continue; 200 continue;
198 } 201 }
199 spin_unlock(&tpg->tpg_np_lock); 202 spin_unlock(&tpg->tpg_np_lock);
200 iscsit_clear_tpg_np_login_thread(tpg_np, tpg); 203 iscsit_clear_tpg_np_login_thread(tpg_np, tpg, shutdown);
201 spin_lock(&tpg->tpg_np_lock); 204 spin_lock(&tpg->tpg_np_lock);
202 } 205 }
203 spin_unlock(&tpg->tpg_np_lock); 206 spin_unlock(&tpg->tpg_np_lock);
@@ -268,6 +271,8 @@ int iscsit_tpg_del_portal_group(
268 tpg->tpg_state = TPG_STATE_INACTIVE; 271 tpg->tpg_state = TPG_STATE_INACTIVE;
269 spin_unlock(&tpg->tpg_state_lock); 272 spin_unlock(&tpg->tpg_state_lock);
270 273
274 iscsit_clear_tpg_np_login_threads(tpg, true);
275
271 if (iscsit_release_sessions_for_tpg(tpg, force) < 0) { 276 if (iscsit_release_sessions_for_tpg(tpg, force) < 0) {
272 pr_err("Unable to delete iSCSI Target Portal Group:" 277 pr_err("Unable to delete iSCSI Target Portal Group:"
273 " %hu while active sessions exist, and force=0\n", 278 " %hu while active sessions exist, and force=0\n",
@@ -368,7 +373,7 @@ int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *tpg, int force)
368 tpg->tpg_state = TPG_STATE_INACTIVE; 373 tpg->tpg_state = TPG_STATE_INACTIVE;
369 spin_unlock(&tpg->tpg_state_lock); 374 spin_unlock(&tpg->tpg_state_lock);
370 375
371 iscsit_clear_tpg_np_login_threads(tpg); 376 iscsit_clear_tpg_np_login_threads(tpg, false);
372 377
373 if (iscsit_release_sessions_for_tpg(tpg, force) < 0) { 378 if (iscsit_release_sessions_for_tpg(tpg, force) < 0) {
374 spin_lock(&tpg->tpg_state_lock); 379 spin_lock(&tpg->tpg_state_lock);
@@ -490,6 +495,8 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
490 INIT_LIST_HEAD(&tpg_np->tpg_np_child_list); 495 INIT_LIST_HEAD(&tpg_np->tpg_np_child_list);
491 INIT_LIST_HEAD(&tpg_np->tpg_np_parent_list); 496 INIT_LIST_HEAD(&tpg_np->tpg_np_parent_list);
492 spin_lock_init(&tpg_np->tpg_np_parent_lock); 497 spin_lock_init(&tpg_np->tpg_np_parent_lock);
498 init_completion(&tpg_np->tpg_np_comp);
499 kref_init(&tpg_np->tpg_np_kref);
493 tpg_np->tpg_np = np; 500 tpg_np->tpg_np = np;
494 tpg_np->tpg = tpg; 501 tpg_np->tpg = tpg;
495 502
@@ -520,7 +527,7 @@ static int iscsit_tpg_release_np(
520 struct iscsi_portal_group *tpg, 527 struct iscsi_portal_group *tpg,
521 struct iscsi_np *np) 528 struct iscsi_np *np)
522{ 529{
523 iscsit_clear_tpg_np_login_thread(tpg_np, tpg); 530 iscsit_clear_tpg_np_login_thread(tpg_np, tpg, true);
524 531
525 pr_debug("CORE[%s] - Removed Network Portal: %s:%hu,%hu on %s\n", 532 pr_debug("CORE[%s] - Removed Network Portal: %s:%hu,%hu on %s\n",
526 tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt, 533 tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt,
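
The tpg.c changes above pin the matched tpg_np across the whole login: kref_get() runs under tpg_np_lock during lookup so the portal cannot be freed between match and use, and each error leg in iscsi_target_locate_portal() balances it with kref_put(). The underlying kref idiom looks roughly like this sketch, with hypothetical my_* names:

#include <linux/kref.h>
#include <linux/slab.h>

struct my_portal {
	struct kref kref;
	/* ... portal state ... */
};

static void my_portal_release(struct kref *kref)
{
	struct my_portal *p = container_of(kref, struct my_portal, kref);

	kfree(p);
}

/* Lookup side: take the reference while still holding the lock that
 * protects the containing list, as the tpg_np_lock usage above does. */
static void my_portal_get_locked(struct my_portal *p)
{
	kref_get(&p->kref);
}

static void my_portal_put(struct my_portal *p)
{
	kref_put(&p->kref, my_portal_release);	/* release runs at zero */
}
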
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
index dda48c141a8c..b77693e2c209 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -5,10 +5,10 @@ extern struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *,
5extern int iscsit_load_discovery_tpg(void); 5extern int iscsit_load_discovery_tpg(void);
6extern void iscsit_release_discovery_tpg(void); 6extern void iscsit_release_discovery_tpg(void);
7extern struct iscsi_portal_group *iscsit_get_tpg_from_np(struct iscsi_tiqn *, 7extern struct iscsi_portal_group *iscsit_get_tpg_from_np(struct iscsi_tiqn *,
8 struct iscsi_np *); 8 struct iscsi_np *, struct iscsi_tpg_np **);
9extern int iscsit_get_tpg(struct iscsi_portal_group *); 9extern int iscsit_get_tpg(struct iscsi_portal_group *);
10extern void iscsit_put_tpg(struct iscsi_portal_group *); 10extern void iscsit_put_tpg(struct iscsi_portal_group *);
11extern void iscsit_clear_tpg_np_login_threads(struct iscsi_portal_group *); 11extern void iscsit_clear_tpg_np_login_threads(struct iscsi_portal_group *, bool);
12extern void iscsit_tpg_dump_params(struct iscsi_portal_group *); 12extern void iscsit_tpg_dump_params(struct iscsi_portal_group *);
13extern int iscsit_tpg_add_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *); 13extern int iscsit_tpg_add_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *);
14extern int iscsit_tpg_del_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *, 14extern int iscsit_tpg_del_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *,
diff --git a/drivers/target/iscsi/iscsi_target_tq.c b/drivers/target/iscsi/iscsi_target_tq.c
index 81289520f96b..601e9cc61e98 100644
--- a/drivers/target/iscsi/iscsi_target_tq.c
+++ b/drivers/target/iscsi/iscsi_target_tq.c
@@ -1,9 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * This file contains the iSCSI Login Thread and Thread Queue functions. 2 * This file contains the iSCSI Login Thread and Thread Queue functions.
3 * 3 *
4 * © Copyright 2007-2011 RisingTide Systems LLC. 4 * (c) Copyright 2007-2013 Datera, Inc.
5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 * 5 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 6 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 * 7 *
@@ -105,12 +103,11 @@ int iscsi_allocate_thread_sets(u32 thread_pair_count)
105 ts->status = ISCSI_THREAD_SET_FREE; 103 ts->status = ISCSI_THREAD_SET_FREE;
106 INIT_LIST_HEAD(&ts->ts_list); 104 INIT_LIST_HEAD(&ts->ts_list);
107 spin_lock_init(&ts->ts_state_lock); 105 spin_lock_init(&ts->ts_state_lock);
108 init_completion(&ts->rx_post_start_comp);
109 init_completion(&ts->tx_post_start_comp);
110 init_completion(&ts->rx_restart_comp); 106 init_completion(&ts->rx_restart_comp);
111 init_completion(&ts->tx_restart_comp); 107 init_completion(&ts->tx_restart_comp);
112 init_completion(&ts->rx_start_comp); 108 init_completion(&ts->rx_start_comp);
113 init_completion(&ts->tx_start_comp); 109 init_completion(&ts->tx_start_comp);
110 sema_init(&ts->ts_activate_sem, 0);
114 111
115 ts->create_threads = 1; 112 ts->create_threads = 1;
116 ts->tx_thread = kthread_run(iscsi_target_tx_thread, ts, "%s", 113 ts->tx_thread = kthread_run(iscsi_target_tx_thread, ts, "%s",
@@ -139,35 +136,44 @@ int iscsi_allocate_thread_sets(u32 thread_pair_count)
139 return allocated_thread_pair_count; 136 return allocated_thread_pair_count;
140} 137}
141 138
142void iscsi_deallocate_thread_sets(void) 139static void iscsi_deallocate_thread_one(struct iscsi_thread_set *ts)
143{ 140{
144 u32 released_count = 0; 141 spin_lock_bh(&ts->ts_state_lock);
145 struct iscsi_thread_set *ts = NULL; 142 ts->status = ISCSI_THREAD_SET_DIE;
146
147 while ((ts = iscsi_get_ts_from_inactive_list())) {
148 143
144 if (ts->rx_thread) {
145 complete(&ts->rx_start_comp);
146 spin_unlock_bh(&ts->ts_state_lock);
147 kthread_stop(ts->rx_thread);
149 spin_lock_bh(&ts->ts_state_lock); 148 spin_lock_bh(&ts->ts_state_lock);
150 ts->status = ISCSI_THREAD_SET_DIE; 149 }
150 if (ts->tx_thread) {
151 complete(&ts->tx_start_comp);
151 spin_unlock_bh(&ts->ts_state_lock); 152 spin_unlock_bh(&ts->ts_state_lock);
153 kthread_stop(ts->tx_thread);
154 spin_lock_bh(&ts->ts_state_lock);
155 }
156 spin_unlock_bh(&ts->ts_state_lock);
157 /*
158 * Release this thread_id in the thread_set_bitmap
159 */
160 spin_lock(&ts_bitmap_lock);
161 bitmap_release_region(iscsit_global->ts_bitmap,
162 ts->thread_id, get_order(1));
163 spin_unlock(&ts_bitmap_lock);
152 164
153 if (ts->rx_thread) { 165 kfree(ts);
154 send_sig(SIGINT, ts->rx_thread, 1); 166}
155 kthread_stop(ts->rx_thread);
156 }
157 if (ts->tx_thread) {
158 send_sig(SIGINT, ts->tx_thread, 1);
159 kthread_stop(ts->tx_thread);
160 }
161 /*
162 * Release this thread_id in the thread_set_bitmap
163 */
164 spin_lock(&ts_bitmap_lock);
165 bitmap_release_region(iscsit_global->ts_bitmap,
166 ts->thread_id, get_order(1));
167 spin_unlock(&ts_bitmap_lock);
168 167
168void iscsi_deallocate_thread_sets(void)
169{
170 struct iscsi_thread_set *ts = NULL;
171 u32 released_count = 0;
172
173 while ((ts = iscsi_get_ts_from_inactive_list())) {
174
175 iscsi_deallocate_thread_one(ts);
169 released_count++; 176 released_count++;
170 kfree(ts);
171 } 177 }
172 178
173 if (released_count) 179 if (released_count)
@@ -187,34 +193,13 @@ static void iscsi_deallocate_extra_thread_sets(void)
187 if (!ts) 193 if (!ts)
188 break; 194 break;
189 195
190 spin_lock_bh(&ts->ts_state_lock); 196 iscsi_deallocate_thread_one(ts);
191 ts->status = ISCSI_THREAD_SET_DIE;
192 spin_unlock_bh(&ts->ts_state_lock);
193
194 if (ts->rx_thread) {
195 send_sig(SIGINT, ts->rx_thread, 1);
196 kthread_stop(ts->rx_thread);
197 }
198 if (ts->tx_thread) {
199 send_sig(SIGINT, ts->tx_thread, 1);
200 kthread_stop(ts->tx_thread);
201 }
202 /*
203 * Release this thread_id in the thread_set_bitmap
204 */
205 spin_lock(&ts_bitmap_lock);
206 bitmap_release_region(iscsit_global->ts_bitmap,
207 ts->thread_id, get_order(1));
208 spin_unlock(&ts_bitmap_lock);
209
210 released_count++; 197 released_count++;
211 kfree(ts);
212 } 198 }
213 199
214 if (released_count) { 200 if (released_count)
215 pr_debug("Stopped %d thread set(s) (%d total threads)." 201 pr_debug("Stopped %d thread set(s) (%d total threads)."
216 "\n", released_count, released_count * 2); 202 "\n", released_count, released_count * 2);
217 }
218} 203}
219 204
220void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set *ts) 205void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set *ts)
@@ -224,37 +209,23 @@ void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set
224 spin_lock_bh(&ts->ts_state_lock); 209 spin_lock_bh(&ts->ts_state_lock);
225 conn->thread_set = ts; 210 conn->thread_set = ts;
226 ts->conn = conn; 211 ts->conn = conn;
212 ts->status = ISCSI_THREAD_SET_ACTIVE;
227 spin_unlock_bh(&ts->ts_state_lock); 213 spin_unlock_bh(&ts->ts_state_lock);
228 /* 214
229 * Start up the RX thread and wait on rx_post_start_comp. The RX
230 * Thread will then do the same for the TX Thread in
231 * iscsi_rx_thread_pre_handler().
232 */
233 complete(&ts->rx_start_comp); 215 complete(&ts->rx_start_comp);
234 wait_for_completion(&ts->rx_post_start_comp); 216 complete(&ts->tx_start_comp);
217
218 down(&ts->ts_activate_sem);
235} 219}
236 220
237struct iscsi_thread_set *iscsi_get_thread_set(void) 221struct iscsi_thread_set *iscsi_get_thread_set(void)
238{ 222{
239 int allocate_ts = 0; 223 struct iscsi_thread_set *ts;
240 struct completion comp; 224
241 struct iscsi_thread_set *ts = NULL;
242 /*
243 * If no inactive thread set is available on the first call to
244 * iscsi_get_ts_from_inactive_list(), sleep for a second and
245 * try again. If still none are available after two attempts,
246 * allocate a set ourselves.
247 */
248get_set: 225get_set:
249 ts = iscsi_get_ts_from_inactive_list(); 226 ts = iscsi_get_ts_from_inactive_list();
250 if (!ts) { 227 if (!ts) {
251 if (allocate_ts == 2) 228 iscsi_allocate_thread_sets(1);
252 iscsi_allocate_thread_sets(1);
253
254 init_completion(&comp);
255 wait_for_completion_timeout(&comp, 1 * HZ);
256
257 allocate_ts++;
258 goto get_set; 229 goto get_set;
259 } 230 }
260 231
@@ -263,6 +234,7 @@ get_set:
263 ts->thread_count = 2; 234 ts->thread_count = 2;
264 init_completion(&ts->rx_restart_comp); 235 init_completion(&ts->rx_restart_comp);
265 init_completion(&ts->tx_restart_comp); 236 init_completion(&ts->tx_restart_comp);
237 sema_init(&ts->ts_activate_sem, 0);
266 238
267 return ts; 239 return ts;
268} 240}
@@ -400,7 +372,8 @@ static void iscsi_check_to_add_additional_sets(void)
400static int iscsi_signal_thread_pre_handler(struct iscsi_thread_set *ts) 372static int iscsi_signal_thread_pre_handler(struct iscsi_thread_set *ts)
401{ 373{
402 spin_lock_bh(&ts->ts_state_lock); 374 spin_lock_bh(&ts->ts_state_lock);
403 if ((ts->status == ISCSI_THREAD_SET_DIE) || signal_pending(current)) { 375 if (ts->status == ISCSI_THREAD_SET_DIE || kthread_should_stop() ||
376 signal_pending(current)) {
404 spin_unlock_bh(&ts->ts_state_lock); 377 spin_unlock_bh(&ts->ts_state_lock);
405 return -1; 378 return -1;
406 } 379 }
@@ -419,7 +392,8 @@ struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *ts)
419 goto sleep; 392 goto sleep;
420 } 393 }
421 394
422 flush_signals(current); 395 if (ts->status != ISCSI_THREAD_SET_DIE)
396 flush_signals(current);
423 397
424 if (ts->delay_inactive && (--ts->thread_count == 0)) { 398 if (ts->delay_inactive && (--ts->thread_count == 0)) {
425 spin_unlock_bh(&ts->ts_state_lock); 399 spin_unlock_bh(&ts->ts_state_lock);
@@ -446,18 +420,19 @@ sleep:
446 if (iscsi_signal_thread_pre_handler(ts) < 0) 420 if (iscsi_signal_thread_pre_handler(ts) < 0)
447 return NULL; 421 return NULL;
448 422
423 iscsi_check_to_add_additional_sets();
424
425 spin_lock_bh(&ts->ts_state_lock);
449 if (!ts->conn) { 426 if (!ts->conn) {
450 pr_err("struct iscsi_thread_set->conn is NULL for" 427 pr_err("struct iscsi_thread_set->conn is NULL for"
451 " thread_id: %d, going back to sleep\n", ts->thread_id); 428 " RX thread_id: %s/%d\n", current->comm, current->pid);
452 goto sleep; 429 spin_unlock_bh(&ts->ts_state_lock);
430 return NULL;
453 } 431 }
454 iscsi_check_to_add_additional_sets();
455 /*
456 * The RX Thread starts up the TX Thread and sleeps.
457 */
458 ts->thread_clear |= ISCSI_CLEAR_RX_THREAD; 432 ts->thread_clear |= ISCSI_CLEAR_RX_THREAD;
459 complete(&ts->tx_start_comp); 433 spin_unlock_bh(&ts->ts_state_lock);
460 wait_for_completion(&ts->tx_post_start_comp); 434
435 up(&ts->ts_activate_sem);
461 436
462 return ts->conn; 437 return ts->conn;
463} 438}
@@ -472,7 +447,8 @@ struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *ts)
472 goto sleep; 447 goto sleep;
473 } 448 }
474 449
475 flush_signals(current); 450 if (ts->status != ISCSI_THREAD_SET_DIE)
451 flush_signals(current);
476 452
477 if (ts->delay_inactive && (--ts->thread_count == 0)) { 453 if (ts->delay_inactive && (--ts->thread_count == 0)) {
478 spin_unlock_bh(&ts->ts_state_lock); 454 spin_unlock_bh(&ts->ts_state_lock);
@@ -498,27 +474,20 @@ sleep:
498 if (iscsi_signal_thread_pre_handler(ts) < 0) 474 if (iscsi_signal_thread_pre_handler(ts) < 0)
499 return NULL; 475 return NULL;
500 476
501 if (!ts->conn) {
502 pr_err("struct iscsi_thread_set->conn is NULL for "
503 " thread_id: %d, going back to sleep\n",
504 ts->thread_id);
505 goto sleep;
506 }
507
508 iscsi_check_to_add_additional_sets(); 477 iscsi_check_to_add_additional_sets();
509 /*
510 * From the TX thread, up the tx_post_start_comp that the RX Thread is
511 * sleeping on in iscsi_rx_thread_pre_handler(), then up the
512 * rx_post_start_comp that iscsi_activate_thread_set() is sleeping on.
513 */
514 ts->thread_clear |= ISCSI_CLEAR_TX_THREAD;
515 complete(&ts->tx_post_start_comp);
516 complete(&ts->rx_post_start_comp);
517 478
518 spin_lock_bh(&ts->ts_state_lock); 479 spin_lock_bh(&ts->ts_state_lock);
519 ts->status = ISCSI_THREAD_SET_ACTIVE; 480 if (!ts->conn) {
481 pr_err("struct iscsi_thread_set->conn is NULL for"
482 " TX thread_id: %s/%d\n", current->comm, current->pid);
483 spin_unlock_bh(&ts->ts_state_lock);
484 return NULL;
485 }
486 ts->thread_clear |= ISCSI_CLEAR_TX_THREAD;
520 spin_unlock_bh(&ts->ts_state_lock); 487 spin_unlock_bh(&ts->ts_state_lock);
521 488
489 up(&ts->ts_activate_sem);
490
522 return ts->conn; 491 return ts->conn;
523} 492}
524 493
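
The thread-queue rework above replaces the rx_post_start_comp/tx_post_start_comp handshake chain with a single ts_activate_sem: the activating context completes both start completions and then sleeps in down(), and each thread pre-handler calls up() once it has published its state under ts_state_lock. A sketch of that shape, with hypothetical my_* names:

#include <linux/semaphore.h>

static struct semaphore my_activate_sem;

/* RX/TX thread side: signal the activator that this thread is ready */
static void my_thread_ready(void)
{
	up(&my_activate_sem);
}

/* Activator side: a semaphore initialised to 0 makes down() block
 * until the first up(); sema_init() re-arms it for the next cycle,
 * as iscsi_get_thread_set() does above. */
static void my_activate(void)
{
	sema_init(&my_activate_sem, 0);
	/* ... complete() the rx/tx start completions here ... */
	down(&my_activate_sem);
}
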
diff --git a/drivers/target/iscsi/iscsi_target_tq.h b/drivers/target/iscsi/iscsi_target_tq.h
index 547d11831282..cc1eede5ab3a 100644
--- a/drivers/target/iscsi/iscsi_target_tq.h
+++ b/drivers/target/iscsi/iscsi_target_tq.h
@@ -64,10 +64,6 @@ struct iscsi_thread_set {
64 struct iscsi_conn *conn; 64 struct iscsi_conn *conn;
65 /* used for controlling ts state accesses */ 65 /* used for controlling ts state accesses */
66 spinlock_t ts_state_lock; 66 spinlock_t ts_state_lock;
67 /* Used for rx side post startup */
68 struct completion rx_post_start_comp;
69 /* Used for tx side post startup */
70 struct completion tx_post_start_comp;
71 /* used for restarting thread queue */ 67 /* used for restarting thread queue */
72 struct completion rx_restart_comp; 68 struct completion rx_restart_comp;
73 /* used for restarting thread queue */ 69 /* used for restarting thread queue */
@@ -82,6 +78,7 @@ struct iscsi_thread_set {
82 struct task_struct *tx_thread; 78 struct task_struct *tx_thread;
83 /* struct iscsi_thread_set in list list head*/ 79 /* struct iscsi_thread_set in list list head*/
84 struct list_head ts_list; 80 struct list_head ts_list;
81 struct semaphore ts_activate_sem;
85}; 82};
86 83
87#endif /*** ISCSI_THREAD_QUEUE_H ***/ 84#endif /*** ISCSI_THREAD_QUEUE_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 1df06d5e4e01..f2de28e178fd 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -1,9 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * This file contains the iSCSI Target specific utility functions. 2 * This file contains the iSCSI Target specific utility functions.
3 * 3 *
4 * © Copyright 2007-2011 RisingTide Systems LLC. 4 * (c) Copyright 2007-2013 Datera, Inc.
5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 * 5 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 6 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 * 7 *
@@ -19,6 +17,7 @@
19 ******************************************************************************/ 17 ******************************************************************************/
20 18
21#include <linux/list.h> 19#include <linux/list.h>
20#include <linux/percpu_ida.h>
22#include <scsi/scsi_tcq.h> 21#include <scsi/scsi_tcq.h>
23#include <scsi/iscsi_proto.h> 22#include <scsi/iscsi_proto.h>
24#include <target/target_core_base.h> 23#include <target/target_core_base.h>
@@ -149,18 +148,6 @@ void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)
149 spin_unlock_bh(&cmd->r2t_lock); 148 spin_unlock_bh(&cmd->r2t_lock);
150} 149}
151 150
152struct iscsi_cmd *iscsit_alloc_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
153{
154 struct iscsi_cmd *cmd;
155
156 cmd = kmem_cache_zalloc(lio_cmd_cache, gfp_mask);
157 if (!cmd)
158 return NULL;
159
160 cmd->release_cmd = &iscsit_release_cmd;
161 return cmd;
162}
163
164/* 151/*
165 * May be called from software interrupt (timer) context for allocating 152 * May be called from software interrupt (timer) context for allocating
166 * iSCSI NopINs. 153 * iSCSI NopINs.
@@ -168,12 +155,15 @@ struct iscsi_cmd *iscsit_alloc_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
168struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask) 155struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
169{ 156{
170 struct iscsi_cmd *cmd; 157 struct iscsi_cmd *cmd;
158 struct se_session *se_sess = conn->sess->se_sess;
159 int size, tag;
171 160
172 cmd = conn->conn_transport->iscsit_alloc_cmd(conn, gfp_mask); 161 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, gfp_mask);
173 if (!cmd) { 162 size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
174 pr_err("Unable to allocate memory for struct iscsi_cmd.\n"); 163 cmd = (struct iscsi_cmd *)(se_sess->sess_cmd_map + (tag * size));
175 return NULL; 164 memset(cmd, 0, size);
176 } 165
166 cmd->se_cmd.map_tag = tag;
177 cmd->conn = conn; 167 cmd->conn = conn;
178 INIT_LIST_HEAD(&cmd->i_conn_node); 168 INIT_LIST_HEAD(&cmd->i_conn_node);
179 INIT_LIST_HEAD(&cmd->datain_list); 169 INIT_LIST_HEAD(&cmd->datain_list);
@@ -689,6 +679,16 @@ void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
689 679
690void iscsit_release_cmd(struct iscsi_cmd *cmd) 680void iscsit_release_cmd(struct iscsi_cmd *cmd)
691{ 681{
682 struct iscsi_session *sess;
683 struct se_cmd *se_cmd = &cmd->se_cmd;
684
685 if (cmd->conn)
686 sess = cmd->conn->sess;
687 else
688 sess = cmd->sess;
689
690 BUG_ON(!sess || !sess->se_sess);
691
692 kfree(cmd->buf_ptr); 692 kfree(cmd->buf_ptr);
693 kfree(cmd->pdu_list); 693 kfree(cmd->pdu_list);
694 kfree(cmd->seq_list); 694 kfree(cmd->seq_list);
@@ -696,8 +696,9 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd)
696 kfree(cmd->iov_data); 696 kfree(cmd->iov_data);
697 kfree(cmd->text_in_ptr); 697 kfree(cmd->text_in_ptr);
698 698
699 kmem_cache_free(lio_cmd_cache, cmd); 699 percpu_ida_free(&sess->se_sess->sess_tag_pool, se_cmd->map_tag);
700} 700}
701EXPORT_SYMBOL(iscsit_release_cmd);
701 702
702static void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd, 703static void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd,
703 bool check_queues) 704 bool check_queues)
@@ -761,7 +762,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
761 /* Fall-through */ 762 /* Fall-through */
762 default: 763 default:
763 __iscsit_free_cmd(cmd, false, shutdown); 764 __iscsit_free_cmd(cmd, false, shutdown);
764 cmd->release_cmd(cmd); 765 iscsit_release_cmd(cmd);
765 break; 766 break;
766 } 767 }
767} 768}
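
iscsit_allocate_cmd() above drops kmem_cache_zalloc() in favour of session-wide pre-allocation: transport_alloc_session_tags() (from the locate_portal changes earlier) sizes a flat sess_cmd_map of tag_num slots of sizeof(struct iscsi_cmd) + priv_size bytes, and percpu_ida hands out slot indices with per-cpu tag caching so the fast path avoids a shared allocator lock. A hedged sketch of the same pool shape; the my_* names are hypothetical, and unlike the hunk above this sketch also checks the tag for allocation failure:

#include <linux/percpu_ida.h>
#include <linux/vmalloc.h>
#include <linux/string.h>

struct my_pool {
	struct percpu_ida tags;
	void *map;		/* tag_num slots of slot_size bytes each */
	int slot_size;
};

static int my_pool_init(struct my_pool *p, int tag_num, int slot_size)
{
	p->slot_size = slot_size;
	p->map = vzalloc(tag_num * slot_size);
	if (!p->map)
		return -ENOMEM;
	return percpu_ida_init(&p->tags, tag_num);
}

static void *my_pool_alloc(struct my_pool *p, gfp_t gfp)
{
	int tag = percpu_ida_alloc(&p->tags, gfp);

	if (tag < 0)
		return NULL;	/* pool exhausted under GFP_ATOMIC */
	memset(p->map + tag * p->slot_size, 0, p->slot_size);
	return p->map + tag * p->slot_size;
}

static void my_pool_free(struct my_pool *p, int tag)
{
	percpu_ida_free(&p->tags, tag);	/* mirrors iscsit_release_cmd() */
}
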
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 568ad25f25d3..0f6d69dabca1 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -3,7 +3,7 @@
3 * This file contains the Linux/SCSI LLD virtual SCSI initiator driver 3 * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
4 * for emulated SAS initiator ports 4 * for emulated SAS initiator ports
5 * 5 *
6 * © Copyright 2011 RisingTide Systems LLC. 6 * © Copyright 2011-2013 Datera, Inc.
7 * 7 *
8 * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 8 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
9 * 9 *
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index cbe48ab41745..47244102281e 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * This file contains SPC-3 compliant asymmetric logical unit assignment (ALUA) 4 * This file contains SPC-3 compliant asymmetric logical unit assignment (ALUA)
5 * 5 *
6 * (c) Copyright 2009-2012 RisingTide Systems LLC. 6 * (c) Copyright 2009-2013 Datera, Inc.
7 * 7 *
8 * Nicholas A. Bellinger <nab@kernel.org> 8 * Nicholas A. Bellinger <nab@kernel.org>
9 * 9 *
@@ -557,6 +557,9 @@ target_alua_state_check(struct se_cmd *cmd)
557 * a ALUA logical unit group. 557 * a ALUA logical unit group.
558 */ 558 */
559 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; 559 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
560 if (!tg_pt_gp_mem)
561 return 0;
562
560 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 563 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
561 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 564 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
562 out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); 565 out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
@@ -730,7 +733,7 @@ static int core_alua_write_tpg_metadata(
730 if (ret < 0) 733 if (ret < 0)
731 pr_err("Error writing ALUA metadata file: %s\n", path); 734 pr_err("Error writing ALUA metadata file: %s\n", path);
732 fput(file); 735 fput(file);
733 return ret ? -EIO : 0; 736 return (ret < 0) ? -EIO : 0;
734} 737}
735 738
736/* 739/*
@@ -1756,10 +1759,10 @@ ssize_t core_alua_store_access_type(
1756 unsigned long tmp; 1759 unsigned long tmp;
1757 int ret; 1760 int ret;
1758 1761
1759 ret = strict_strtoul(page, 0, &tmp); 1762 ret = kstrtoul(page, 0, &tmp);
1760 if (ret < 0) { 1763 if (ret < 0) {
1761 pr_err("Unable to extract alua_access_type\n"); 1764 pr_err("Unable to extract alua_access_type\n");
1762 return -EINVAL; 1765 return ret;
1763 } 1766 }
1764 if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) { 1767 if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
1765 pr_err("Illegal value for alua_access_type:" 1768 pr_err("Illegal value for alua_access_type:"
@@ -1794,10 +1797,10 @@ ssize_t core_alua_store_nonop_delay_msecs(
1794 unsigned long tmp; 1797 unsigned long tmp;
1795 int ret; 1798 int ret;
1796 1799
1797 ret = strict_strtoul(page, 0, &tmp); 1800 ret = kstrtoul(page, 0, &tmp);
1798 if (ret < 0) { 1801 if (ret < 0) {
1799 pr_err("Unable to extract nonop_delay_msecs\n"); 1802 pr_err("Unable to extract nonop_delay_msecs\n");
1800 return -EINVAL; 1803 return ret;
1801 } 1804 }
1802 if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) { 1805 if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
1803 pr_err("Passed nonop_delay_msecs: %lu, exceeds" 1806 pr_err("Passed nonop_delay_msecs: %lu, exceeds"
@@ -1825,10 +1828,10 @@ ssize_t core_alua_store_trans_delay_msecs(
1825 unsigned long tmp; 1828 unsigned long tmp;
1826 int ret; 1829 int ret;
1827 1830
1828 ret = strict_strtoul(page, 0, &tmp); 1831 ret = kstrtoul(page, 0, &tmp);
1829 if (ret < 0) { 1832 if (ret < 0) {
1830 pr_err("Unable to extract trans_delay_msecs\n"); 1833 pr_err("Unable to extract trans_delay_msecs\n");
1831 return -EINVAL; 1834 return ret;
1832 } 1835 }
1833 if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) { 1836 if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
1834 pr_err("Passed trans_delay_msecs: %lu, exceeds" 1837 pr_err("Passed trans_delay_msecs: %lu, exceeds"
@@ -1856,10 +1859,10 @@ ssize_t core_alua_store_implict_trans_secs(
1856 unsigned long tmp; 1859 unsigned long tmp;
1857 int ret; 1860 int ret;
1858 1861
1859 ret = strict_strtoul(page, 0, &tmp); 1862 ret = kstrtoul(page, 0, &tmp);
1860 if (ret < 0) { 1863 if (ret < 0) {
1861 pr_err("Unable to extract implict_trans_secs\n"); 1864 pr_err("Unable to extract implict_trans_secs\n");
1862 return -EINVAL; 1865 return ret;
1863 } 1866 }
1864 if (tmp > ALUA_MAX_IMPLICT_TRANS_SECS) { 1867 if (tmp > ALUA_MAX_IMPLICT_TRANS_SECS) {
1865 pr_err("Passed implict_trans_secs: %lu, exceeds" 1868 pr_err("Passed implict_trans_secs: %lu, exceeds"
@@ -1887,10 +1890,10 @@ ssize_t core_alua_store_preferred_bit(
1887 unsigned long tmp; 1890 unsigned long tmp;
1888 int ret; 1891 int ret;
1889 1892
1890 ret = strict_strtoul(page, 0, &tmp); 1893 ret = kstrtoul(page, 0, &tmp);
1891 if (ret < 0) { 1894 if (ret < 0) {
1892 pr_err("Unable to extract preferred ALUA value\n"); 1895 pr_err("Unable to extract preferred ALUA value\n");
1893 return -EINVAL; 1896 return ret;
1894 } 1897 }
1895 if ((tmp != 0) && (tmp != 1)) { 1898 if ((tmp != 0) && (tmp != 1)) {
1896 pr_err("Illegal value for preferred ALUA: %lu\n", tmp); 1899 pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
@@ -1922,10 +1925,10 @@ ssize_t core_alua_store_offline_bit(
1922 if (!lun->lun_sep) 1925 if (!lun->lun_sep)
1923 return -ENODEV; 1926 return -ENODEV;
1924 1927
1925 ret = strict_strtoul(page, 0, &tmp); 1928 ret = kstrtoul(page, 0, &tmp);
1926 if (ret < 0) { 1929 if (ret < 0) {
1927 pr_err("Unable to extract alua_tg_pt_offline value\n"); 1930 pr_err("Unable to extract alua_tg_pt_offline value\n");
1928 return -EINVAL; 1931 return ret;
1929 } 1932 }
1930 if ((tmp != 0) && (tmp != 1)) { 1933 if ((tmp != 0) && (tmp != 1)) {
1931 pr_err("Illegal value for alua_tg_pt_offline: %lu\n", 1934 pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
@@ -1961,10 +1964,10 @@ ssize_t core_alua_store_secondary_status(
1961 unsigned long tmp; 1964 unsigned long tmp;
1962 int ret; 1965 int ret;
1963 1966
1964 ret = strict_strtoul(page, 0, &tmp); 1967 ret = kstrtoul(page, 0, &tmp);
1965 if (ret < 0) { 1968 if (ret < 0) {
1966 pr_err("Unable to extract alua_tg_pt_status\n"); 1969 pr_err("Unable to extract alua_tg_pt_status\n");
1967 return -EINVAL; 1970 return ret;
1968 } 1971 }
1969 if ((tmp != ALUA_STATUS_NONE) && 1972 if ((tmp != ALUA_STATUS_NONE) &&
1970 (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) && 1973 (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
@@ -1994,10 +1997,10 @@ ssize_t core_alua_store_secondary_write_metadata(
1994 unsigned long tmp; 1997 unsigned long tmp;
1995 int ret; 1998 int ret;
1996 1999
1997 ret = strict_strtoul(page, 0, &tmp); 2000 ret = kstrtoul(page, 0, &tmp);
1998 if (ret < 0) { 2001 if (ret < 0) {
1999 pr_err("Unable to extract alua_tg_pt_write_md\n"); 2002 pr_err("Unable to extract alua_tg_pt_write_md\n");
2000 return -EINVAL; 2003 return ret;
2001 } 2004 }
2002 if ((tmp != 0) && (tmp != 1)) { 2005 if ((tmp != 0) && (tmp != 1)) {
2003 pr_err("Illegal value for alua_tg_pt_write_md:" 2006 pr_err("Illegal value for alua_tg_pt_write_md:"
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index e4d22933efaf..82e81c542e43 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * This file contains ConfigFS logic for the Generic Target Engine project. 4 * This file contains ConfigFS logic for the Generic Target Engine project.
5 * 5 *
6 * (c) Copyright 2008-2012 RisingTide Systems LLC. 6 * (c) Copyright 2008-2013 Datera, Inc.
7 * 7 *
8 * Nicholas A. Bellinger <nab@kernel.org> 8 * Nicholas A. Bellinger <nab@kernel.org>
9 * 9 *
@@ -48,6 +48,7 @@
48#include "target_core_alua.h" 48#include "target_core_alua.h"
49#include "target_core_pr.h" 49#include "target_core_pr.h"
50#include "target_core_rd.h" 50#include "target_core_rd.h"
51#include "target_core_xcopy.h"
51 52
52extern struct t10_alua_lu_gp *default_lu_gp; 53extern struct t10_alua_lu_gp *default_lu_gp;
53 54
@@ -268,7 +269,7 @@ static struct configfs_subsystem target_core_fabrics = {
268 }, 269 },
269}; 270};
270 271
271static struct configfs_subsystem *target_core_subsystem[] = { 272struct configfs_subsystem *target_core_subsystem[] = {
272 &target_core_fabrics, 273 &target_core_fabrics,
273 NULL, 274 NULL,
274}; 275};
@@ -577,9 +578,9 @@ static ssize_t target_core_dev_store_attr_##_name( \
577 unsigned long val; \ 578 unsigned long val; \
578 int ret; \ 579 int ret; \
579 \ 580 \
580 ret = strict_strtoul(page, 0, &val); \ 581 ret = kstrtoul(page, 0, &val); \
581 if (ret < 0) { \ 582 if (ret < 0) { \
582 pr_err("strict_strtoul() failed with" \ 583 pr_err("kstrtoul() failed with" \
583 " ret: %d\n", ret); \ 584 " ret: %d\n", ret); \
584 return -EINVAL; \ 585 return -EINVAL; \
585 } \ 586 } \
@@ -636,6 +637,12 @@ SE_DEV_ATTR(emulate_tpu, S_IRUGO | S_IWUSR);
636DEF_DEV_ATTRIB(emulate_tpws); 637DEF_DEV_ATTRIB(emulate_tpws);
637SE_DEV_ATTR(emulate_tpws, S_IRUGO | S_IWUSR); 638SE_DEV_ATTR(emulate_tpws, S_IRUGO | S_IWUSR);
638 639
640DEF_DEV_ATTRIB(emulate_caw);
641SE_DEV_ATTR(emulate_caw, S_IRUGO | S_IWUSR);
642
643DEF_DEV_ATTRIB(emulate_3pc);
644SE_DEV_ATTR(emulate_3pc, S_IRUGO | S_IWUSR);
645
639DEF_DEV_ATTRIB(enforce_pr_isids); 646DEF_DEV_ATTRIB(enforce_pr_isids);
640SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR); 647SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR);
641 648
@@ -693,6 +700,8 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
693 &target_core_dev_attrib_emulate_tas.attr, 700 &target_core_dev_attrib_emulate_tas.attr,
694 &target_core_dev_attrib_emulate_tpu.attr, 701 &target_core_dev_attrib_emulate_tpu.attr,
695 &target_core_dev_attrib_emulate_tpws.attr, 702 &target_core_dev_attrib_emulate_tpws.attr,
703 &target_core_dev_attrib_emulate_caw.attr,
704 &target_core_dev_attrib_emulate_3pc.attr,
696 &target_core_dev_attrib_enforce_pr_isids.attr, 705 &target_core_dev_attrib_enforce_pr_isids.attr,
697 &target_core_dev_attrib_is_nonrot.attr, 706 &target_core_dev_attrib_is_nonrot.attr,
698 &target_core_dev_attrib_emulate_rest_reord.attr, 707 &target_core_dev_attrib_emulate_rest_reord.attr,
@@ -1310,9 +1319,9 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1310 ret = -ENOMEM; 1319 ret = -ENOMEM;
1311 goto out; 1320 goto out;
1312 } 1321 }
1313 ret = strict_strtoull(arg_p, 0, &tmp_ll); 1322 ret = kstrtoull(arg_p, 0, &tmp_ll);
1314 if (ret < 0) { 1323 if (ret < 0) {
1315 pr_err("strict_strtoull() failed for" 1324 pr_err("kstrtoull() failed for"
1316 " sa_res_key=\n"); 1325 " sa_res_key=\n");
1317 goto out; 1326 goto out;
1318 } 1327 }
@@ -1836,11 +1845,11 @@ static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id(
1836 unsigned long lu_gp_id; 1845 unsigned long lu_gp_id;
1837 int ret; 1846 int ret;
1838 1847
1839 ret = strict_strtoul(page, 0, &lu_gp_id); 1848 ret = kstrtoul(page, 0, &lu_gp_id);
1840 if (ret < 0) { 1849 if (ret < 0) {
1841 pr_err("strict_strtoul() returned %d for" 1850 pr_err("kstrtoul() returned %d for"
1842 " lu_gp_id\n", ret); 1851 " lu_gp_id\n", ret);
1843 return -EINVAL; 1852 return ret;
1844 } 1853 }
1845 if (lu_gp_id > 0x0000ffff) { 1854 if (lu_gp_id > 0x0000ffff) {
1846 pr_err("ALUA lu_gp_id: %lu exceeds maximum:" 1855 pr_err("ALUA lu_gp_id: %lu exceeds maximum:"
@@ -2032,11 +2041,11 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
2032 return -EINVAL; 2041 return -EINVAL;
2033 } 2042 }
2034 2043
2035 ret = strict_strtoul(page, 0, &tmp); 2044 ret = kstrtoul(page, 0, &tmp);
2036 if (ret < 0) { 2045 if (ret < 0) {
2037 pr_err("Unable to extract new ALUA access state from" 2046 pr_err("Unable to extract new ALUA access state from"
2038 " %s\n", page); 2047 " %s\n", page);
2039 return -EINVAL; 2048 return ret;
2040 } 2049 }
2041 new_state = (int)tmp; 2050 new_state = (int)tmp;
2042 2051
@@ -2079,11 +2088,11 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(
2079 return -EINVAL; 2088 return -EINVAL;
2080 } 2089 }
2081 2090
2082 ret = strict_strtoul(page, 0, &tmp); 2091 ret = kstrtoul(page, 0, &tmp);
2083 if (ret < 0) { 2092 if (ret < 0) {
2084 pr_err("Unable to extract new ALUA access status" 2093 pr_err("Unable to extract new ALUA access status"
2085 " from %s\n", page); 2094 " from %s\n", page);
2086 return -EINVAL; 2095 return ret;
2087 } 2096 }
2088 new_status = (int)tmp; 2097 new_status = (int)tmp;
2089 2098
@@ -2139,10 +2148,10 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_write_metadata(
2139 unsigned long tmp; 2148 unsigned long tmp;
2140 int ret; 2149 int ret;
2141 2150
2142 ret = strict_strtoul(page, 0, &tmp); 2151 ret = kstrtoul(page, 0, &tmp);
2143 if (ret < 0) { 2152 if (ret < 0) {
2144 pr_err("Unable to extract alua_write_metadata\n"); 2153 pr_err("Unable to extract alua_write_metadata\n");
2145 return -EINVAL; 2154 return ret;
2146 } 2155 }
2147 2156
2148 if ((tmp != 0) && (tmp != 1)) { 2157 if ((tmp != 0) && (tmp != 1)) {
@@ -2263,11 +2272,11 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id(
2263 unsigned long tg_pt_gp_id; 2272 unsigned long tg_pt_gp_id;
2264 int ret; 2273 int ret;
2265 2274
2266 ret = strict_strtoul(page, 0, &tg_pt_gp_id); 2275 ret = kstrtoul(page, 0, &tg_pt_gp_id);
2267 if (ret < 0) { 2276 if (ret < 0) {
2268 pr_err("strict_strtoul() returned %d for" 2277 pr_err("kstrtoul() returned %d for"
2269 " tg_pt_gp_id\n", ret); 2278 " tg_pt_gp_id\n", ret);
2270 return -EINVAL; 2279 return ret;
2271 } 2280 }
2272 if (tg_pt_gp_id > 0x0000ffff) { 2281 if (tg_pt_gp_id > 0x0000ffff) {
2273 pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum:" 2282 pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum:"
@@ -2676,10 +2685,10 @@ static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba,
2676 if (transport->pmode_enable_hba == NULL) 2685 if (transport->pmode_enable_hba == NULL)
2677 return -EINVAL; 2686 return -EINVAL;
2678 2687
2679 ret = strict_strtoul(page, 0, &mode_flag); 2688 ret = kstrtoul(page, 0, &mode_flag);
2680 if (ret < 0) { 2689 if (ret < 0) {
2681 pr_err("Unable to extract hba mode flag: %d\n", ret); 2690 pr_err("Unable to extract hba mode flag: %d\n", ret);
2682 return -EINVAL; 2691 return ret;
2683 } 2692 }
2684 2693
2685 if (hba->dev_count) { 2694 if (hba->dev_count) {
@@ -2767,11 +2776,11 @@ static struct config_group *target_core_call_addhbatotarget(
2767 str++; /* Skip to start of plugin dependent ID */ 2776 str++; /* Skip to start of plugin dependent ID */
2768 } 2777 }
2769 2778
2770 ret = strict_strtoul(str, 0, &plugin_dep_id); 2779 ret = kstrtoul(str, 0, &plugin_dep_id);
2771 if (ret < 0) { 2780 if (ret < 0) {
2772 pr_err("strict_strtoul() returned %d for" 2781 pr_err("kstrtoul() returned %d for"
2773 " plugin_dep_id\n", ret); 2782 " plugin_dep_id\n", ret);
2774 return ERR_PTR(-EINVAL); 2783 return ERR_PTR(ret);
2775 } 2784 }
2776 /* 2785 /*
2777 * Load up TCM subsystem plugins if they have not already been loaded. 2786 * Load up TCM subsystem plugins if they have not already been loaded.
@@ -2927,6 +2936,10 @@ static int __init target_core_init_configfs(void)
2927 if (ret < 0) 2936 if (ret < 0)
2928 goto out; 2937 goto out;
2929 2938
2939 ret = target_xcopy_setup_pt();
2940 if (ret < 0)
2941 goto out;
2942
2930 return 0; 2943 return 0;
2931 2944
2932out: 2945out:
@@ -2999,6 +3012,7 @@ static void __exit target_core_exit_configfs(void)
2999 3012
3000 core_dev_release_virtual_lun0(); 3013 core_dev_release_virtual_lun0();
3001 rd_module_exit(); 3014 rd_module_exit();
3015 target_xcopy_release_pt();
3002 release_se_kmem_caches(); 3016 release_se_kmem_caches();
3003} 3017}
3004 3018
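The strict_strtoul() -> kstrtoul() conversions above are mechanical, but they also change the error path: the negative errno from the parser is now propagated instead of being flattened to -EINVAL. A minimal sketch of the resulting configfs store shape (the attribute and function names here are hypothetical, not from the patch):

	static ssize_t example_store_attr(struct se_device *dev,
					  const char *page, size_t count)
	{
		unsigned long tmp;
		int ret;

		ret = kstrtoul(page, 0, &tmp);	/* was strict_strtoul() */
		if (ret < 0)
			return ret;		/* was: return -EINVAL */
		if (tmp != 0 && tmp != 1)
			return -EINVAL;		/* range checks stay local */
		/* ...apply tmp to the attribute... */
		return count;
	}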
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 8f4142fe5f19..d90dbb0f1a69 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -4,7 +4,7 @@
4 * This file contains the TCM Virtual Device and Disk Transport 4 * This file contains the TCM Virtual Device and Disk Transport
5 * agnostic related functions. 5 * agnostic related functions.
6 * 6 *
7 * (c) Copyright 2003-2012 RisingTide Systems LLC. 7 * (c) Copyright 2003-2013 Datera, Inc.
8 * 8 *
9 * Nicholas A. Bellinger <nab@kernel.org> 9 * Nicholas A. Bellinger <nab@kernel.org>
10 * 10 *
@@ -47,6 +47,9 @@
47#include "target_core_pr.h" 47#include "target_core_pr.h"
48#include "target_core_ua.h" 48#include "target_core_ua.h"
49 49
50DEFINE_MUTEX(g_device_mutex);
51LIST_HEAD(g_device_list);
52
50static struct se_hba *lun0_hba; 53static struct se_hba *lun0_hba;
51/* not static, needed by tpg.c */ 54/* not static, needed by tpg.c */
52struct se_device *g_lun0_dev; 55struct se_device *g_lun0_dev;
@@ -890,6 +893,32 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
890 return 0; 893 return 0;
891} 894}
892 895
896int se_dev_set_emulate_caw(struct se_device *dev, int flag)
897{
898 if (flag != 0 && flag != 1) {
899 pr_err("Illegal value %d\n", flag);
900 return -EINVAL;
901 }
902 dev->dev_attrib.emulate_caw = flag;
903 pr_debug("dev[%p]: SE Device CompareAndWrite (AtomicTestandSet): %d\n",
904 dev, flag);
905
906 return 0;
907}
908
909int se_dev_set_emulate_3pc(struct se_device *dev, int flag)
910{
911 if (flag != 0 && flag != 1) {
912 pr_err("Illegal value %d\n", flag);
913 return -EINVAL;
914 }
915 dev->dev_attrib.emulate_3pc = flag;
916 pr_debug("dev[%p]: SE Device 3rd Party Copy (EXTENDED_COPY): %d\n",
917 dev, flag);
918
919 return 0;
920}
921
893int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) 922int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
894{ 923{
895 if ((flag != 0) && (flag != 1)) { 924 if ((flag != 0) && (flag != 1)) {
@@ -1393,6 +1422,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
1393 INIT_LIST_HEAD(&dev->delayed_cmd_list); 1422 INIT_LIST_HEAD(&dev->delayed_cmd_list);
1394 INIT_LIST_HEAD(&dev->state_list); 1423 INIT_LIST_HEAD(&dev->state_list);
1395 INIT_LIST_HEAD(&dev->qf_cmd_list); 1424 INIT_LIST_HEAD(&dev->qf_cmd_list);
1425 INIT_LIST_HEAD(&dev->g_dev_node);
1396 spin_lock_init(&dev->stats_lock); 1426 spin_lock_init(&dev->stats_lock);
1397 spin_lock_init(&dev->execute_task_lock); 1427 spin_lock_init(&dev->execute_task_lock);
1398 spin_lock_init(&dev->delayed_cmd_lock); 1428 spin_lock_init(&dev->delayed_cmd_lock);
@@ -1400,6 +1430,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
1400 spin_lock_init(&dev->se_port_lock); 1430 spin_lock_init(&dev->se_port_lock);
1401 spin_lock_init(&dev->se_tmr_lock); 1431 spin_lock_init(&dev->se_tmr_lock);
1402 spin_lock_init(&dev->qf_cmd_lock); 1432 spin_lock_init(&dev->qf_cmd_lock);
1433 sema_init(&dev->caw_sem, 1);
1403 atomic_set(&dev->dev_ordered_id, 0); 1434 atomic_set(&dev->dev_ordered_id, 0);
1404 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list); 1435 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
1405 spin_lock_init(&dev->t10_wwn.t10_vpd_lock); 1436 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
@@ -1423,6 +1454,8 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
1423 dev->dev_attrib.emulate_tas = DA_EMULATE_TAS; 1454 dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
1424 dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU; 1455 dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
1425 dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS; 1456 dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
1457 dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
1458 dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
1426 dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS; 1459 dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
1427 dev->dev_attrib.is_nonrot = DA_IS_NONROT; 1460 dev->dev_attrib.is_nonrot = DA_IS_NONROT;
1428 dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD; 1461 dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
@@ -1510,6 +1543,11 @@ int target_configure_device(struct se_device *dev)
1510 spin_lock(&hba->device_lock); 1543 spin_lock(&hba->device_lock);
1511 hba->dev_count++; 1544 hba->dev_count++;
1512 spin_unlock(&hba->device_lock); 1545 spin_unlock(&hba->device_lock);
1546
1547 mutex_lock(&g_device_mutex);
1548 list_add_tail(&dev->g_dev_node, &g_device_list);
1549 mutex_unlock(&g_device_mutex);
1550
1513 return 0; 1551 return 0;
1514 1552
1515out_free_alua: 1553out_free_alua:
@@ -1528,6 +1566,10 @@ void target_free_device(struct se_device *dev)
1528 if (dev->dev_flags & DF_CONFIGURED) { 1566 if (dev->dev_flags & DF_CONFIGURED) {
1529 destroy_workqueue(dev->tmr_wq); 1567 destroy_workqueue(dev->tmr_wq);
1530 1568
1569 mutex_lock(&g_device_mutex);
1570 list_del(&dev->g_dev_node);
1571 mutex_unlock(&g_device_mutex);
1572
1531 spin_lock(&hba->device_lock); 1573 spin_lock(&hba->device_lock);
1532 hba->dev_count--; 1574 hba->dev_count--;
1533 spin_unlock(&hba->device_lock); 1575 spin_unlock(&hba->device_lock);
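The new g_device_list/g_device_mutex pair gives the EXTENDED_COPY code a fabric-independent way to find a configured backend device; target_configure_device() and target_free_device() keep the list current. A minimal traversal sketch under that assumption (the match callback is hypothetical):

	static struct se_device *xcopy_find_dev(bool (*match)(struct se_device *))
	{
		struct se_device *se_dev, *found = NULL;

		mutex_lock(&g_device_mutex);
		list_for_each_entry(se_dev, &g_device_list, g_dev_node) {
			if (match(se_dev)) {
				found = se_dev;
				break;
			}
		}
		mutex_unlock(&g_device_mutex);

		return found;
	}

The real lookup in target_core_xcopy.c below additionally takes a configfs dependency on the located device before dropping the mutex.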
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index eb56eb129563..3503996d7d10 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -4,7 +4,7 @@
4 * This file contains generic fabric module configfs infrastructure for 4 * This file contains generic fabric module configfs infrastructure for
5 * TCM v4.x code 5 * TCM v4.x code
6 * 6 *
7 * (c) Copyright 2010-2012 RisingTide Systems LLC. 7 * (c) Copyright 2010-2013 Datera, Inc.
8 * 8 *
9 * Nicholas A. Bellinger <nab@linux-iscsi.org> 9 * Nicholas A. Bellinger <nab@linux-iscsi.org>
10* 10*
@@ -189,9 +189,11 @@ static ssize_t target_fabric_mappedlun_store_write_protect(
189 struct se_node_acl *se_nacl = lacl->se_lun_nacl; 189 struct se_node_acl *se_nacl = lacl->se_lun_nacl;
190 struct se_portal_group *se_tpg = se_nacl->se_tpg; 190 struct se_portal_group *se_tpg = se_nacl->se_tpg;
191 unsigned long op; 191 unsigned long op;
192 int ret;
192 193
193 if (strict_strtoul(page, 0, &op)) 194 ret = kstrtoul(page, 0, &op);
194 return -EINVAL; 195 if (ret)
196 return ret;
195 197
196 if ((op != 1) && (op != 0)) 198 if ((op != 1) && (op != 0))
197 return -EINVAL; 199 return -EINVAL;
@@ -350,7 +352,10 @@ static struct config_group *target_fabric_make_mappedlun(
350 * Determine the Mapped LUN value. This is what the SCSI Initiator 352 * Determine the Mapped LUN value. This is what the SCSI Initiator
351 * Port will actually see. 353 * Port will actually see.
352 */ 354 */
353 if (strict_strtoul(buf + 4, 0, &mapped_lun) || mapped_lun > UINT_MAX) { 355 ret = kstrtoul(buf + 4, 0, &mapped_lun);
356 if (ret)
357 goto out;
358 if (mapped_lun > UINT_MAX) {
354 ret = -EINVAL; 359 ret = -EINVAL;
355 goto out; 360 goto out;
356 } 361 }
@@ -875,7 +880,10 @@ static struct config_group *target_fabric_make_lun(
875 " \"lun_$LUN_NUMBER\"\n"); 880 " \"lun_$LUN_NUMBER\"\n");
876 return ERR_PTR(-EINVAL); 881 return ERR_PTR(-EINVAL);
877 } 882 }
878 if (strict_strtoul(name + 4, 0, &unpacked_lun) || unpacked_lun > UINT_MAX) 883 errno = kstrtoul(name + 4, 0, &unpacked_lun);
884 if (errno)
885 return ERR_PTR(errno);
886 if (unpacked_lun > UINT_MAX)
879 return ERR_PTR(-EINVAL); 887 return ERR_PTR(-EINVAL);
880 888
881 lun = core_get_lun_from_tpg(se_tpg, unpacked_lun); 889 lun = core_get_lun_from_tpg(se_tpg, unpacked_lun);
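Splitting the old combined strict_strtoul()-or-range test also changes what callers of these ->make_*() methods see: a parse failure now comes back as ERR_PTR() of the kstrtoul() errno, while only the range check still maps to -EINVAL. The pattern, condensed:

	unsigned long unpacked_lun;
	int ret;

	ret = kstrtoul(name + 4, 0, &unpacked_lun);
	if (ret)
		return ERR_PTR(ret);		/* e.g. -EINVAL or -ERANGE */
	if (unpacked_lun > UINT_MAX)
		return ERR_PTR(-EINVAL);	/* out of LUN range */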
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index 687b0b0a4aa6..0d1cf8b4f49f 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -4,7 +4,7 @@
4 * This file contains generic high level protocol identifier and PR 4 * This file contains generic high level protocol identifier and PR
5 * handlers for TCM fabric modules 5 * handlers for TCM fabric modules
6 * 6 *
7 * (c) Copyright 2010-2012 RisingTide Systems LLC. 7 * (c) Copyright 2010-2013 Datera, Inc.
8 * 8 *
9 * Nicholas A. Bellinger <nab@linux-iscsi.org> 9 * Nicholas A. Bellinger <nab@linux-iscsi.org>
10 * 10 *
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index b11890d85120..b662f89dedac 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * This file contains the Storage Engine <-> FILEIO transport specific functions 4 * This file contains the Storage Engine <-> FILEIO transport specific functions
5 * 5 *
6 * (c) Copyright 2005-2012 RisingTide Systems LLC. 6 * (c) Copyright 2005-2013 Datera, Inc.
7 * 7 *
8 * Nicholas A. Bellinger <nab@kernel.org> 8 * Nicholas A. Bellinger <nab@kernel.org>
9 * 9 *
@@ -547,11 +547,9 @@ fd_execute_unmap(struct se_cmd *cmd)
547} 547}
548 548
549static sense_reason_t 549static sense_reason_t
550fd_execute_rw(struct se_cmd *cmd) 550fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
551 enum dma_data_direction data_direction)
551{ 552{
552 struct scatterlist *sgl = cmd->t_data_sg;
553 u32 sgl_nents = cmd->t_data_nents;
554 enum dma_data_direction data_direction = cmd->data_direction;
555 struct se_device *dev = cmd->se_dev; 553 struct se_device *dev = cmd->se_dev;
556 int ret = 0; 554 int ret = 0;
557 555
@@ -635,10 +633,10 @@ static ssize_t fd_set_configfs_dev_params(struct se_device *dev,
635 ret = -ENOMEM; 633 ret = -ENOMEM;
636 break; 634 break;
637 } 635 }
638 ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size); 636 ret = kstrtoull(arg_p, 0, &fd_dev->fd_dev_size);
639 kfree(arg_p); 637 kfree(arg_p);
640 if (ret < 0) { 638 if (ret < 0) {
641 pr_err("strict_strtoull() failed for" 639 pr_err("kstrtoull() failed for"
642 " fd_dev_size=\n"); 640 " fd_dev_size=\n");
643 goto out; 641 goto out;
644 } 642 }
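The fd_execute_rw() signature change (mirrored for iblock and rd below) is what allows COMPARE_AND_WRITE to reuse the backend I/O path: the SGL, nents and DMA direction become explicit parameters instead of being read from the se_cmd. A sketch of the resulting hook shape, with the normal-path wrapper that preserves the old behavior:

	typedef sense_reason_t (*execute_rw_fn)(struct se_cmd *cmd,
						struct scatterlist *sgl,
						u32 sgl_nents,
						enum dma_data_direction dir);

	/* Ordinary READ/WRITE path: pass the command's own data SGL. */
	static sense_reason_t sbc_execute_rw(struct se_cmd *cmd)
	{
		return cmd->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
				       cmd->data_direction);
	}

COMPARE_AND_WRITE calls the same hook with cmd->t_bidi_data_sg and DMA_FROM_DEVICE to fetch the data to compare, as seen in target_core_sbc.c below.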
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index d2616cd48f1e..a25051a37dd7 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * This file contains the TCM HBA Transport related functions. 4 * This file contains the TCM HBA Transport related functions.
5 * 5 *
6 * (c) Copyright 2003-2012 RisingTide Systems LLC. 6 * (c) Copyright 2003-2013 Datera, Inc.
7 * 7 *
8 * Nicholas A. Bellinger <nab@kernel.org> 8 * Nicholas A. Bellinger <nab@kernel.org>
9 * 9 *
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index aa1620abec6d..b9a3394fe479 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -4,7 +4,7 @@
4 * This file contains the Storage Engine <-> Linux BlockIO transport 4 * This file contains the Storage Engine <-> Linux BlockIO transport
5 * specific functions. 5 * specific functions.
6 * 6 *
7 * (c) Copyright 2003-2012 RisingTide Systems LLC. 7 * (c) Copyright 2003-2013 Datera, Inc.
8 * 8 *
9 * Nicholas A. Bellinger <nab@kernel.org> 9 * Nicholas A. Bellinger <nab@kernel.org>
10 * 10 *
@@ -536,10 +536,10 @@ static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
536 ret = -ENOMEM; 536 ret = -ENOMEM;
537 break; 537 break;
538 } 538 }
539 ret = strict_strtoul(arg_p, 0, &tmp_readonly); 539 ret = kstrtoul(arg_p, 0, &tmp_readonly);
540 kfree(arg_p); 540 kfree(arg_p);
541 if (ret < 0) { 541 if (ret < 0) {
542 pr_err("strict_strtoul() failed for" 542 pr_err("kstrtoul() failed for"
543 " readonly=\n"); 543 " readonly=\n");
544 goto out; 544 goto out;
545 } 545 }
@@ -587,11 +587,9 @@ static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
587} 587}
588 588
589static sense_reason_t 589static sense_reason_t
590iblock_execute_rw(struct se_cmd *cmd) 590iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
591 enum dma_data_direction data_direction)
591{ 592{
592 struct scatterlist *sgl = cmd->t_data_sg;
593 u32 sgl_nents = cmd->t_data_nents;
594 enum dma_data_direction data_direction = cmd->data_direction;
595 struct se_device *dev = cmd->se_dev; 593 struct se_device *dev = cmd->se_dev;
596 struct iblock_req *ibr; 594 struct iblock_req *ibr;
597 struct bio *bio; 595 struct bio *bio;
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 18d49df4d0ac..579128abe3f5 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -33,6 +33,8 @@ int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int);
33int se_dev_set_emulate_tas(struct se_device *, int); 33int se_dev_set_emulate_tas(struct se_device *, int);
34int se_dev_set_emulate_tpu(struct se_device *, int); 34int se_dev_set_emulate_tpu(struct se_device *, int);
35int se_dev_set_emulate_tpws(struct se_device *, int); 35int se_dev_set_emulate_tpws(struct se_device *, int);
36int se_dev_set_emulate_caw(struct se_device *, int);
37int se_dev_set_emulate_3pc(struct se_device *, int);
36int se_dev_set_enforce_pr_isids(struct se_device *, int); 38int se_dev_set_enforce_pr_isids(struct se_device *, int);
37int se_dev_set_is_nonrot(struct se_device *, int); 39int se_dev_set_is_nonrot(struct se_device *, int);
38int se_dev_set_emulate_rest_reord(struct se_device *dev, int); 40int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index bd78faf67c6b..d1ae4c5c3ffd 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -4,7 +4,7 @@
4 * This file contains SPC-3 compliant persistent reservations and 4 * This file contains SPC-3 compliant persistent reservations and
5 * legacy SPC-2 reservations with compatible reservation handling (CRH=1) 5 * legacy SPC-2 reservations with compatible reservation handling (CRH=1)
6 * 6 *
7 * (c) Copyright 2009-2012 RisingTide Systems LLC. 7 * (c) Copyright 2009-2013 Datera, Inc.
8 * 8 *
9 * Nicholas A. Bellinger <nab@kernel.org> 9 * Nicholas A. Bellinger <nab@kernel.org>
10 * 10 *
@@ -1949,7 +1949,7 @@ static int __core_scsi3_write_aptpl_to_file(
1949 pr_debug("Error writing APTPL metadata file: %s\n", path); 1949 pr_debug("Error writing APTPL metadata file: %s\n", path);
1950 fput(file); 1950 fput(file);
1951 1951
1952 return ret ? -EIO : 0; 1952 return (ret < 0) ? -EIO : 0;
1953} 1953}
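The one-line APTPL fix above matters because the write helper follows the usual VFS convention of returning the number of bytes written on success and a negative errno on failure; testing ret for any non-zero value therefore reported every successful write-out as -EIO. The corrected shape, under that assumption:

	/* ret: > 0 bytes written, < 0 errno -- only the latter is a failure */
	return (ret < 0) ? -EIO : 0;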
1954 1954
1955/* 1955/*
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index e992b27aa090..551c96ca60ac 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * This file contains the generic target mode <-> Linux SCSI subsystem plugin. 4 * This file contains the generic target mode <-> Linux SCSI subsystem plugin.
5 * 5 *
6 * (c) Copyright 2003-2012 RisingTide Systems LLC. 6 * (c) Copyright 2003-2013 Datera, Inc.
7 * 7 *
8 * Nicholas A. Bellinger <nab@kernel.org> 8 * Nicholas A. Bellinger <nab@kernel.org>
9 * 9 *
@@ -1050,9 +1050,8 @@ pscsi_execute_cmd(struct se_cmd *cmd)
1050 req = blk_get_request(pdv->pdv_sd->request_queue, 1050 req = blk_get_request(pdv->pdv_sd->request_queue,
1051 (data_direction == DMA_TO_DEVICE), 1051 (data_direction == DMA_TO_DEVICE),
1052 GFP_KERNEL); 1052 GFP_KERNEL);
1053 if (!req || IS_ERR(req)) { 1053 if (!req) {
1054 pr_err("PSCSI: blk_get_request() failed: %ld\n", 1054 pr_err("PSCSI: blk_get_request() failed\n");
1055 req ? IS_ERR(req) : -ENOMEM);
1056 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1055 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1057 goto fail; 1056 goto fail;
1058 } 1057 }
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 51127d15d5c5..131327ac7f5b 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -4,7 +4,7 @@
4 * This file contains the Storage Engine <-> Ramdisk transport 4 * This file contains the Storage Engine <-> Ramdisk transport
5 * specific functions. 5 * specific functions.
6 * 6 *
7 * (c) Copyright 2003-2012 RisingTide Systems LLC. 7 * (c) Copyright 2003-2013 Datera, Inc.
8 * 8 *
9 * Nicholas A. Bellinger <nab@kernel.org> 9 * Nicholas A. Bellinger <nab@kernel.org>
10 * 10 *
@@ -280,11 +280,9 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
280} 280}
281 281
282static sense_reason_t 282static sense_reason_t
283rd_execute_rw(struct se_cmd *cmd) 283rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
284 enum dma_data_direction data_direction)
284{ 285{
285 struct scatterlist *sgl = cmd->t_data_sg;
286 u32 sgl_nents = cmd->t_data_nents;
287 enum dma_data_direction data_direction = cmd->data_direction;
288 struct se_device *se_dev = cmd->se_dev; 286 struct se_device *se_dev = cmd->se_dev;
289 struct rd_dev *dev = RD_DEV(se_dev); 287 struct rd_dev *dev = RD_DEV(se_dev);
290 struct rd_dev_sg_table *table; 288 struct rd_dev_sg_table *table;
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 8a462773d0c8..6c17295e8d7c 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * SCSI Block Commands (SBC) parsing and emulation. 2 * SCSI Block Commands (SBC) parsing and emulation.
3 * 3 *
4 * (c) Copyright 2002-2012 RisingTide Systems LLC. 4 * (c) Copyright 2002-2013 Datera, Inc.
5 * 5 *
6 * Nicholas A. Bellinger <nab@kernel.org> 6 * Nicholas A. Bellinger <nab@kernel.org>
7 * 7 *
@@ -25,6 +25,7 @@
25#include <linux/ratelimit.h> 25#include <linux/ratelimit.h>
26#include <asm/unaligned.h> 26#include <asm/unaligned.h>
27#include <scsi/scsi.h> 27#include <scsi/scsi.h>
28#include <scsi/scsi_tcq.h>
28 29
29#include <target/target_core_base.h> 30#include <target/target_core_base.h>
30#include <target/target_core_backend.h> 31#include <target/target_core_backend.h>
@@ -280,13 +281,13 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
280 return 0; 281 return 0;
281} 282}
282 283
283static void xdreadwrite_callback(struct se_cmd *cmd) 284static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd)
284{ 285{
285 unsigned char *buf, *addr; 286 unsigned char *buf, *addr;
286 struct scatterlist *sg; 287 struct scatterlist *sg;
287 unsigned int offset; 288 unsigned int offset;
288 int i; 289 sense_reason_t ret = TCM_NO_SENSE;
289 int count; 290 int i, count;
290 /* 291 /*
291 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command 292 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
292 * 293 *
@@ -301,7 +302,7 @@ static void xdreadwrite_callback(struct se_cmd *cmd)
301 buf = kmalloc(cmd->data_length, GFP_KERNEL); 302 buf = kmalloc(cmd->data_length, GFP_KERNEL);
302 if (!buf) { 303 if (!buf) {
303 pr_err("Unable to allocate xor_callback buf\n"); 304 pr_err("Unable to allocate xor_callback buf\n");
304 return; 305 return TCM_OUT_OF_RESOURCES;
305 } 306 }
306 /* 307 /*
307 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg 308 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
@@ -320,8 +321,10 @@ static void xdreadwrite_callback(struct se_cmd *cmd)
320 offset = 0; 321 offset = 0;
321 for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) { 322 for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
322 addr = kmap_atomic(sg_page(sg)); 323 addr = kmap_atomic(sg_page(sg));
323 if (!addr) 324 if (!addr) {
325 ret = TCM_OUT_OF_RESOURCES;
324 goto out; 326 goto out;
327 }
325 328
326 for (i = 0; i < sg->length; i++) 329 for (i = 0; i < sg->length; i++)
327 *(addr + sg->offset + i) ^= *(buf + offset + i); 330 *(addr + sg->offset + i) ^= *(buf + offset + i);
@@ -332,6 +335,193 @@ static void xdreadwrite_callback(struct se_cmd *cmd)
332 335
333out: 336out:
334 kfree(buf); 337 kfree(buf);
338 return ret;
339}
340
341static sense_reason_t
342sbc_execute_rw(struct se_cmd *cmd)
343{
344 return cmd->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
345 cmd->data_direction);
346}
347
348static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
349{
350 struct se_device *dev = cmd->se_dev;
351
352 cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
353 /*
354 * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
355 * before the original READ I/O submission.
356 */
357 up(&dev->caw_sem);
358
359 return TCM_NO_SENSE;
360}
361
362static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
363{
364 struct se_device *dev = cmd->se_dev;
365 struct scatterlist *write_sg = NULL, *sg;
366 unsigned char *buf, *addr;
367 struct sg_mapping_iter m;
368 unsigned int offset = 0, len;
369 unsigned int nlbas = cmd->t_task_nolb;
370 unsigned int block_size = dev->dev_attrib.block_size;
371 unsigned int compare_len = (nlbas * block_size);
372 sense_reason_t ret = TCM_NO_SENSE;
373 int rc, i;
374
375 /*
376 * Handle early failure in transport_generic_request_failure(),
 377	 * which will not have taken ->caw_sem yet..
378 */
379 if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
380 return TCM_NO_SENSE;
381
382 buf = kzalloc(cmd->data_length, GFP_KERNEL);
383 if (!buf) {
384 pr_err("Unable to allocate compare_and_write buf\n");
385 ret = TCM_OUT_OF_RESOURCES;
386 goto out;
387 }
388
389 write_sg = kzalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
390 GFP_KERNEL);
391 if (!write_sg) {
392 pr_err("Unable to allocate compare_and_write sg\n");
393 ret = TCM_OUT_OF_RESOURCES;
394 goto out;
395 }
396 /*
397 * Setup verify and write data payloads from total NumberLBAs.
398 */
399 rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf,
400 cmd->data_length);
401 if (!rc) {
402 pr_err("sg_copy_to_buffer() failed for compare_and_write\n");
403 ret = TCM_OUT_OF_RESOURCES;
404 goto out;
405 }
406 /*
 407	 * Compare the SCSI READ payload against the verify payload
408 */
409 for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) {
410 addr = (unsigned char *)kmap_atomic(sg_page(sg));
411 if (!addr) {
412 ret = TCM_OUT_OF_RESOURCES;
413 goto out;
414 }
415
416 len = min(sg->length, compare_len);
417
418 if (memcmp(addr, buf + offset, len)) {
419 pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n",
420 addr, buf + offset);
421 kunmap_atomic(addr);
422 goto miscompare;
423 }
424 kunmap_atomic(addr);
425
426 offset += len;
427 compare_len -= len;
428 if (!compare_len)
429 break;
430 }
431
432 i = 0;
433 len = cmd->t_task_nolb * block_size;
434 sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG);
435 /*
436 * Currently assumes NoLB=1 and SGLs are PAGE_SIZE..
437 */
438 while (len) {
439 sg_miter_next(&m);
440
441 if (block_size < PAGE_SIZE) {
442 sg_set_page(&write_sg[i], m.page, block_size,
443 block_size);
444 } else {
445 sg_miter_next(&m);
446 sg_set_page(&write_sg[i], m.page, block_size,
447 0);
448 }
449 len -= block_size;
450 i++;
451 }
452 sg_miter_stop(&m);
453 /*
454 * Save the original SGL + nents values before updating to new
455 * assignments, to be released in transport_free_pages() ->
456 * transport_reset_sgl_orig()
457 */
458 cmd->t_data_sg_orig = cmd->t_data_sg;
459 cmd->t_data_sg = write_sg;
460 cmd->t_data_nents_orig = cmd->t_data_nents;
461 cmd->t_data_nents = 1;
462
463 cmd->sam_task_attr = MSG_HEAD_TAG;
464 cmd->transport_complete_callback = compare_and_write_post;
465 /*
466 * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
467 * for submitting the adjusted SGL to write instance user-data.
468 */
469 cmd->execute_cmd = sbc_execute_rw;
470
471 spin_lock_irq(&cmd->t_state_lock);
472 cmd->t_state = TRANSPORT_PROCESSING;
473 cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
474 spin_unlock_irq(&cmd->t_state_lock);
475
476 __target_execute_cmd(cmd);
477
478 kfree(buf);
479 return ret;
480
481miscompare:
482 pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n",
483 dev->transport->name);
484 ret = TCM_MISCOMPARE_VERIFY;
485out:
486 /*
487 * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in
488 * sbc_compare_and_write() before the original READ I/O submission.
489 */
490 up(&dev->caw_sem);
491 kfree(write_sg);
492 kfree(buf);
493 return ret;
494}
495
496static sense_reason_t
497sbc_compare_and_write(struct se_cmd *cmd)
498{
499 struct se_device *dev = cmd->se_dev;
500 sense_reason_t ret;
501 int rc;
502 /*
503 * Submit the READ first for COMPARE_AND_WRITE to perform the
504 * comparision using SGLs at cmd->t_bidi_data_sg..
505 */
506 rc = down_interruptible(&dev->caw_sem);
507 if ((rc != 0) || signal_pending(current)) {
508 cmd->transport_complete_callback = NULL;
509 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
510 }
511
512 ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
513 DMA_FROM_DEVICE);
514 if (ret) {
515 cmd->transport_complete_callback = NULL;
516 up(&dev->caw_sem);
517 return ret;
518 }
519 /*
520 * Unlock of dev->caw_sem to occur in compare_and_write_callback()
521 * upon MISCOMPARE, or in compare_and_write_done() upon completion
522 * of WRITE instance user-data.
523 */
524 return TCM_NO_SENSE;
335} 525}
336 526
337sense_reason_t 527sense_reason_t
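Taken together, the functions added above implement COMPARE_AND_WRITE as a read-compare-write sequence serialized per device by ->caw_sem, so the operation behaves atomically with respect to other CAW commands on the same LUN. A condensed view of the sequencing (error paths elided; this is a sketch of the code above, not a new API):

	down_interruptible(&dev->caw_sem);		/* sbc_compare_and_write() */
	cmd->execute_rw(cmd, cmd->t_bidi_data_sg,	/* READ current LBA data */
			cmd->t_bidi_data_nents, DMA_FROM_DEVICE);

	/* READ completion -> compare_and_write_callback():
	 *   miscompare: up(&dev->caw_sem); return TCM_MISCOMPARE_VERIFY
	 *   match:      swap in the write half of t_data_sg and resubmit
	 *               through __target_execute_cmd()/sbc_execute_rw()
	 * WRITE completion -> compare_and_write_post():
	 */
	up(&dev->caw_sem);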
@@ -348,31 +538,36 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
348 sectors = transport_get_sectors_6(cdb); 538 sectors = transport_get_sectors_6(cdb);
349 cmd->t_task_lba = transport_lba_21(cdb); 539 cmd->t_task_lba = transport_lba_21(cdb);
350 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 540 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
351 cmd->execute_cmd = ops->execute_rw; 541 cmd->execute_rw = ops->execute_rw;
542 cmd->execute_cmd = sbc_execute_rw;
352 break; 543 break;
353 case READ_10: 544 case READ_10:
354 sectors = transport_get_sectors_10(cdb); 545 sectors = transport_get_sectors_10(cdb);
355 cmd->t_task_lba = transport_lba_32(cdb); 546 cmd->t_task_lba = transport_lba_32(cdb);
356 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 547 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
357 cmd->execute_cmd = ops->execute_rw; 548 cmd->execute_rw = ops->execute_rw;
549 cmd->execute_cmd = sbc_execute_rw;
358 break; 550 break;
359 case READ_12: 551 case READ_12:
360 sectors = transport_get_sectors_12(cdb); 552 sectors = transport_get_sectors_12(cdb);
361 cmd->t_task_lba = transport_lba_32(cdb); 553 cmd->t_task_lba = transport_lba_32(cdb);
362 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 554 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
363 cmd->execute_cmd = ops->execute_rw; 555 cmd->execute_rw = ops->execute_rw;
556 cmd->execute_cmd = sbc_execute_rw;
364 break; 557 break;
365 case READ_16: 558 case READ_16:
366 sectors = transport_get_sectors_16(cdb); 559 sectors = transport_get_sectors_16(cdb);
367 cmd->t_task_lba = transport_lba_64(cdb); 560 cmd->t_task_lba = transport_lba_64(cdb);
368 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 561 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
369 cmd->execute_cmd = ops->execute_rw; 562 cmd->execute_rw = ops->execute_rw;
563 cmd->execute_cmd = sbc_execute_rw;
370 break; 564 break;
371 case WRITE_6: 565 case WRITE_6:
372 sectors = transport_get_sectors_6(cdb); 566 sectors = transport_get_sectors_6(cdb);
373 cmd->t_task_lba = transport_lba_21(cdb); 567 cmd->t_task_lba = transport_lba_21(cdb);
374 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 568 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
375 cmd->execute_cmd = ops->execute_rw; 569 cmd->execute_rw = ops->execute_rw;
570 cmd->execute_cmd = sbc_execute_rw;
376 break; 571 break;
377 case WRITE_10: 572 case WRITE_10:
378 case WRITE_VERIFY: 573 case WRITE_VERIFY:
@@ -381,7 +576,8 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
381 if (cdb[1] & 0x8) 576 if (cdb[1] & 0x8)
382 cmd->se_cmd_flags |= SCF_FUA; 577 cmd->se_cmd_flags |= SCF_FUA;
383 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 578 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
384 cmd->execute_cmd = ops->execute_rw; 579 cmd->execute_rw = ops->execute_rw;
580 cmd->execute_cmd = sbc_execute_rw;
385 break; 581 break;
386 case WRITE_12: 582 case WRITE_12:
387 sectors = transport_get_sectors_12(cdb); 583 sectors = transport_get_sectors_12(cdb);
@@ -389,7 +585,8 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
389 if (cdb[1] & 0x8) 585 if (cdb[1] & 0x8)
390 cmd->se_cmd_flags |= SCF_FUA; 586 cmd->se_cmd_flags |= SCF_FUA;
391 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 587 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
392 cmd->execute_cmd = ops->execute_rw; 588 cmd->execute_rw = ops->execute_rw;
589 cmd->execute_cmd = sbc_execute_rw;
393 break; 590 break;
394 case WRITE_16: 591 case WRITE_16:
395 sectors = transport_get_sectors_16(cdb); 592 sectors = transport_get_sectors_16(cdb);
@@ -397,7 +594,8 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
397 if (cdb[1] & 0x8) 594 if (cdb[1] & 0x8)
398 cmd->se_cmd_flags |= SCF_FUA; 595 cmd->se_cmd_flags |= SCF_FUA;
399 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 596 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
400 cmd->execute_cmd = ops->execute_rw; 597 cmd->execute_rw = ops->execute_rw;
598 cmd->execute_cmd = sbc_execute_rw;
401 break; 599 break;
402 case XDWRITEREAD_10: 600 case XDWRITEREAD_10:
403 if (cmd->data_direction != DMA_TO_DEVICE || 601 if (cmd->data_direction != DMA_TO_DEVICE ||
@@ -411,7 +609,8 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
411 /* 609 /*
412 * Setup BIDI XOR callback to be run after I/O completion. 610 * Setup BIDI XOR callback to be run after I/O completion.
413 */ 611 */
414 cmd->execute_cmd = ops->execute_rw; 612 cmd->execute_rw = ops->execute_rw;
613 cmd->execute_cmd = sbc_execute_rw;
415 cmd->transport_complete_callback = &xdreadwrite_callback; 614 cmd->transport_complete_callback = &xdreadwrite_callback;
416 if (cdb[1] & 0x8) 615 if (cdb[1] & 0x8)
417 cmd->se_cmd_flags |= SCF_FUA; 616 cmd->se_cmd_flags |= SCF_FUA;
@@ -434,7 +633,8 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 434	 * Setup BIDI XOR callback to be run after I/O 633	 * Setup BIDI XOR callback to be run after I/O
435 * completion. 634 * completion.
436 */ 635 */
437 cmd->execute_cmd = ops->execute_rw; 636 cmd->execute_rw = ops->execute_rw;
637 cmd->execute_cmd = sbc_execute_rw;
438 cmd->transport_complete_callback = &xdreadwrite_callback; 638 cmd->transport_complete_callback = &xdreadwrite_callback;
439 if (cdb[1] & 0x8) 639 if (cdb[1] & 0x8)
440 cmd->se_cmd_flags |= SCF_FUA; 640 cmd->se_cmd_flags |= SCF_FUA;
@@ -461,6 +661,28 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
461 } 661 }
462 break; 662 break;
463 } 663 }
664 case COMPARE_AND_WRITE:
665 sectors = cdb[13];
666 /*
667 * Currently enforce COMPARE_AND_WRITE for a single sector
668 */
669 if (sectors > 1) {
670 pr_err("COMPARE_AND_WRITE contains NoLB: %u greater"
671 " than 1\n", sectors);
672 return TCM_INVALID_CDB_FIELD;
673 }
674 /*
675 * Double size because we have two buffers, note that
676 * zero is not an error..
677 */
678 size = 2 * sbc_get_size(cmd, sectors);
679 cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
680 cmd->t_task_nolb = sectors;
681 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE;
682 cmd->execute_rw = ops->execute_rw;
683 cmd->execute_cmd = sbc_compare_and_write;
684 cmd->transport_complete_callback = compare_and_write_callback;
685 break;
464 case READ_CAPACITY: 686 case READ_CAPACITY:
465 size = READ_CAP_LEN; 687 size = READ_CAP_LEN;
466 cmd->execute_cmd = sbc_emulate_readcapacity; 688 cmd->execute_cmd = sbc_emulate_readcapacity;
@@ -600,7 +822,8 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
600 return TCM_ADDRESS_OUT_OF_RANGE; 822 return TCM_ADDRESS_OUT_OF_RANGE;
601 } 823 }
602 824
603 size = sbc_get_size(cmd, sectors); 825 if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE))
826 size = sbc_get_size(cmd, sectors);
604 } 827 }
605 828
606 return target_cmd_size_check(cmd, size); 829 return target_cmd_size_check(cmd, size);
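For reference, the CDB fields consumed by the new COMPARE_AND_WRITE case line up with SBC-3: opcode 0x89, an 8-byte LBA at bytes 2-9, and NUMBER OF LOGICAL BLOCKS at byte 13 (limited to 1 by this emulation). A hypothetical initiator-side sketch of a conforming CDB (lba is a placeholder variable):

	unsigned char cdb[16] = { 0 };

	cdb[0] = 0x89;				/* COMPARE AND WRITE (16) */
	put_unaligned_be64(lba, &cdb[2]);	/* starting LBA */
	cdb[13] = 1;				/* NoLB: single sector only */

	/* Data-out buffer: verify data followed by write data, so the
	 * transfer length is 2 * block_size for one logical block.
	 */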
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 9fabbf7214cd..074539558a54 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * SCSI Primary Commands (SPC) parsing and emulation. 2 * SCSI Primary Commands (SPC) parsing and emulation.
3 * 3 *
4 * (c) Copyright 2002-2012 RisingTide Systems LLC. 4 * (c) Copyright 2002-2013 Datera, Inc.
5 * 5 *
6 * Nicholas A. Bellinger <nab@kernel.org> 6 * Nicholas A. Bellinger <nab@kernel.org>
7 * 7 *
@@ -35,7 +35,7 @@
35#include "target_core_alua.h" 35#include "target_core_alua.h"
36#include "target_core_pr.h" 36#include "target_core_pr.h"
37#include "target_core_ua.h" 37#include "target_core_ua.h"
38 38#include "target_core_xcopy.h"
39 39
40static void spc_fill_alua_data(struct se_port *port, unsigned char *buf) 40static void spc_fill_alua_data(struct se_port *port, unsigned char *buf)
41{ 41{
@@ -95,6 +95,12 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
95 */ 95 */
96 spc_fill_alua_data(lun->lun_sep, buf); 96 spc_fill_alua_data(lun->lun_sep, buf);
97 97
98 /*
99 * Set Third-Party Copy (3PC) bit to indicate support for EXTENDED_COPY
100 */
101 if (dev->dev_attrib.emulate_3pc)
102 buf[5] |= 0x8;
103
98 buf[7] = 0x2; /* CmdQue=1 */ 104 buf[7] = 0x2; /* CmdQue=1 */
99 105
100 memcpy(&buf[8], "LIO-ORG ", 8); 106 memcpy(&buf[8], "LIO-ORG ", 8);
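With emulate_3pc enabled, the 3PC bit (byte 5, bit 3) of standard INQUIRY data advertises EXTENDED_COPY support. A sketch of the corresponding initiator-side probe:

	/* Returns true if the standard INQUIRY data advertises
	 * third-party copy (3PC) support.
	 */
	static bool inquiry_supports_3pc(const unsigned char *inq)
	{
		return (inq[5] & 0x08) != 0;
	}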
@@ -129,8 +135,8 @@ spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
129 return 0; 135 return 0;
130} 136}
131 137
132static void spc_parse_naa_6h_vendor_specific(struct se_device *dev, 138void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
133 unsigned char *buf) 139 unsigned char *buf)
134{ 140{
135 unsigned char *p = &dev->t10_wwn.unit_serial[0]; 141 unsigned char *p = &dev->t10_wwn.unit_serial[0];
136 int cnt; 142 int cnt;
@@ -460,6 +466,11 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
460 466
461 /* Set WSNZ to 1 */ 467 /* Set WSNZ to 1 */
462 buf[4] = 0x01; 468 buf[4] = 0x01;
469 /*
470 * Set MAXIMUM COMPARE AND WRITE LENGTH
471 */
472 if (dev->dev_attrib.emulate_caw)
473 buf[5] = 0x01;
463 474
464 /* 475 /*
465 * Set OPTIMAL TRANSFER LENGTH GRANULARITY 476 * Set OPTIMAL TRANSFER LENGTH GRANULARITY
@@ -1250,8 +1261,14 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
1250 *size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; 1261 *size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
1251 break; 1262 break;
1252 case EXTENDED_COPY: 1263 case EXTENDED_COPY:
1253 case READ_ATTRIBUTE: 1264 *size = get_unaligned_be32(&cdb[10]);
1265 cmd->execute_cmd = target_do_xcopy;
1266 break;
1254 case RECEIVE_COPY_RESULTS: 1267 case RECEIVE_COPY_RESULTS:
1268 *size = get_unaligned_be32(&cdb[10]);
1269 cmd->execute_cmd = target_do_receive_copy_results;
1270 break;
1271 case READ_ATTRIBUTE:
1255 case WRITE_ATTRIBUTE: 1272 case WRITE_ATTRIBUTE:
1256 *size = (cdb[10] << 24) | (cdb[11] << 16) | 1273 *size = (cdb[10] << 24) | (cdb[11] << 16) |
1257 (cdb[12] << 8) | cdb[13]; 1274 (cdb[12] << 8) | cdb[13];
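EXTENDED_COPY (opcode 0x83) and RECEIVE_COPY_RESULTS (opcode 0x84) both carry their parameter list length in CDB bytes 10-13, which is what the two new get_unaligned_be32() calls read. A hypothetical initiator-side sketch for the EXTENDED_COPY side (param_list_len is a placeholder):

	unsigned char cdb[16] = { 0 };

	cdb[0] = 0x83;					/* EXTENDED COPY */
	put_unaligned_be32(param_list_len, &cdb[10]);	/* list length */

	/* The data-out buffer then carries the parameter list header,
	 * target descriptors (0xE4) and segment descriptors (0x02)
	 * parsed by target_core_xcopy.c below.
	 */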
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index d154ce797180..9c642e02cba1 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -4,7 +4,7 @@
4 * Modern ConfigFS group context specific statistics based on original 4 * Modern ConfigFS group context specific statistics based on original
5 * target_core_mib.c code 5 * target_core_mib.c code
6 * 6 *
7 * (c) Copyright 2006-2012 RisingTide Systems LLC. 7 * (c) Copyright 2006-2013 Datera, Inc.
8 * 8 *
9 * Nicholas A. Bellinger <nab@linux-iscsi.org> 9 * Nicholas A. Bellinger <nab@linux-iscsi.org>
10 * 10 *
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 0d7cacb91107..250009909d49 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * This file contains SPC-3 task management infrastructure 4 * This file contains SPC-3 task management infrastructure
5 * 5 *
6 * (c) Copyright 2009-2012 RisingTide Systems LLC. 6 * (c) Copyright 2009-2013 Datera, Inc.
7 * 7 *
8 * Nicholas A. Bellinger <nab@kernel.org> 8 * Nicholas A. Bellinger <nab@kernel.org>
9 * 9 *
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index aac9d2727e3c..b9a6ec0aa5fe 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * This file contains generic Target Portal Group related functions. 4 * This file contains generic Target Portal Group related functions.
5 * 5 *
6 * (c) Copyright 2002-2012 RisingTide Systems LLC. 6 * (c) Copyright 2002-2013 Datera, Inc.
7 * 7 *
8 * Nicholas A. Bellinger <nab@kernel.org> 8 * Nicholas A. Bellinger <nab@kernel.org>
9 * 9 *
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index d8e49d79f8cc..84747cc1aac0 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * This file contains the Generic Target Engine Core. 4 * This file contains the Generic Target Engine Core.
5 * 5 *
6 * (c) Copyright 2002-2012 RisingTide Systems LLC. 6 * (c) Copyright 2002-2013 Datera, Inc.
7 * 7 *
8 * Nicholas A. Bellinger <nab@kernel.org> 8 * Nicholas A. Bellinger <nab@kernel.org>
9 * 9 *
@@ -67,7 +67,6 @@ struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
67static void transport_complete_task_attr(struct se_cmd *cmd); 67static void transport_complete_task_attr(struct se_cmd *cmd);
68static void transport_handle_queue_full(struct se_cmd *cmd, 68static void transport_handle_queue_full(struct se_cmd *cmd,
69 struct se_device *dev); 69 struct se_device *dev);
70static int transport_generic_get_mem(struct se_cmd *cmd);
71static int transport_put_cmd(struct se_cmd *cmd); 70static int transport_put_cmd(struct se_cmd *cmd);
72static void target_complete_ok_work(struct work_struct *work); 71static void target_complete_ok_work(struct work_struct *work);
73 72
@@ -232,6 +231,50 @@ struct se_session *transport_init_session(void)
232} 231}
233EXPORT_SYMBOL(transport_init_session); 232EXPORT_SYMBOL(transport_init_session);
234 233
234int transport_alloc_session_tags(struct se_session *se_sess,
235 unsigned int tag_num, unsigned int tag_size)
236{
237 int rc;
238
239 se_sess->sess_cmd_map = kzalloc(tag_num * tag_size, GFP_KERNEL);
240 if (!se_sess->sess_cmd_map) {
241 pr_err("Unable to allocate se_sess->sess_cmd_map\n");
242 return -ENOMEM;
243 }
244
245 rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num);
246 if (rc < 0) {
247 pr_err("Unable to init se_sess->sess_tag_pool,"
248 " tag_num: %u\n", tag_num);
249 kfree(se_sess->sess_cmd_map);
250 se_sess->sess_cmd_map = NULL;
251 return -ENOMEM;
252 }
253
254 return 0;
255}
256EXPORT_SYMBOL(transport_alloc_session_tags);
257
258struct se_session *transport_init_session_tags(unsigned int tag_num,
259 unsigned int tag_size)
260{
261 struct se_session *se_sess;
262 int rc;
263
264 se_sess = transport_init_session();
265 if (IS_ERR(se_sess))
266 return se_sess;
267
268 rc = transport_alloc_session_tags(se_sess, tag_num, tag_size);
269 if (rc < 0) {
270 transport_free_session(se_sess);
271 return ERR_PTR(-ENOMEM);
272 }
273
274 return se_sess;
275}
276EXPORT_SYMBOL(transport_init_session_tags);
277
235/* 278/*
236 * Called with spin_lock_irqsave(&struct se_portal_group->session_lock called. 279 * Called with spin_lock_irqsave(&struct se_portal_group->session_lock called.
237 */ 280 */
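transport_init_session_tags() is the entry point fabric drivers use for the new per-cpu IDA pre-allocation: every per-command descriptor is carved out of sess_cmd_map up front and addressed by tag, so the I/O fast path allocates nothing. A usage sketch (struct my_fabric_cmd and the tag count are hypothetical):

	struct my_fabric_cmd {
		struct se_cmd se_cmd;
		/* fabric descriptor, SGLs, page pointers, ... */
	};

	struct se_session *se_sess;
	struct my_fabric_cmd *cmd;
	int tag;

	se_sess = transport_init_session_tags(128,
					      sizeof(struct my_fabric_cmd));

	/* Per I/O: grab a tag, index the preallocated slot. */
	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_KERNEL);
	cmd = &((struct my_fabric_cmd *)se_sess->sess_cmd_map)[tag];

	/* ...submit and complete the command... */
	percpu_ida_free(&se_sess->sess_tag_pool, tag);

transport_free_session() below tears the tag pool and command map down when the session goes away.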
@@ -367,6 +410,10 @@ EXPORT_SYMBOL(transport_deregister_session_configfs);
367 410
368void transport_free_session(struct se_session *se_sess) 411void transport_free_session(struct se_session *se_sess)
369{ 412{
413 if (se_sess->sess_cmd_map) {
414 percpu_ida_destroy(&se_sess->sess_tag_pool);
415 kfree(se_sess->sess_cmd_map);
416 }
370 kmem_cache_free(se_sess_cache, se_sess); 417 kmem_cache_free(se_sess_cache, se_sess);
371} 418}
372EXPORT_SYMBOL(transport_free_session); 419EXPORT_SYMBOL(transport_free_session);
@@ -1206,7 +1253,7 @@ int transport_handle_cdb_direct(
1206} 1253}
1207EXPORT_SYMBOL(transport_handle_cdb_direct); 1254EXPORT_SYMBOL(transport_handle_cdb_direct);
1208 1255
1209static sense_reason_t 1256sense_reason_t
1210transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl, 1257transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
1211 u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count) 1258 u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
1212{ 1259{
@@ -1512,6 +1559,13 @@ void transport_generic_request_failure(struct se_cmd *cmd,
1512 * For SAM Task Attribute emulation for failed struct se_cmd 1559 * For SAM Task Attribute emulation for failed struct se_cmd
1513 */ 1560 */
1514 transport_complete_task_attr(cmd); 1561 transport_complete_task_attr(cmd);
1562 /*
1563 * Handle special case for COMPARE_AND_WRITE failure, where the
1564 * callback is expected to drop the per device ->caw_mutex.
1565 */
1566 if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
1567 cmd->transport_complete_callback)
1568 cmd->transport_complete_callback(cmd);
1515 1569
1516 switch (sense_reason) { 1570 switch (sense_reason) {
1517 case TCM_NON_EXISTENT_LUN: 1571 case TCM_NON_EXISTENT_LUN:
@@ -1579,7 +1633,7 @@ queue_full:
1579} 1633}
1580EXPORT_SYMBOL(transport_generic_request_failure); 1634EXPORT_SYMBOL(transport_generic_request_failure);
1581 1635
1582static void __target_execute_cmd(struct se_cmd *cmd) 1636void __target_execute_cmd(struct se_cmd *cmd)
1583{ 1637{
1584 sense_reason_t ret; 1638 sense_reason_t ret;
1585 1639
@@ -1784,7 +1838,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
1784 ret = cmd->se_tfo->queue_data_in(cmd); 1838 ret = cmd->se_tfo->queue_data_in(cmd);
1785 break; 1839 break;
1786 case DMA_TO_DEVICE: 1840 case DMA_TO_DEVICE:
1787 if (cmd->t_bidi_data_sg) { 1841 if (cmd->se_cmd_flags & SCF_BIDI) {
1788 ret = cmd->se_tfo->queue_data_in(cmd); 1842 ret = cmd->se_tfo->queue_data_in(cmd);
1789 if (ret < 0) 1843 if (ret < 0)
1790 break; 1844 break;
@@ -1856,10 +1910,25 @@ static void target_complete_ok_work(struct work_struct *work)
1856 } 1910 }
1857 /* 1911 /*
1858 * Check for a callback, used by amongst other things 1912 * Check for a callback, used by amongst other things
1859 * XDWRITE_READ_10 emulation. 1913 * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation.
1860 */ 1914 */
1861 if (cmd->transport_complete_callback) 1915 if (cmd->transport_complete_callback) {
1862 cmd->transport_complete_callback(cmd); 1916 sense_reason_t rc;
1917
1918 rc = cmd->transport_complete_callback(cmd);
1919 if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) {
1920 return;
1921 } else if (rc) {
1922 ret = transport_send_check_condition_and_sense(cmd,
1923 rc, 0);
1924 if (ret == -EAGAIN || ret == -ENOMEM)
1925 goto queue_full;
1926
1927 transport_lun_remove_cmd(cmd);
1928 transport_cmd_check_stop_to_fabric(cmd);
1929 return;
1930 }
1931 }
1863 1932
1864 switch (cmd->data_direction) { 1933 switch (cmd->data_direction) {
1865 case DMA_FROM_DEVICE: 1934 case DMA_FROM_DEVICE:
@@ -1885,7 +1954,7 @@ static void target_complete_ok_work(struct work_struct *work)
1885 /* 1954 /*
1886 * Check if we need to send READ payload for BIDI-COMMAND 1955 * Check if we need to send READ payload for BIDI-COMMAND
1887 */ 1956 */
1888 if (cmd->t_bidi_data_sg) { 1957 if (cmd->se_cmd_flags & SCF_BIDI) {
1889 spin_lock(&cmd->se_lun->lun_sep_lock); 1958 spin_lock(&cmd->se_lun->lun_sep_lock);
1890 if (cmd->se_lun->lun_sep) { 1959 if (cmd->se_lun->lun_sep) {
1891 cmd->se_lun->lun_sep->sep_stats.tx_data_octets += 1960 cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
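The completion rework above turns ->transport_complete_callback() into a tri-state contract: a non-zero sense_reason_t triggers a check condition, TCM_NO_SENSE with SCF_COMPARE_AND_WRITE_POST set completes normally, and TCM_NO_SENSE without that flag means the callback took over the command (the COMPARE_AND_WRITE resubmission case) and target_complete_ok_work() must simply return. A sketch of a callback honoring that contract (hypothetical, for illustration only; payload_is_bad() is a stand-in check):

	static sense_reason_t my_complete_cb(struct se_cmd *cmd)
	{
		if (payload_is_bad(cmd))		/* hypothetical check */
			return TCM_MISCOMPARE_VERIFY;	/* -> check condition */

		/* Done with the command: let completion proceed. */
		cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
		return TCM_NO_SENSE;
	}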
@@ -1930,10 +1999,29 @@ static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
1930 kfree(sgl); 1999 kfree(sgl);
1931} 2000}
1932 2001
2002static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
2003{
2004 /*
2005 * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE
2006 * emulation, and free + reset pointers if necessary..
2007 */
2008 if (!cmd->t_data_sg_orig)
2009 return;
2010
2011 kfree(cmd->t_data_sg);
2012 cmd->t_data_sg = cmd->t_data_sg_orig;
2013 cmd->t_data_sg_orig = NULL;
2014 cmd->t_data_nents = cmd->t_data_nents_orig;
2015 cmd->t_data_nents_orig = 0;
2016}
2017
1933static inline void transport_free_pages(struct se_cmd *cmd) 2018static inline void transport_free_pages(struct se_cmd *cmd)
1934{ 2019{
1935 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) 2020 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
2021 transport_reset_sgl_orig(cmd);
1936 return; 2022 return;
2023 }
2024 transport_reset_sgl_orig(cmd);
1937 2025
1938 transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents); 2026 transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
1939 cmd->t_data_sg = NULL; 2027 cmd->t_data_sg = NULL;
@@ -2029,24 +2117,22 @@ void transport_kunmap_data_sg(struct se_cmd *cmd)
2029} 2117}
2030EXPORT_SYMBOL(transport_kunmap_data_sg); 2118EXPORT_SYMBOL(transport_kunmap_data_sg);
2031 2119
2032static int 2120int
2033transport_generic_get_mem(struct se_cmd *cmd) 2121target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
2122 bool zero_page)
2034{ 2123{
2035 u32 length = cmd->data_length; 2124 struct scatterlist *sg;
2036 unsigned int nents;
2037 struct page *page; 2125 struct page *page;
2038 gfp_t zero_flag; 2126 gfp_t zero_flag = (zero_page) ? __GFP_ZERO : 0;
2127 unsigned int nent;
2039 int i = 0; 2128 int i = 0;
2040 2129
2041 nents = DIV_ROUND_UP(length, PAGE_SIZE); 2130 nent = DIV_ROUND_UP(length, PAGE_SIZE);
2042 cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL); 2131 sg = kmalloc(sizeof(struct scatterlist) * nent, GFP_KERNEL);
2043 if (!cmd->t_data_sg) 2132 if (!sg)
2044 return -ENOMEM; 2133 return -ENOMEM;
2045 2134
2046 cmd->t_data_nents = nents; 2135 sg_init_table(sg, nent);
2047 sg_init_table(cmd->t_data_sg, nents);
2048
2049 zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_CDB ? 0 : __GFP_ZERO;
2050 2136
2051 while (length) { 2137 while (length) {
2052 u32 page_len = min_t(u32, length, PAGE_SIZE); 2138 u32 page_len = min_t(u32, length, PAGE_SIZE);
@@ -2054,19 +2140,20 @@ transport_generic_get_mem(struct se_cmd *cmd)
2054 if (!page) 2140 if (!page)
2055 goto out; 2141 goto out;
2056 2142
2057 sg_set_page(&cmd->t_data_sg[i], page, page_len, 0); 2143 sg_set_page(&sg[i], page, page_len, 0);
2058 length -= page_len; 2144 length -= page_len;
2059 i++; 2145 i++;
2060 } 2146 }
2147 *sgl = sg;
2148 *nents = nent;
2061 return 0; 2149 return 0;
2062 2150
2063out: 2151out:
2064 while (i > 0) { 2152 while (i > 0) {
2065 i--; 2153 i--;
2066 __free_page(sg_page(&cmd->t_data_sg[i])); 2154 __free_page(sg_page(&sg[i]));
2067 } 2155 }
2068 kfree(cmd->t_data_sg); 2156 kfree(sg);
2069 cmd->t_data_sg = NULL;
2070 return -ENOMEM; 2157 return -ENOMEM;
2071} 2158}
2072 2159
@@ -2087,7 +2174,27 @@ transport_generic_new_cmd(struct se_cmd *cmd)
2087 */ 2174 */
2088 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && 2175 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
2089 cmd->data_length) { 2176 cmd->data_length) {
2090 ret = transport_generic_get_mem(cmd); 2177 bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
2178
2179 if ((cmd->se_cmd_flags & SCF_BIDI) ||
2180 (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
2181 u32 bidi_length;
2182
2183 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)
2184 bidi_length = cmd->t_task_nolb *
2185 cmd->se_dev->dev_attrib.block_size;
2186 else
2187 bidi_length = cmd->data_length;
2188
2189 ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
2190 &cmd->t_bidi_data_nents,
2191 bidi_length, zero_flag);
2192 if (ret < 0)
2193 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2194 }
2195
2196 ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
2197 cmd->data_length, zero_flag);
2091 if (ret < 0) 2198 if (ret < 0)
2092 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2199 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2093 } 2200 }
@@ -2740,6 +2847,15 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
2740 buffer[SPC_ASC_KEY_OFFSET] = asc; 2847 buffer[SPC_ASC_KEY_OFFSET] = asc;
2741 buffer[SPC_ASCQ_KEY_OFFSET] = ascq; 2848 buffer[SPC_ASCQ_KEY_OFFSET] = ascq;
2742 break; 2849 break;
2850 case TCM_MISCOMPARE_VERIFY:
2851 /* CURRENT ERROR */
2852 buffer[0] = 0x70;
2853 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2854 buffer[SPC_SENSE_KEY_OFFSET] = MISCOMPARE;
2855 /* MISCOMPARE DURING VERIFY OPERATION */
2856 buffer[SPC_ASC_KEY_OFFSET] = 0x1d;
2857 buffer[SPC_ASCQ_KEY_OFFSET] = 0x00;
2858 break;
2743 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: 2859 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
2744 default: 2860 default:
2745 /* CURRENT ERROR */ 2861 /* CURRENT ERROR */
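The new sense entry emits fixed-format sense data: response code 0x70 in byte 0, the MISCOMPARE key (0xe) in the low nibble of byte 2, and ASC/ASCQ 0x1d/0x00 in bytes 12-13. A sketch of the matching initiator-side decode:

	/* Returns true for fixed-format MISCOMPARE DURING VERIFY sense. */
	static bool sense_is_miscompare(const unsigned char *sense)
	{
		return (sense[0] & 0x7f) == 0x70 &&
		       (sense[2] & 0x0f) == 0x0e &&	/* MISCOMPARE */
		       sense[12] == 0x1d && sense[13] == 0x00;
	}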
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index bf0e390ce2d7..b04467e7547c 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * This file contains logic for SPC-3 Unit Attention emulation 4 * This file contains logic for SPC-3 Unit Attention emulation
5 * 5 *
6 * (c) Copyright 2009-2012 RisingTide Systems LLC. 6 * (c) Copyright 2009-2013 Datera, Inc.
7 * 7 *
8 * Nicholas A. Bellinger <nab@kernel.org> 8 * Nicholas A. Bellinger <nab@kernel.org>
9 * 9 *
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
new file mode 100644
index 000000000000..4d22e7d2adca
--- /dev/null
+++ b/drivers/target/target_core_xcopy.c
@@ -0,0 +1,1081 @@
1/*******************************************************************************
2 * Filename: target_core_xcopy.c
3 *
4 * This file contains support for SPC-4 Extended-Copy offload with generic
5 * TCM backends.
6 *
7 * Copyright (c) 2011-2013 Datera, Inc. All rights reserved.
8 *
9 * Author:
10 * Nicholas A. Bellinger <nab@daterainc.com>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 ******************************************************************************/
23
24#include <linux/slab.h>
25#include <linux/spinlock.h>
26#include <linux/list.h>
27#include <linux/configfs.h>
28#include <scsi/scsi.h>
29#include <scsi/scsi_cmnd.h>
30#include <asm/unaligned.h>
31
32#include <target/target_core_base.h>
33#include <target/target_core_backend.h>
34#include <target/target_core_fabric.h>
35#include <target/target_core_configfs.h>
36
37#include "target_core_pr.h"
38#include "target_core_ua.h"
39#include "target_core_xcopy.h"
40
41static struct workqueue_struct *xcopy_wq = NULL;
42/*
43 * From target_core_spc.c
44 */
45extern void spc_parse_naa_6h_vendor_specific(struct se_device *, unsigned char *);
46/*
47 * From target_core_device.c
48 */
49extern struct mutex g_device_mutex;
50extern struct list_head g_device_list;
51/*
52 * From target_core_configfs.c
53 */
54extern struct configfs_subsystem *target_core_subsystem[];
55
56static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
57{
58 int off = 0;
59
60 buf[off++] = (0x6 << 4);
61 buf[off++] = 0x01;
62 buf[off++] = 0x40;
63 buf[off] = (0x5 << 4);
64
65 spc_parse_naa_6h_vendor_specific(dev, &buf[off]);
66 return 0;
67}
68
69static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
70 bool src)
71{
72 struct se_device *se_dev;
73 struct configfs_subsystem *subsys = target_core_subsystem[0];
74 unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn;
75 int rc;
76
77 if (src == true)
78 dev_wwn = &xop->dst_tid_wwn[0];
79 else
80 dev_wwn = &xop->src_tid_wwn[0];
81
82 mutex_lock(&g_device_mutex);
83 list_for_each_entry(se_dev, &g_device_list, g_dev_node) {
84
85 memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
86 target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);
87
88 rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
89 if (rc != 0)
90 continue;
91
92 if (src == true) {
93 xop->dst_dev = se_dev;
94 pr_debug("XCOPY 0xe4: Setting xop->dst_dev: %p from located"
95 " se_dev\n", xop->dst_dev);
96 } else {
97 xop->src_dev = se_dev;
98 pr_debug("XCOPY 0xe4: Setting xop->src_dev: %p from located"
99 " se_dev\n", xop->src_dev);
100 }
101
102 rc = configfs_depend_item(subsys,
103 &se_dev->dev_group.cg_item);
104 if (rc != 0) {
105 pr_err("configfs_depend_item attempt failed:"
106 " %d for se_dev: %p\n", rc, se_dev);
107 mutex_unlock(&g_device_mutex);
108 return rc;
109 }
110
111 pr_debug("Called configfs_depend_item for subsys: %p se_dev: %p"
112 " se_dev->se_dev_group: %p\n", subsys, se_dev,
113 &se_dev->dev_group);
114
115 mutex_unlock(&g_device_mutex);
116 return 0;
117 }
118 mutex_unlock(&g_device_mutex);
119
120 pr_err("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
121 return -EINVAL;
122}
123
124static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
125 unsigned char *p, bool src)
126{
127 unsigned char *desc = p;
128 unsigned short ript;
129 u8 desig_len;
130 /*
131 * Extract RELATIVE INITIATOR PORT IDENTIFIER
132 */
133 ript = get_unaligned_be16(&desc[2]);
134 pr_debug("XCOPY 0xe4: RELATIVE INITIATOR PORT IDENTIFIER: %hu\n", ript);
135 /*
136 * Check for supported code set, association, and designator type
137 */
138 if ((desc[4] & 0x0f) != 0x1) {
139 pr_err("XCOPY 0xe4: code set of non binary type not supported\n");
140 return -EINVAL;
141 }
142 if ((desc[5] & 0x30) != 0x00) {
143 pr_err("XCOPY 0xe4: association other than LUN not supported\n");
144 return -EINVAL;
145 }
146 if ((desc[5] & 0x0f) != 0x3) {
147 pr_err("XCOPY 0xe4: designator type unsupported: 0x%02x\n",
148 (desc[5] & 0x0f));
149 return -EINVAL;
150 }
151 /*
152 * Check for matching 16 byte length for NAA IEEE Registered Extended
153 * Assigned designator
154 */
155 desig_len = desc[7];
156 if (desig_len != 16) {
157 pr_err("XCOPY 0xe4: invalid desig_len: %d\n", (int)desig_len);
158 return -EINVAL;
159 }
160 pr_debug("XCOPY 0xe4: desig_len: %d\n", (int)desig_len);
161 /*
162 * Check for NAA IEEE Registered Extended Assigned header..
163 */
164 if ((desc[8] & 0xf0) != 0x60) {
165 pr_err("XCOPY 0xe4: Unsupported DESIGNATOR TYPE: 0x%02x\n",
166 (desc[8] & 0xf0));
167 return -EINVAL;
168 }
169
170 if (src == true) {
171 memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
172 /*
173 * Determine if the source designator matches the local device
174 */
175 if (!memcmp(&xop->local_dev_wwn[0], &xop->src_tid_wwn[0],
176 XCOPY_NAA_IEEE_REGEX_LEN)) {
177 xop->op_origin = XCOL_SOURCE_RECV_OP;
178 xop->src_dev = se_cmd->se_dev;
179 pr_debug("XCOPY 0xe4: Set xop->src_dev %p from source"
180 " received xop\n", xop->src_dev);
181 }
182 } else {
183 memcpy(&xop->dst_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
184 /*
185 * Determine if the destination designator matches the local device
186 */
187 if (!memcmp(&xop->local_dev_wwn[0], &xop->dst_tid_wwn[0],
188 XCOPY_NAA_IEEE_REGEX_LEN)) {
189 xop->op_origin = XCOL_DEST_RECV_OP;
190 xop->dst_dev = se_cmd->se_dev;
191 pr_debug("XCOPY 0xe4: Set xop->dst_dev: %p from destination"
192 " received xop\n", xop->dst_dev);
193 }
194 }
195
196 return 0;
197}
198
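The checks in target_xcopy_parse_tiddesc_e4() above walk a 32-byte identification target descriptor (type 0xE4). For orientation, the byte layout this parser consumes could be sketched as a struct (a hypothetical helper, not part of the patch):

	struct xcopy_id_tgt_desc {		/* XCOPY_TARGET_DESC_LEN == 32 */
		u8	code;			/* [0]: 0xe4 */
		u8	peripheral;		/* [1] */
		__be16	rel_init_port_id;	/* [2-3] */
		u8	code_set;		/* [4] low nibble: 1 = binary */
		u8	assoc_type;		/* [5]: assoc + designator type */
		u8	reserved;		/* [6] */
		u8	desig_len;		/* [7]: must be 16 */
		u8	naa[16];		/* [8-23]: NAA IEEE Reg Extended */
		u8	pad[8];			/* [24-31] */
	} __packed;

The 32-byte descriptor size is inferred from the 64-byte/two-descriptor limit enforced in target_xcopy_parse_target_descriptors() below.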
199static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
200 struct xcopy_op *xop, unsigned char *p,
201 unsigned short tdll)
202{
203 struct se_device *local_dev = se_cmd->se_dev;
204 unsigned char *desc = p;
205 int offset = tdll % XCOPY_TARGET_DESC_LEN, rc, ret = 0;
206 unsigned short start = 0;
207 bool src = true;
208
209 if (offset != 0) {
 210		pr_err("XCOPY target descriptor list length is not a"
 211			" multiple of %d\n", XCOPY_TARGET_DESC_LEN);
212 return -EINVAL;
213 }
214 if (tdll > 64) {
 215		pr_err("XCOPY target descriptor list supports a maximum of"
 216			" two src/dest descriptors, tdll: %hu too large..\n", tdll);
217 return -EINVAL;
218 }
219 /*
220 * Generate an IEEE Registered Extended designator based upon the
221 * se_device the XCOPY was received upon..
222 */
223 memset(&xop->local_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
224 target_xcopy_gen_naa_ieee(local_dev, &xop->local_dev_wwn[0]);
225
226 while (start < tdll) {
227 /*
228 * Check for the 0xe4 identification descriptor target descriptor
229 * type, using VPD 0x83 WWN matching..
230 */
231 switch (desc[0]) {
232 case 0xe4:
233 rc = target_xcopy_parse_tiddesc_e4(se_cmd, xop,
234 &desc[0], src);
235 if (rc != 0)
236 goto out;
237 /*
238 * Assume target descriptors are in source -> destination order..
239 */
240 if (src)
241 src = false;
242 else
243 src = true;
244 start += XCOPY_TARGET_DESC_LEN;
245 desc += XCOPY_TARGET_DESC_LEN;
246 ret++;
247 break;
248 default:
249 pr_err("XCOPY unsupported descriptor type code:"
250 " 0x%02x\n", desc[0]);
251 goto out;
252 }
253 }
254
255 if (xop->op_origin == XCOL_SOURCE_RECV_OP)
256 rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true);
257 else
258 rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false);
259
260 if (rc < 0)
261 goto out;
262
263 pr_debug("XCOPY TGT desc: Source dev: %p NAA IEEE WWN: 0x%16phN\n",
264 xop->src_dev, &xop->src_tid_wwn[0]);
265 pr_debug("XCOPY TGT desc: Dest dev: %p NAA IEEE WWN: 0x%16phN\n",
266 xop->dst_dev, &xop->dst_tid_wwn[0]);
267
268 return ret;
269
270out:
271 return -EINVAL;
272}
273
274static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op *xop,
275 unsigned char *p)
276{
277 unsigned char *desc = p;
278 int dc = (desc[1] & 0x02);
279 unsigned short desc_len;
280
281 desc_len = get_unaligned_be16(&desc[2]);
282 if (desc_len != 0x18) {
283 pr_err("XCOPY segment desc 0x02: Illegal desc_len:"
284 " %hu\n", desc_len);
285 return -EINVAL;
286 }
287
288 xop->stdi = get_unaligned_be16(&desc[4]);
289 xop->dtdi = get_unaligned_be16(&desc[6]);
290 pr_debug("XCOPY seg desc 0x02: desc_len: %hu stdi: %hu dtdi: %hu, DC: %d\n",
291 desc_len, xop->stdi, xop->dtdi, dc);
292
293 xop->nolb = get_unaligned_be16(&desc[10]);
294 xop->src_lba = get_unaligned_be64(&desc[12]);
295 xop->dst_lba = get_unaligned_be64(&desc[20]);
296 pr_debug("XCOPY seg desc 0x02: nolb: %hu src_lba: %llu dst_lba: %llu\n",
297 xop->nolb, (unsigned long long)xop->src_lba,
298 (unsigned long long)xop->dst_lba);
299
300 if (dc != 0) {
301 xop->dbl = (desc[29] & 0xff) << 16;
302 xop->dbl |= (desc[30] & 0xff) << 8;
303 xop->dbl |= desc[31] & 0xff;
304
305 pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl);
306 }
307 return 0;
308}
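
For illustration only: a matching sketch of the 28-byte block -> block (0x02) segment descriptor parsed above. The helper is hypothetical; the offsets mirror target_xcopy_parse_segdesc_02().

#include <stdint.h>
#include <string.h>

static void build_xcopy_segdesc_02(uint8_t *desc, uint16_t stdi, uint16_t dtdi,
				   uint16_t nolb, uint64_t src_lba,
				   uint64_t dst_lba)
{
	int i;

	memset(desc, 0, 28);			/* XCOPY_SEGMENT_DESC_LEN */
	desc[0] = 0x02;				/* block -> block copy */
	desc[3] = 0x18;				/* DESCRIPTOR LENGTH (fixed) */
	desc[4] = stdi >> 8;  desc[5] = stdi & 0xff;	/* source target index */
	desc[6] = dtdi >> 8;  desc[7] = dtdi & 0xff;	/* dest target index */
	desc[10] = nolb >> 8; desc[11] = nolb & 0xff;	/* number of blocks */
	for (i = 0; i < 8; i++) {
		desc[12 + i] = src_lba >> (56 - 8 * i);	/* SOURCE LBA, be64 */
		desc[20 + i] = dst_lba >> (56 - 8 * i);	/* DEST LBA, be64 */
	}
}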
309
310static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd,
311 struct xcopy_op *xop, unsigned char *p,
312 unsigned int sdll)
313{
314 unsigned char *desc = p;
315 unsigned int start = 0;
316 int offset = sdll % XCOPY_SEGMENT_DESC_LEN, rc, ret = 0;
317
318 if (offset != 0) {
319 pr_err("XCOPY segment descriptor list length is not"
320 " a multiple of %d\n", XCOPY_SEGMENT_DESC_LEN);
321 return -EINVAL;
322 }
323
324 while (start < sdll) {
325 /*
326 * Check segment descriptor type code for block -> block
327 */
328 switch (desc[0]) {
329 case 0x02:
330 rc = target_xcopy_parse_segdesc_02(se_cmd, xop, desc);
331 if (rc < 0)
332 goto out;
333
334 ret++;
335 start += XCOPY_SEGMENT_DESC_LEN;
336 desc += XCOPY_SEGMENT_DESC_LEN;
337 break;
338 default:
339 pr_err("XCOPY unsupported segment descriptor"
340 " type: 0x%02x\n", desc[0]);
341 goto out;
342 }
343 }
344
345 return ret;
346
347out:
348 return -EINVAL;
349}
350
351/*
352 * Start xcopy_pt ops
353 */
354
355struct xcopy_pt_cmd {
356 bool remote_port;
357 struct se_cmd se_cmd;
358 struct xcopy_op *xcopy_op;
359 struct completion xpt_passthrough_sem;
360};
361
362static struct se_port xcopy_pt_port;
363static struct se_portal_group xcopy_pt_tpg;
364static struct se_session xcopy_pt_sess;
365static struct se_node_acl xcopy_pt_nacl;
366
367static char *xcopy_pt_get_fabric_name(void)
368{
369 return "xcopy-pt";
370}
371
372static u32 xcopy_pt_get_tag(struct se_cmd *se_cmd)
373{
374 return 0;
375}
376
377static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
378{
379 return 0;
380}
381
382static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
383{
384 struct configfs_subsystem *subsys = target_core_subsystem[0];
385 struct se_device *remote_dev;
386
387 if (xop->op_origin == XCOL_SOURCE_RECV_OP)
388 remote_dev = xop->dst_dev;
389 else
390 remote_dev = xop->src_dev;
391
392 pr_debug("Calling configfs_undepend_item for subsys: %p"
393 " remote_dev: %p remote_dev->dev_group: %p\n",
394 subsys, remote_dev, &remote_dev->dev_group.cg_item);
395
396 configfs_undepend_item(subsys, &remote_dev->dev_group.cg_item);
397}
398
399static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
400{
401 struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
402 struct xcopy_pt_cmd, se_cmd);
403
404 if (xpt_cmd->remote_port)
405 kfree(se_cmd->se_lun);
406
407 kfree(xpt_cmd);
408}
409
410static int xcopy_pt_check_stop_free(struct se_cmd *se_cmd)
411{
412 struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
413 struct xcopy_pt_cmd, se_cmd);
414
415 complete(&xpt_cmd->xpt_passthrough_sem);
416 return 0;
417}
418
419static int xcopy_pt_write_pending(struct se_cmd *se_cmd)
420{
421 return 0;
422}
423
424static int xcopy_pt_write_pending_status(struct se_cmd *se_cmd)
425{
426 return 0;
427}
428
429static int xcopy_pt_queue_data_in(struct se_cmd *se_cmd)
430{
431 return 0;
432}
433
434static int xcopy_pt_queue_status(struct se_cmd *se_cmd)
435{
436 return 0;
437}
438
439static struct target_core_fabric_ops xcopy_pt_tfo = {
440 .get_fabric_name = xcopy_pt_get_fabric_name,
441 .get_task_tag = xcopy_pt_get_tag,
442 .get_cmd_state = xcopy_pt_get_cmd_state,
443 .release_cmd = xcopy_pt_release_cmd,
444 .check_stop_free = xcopy_pt_check_stop_free,
445 .write_pending = xcopy_pt_write_pending,
446 .write_pending_status = xcopy_pt_write_pending_status,
447 .queue_data_in = xcopy_pt_queue_data_in,
448 .queue_status = xcopy_pt_queue_status,
449};
450
451/*
452 * End xcopy_pt_ops
453 */
454
455int target_xcopy_setup_pt(void)
456{
457 xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0);
458 if (!xcopy_wq) {
459 pr_err("Unable to allocate xcopy_wq\n");
460 return -ENOMEM;
461 }
462
463 memset(&xcopy_pt_port, 0, sizeof(struct se_port));
464 INIT_LIST_HEAD(&xcopy_pt_port.sep_alua_list);
465 INIT_LIST_HEAD(&xcopy_pt_port.sep_list);
466 mutex_init(&xcopy_pt_port.sep_tg_pt_md_mutex);
467
468 memset(&xcopy_pt_tpg, 0, sizeof(struct se_portal_group));
469 INIT_LIST_HEAD(&xcopy_pt_tpg.se_tpg_node);
470 INIT_LIST_HEAD(&xcopy_pt_tpg.acl_node_list);
471 INIT_LIST_HEAD(&xcopy_pt_tpg.tpg_sess_list);
472
473 xcopy_pt_port.sep_tpg = &xcopy_pt_tpg;
474 xcopy_pt_tpg.se_tpg_tfo = &xcopy_pt_tfo;
475
476 memset(&xcopy_pt_nacl, 0, sizeof(struct se_node_acl));
477 INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list);
478 INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list);
479 memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
480 INIT_LIST_HEAD(&xcopy_pt_sess.sess_list);
481 INIT_LIST_HEAD(&xcopy_pt_sess.sess_acl_list);
482
483 xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
484 xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;
485
486 xcopy_pt_sess.se_tpg = &xcopy_pt_tpg;
487 xcopy_pt_sess.se_node_acl = &xcopy_pt_nacl;
488
489 return 0;
490}
491
492void target_xcopy_release_pt(void)
493{
494 if (xcopy_wq)
495 destroy_workqueue(xcopy_wq);
496}
497
498static void target_xcopy_setup_pt_port(
499 struct xcopy_pt_cmd *xpt_cmd,
500 struct xcopy_op *xop,
501 bool remote_port)
502{
503 struct se_cmd *ec_cmd = xop->xop_se_cmd;
504 struct se_cmd *pt_cmd = &xpt_cmd->se_cmd;
505
506 if (xop->op_origin == XCOL_SOURCE_RECV_OP) {
507 /*
508 * Honor destination port reservations for X-COPY PUSH emulation
509 * when the CDB is received on the local source port, and blocks are
510 * READ locally to WRITE on the remote destination port.
511 */
512 if (remote_port) {
513 xpt_cmd->remote_port = remote_port;
514 pt_cmd->se_lun->lun_sep = &xcopy_pt_port;
515 pr_debug("Setup emulated remote DEST xcopy_pt_port: %p to"
516 " cmd->se_lun->lun_sep for X-COPY data PUSH\n",
517 pt_cmd->se_lun->lun_sep);
518 } else {
519 pt_cmd->se_lun = ec_cmd->se_lun;
520 pt_cmd->se_dev = ec_cmd->se_dev;
521
522 pr_debug("Honoring local SRC port from ec_cmd->se_dev:"
523 " %p\n", pt_cmd->se_dev);
524
525 pr_debug("Honoring local SRC port from ec_cmd->se_lun: %p\n",
526 pt_cmd->se_lun);
527 }
528 } else {
529 /*
530 * Honor source port reservations for X-COPY PULL emulation
531 * when the CDB is received on the local destination port, and
532 * blocks are READ from the remote source port to WRITE on the
533 * local destination port.
534 */
535 if (remote_port) {
536 xpt_cmd->remote_port = remote_port;
537 pt_cmd->se_lun->lun_sep = &xcopy_pt_port;
538 pr_debug("Setup emulated remote SRC xcopy_pt_port: %p to"
539 " cmd->se_lun->lun_sep for X-COPY data PULL\n",
540 pt_cmd->se_lun->lun_sep);
541 } else {
542 pt_cmd->se_lun = ec_cmd->se_lun;
543 pt_cmd->se_dev = ec_cmd->se_dev;
544
545 pr_debug("Honoring local DST port from ec_cmd->se_dev:"
546 " %p\n", pt_cmd->se_dev);
547
548 pr_debug("Honoring local DST port from ec_cmd->se_lun: %p\n",
549 pt_cmd->se_lun);
550 }
551 }
552}
553
554static int target_xcopy_init_pt_lun(
555 struct xcopy_pt_cmd *xpt_cmd,
556 struct xcopy_op *xop,
557 struct se_device *se_dev,
558 struct se_cmd *pt_cmd,
559 bool remote_port)
560{
561 /*
562 * Don't allocate + init a pt_cmd->se_lun if honoring the local port
563 * for reservations. The pt_cmd->se_lun pointer will be set up from
564 * within target_xcopy_setup_pt_port()
565 */
566 if (!remote_port) {
567 pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH;
568 return 0;
569 }
570
571 pt_cmd->se_lun = kzalloc(sizeof(struct se_lun), GFP_KERNEL);
572 if (!pt_cmd->se_lun) {
573 pr_err("Unable to allocate pt_cmd->se_lun\n");
574 return -ENOMEM;
575 }
576 init_completion(&pt_cmd->se_lun->lun_shutdown_comp);
577 INIT_LIST_HEAD(&pt_cmd->se_lun->lun_cmd_list);
578 INIT_LIST_HEAD(&pt_cmd->se_lun->lun_acl_list);
579 spin_lock_init(&pt_cmd->se_lun->lun_acl_lock);
580 spin_lock_init(&pt_cmd->se_lun->lun_cmd_lock);
581 spin_lock_init(&pt_cmd->se_lun->lun_sep_lock);
582
583 pt_cmd->se_dev = se_dev;
584
585 pr_debug("Setup emulated se_dev: %p from se_dev\n", pt_cmd->se_dev);
586 pt_cmd->se_lun->lun_se_dev = se_dev;
587 pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH;
588
589 pr_debug("Setup emulated se_dev: %p to pt_cmd->se_lun->lun_se_dev\n",
590 pt_cmd->se_lun->lun_se_dev);
591
592 return 0;
593}
594
595static int target_xcopy_setup_pt_cmd(
596 struct xcopy_pt_cmd *xpt_cmd,
597 struct xcopy_op *xop,
598 struct se_device *se_dev,
599 unsigned char *cdb,
600 bool remote_port,
601 bool alloc_mem)
602{
603 struct se_cmd *cmd = &xpt_cmd->se_cmd;
604 sense_reason_t sense_rc;
605 int ret = 0, rc;
606 /*
607 * Setup LUN+port to honor reservations based upon xop->op_origin for
608 * X-COPY PUSH or X-COPY PULL based upon where the CDB was received.
609 */
610 rc = target_xcopy_init_pt_lun(xpt_cmd, xop, se_dev, cmd, remote_port);
611 if (rc < 0) {
612 ret = rc;
613 goto out;
614 }
615 xpt_cmd->xcopy_op = xop;
616 target_xcopy_setup_pt_port(xpt_cmd, xop, remote_port);
617
618 sense_rc = target_setup_cmd_from_cdb(cmd, cdb);
619 if (sense_rc) {
620 ret = -EINVAL;
621 goto out;
622 }
623
624 if (alloc_mem) {
625 rc = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
626 cmd->data_length, false);
627 if (rc < 0) {
628 ret = rc;
629 goto out;
630 }
631 /*
632 * Set this bit so that transport_free_pages() allows the
633 * caller to release SGLs + physical memory allocated by
634 * transport_generic_get_mem()..
635 */
636 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
637 } else {
638 /*
639 * Here the previously allocated SGLs for the internal READ
640 * are mapped zero-copy to the internal WRITE.
641 */
642 sense_rc = transport_generic_map_mem_to_cmd(cmd,
643 xop->xop_data_sg, xop->xop_data_nents,
644 NULL, 0);
645 if (sense_rc) {
646 ret = -EINVAL;
647 goto out;
648 }
649
650 pr_debug("Setup PASSTHROUGH_NOALLOC t_data_sg: %p t_data_nents:"
651 " %u\n", cmd->t_data_sg, cmd->t_data_nents);
652 }
653
654 return 0;
655
656out:
657 if (remote_port)
658 kfree(cmd->se_lun);
659 return ret;
660}
661
662static int target_xcopy_issue_pt_cmd(struct xcopy_pt_cmd *xpt_cmd)
663{
664 struct se_cmd *se_cmd = &xpt_cmd->se_cmd;
665 sense_reason_t sense_rc;
666
667 sense_rc = transport_generic_new_cmd(se_cmd);
668 if (sense_rc)
669 return -EINVAL;
670
671 if (se_cmd->data_direction == DMA_TO_DEVICE)
672 target_execute_cmd(se_cmd);
673
674 wait_for_completion_interruptible(&xpt_cmd->xpt_passthrough_sem);
675
676 pr_debug("target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n",
677 se_cmd->scsi_status);
678 return 0;
679}
680
681static int target_xcopy_read_source(
682 struct se_cmd *ec_cmd,
683 struct xcopy_op *xop,
684 struct se_device *src_dev,
685 sector_t src_lba,
686 u32 src_sectors)
687{
688 struct xcopy_pt_cmd *xpt_cmd;
689 struct se_cmd *se_cmd;
690 u32 length = (src_sectors * src_dev->dev_attrib.block_size);
691 int rc;
692 unsigned char cdb[16];
693 bool remote_port = (xop->op_origin == XCOL_DEST_RECV_OP);
694
695 xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL);
696 if (!xpt_cmd) {
697 pr_err("Unable to allocate xcopy_pt_cmd\n");
698 return -ENOMEM;
699 }
700 init_completion(&xpt_cmd->xpt_passthrough_sem);
701 se_cmd = &xpt_cmd->se_cmd;
702
703 memset(&cdb[0], 0, 16);
704 cdb[0] = READ_16;
705 put_unaligned_be64(src_lba, &cdb[2]);
706 put_unaligned_be32(src_sectors, &cdb[10]);
707 pr_debug("XCOPY: Built READ_16: LBA: %llu Sectors: %u Length: %u\n",
708 (unsigned long long)src_lba, src_sectors, length);
709
710 transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
711 DMA_FROM_DEVICE, 0, NULL);
712 xop->src_pt_cmd = xpt_cmd;
713
714 rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
715 remote_port, true);
716 if (rc < 0) {
717 transport_generic_free_cmd(se_cmd, 0);
718 return rc;
719 }
720
721 xop->xop_data_sg = se_cmd->t_data_sg;
722 xop->xop_data_nents = se_cmd->t_data_nents;
723 pr_debug("XCOPY-READ: Saved xop->xop_data_sg: %p, num: %u for READ"
724 " memory\n", xop->xop_data_sg, xop->xop_data_nents);
725
726 rc = target_xcopy_issue_pt_cmd(xpt_cmd);
727 if (rc < 0) {
728 transport_generic_free_cmd(se_cmd, 0);
729 return rc;
730 }
731 /*
732 * Clear off the allocated t_data_sg, which has been saved for
733 * zero-copy WRITE submission reuse in struct xcopy_op..
734 */
735 se_cmd->t_data_sg = NULL;
736 se_cmd->t_data_nents = 0;
737
738 return 0;
739}
740
741static int target_xcopy_write_destination(
742 struct se_cmd *ec_cmd,
743 struct xcopy_op *xop,
744 struct se_device *dst_dev,
745 sector_t dst_lba,
746 u32 dst_sectors)
747{
748 struct xcopy_pt_cmd *xpt_cmd;
749 struct se_cmd *se_cmd;
750 u32 length = (dst_sectors * dst_dev->dev_attrib.block_size);
751 int rc;
752 unsigned char cdb[16];
753 bool remote_port = (xop->op_origin == XCOL_SOURCE_RECV_OP);
754
755 xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL);
756 if (!xpt_cmd) {
757 pr_err("Unable to allocate xcopy_pt_cmd\n");
758 return -ENOMEM;
759 }
760 init_completion(&xpt_cmd->xpt_passthrough_sem);
761 se_cmd = &xpt_cmd->se_cmd;
762
763 memset(&cdb[0], 0, 16);
764 cdb[0] = WRITE_16;
765 put_unaligned_be64(dst_lba, &cdb[2]);
766 put_unaligned_be32(dst_sectors, &cdb[10]);
767 pr_debug("XCOPY: Built WRITE_16: LBA: %llu Sectors: %u Length: %u\n",
768 (unsigned long long)dst_lba, dst_sectors, length);
769
770 transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
771 DMA_TO_DEVICE, 0, NULL);
772 xop->dst_pt_cmd = xpt_cmd;
773
774 rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, &cdb[0],
775 remote_port, false);
776 if (rc < 0) {
777 struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd;
778 /*
779 * If the failure happened before the t_data_sg hand-off in
780 * target_xcopy_setup_pt_cmd(), reset the memory + clear the flag so
781 * that core releases this memory on error during X-COPY WRITE I/O.
782 */
783 src_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
784 src_cmd->t_data_sg = xop->xop_data_sg;
785 src_cmd->t_data_nents = xop->xop_data_nents;
786
787 transport_generic_free_cmd(se_cmd, 0);
788 return rc;
789 }
790
791 rc = target_xcopy_issue_pt_cmd(xpt_cmd);
792 if (rc < 0) {
793 se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
794 transport_generic_free_cmd(se_cmd, 0);
795 return rc;
796 }
797
798 return 0;
799}
800
801static void target_xcopy_do_work(struct work_struct *work)
802{
803 struct xcopy_op *xop = container_of(work, struct xcopy_op, xop_work);
804 struct se_device *src_dev = xop->src_dev, *dst_dev = xop->dst_dev;
805 struct se_cmd *ec_cmd = xop->xop_se_cmd;
806 sector_t src_lba = xop->src_lba, dst_lba = xop->dst_lba, end_lba;
807 unsigned int max_sectors;
808 int rc;
809 unsigned short nolb = xop->nolb, cur_nolb, max_nolb, copied_nolb = 0;
810
811 end_lba = src_lba + nolb;
812 /*
813 * Break up XCOPY I/O into hw_max_sectors sized I/O based on the
814 * smallest max_sectors between src_dev + dst_dev
815 */
816 max_sectors = min(src_dev->dev_attrib.hw_max_sectors,
817 dst_dev->dev_attrib.hw_max_sectors);
818 max_sectors = min_t(u32, max_sectors, XCOPY_MAX_SECTORS);
819
820 max_nolb = min_t(u16, max_sectors, ((u16)(~0U)));
821
822 pr_debug("target_xcopy_do_work: nolb: %hu, max_nolb: %hu end_lba: %llu\n",
823 nolb, max_nolb, (unsigned long long)end_lba);
824 pr_debug("target_xcopy_do_work: Starting src_lba: %llu, dst_lba: %llu\n",
825 (unsigned long long)src_lba, (unsigned long long)dst_lba);
826
827 while (src_lba < end_lba) {
828 cur_nolb = min(nolb, max_nolb);
829
830 pr_debug("target_xcopy_do_work: Calling read src_dev: %p src_lba: %llu,"
831 " cur_nolb: %hu\n", src_dev, (unsigned long long)src_lba, cur_nolb);
832
833 rc = target_xcopy_read_source(ec_cmd, xop, src_dev, src_lba, cur_nolb);
834 if (rc < 0)
835 goto out;
836
837 src_lba += cur_nolb;
838 pr_debug("target_xcopy_do_work: Incremented READ src_lba to %llu\n",
839 (unsigned long long)src_lba);
840
841 pr_debug("target_xcopy_do_work: Calling write dst_dev: %p dst_lba: %llu,"
842 " cur_nolb: %hu\n", dst_dev, (unsigned long long)dst_lba, cur_nolb);
843
844 rc = target_xcopy_write_destination(ec_cmd, xop, dst_dev,
845 dst_lba, cur_nolb);
846 if (rc < 0) {
847 transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
848 goto out;
849 }
850
851 dst_lba += cur_nolb;
852 pr_debug("target_xcopy_do_work: Incremented WRITE dst_lba to %llu\n",
853 (unsigned long long)dst_lba);
854
855 copied_nolb += cur_nolb;
856 nolb -= cur_nolb;
857
858 transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
859 xop->dst_pt_cmd->se_cmd.se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
860
861 transport_generic_free_cmd(&xop->dst_pt_cmd->se_cmd, 0);
862 }
863
864 xcopy_pt_undepend_remotedev(xop);
865 kfree(xop);
866
867 pr_debug("target_xcopy_do_work: Final src_lba: %llu, dst_lba: %llu\n",
868 (unsigned long long)src_lba, (unsigned long long)dst_lba);
869 pr_debug("target_xcopy_do_work: Blocks copied: %hu, Bytes Copied: %u\n",
870 copied_nolb, copied_nolb * dst_dev->dev_attrib.block_size);
871
872 pr_debug("target_xcopy_do_work: Setting X-COPY GOOD status -> sending response\n");
873 target_complete_cmd(ec_cmd, SAM_STAT_GOOD);
874 return;
875
876out:
877 xcopy_pt_undepend_remotedev(xop);
878 kfree(xop);
879
880 pr_warn("target_xcopy_do_work: Setting X-COPY CHECK_CONDITION -> sending response\n");
881 ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
882 target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION);
883}
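
As a side note, the chunking arithmetic above is easy to check in isolation. A hedged sketch (hypothetical helper, example values in the comment):

static unsigned int xcopy_num_chunks(unsigned int nolb,
				     unsigned int src_hw_max_sectors,
				     unsigned int dst_hw_max_sectors)
{
	unsigned int max_sectors = src_hw_max_sectors < dst_hw_max_sectors ?
				   src_hw_max_sectors : dst_hw_max_sectors;

	if (max_sectors > 1024)		/* XCOPY_MAX_SECTORS */
		max_sectors = 1024;
	/* e.g. nolb = 4096 with max_sectors = 1024 -> 4 READ+WRITE round trips */
	return (nolb + max_sectors - 1) / max_sectors;
}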
884
885sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
886{
887 struct xcopy_op *xop = NULL;
888 unsigned char *p = NULL, *seg_desc;
889 unsigned int list_id, list_id_usage, sdll, inline_dl, sa;
890 int rc;
891 unsigned short tdll;
892
893 sa = se_cmd->t_task_cdb[1] & 0x1f;
894 if (sa != 0x00) {
895 pr_err("EXTENDED_COPY(LID4) not supported\n");
896 return TCM_UNSUPPORTED_SCSI_OPCODE;
897 }
898
899 p = transport_kmap_data_sg(se_cmd);
900 if (!p) {
901 pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n");
902 return TCM_OUT_OF_RESOURCES;
903 }
904
905 list_id = p[0];
906 if (list_id != 0x00) {
907 pr_err("XCOPY with non zero list_id: 0x%02x\n", list_id);
908 goto out;
909 }
910 list_id_usage = (p[1] & 0x18);
911 /*
912 * Determine TARGET DESCRIPTOR LIST LENGTH + SEGMENT DESCRIPTOR LIST LENGTH
913 */
914 tdll = get_unaligned_be16(&p[2]);
915 sdll = get_unaligned_be32(&p[8]);
916
917 inline_dl = get_unaligned_be32(&p[12]);
918 if (inline_dl != 0) {
919 pr_err("XCOPY with non zero inline data length\n");
920 goto out;
921 }
922
923 xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
924 if (!xop) {
925 pr_err("Unable to allocate xcopy_op\n");
926 goto out;
927 }
928 xop->xop_se_cmd = se_cmd;
929
930 pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x"
931 " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
932 tdll, sdll, inline_dl);
933
934 rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll);
935 if (rc <= 0)
936 goto out;
937
938 pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc,
939 rc * XCOPY_TARGET_DESC_LEN);
940 seg_desc = &p[16];
941 seg_desc += (rc * XCOPY_TARGET_DESC_LEN);
942
943 rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc, sdll);
944 if (rc <= 0) {
945 xcopy_pt_undepend_remotedev(xop);
946 goto out;
947 }
948 transport_kunmap_data_sg(se_cmd);
949
950 pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc,
951 rc * XCOPY_SEGMENT_DESC_LEN);
952 INIT_WORK(&xop->xop_work, target_xcopy_do_work);
953 queue_work(xcopy_wq, &xop->xop_work);
954 return TCM_NO_SENSE;
955
956out:
957 if (p)
958 transport_kunmap_data_sg(se_cmd);
959 kfree(xop);
960 return TCM_INVALID_CDB_FIELD;
961}
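
For illustration only: a userspace-style sketch of the 16-byte LID1 parameter list header that target_do_xcopy() parses above. The helper is hypothetical; the 0xe4 target descriptors and 0x02 segment descriptors from the earlier sketches would follow starting at byte 16.

#include <stdint.h>
#include <string.h>

static void build_xcopy_param_header(uint8_t *p, uint16_t tdll, uint32_t sdll)
{
	memset(p, 0, 16);
	p[0] = 0x00;			/* LIST IDENTIFIER: only 0 is accepted */
	p[2] = tdll >> 8;		/* TARGET DESCRIPTOR LIST LENGTH */
	p[3] = tdll & 0xff;
	p[8] = sdll >> 24;		/* SEGMENT DESCRIPTOR LIST LENGTH */
	p[9] = sdll >> 16;
	p[10] = sdll >> 8;
	p[11] = sdll & 0xff;
	/* p[12..15] INLINE DATA LENGTH stays 0; non-zero is rejected above */
}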
962
963static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd)
964{
965 unsigned char *p;
966
967 p = transport_kmap_data_sg(se_cmd);
968 if (!p) {
969 pr_err("transport_kmap_data_sg failed in"
970 " target_rcr_operating_parameters\n");
971 return TCM_OUT_OF_RESOURCES;
972 }
973
974 if (se_cmd->data_length < 54) {
975 pr_err("Receive Copy Results Op Parameters length"
976 " too small: %u\n", se_cmd->data_length);
977 transport_kunmap_data_sg(se_cmd);
978 return TCM_INVALID_CDB_FIELD;
979 }
980 /*
981 * Set SNLID=1 (Supports no List ID)
982 */
983 p[4] = 0x1;
984 /*
985 * MAXIMUM TARGET DESCRIPTOR COUNT
986 */
987 put_unaligned_be16(RCR_OP_MAX_TARGET_DESC_COUNT, &p[8]);
988 /*
989 * MAXIMUM SEGMENT DESCRIPTOR COUNT
990 */
991 put_unaligned_be16(RCR_OP_MAX_SG_DESC_COUNT, &p[10]);
992 /*
993 * MAXIMUM DESCRIPTOR LIST LENGTH
994 */
995 put_unaligned_be32(RCR_OP_MAX_DESC_LIST_LEN, &p[12]);
996 /*
997 * MAXIMUM SEGMENT LENGTH
998 */
999 put_unaligned_be32(RCR_OP_MAX_SEGMENT_LEN, &p[16]);
1000 /*
1001 * MAXIMUM INLINE DATA LENGTH for SA 0x04 (NOT SUPPORTED)
1002 */
1003 put_unaligned_be32(0x0, &p[20]);
1004 /*
1005 * HELD DATA LIMIT
1006 */
1007 put_unaligned_be32(0x0, &p[24]);
1008 /*
1009 * MAXIMUM STREAM DEVICE TRANSFER SIZE
1010 */
1011 put_unaligned_be32(0x0, &p[28]);
1012 /*
1013 * TOTAL CONCURRENT COPIES
1014 */
1015 put_unaligned_be16(RCR_OP_TOTAL_CONCURR_COPIES, &p[34]);
1016 /*
1017 * MAXIMUM CONCURRENT COPIES
1018 */
1019 p[36] = RCR_OP_MAX_CONCURR_COPIES;
1020 /*
1021 * DATA SEGMENT GRANULARITY (log 2)
1022 */
1023 p[37] = RCR_OP_DATA_SEG_GRAN_LOG2;
1024 /*
1025 * INLINE DATA GRANULARITY (log 2)
1026 */
1027 p[38] = RCR_OP_INLINE_DATA_GRAN_LOG2;
1028 /*
1029 * HELD DATA GRANULARITY
1030 */
1031 p[39] = RCR_OP_HELD_DATA_GRAN_LOG2;
1032 /*
1033 * IMPLEMENTED DESCRIPTOR LIST LENGTH
1034 */
1035 p[43] = 0x2;
1036 /*
1037 * List of implemented descriptor type codes (ordered)
1038 */
1039 p[44] = 0x02; /* Copy Block to Block device */
1040 p[45] = 0xe4; /* Identification descriptor target descriptor */
1041
1042 /*
1043 * AVAILABLE DATA (n-3)
1044 */
1045 put_unaligned_be32(42, &p[0]);
1046
1047 transport_kunmap_data_sg(se_cmd);
1048 target_complete_cmd(se_cmd, GOOD);
1049
1050 return TCM_NO_SENSE;
1051}
1052
1053sense_reason_t target_do_receive_copy_results(struct se_cmd *se_cmd)
1054{
1055 unsigned char *cdb = &se_cmd->t_task_cdb[0];
1056 int sa = (cdb[1] & 0x1f), list_id = cdb[2];
1057 sense_reason_t rc = TCM_NO_SENSE;
1058
1059 pr_debug("Entering target_do_receive_copy_results: SA: 0x%02x, List ID:"
1060 " 0x%02x, AL: %u\n", sa, list_id, se_cmd->data_length);
1061
1062 if (list_id != 0) {
1063 pr_err("Receive Copy Results with non zero list identifier"
1064 " not supported\n");
1065 return TCM_INVALID_CDB_FIELD;
1066 }
1067
1068 switch (sa) {
1069 case RCR_SA_OPERATING_PARAMETERS:
1070 rc = target_rcr_operating_parameters(se_cmd);
1071 break;
1072 case RCR_SA_COPY_STATUS:
1073 case RCR_SA_RECEIVE_DATA:
1074 case RCR_SA_FAILED_SEGMENT_DETAILS:
1075 default:
1076 pr_err("Unsupported SA for receive copy results: 0x%02x\n", sa);
1077 return TCM_INVALID_CDB_FIELD;
1078 }
1079
1080 return rc;
1081}
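
For illustration only: a hypothetical userspace decoder for the operating-parameters payload filled in by target_rcr_operating_parameters() above; the offsets mirror that function.

#include <stdint.h>
#include <stdio.h>

static void dump_rcr_op_params(const uint8_t *p)
{
	uint32_t avail = (uint32_t)p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];

	printf("AVAILABLE DATA: %u SNLID: %u\n", avail, p[4] & 0x1);
	printf("MAX TARGET DESCRIPTOR COUNT: %u\n",
	       (unsigned int)(p[8] << 8 | p[9]));
	printf("MAX SEGMENT DESCRIPTOR COUNT: %u\n",
	       (unsigned int)(p[10] << 8 | p[11]));
	printf("MAX DESCRIPTOR LIST LENGTH: %u\n",
	       (uint32_t)p[12] << 24 | p[13] << 16 | p[14] << 8 | p[15]);
	printf("MAX SEGMENT LENGTH: %u\n",
	       (uint32_t)p[16] << 24 | p[17] << 16 | p[18] << 8 | p[19]);
}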
diff --git a/drivers/target/target_core_xcopy.h b/drivers/target/target_core_xcopy.h
new file mode 100644
index 000000000000..700a981c7b41
--- /dev/null
+++ b/drivers/target/target_core_xcopy.h
@@ -0,0 +1,62 @@
1#define XCOPY_TARGET_DESC_LEN 32
2#define XCOPY_SEGMENT_DESC_LEN 28
3#define XCOPY_NAA_IEEE_REGEX_LEN 16
4#define XCOPY_MAX_SECTORS 1024
5
6enum xcopy_origin_list {
7 XCOL_SOURCE_RECV_OP = 0x01,
8 XCOL_DEST_RECV_OP = 0x02,
9};
10
11struct xcopy_pt_cmd;
12
13struct xcopy_op {
14 int op_origin;
15
16 struct se_cmd *xop_se_cmd;
17 struct se_device *src_dev;
18 unsigned char src_tid_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
19 struct se_device *dst_dev;
20 unsigned char dst_tid_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
21 unsigned char local_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
22
23 sector_t src_lba;
24 sector_t dst_lba;
25 unsigned short stdi;
26 unsigned short dtdi;
27 unsigned short nolb;
28 unsigned int dbl;
29
30 struct xcopy_pt_cmd *src_pt_cmd;
31 struct xcopy_pt_cmd *dst_pt_cmd;
32
33 u32 xop_data_nents;
34 struct scatterlist *xop_data_sg;
35 struct work_struct xop_work;
36};
37
38/*
39 * Receive Copy Results Service Actions
40 */
41#define RCR_SA_COPY_STATUS 0x00
42#define RCR_SA_RECEIVE_DATA 0x01
43#define RCR_SA_OPERATING_PARAMETERS 0x03
44#define RCR_SA_FAILED_SEGMENT_DETAILS 0x04
45
46/*
47 * Receive Copy Results defs for Operating Parameters
48 */
49#define RCR_OP_MAX_TARGET_DESC_COUNT 0x2
50#define RCR_OP_MAX_SG_DESC_COUNT 0x1
51#define RCR_OP_MAX_DESC_LIST_LEN 1024
52#define RCR_OP_MAX_SEGMENT_LEN 268435456 /* 256 MB */
53#define RCR_OP_TOTAL_CONCURR_COPIES 0x1 /* Must be <= 16384 */
54#define RCR_OP_MAX_CONCURR_COPIES 0x1 /* Must be <= 255 */
55#define RCR_OP_DATA_SEG_GRAN_LOG2 9 /* 512 bytes in log 2 */
56#define RCR_OP_INLINE_DATA_GRAN_LOG2 9 /* 512 bytes in log 2 */
57#define RCR_OP_HELD_DATA_GRAN_LOG2 9 /* 512 bytes in log 2 */
58
59extern int target_xcopy_setup_pt(void);
60extern void target_xcopy_release_pt(void);
61extern sense_reason_t target_do_xcopy(struct se_cmd *);
62extern sense_reason_t target_do_receive_copy_results(struct se_cmd *);
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index b74feb0d5133..4e0050840a72 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -311,7 +311,11 @@ static struct se_portal_group *ft_add_tpg(
311 */ 311 */
312 if (strstr(name, "tpgt_") != name) 312 if (strstr(name, "tpgt_") != name)
313 return NULL; 313 return NULL;
314 if (strict_strtoul(name + 5, 10, &index) || index > UINT_MAX) 314
315 ret = kstrtoul(name + 5, 10, &index);
316 if (ret)
317 return NULL;
318 if (index > UINT_MAX)
315 return NULL; 319 return NULL;
316 320
317 lacl = container_of(wwn, struct ft_lport_acl, fc_lport_wwn); 321 lacl = container_of(wwn, struct ft_lport_acl, fc_lport_wwn);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 0c27c7df1b09..4b79a1f2f901 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1,12 +1,12 @@
1/******************************************************************************* 1/*******************************************************************************
2 * Vhost kernel TCM fabric driver for virtio SCSI initiators 2 * Vhost kernel TCM fabric driver for virtio SCSI initiators
3 * 3 *
4 * (C) Copyright 2010-2012 RisingTide Systems LLC. 4 * (C) Copyright 2010-2013 Datera, Inc.
5 * (C) Copyright 2010-2012 IBM Corp. 5 * (C) Copyright 2010-2012 IBM Corp.
6 * 6 *
7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8 * 8 *
9 * Authors: Nicholas A. Bellinger <nab@risingtidesystems.com> 9 * Authors: Nicholas A. Bellinger <nab@daterainc.com>
10 * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com> 10 * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
11 * 11 *
12 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
@@ -48,12 +48,16 @@
48#include <linux/virtio_scsi.h> 48#include <linux/virtio_scsi.h>
49#include <linux/llist.h> 49#include <linux/llist.h>
50#include <linux/bitmap.h> 50#include <linux/bitmap.h>
51#include <linux/percpu_ida.h>
51 52
52#include "vhost.h" 53#include "vhost.h"
53 54
54#define TCM_VHOST_VERSION "v0.1" 55#define TCM_VHOST_VERSION "v0.1"
55#define TCM_VHOST_NAMELEN 256 56#define TCM_VHOST_NAMELEN 256
56#define TCM_VHOST_MAX_CDB_SIZE 32 57#define TCM_VHOST_MAX_CDB_SIZE 32
58#define TCM_VHOST_DEFAULT_TAGS 256
59#define TCM_VHOST_PREALLOC_SGLS 2048
60#define TCM_VHOST_PREALLOC_PAGES 2048
57 61
58struct vhost_scsi_inflight { 62struct vhost_scsi_inflight {
59 /* Wait for the flush operation to finish */ 63 /* Wait for the flush operation to finish */
@@ -79,6 +83,7 @@ struct tcm_vhost_cmd {
79 u32 tvc_lun; 83 u32 tvc_lun;
80 /* Pointer to the SGL formatted memory from virtio-scsi */ 84 /* Pointer to the SGL formatted memory from virtio-scsi */
81 struct scatterlist *tvc_sgl; 85 struct scatterlist *tvc_sgl;
86 struct page **tvc_upages;
82 /* Pointer to response */ 87 /* Pointer to response */
83 struct virtio_scsi_cmd_resp __user *tvc_resp; 88 struct virtio_scsi_cmd_resp __user *tvc_resp;
84 /* Pointer to vhost_scsi for our device */ 89 /* Pointer to vhost_scsi for our device */
@@ -450,17 +455,16 @@ static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
450{ 455{
451 struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd, 456 struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
452 struct tcm_vhost_cmd, tvc_se_cmd); 457 struct tcm_vhost_cmd, tvc_se_cmd);
458 struct se_session *se_sess = se_cmd->se_sess;
453 459
454 if (tv_cmd->tvc_sgl_count) { 460 if (tv_cmd->tvc_sgl_count) {
455 u32 i; 461 u32 i;
456 for (i = 0; i < tv_cmd->tvc_sgl_count; i++) 462 for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
457 put_page(sg_page(&tv_cmd->tvc_sgl[i])); 463 put_page(sg_page(&tv_cmd->tvc_sgl[i]));
458
459 kfree(tv_cmd->tvc_sgl);
460 } 464 }
461 465
462 tcm_vhost_put_inflight(tv_cmd->inflight); 466 tcm_vhost_put_inflight(tv_cmd->inflight);
463 kfree(tv_cmd); 467 percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
464} 468}
465 469
466static int tcm_vhost_shutdown_session(struct se_session *se_sess) 470static int tcm_vhost_shutdown_session(struct se_session *se_sess)
@@ -704,7 +708,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
704} 708}
705 709
706static struct tcm_vhost_cmd * 710static struct tcm_vhost_cmd *
707vhost_scsi_allocate_cmd(struct vhost_virtqueue *vq, 711vhost_scsi_get_tag(struct vhost_virtqueue *vq,
708 struct tcm_vhost_tpg *tpg, 712 struct tcm_vhost_tpg *tpg,
709 struct virtio_scsi_cmd_req *v_req, 713 struct virtio_scsi_cmd_req *v_req,
710 u32 exp_data_len, 714 u32 exp_data_len,
@@ -712,18 +716,27 @@ vhost_scsi_allocate_cmd(struct vhost_virtqueue *vq,
712{ 716{
713 struct tcm_vhost_cmd *cmd; 717 struct tcm_vhost_cmd *cmd;
714 struct tcm_vhost_nexus *tv_nexus; 718 struct tcm_vhost_nexus *tv_nexus;
719 struct se_session *se_sess;
720 struct scatterlist *sg;
721 struct page **pages;
722 int tag;
715 723
716 tv_nexus = tpg->tpg_nexus; 724 tv_nexus = tpg->tpg_nexus;
717 if (!tv_nexus) { 725 if (!tv_nexus) {
718 pr_err("Unable to locate active struct tcm_vhost_nexus\n"); 726 pr_err("Unable to locate active struct tcm_vhost_nexus\n");
719 return ERR_PTR(-EIO); 727 return ERR_PTR(-EIO);
720 } 728 }
729 se_sess = tv_nexus->tvn_se_sess;
721 730
722 cmd = kzalloc(sizeof(struct tcm_vhost_cmd), GFP_ATOMIC); 731 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_KERNEL);
723 if (!cmd) { 732 cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag];
724 pr_err("Unable to allocate struct tcm_vhost_cmd\n"); 733 sg = cmd->tvc_sgl;
725 return ERR_PTR(-ENOMEM); 734 pages = cmd->tvc_upages;
726 } 735 memset(cmd, 0, sizeof(struct tcm_vhost_cmd));
736
737 cmd->tvc_sgl = sg;
738 cmd->tvc_upages = pages;
739 cmd->tvc_se_cmd.map_tag = tag;
727 cmd->tvc_tag = v_req->tag; 740 cmd->tvc_tag = v_req->tag;
728 cmd->tvc_task_attr = v_req->task_attr; 741 cmd->tvc_task_attr = v_req->task_attr;
729 cmd->tvc_exp_data_len = exp_data_len; 742 cmd->tvc_exp_data_len = exp_data_len;
@@ -740,7 +753,8 @@ vhost_scsi_allocate_cmd(struct vhost_virtqueue *vq,
740 * Returns the number of scatterlist entries used or -errno on error. 753 * Returns the number of scatterlist entries used or -errno on error.
741 */ 754 */
742static int 755static int
743vhost_scsi_map_to_sgl(struct scatterlist *sgl, 756vhost_scsi_map_to_sgl(struct tcm_vhost_cmd *tv_cmd,
757 struct scatterlist *sgl,
744 unsigned int sgl_count, 758 unsigned int sgl_count,
745 struct iovec *iov, 759 struct iovec *iov,
746 int write) 760 int write)
@@ -752,13 +766,25 @@ vhost_scsi_map_to_sgl(struct scatterlist *sgl,
752 struct page **pages; 766 struct page **pages;
753 int ret, i; 767 int ret, i;
754 768
769 if (sgl_count > TCM_VHOST_PREALLOC_SGLS) {
770 pr_err("vhost_scsi_map_to_sgl() psgl_count: %u greater than"
771 " preallocated TCM_VHOST_PREALLOC_SGLS: %u\n",
772 sgl_count, TCM_VHOST_PREALLOC_SGLS);
773 return -ENOBUFS;
774 }
775
755 pages_nr = iov_num_pages(iov); 776 pages_nr = iov_num_pages(iov);
756 if (pages_nr > sgl_count) 777 if (pages_nr > sgl_count)
757 return -ENOBUFS; 778 return -ENOBUFS;
758 779
759 pages = kmalloc(pages_nr * sizeof(struct page *), GFP_KERNEL); 780 if (pages_nr > TCM_VHOST_PREALLOC_PAGES) {
760 if (!pages) 781 pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
761 return -ENOMEM; 782 " preallocated TCM_VHOST_PREALLOC_PAGES: %u\n",
783 pages_nr, TCM_VHOST_PREALLOC_PAGES);
784 return -ENOBUFS;
785 }
786
787 pages = tv_cmd->tvc_upages;
762 788
763 ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages); 789 ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
764 /* No pages were pinned */ 790 /* No pages were pinned */
@@ -783,7 +809,6 @@ vhost_scsi_map_to_sgl(struct scatterlist *sgl,
783 } 809 }
784 810
785out: 811out:
786 kfree(pages);
787 return ret; 812 return ret;
788} 813}
789 814
@@ -807,24 +832,20 @@ vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd,
807 832
808 /* TODO overflow checking */ 833 /* TODO overflow checking */
809 834
810 sg = kmalloc(sizeof(cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC); 835 sg = cmd->tvc_sgl;
811 if (!sg) 836 pr_debug("%s sg %p sgl_count %u\n", __func__, sg, sgl_count);
812 return -ENOMEM;
813 pr_debug("%s sg %p sgl_count %u is_err %d\n", __func__,
814 sg, sgl_count, !sg);
815 sg_init_table(sg, sgl_count); 837 sg_init_table(sg, sgl_count);
816 838
817 cmd->tvc_sgl = sg;
818 cmd->tvc_sgl_count = sgl_count; 839 cmd->tvc_sgl_count = sgl_count;
819 840
820 pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count); 841 pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count);
821 for (i = 0; i < niov; i++) { 842 for (i = 0; i < niov; i++) {
822 ret = vhost_scsi_map_to_sgl(sg, sgl_count, &iov[i], write); 843 ret = vhost_scsi_map_to_sgl(cmd, sg, sgl_count, &iov[i],
844 write);
823 if (ret < 0) { 845 if (ret < 0) {
824 for (i = 0; i < cmd->tvc_sgl_count; i++) 846 for (i = 0; i < cmd->tvc_sgl_count; i++)
825 put_page(sg_page(&cmd->tvc_sgl[i])); 847 put_page(sg_page(&cmd->tvc_sgl[i]));
826 kfree(cmd->tvc_sgl); 848
827 cmd->tvc_sgl = NULL;
828 cmd->tvc_sgl_count = 0; 849 cmd->tvc_sgl_count = 0;
829 return ret; 850 return ret;
830 } 851 }
@@ -989,10 +1010,10 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
989 for (i = 0; i < data_num; i++) 1010 for (i = 0; i < data_num; i++)
990 exp_data_len += vq->iov[data_first + i].iov_len; 1011 exp_data_len += vq->iov[data_first + i].iov_len;
991 1012
992 cmd = vhost_scsi_allocate_cmd(vq, tpg, &v_req, 1013 cmd = vhost_scsi_get_tag(vq, tpg, &v_req,
993 exp_data_len, data_direction); 1014 exp_data_len, data_direction);
994 if (IS_ERR(cmd)) { 1015 if (IS_ERR(cmd)) {
995 vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n", 1016 vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
996 PTR_ERR(cmd)); 1017 PTR_ERR(cmd));
997 goto err_cmd; 1018 goto err_cmd;
998 } 1019 }
@@ -1654,11 +1675,31 @@ static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
1654 kfree(nacl); 1675 kfree(nacl);
1655} 1676}
1656 1677
1678static void tcm_vhost_free_cmd_map_res(struct tcm_vhost_nexus *nexus,
1679 struct se_session *se_sess)
1680{
1681 struct tcm_vhost_cmd *tv_cmd;
1682 unsigned int i;
1683
1684 if (!se_sess->sess_cmd_map)
1685 return;
1686
1687 for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) {
1688 tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i];
1689
1690 kfree(tv_cmd->tvc_sgl);
1691 kfree(tv_cmd->tvc_upages);
1692 }
1693}
1694
1657static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg, 1695static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
1658 const char *name) 1696 const char *name)
1659{ 1697{
1660 struct se_portal_group *se_tpg; 1698 struct se_portal_group *se_tpg;
1699 struct se_session *se_sess;
1661 struct tcm_vhost_nexus *tv_nexus; 1700 struct tcm_vhost_nexus *tv_nexus;
1701 struct tcm_vhost_cmd *tv_cmd;
1702 unsigned int i;
1662 1703
1663 mutex_lock(&tpg->tv_tpg_mutex); 1704 mutex_lock(&tpg->tv_tpg_mutex);
1664 if (tpg->tpg_nexus) { 1705 if (tpg->tpg_nexus) {
@@ -1675,14 +1716,37 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
1675 return -ENOMEM; 1716 return -ENOMEM;
1676 } 1717 }
1677 /* 1718 /*
1678 * Initialize the struct se_session pointer 1719 * Initialize the struct se_session pointer and setup tagpool
1720 * for struct tcm_vhost_cmd descriptors
1679 */ 1721 */
1680 tv_nexus->tvn_se_sess = transport_init_session(); 1722 tv_nexus->tvn_se_sess = transport_init_session_tags(
1723 TCM_VHOST_DEFAULT_TAGS,
1724 sizeof(struct tcm_vhost_cmd));
1681 if (IS_ERR(tv_nexus->tvn_se_sess)) { 1725 if (IS_ERR(tv_nexus->tvn_se_sess)) {
1682 mutex_unlock(&tpg->tv_tpg_mutex); 1726 mutex_unlock(&tpg->tv_tpg_mutex);
1683 kfree(tv_nexus); 1727 kfree(tv_nexus);
1684 return -ENOMEM; 1728 return -ENOMEM;
1685 } 1729 }
1730 se_sess = tv_nexus->tvn_se_sess;
1731 for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) {
1732 tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i];
1733
1734 tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) *
1735 TCM_VHOST_PREALLOC_SGLS, GFP_KERNEL);
1736 if (!tv_cmd->tvc_sgl) {
1737 mutex_unlock(&tpg->tv_tpg_mutex);
1738 pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
1739 goto out;
1740 }
1741
1742 tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
1743 TCM_VHOST_PREALLOC_PAGES, GFP_KERNEL);
1744 if (!tv_cmd->tvc_upages) {
1745 mutex_unlock(&tpg->tv_tpg_mutex);
1746 pr_err("Unable to allocate tv_cmd->tvc_upages\n");
1747 goto out;
1748 }
1749 }
1686 /* 1750 /*
1687 * Since we are running in 'demo mode' this call will generate a 1751 * Since we are running in 'demo mode' this call will generate a
1688 * struct se_node_acl for the tcm_vhost struct se_portal_group with 1752 * struct se_node_acl for the tcm_vhost struct se_portal_group with
@@ -1694,9 +1758,7 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
1694 mutex_unlock(&tpg->tv_tpg_mutex); 1758 mutex_unlock(&tpg->tv_tpg_mutex);
1695 pr_debug("core_tpg_check_initiator_node_acl() failed" 1759 pr_debug("core_tpg_check_initiator_node_acl() failed"
1696 " for %s\n", name); 1760 " for %s\n", name);
1697 transport_free_session(tv_nexus->tvn_se_sess); 1761 goto out;
1698 kfree(tv_nexus);
1699 return -ENOMEM;
1700 } 1762 }
1701 /* 1763 /*
1702 * Now register the TCM vhost virtual I_T Nexus as active with the 1764 * Now register the TCM vhost virtual I_T Nexus as active with the
@@ -1708,6 +1770,12 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
1708 1770
1709 mutex_unlock(&tpg->tv_tpg_mutex); 1771 mutex_unlock(&tpg->tv_tpg_mutex);
1710 return 0; 1772 return 0;
1773
1774out:
1775 tcm_vhost_free_cmd_map_res(tv_nexus, se_sess);
1776 transport_free_session(se_sess);
1777 kfree(tv_nexus);
1778 return -ENOMEM;
1711} 1779}
1712 1780
1713static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg) 1781static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
@@ -1747,6 +1815,8 @@ static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
1747 pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated" 1815 pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
1748 " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport), 1816 " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
1749 tv_nexus->tvn_se_sess->se_node_acl->initiatorname); 1817 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1818
1819 tcm_vhost_free_cmd_map_res(tv_nexus, se_sess);
1750 /* 1820 /*
1751 * Release the SCSI I_T Nexus to the emulated vhost Target Port 1821 * Release the SCSI I_T Nexus to the emulated vhost Target Port
1752 */ 1822 */
diff --git a/include/linux/percpu_ida.h b/include/linux/percpu_ida.h
new file mode 100644
index 000000000000..0b23edbee309
--- /dev/null
+++ b/include/linux/percpu_ida.h
@@ -0,0 +1,60 @@
1#ifndef __PERCPU_IDA_H__
2#define __PERCPU_IDA_H__
3
4#include <linux/types.h>
5#include <linux/bitops.h>
6#include <linux/init.h>
7#include <linux/spinlock_types.h>
8#include <linux/wait.h>
9#include <linux/cpumask.h>
10
11struct percpu_ida_cpu;
12
13struct percpu_ida {
14 /*
15 * number of tags available to be allocated, as passed to
16 * percpu_ida_init()
17 */
18 unsigned nr_tags;
19
20 struct percpu_ida_cpu __percpu *tag_cpu;
21
22 /*
23 * Bitmap of cpus that (may) have tags on their percpu freelists:
24 * steal_tags() uses this to decide when to steal tags, and which cpus
25 * to try stealing from.
26 *
27 * It's ok for a freelist to be empty when its bit is set - steal_tags()
28 * will just keep looking - but the bitmap _must_ be set whenever a
29 * percpu freelist does have tags.
30 */
31 cpumask_t cpus_have_tags;
32
33 struct {
34 spinlock_t lock;
35 /*
36 * When we go to steal tags from another cpu (see steal_tags()),
37 * we want to pick a cpu at random. Cycling through them every
38 * time we steal is a bit easier and more or less equivalent:
39 */
40 unsigned cpu_last_stolen;
41
42 /* For sleeping on allocation failure */
43 wait_queue_head_t wait;
44
45 /*
46 * Global freelist - it's a stack where nr_free points to the
47 * top
48 */
49 unsigned nr_free;
50 unsigned *freelist;
51 } ____cacheline_aligned_in_smp;
52};
53
54int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp);
55void percpu_ida_free(struct percpu_ida *pool, unsigned tag);
56
57void percpu_ida_destroy(struct percpu_ida *pool);
58int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags);
59
60#endif /* __PERCPU_IDA_H__ */
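
A minimal in-kernel usage sketch for the API above (hypothetical module code; with GFP_KERNEL, percpu_ida_alloc() blocks until a tag becomes available, matching how vhost-scsi uses it):

#include <linux/gfp.h>
#include <linux/percpu_ida.h>

static struct percpu_ida example_pool;

static int example_use_tags(void)
{
	int tag, err;

	err = percpu_ida_init(&example_pool, 256);	/* 256 tags total */
	if (err)
		return err;

	tag = percpu_ida_alloc(&example_pool, GFP_KERNEL);
	/* ... use 'tag' to index a preallocated descriptor array ... */
	percpu_ida_free(&example_pool, tag);

	percpu_ida_destroy(&example_pool);
	return 0;
}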
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index d477bfb73fb9..66d42edfb3fc 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -144,6 +144,7 @@ enum scsi_timeouts {
144#define ACCESS_CONTROL_IN 0x86 144#define ACCESS_CONTROL_IN 0x86
145#define ACCESS_CONTROL_OUT 0x87 145#define ACCESS_CONTROL_OUT 0x87
146#define READ_16 0x88 146#define READ_16 0x88
147#define COMPARE_AND_WRITE 0x89
147#define WRITE_16 0x8a 148#define WRITE_16 0x8a
148#define READ_ATTRIBUTE 0x8c 149#define READ_ATTRIBUTE 0x8c
149#define WRITE_ATTRIBUTE 0x8d 150#define WRITE_ATTRIBUTE 0x8d
diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
index e5d09d242ba3..a12589c4ee92 100644
--- a/include/target/iscsi/iscsi_transport.h
+++ b/include/target/iscsi/iscsi_transport.h
@@ -6,13 +6,13 @@ struct iscsit_transport {
6#define ISCSIT_TRANSPORT_NAME 16 6#define ISCSIT_TRANSPORT_NAME 16
7 char name[ISCSIT_TRANSPORT_NAME]; 7 char name[ISCSIT_TRANSPORT_NAME];
8 int transport_type; 8 int transport_type;
9 int priv_size;
9 struct module *owner; 10 struct module *owner;
10 struct list_head t_node; 11 struct list_head t_node;
11 int (*iscsit_setup_np)(struct iscsi_np *, struct __kernel_sockaddr_storage *); 12 int (*iscsit_setup_np)(struct iscsi_np *, struct __kernel_sockaddr_storage *);
12 int (*iscsit_accept_np)(struct iscsi_np *, struct iscsi_conn *); 13 int (*iscsit_accept_np)(struct iscsi_np *, struct iscsi_conn *);
13 void (*iscsit_free_np)(struct iscsi_np *); 14 void (*iscsit_free_np)(struct iscsi_np *);
14 void (*iscsit_free_conn)(struct iscsi_conn *); 15 void (*iscsit_free_conn)(struct iscsi_conn *);
15 struct iscsi_cmd *(*iscsit_alloc_cmd)(struct iscsi_conn *, gfp_t);
16 int (*iscsit_get_login_rx)(struct iscsi_conn *, struct iscsi_login *); 16 int (*iscsit_get_login_rx)(struct iscsi_conn *, struct iscsi_login *);
17 int (*iscsit_put_login_tx)(struct iscsi_conn *, struct iscsi_login *, u32); 17 int (*iscsit_put_login_tx)(struct iscsi_conn *, struct iscsi_login *, u32);
18 int (*iscsit_immediate_queue)(struct iscsi_conn *, struct iscsi_cmd *, int); 18 int (*iscsit_immediate_queue)(struct iscsi_conn *, struct iscsi_cmd *, int);
@@ -22,6 +22,11 @@ struct iscsit_transport {
22 int (*iscsit_queue_status)(struct iscsi_conn *, struct iscsi_cmd *); 22 int (*iscsit_queue_status)(struct iscsi_conn *, struct iscsi_cmd *);
23}; 23};
24 24
25static inline void *iscsit_priv_cmd(struct iscsi_cmd *cmd)
26{
27 return (void *)(cmd + 1);
28}
29
25/* 30/*
26 * From iscsi_target_transport.c 31 * From iscsi_target_transport.c
27 */ 32 */
@@ -92,3 +97,4 @@ extern int iscsit_tmr_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
92extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t); 97extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t);
93extern int iscsit_sequence_cmd(struct iscsi_conn *, struct iscsi_cmd *, 98extern int iscsit_sequence_cmd(struct iscsi_conn *, struct iscsi_cmd *,
94 unsigned char *, __be32); 99 unsigned char *, __be32);
100extern void iscsit_release_cmd(struct iscsi_cmd *);
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index ffa2696d64dc..5ebe21cd5d1c 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -39,7 +39,8 @@ struct se_subsystem_api {
39}; 39};
40 40
41struct sbc_ops { 41struct sbc_ops {
42 sense_reason_t (*execute_rw)(struct se_cmd *cmd); 42 sense_reason_t (*execute_rw)(struct se_cmd *cmd, struct scatterlist *,
43 u32, enum dma_data_direction);
43 sense_reason_t (*execute_sync_cache)(struct se_cmd *cmd); 44 sense_reason_t (*execute_sync_cache)(struct se_cmd *cmd);
44 sense_reason_t (*execute_write_same)(struct se_cmd *cmd); 45 sense_reason_t (*execute_write_same)(struct se_cmd *cmd);
45 sense_reason_t (*execute_write_same_unmap)(struct se_cmd *cmd); 46 sense_reason_t (*execute_write_same_unmap)(struct se_cmd *cmd);
@@ -73,6 +74,10 @@ int transport_set_vpd_ident(struct t10_vpd *, unsigned char *);
73/* core helpers also used by command snooping in pscsi */ 74/* core helpers also used by command snooping in pscsi */
74void *transport_kmap_data_sg(struct se_cmd *); 75void *transport_kmap_data_sg(struct se_cmd *);
75void transport_kunmap_data_sg(struct se_cmd *); 76void transport_kunmap_data_sg(struct se_cmd *);
77/* core helpers also used by xcopy during internal command setup */
78int target_alloc_sgl(struct scatterlist **, unsigned int *, u32, bool);
79sense_reason_t transport_generic_map_mem_to_cmd(struct se_cmd *,
80 struct scatterlist *, u32, struct scatterlist *, u32);
76 81
77void array_free(void *array, int n); 82void array_free(void *array, int n);
78 83
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index e34fc904f2e1..5bdb8b7d2a69 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -5,11 +5,12 @@
5#include <linux/configfs.h> 5#include <linux/configfs.h>
6#include <linux/dma-mapping.h> 6#include <linux/dma-mapping.h>
7#include <linux/blkdev.h> 7#include <linux/blkdev.h>
8#include <linux/percpu_ida.h>
8#include <scsi/scsi_cmnd.h> 9#include <scsi/scsi_cmnd.h>
9#include <net/sock.h> 10#include <net/sock.h>
10#include <net/tcp.h> 11#include <net/tcp.h>
11 12
12#define TARGET_CORE_MOD_VERSION "v4.1.0-rc2-ml" 13#define TARGET_CORE_MOD_VERSION "v4.1.0"
13#define TARGET_CORE_VERSION TARGET_CORE_MOD_VERSION 14#define TARGET_CORE_VERSION TARGET_CORE_MOD_VERSION
14 15
15/* Maximum Number of LUNs per Target Portal Group */ 16/* Maximum Number of LUNs per Target Portal Group */
@@ -96,6 +97,10 @@
96 * block/blk-lib.c:blkdev_issue_discard() 97 * block/blk-lib.c:blkdev_issue_discard()
97 */ 98 */
98#define DA_EMULATE_TPWS 0 99#define DA_EMULATE_TPWS 0
100/* Emulation for CompareAndWrite (AtomicTestandSet) by default */
101#define DA_EMULATE_CAW 1
102/* Emulation for 3rd Party Copy (ExtendedCopy) by default */
103#define DA_EMULATE_3PC 1
99/* No Emulation for PSCSI by default */ 104/* No Emulation for PSCSI by default */
100#define DA_EMULATE_ALUA 0 105#define DA_EMULATE_ALUA 0
101/* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */ 106/* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */
@@ -158,6 +163,9 @@ enum se_cmd_flags_table {
158 SCF_ALUA_NON_OPTIMIZED = 0x00008000, 163 SCF_ALUA_NON_OPTIMIZED = 0x00008000,
159 SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000, 164 SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000,
160 SCF_ACK_KREF = 0x00040000, 165 SCF_ACK_KREF = 0x00040000,
166 SCF_COMPARE_AND_WRITE = 0x00080000,
167 SCF_COMPARE_AND_WRITE_POST = 0x00100000,
168 SCF_CMD_XCOPY_PASSTHROUGH = 0x00200000,
161}; 169};
162 170
163/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */ 171/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
@@ -196,6 +204,7 @@ enum tcm_sense_reason_table {
196 TCM_ADDRESS_OUT_OF_RANGE = R(0x11), 204 TCM_ADDRESS_OUT_OF_RANGE = R(0x11),
197 TCM_OUT_OF_RESOURCES = R(0x12), 205 TCM_OUT_OF_RESOURCES = R(0x12),
198 TCM_PARAMETER_LIST_LENGTH_ERROR = R(0x13), 206 TCM_PARAMETER_LIST_LENGTH_ERROR = R(0x13),
207 TCM_MISCOMPARE_VERIFY = R(0x14),
199#undef R 208#undef R
200}; 209};
201 210
@@ -415,6 +424,8 @@ struct se_cmd {
415 enum dma_data_direction data_direction; 424 enum dma_data_direction data_direction;
416 /* For SAM Task Attribute */ 425 /* For SAM Task Attribute */
417 int sam_task_attr; 426 int sam_task_attr;
427 /* Used for se_sess->sess_tag_pool */
428 unsigned int map_tag;
418 /* Transport protocol dependent state, see transport_state_table */ 429 /* Transport protocol dependent state, see transport_state_table */
419 enum transport_state_table t_state; 430 enum transport_state_table t_state;
420 unsigned cmd_wait_set:1; 431 unsigned cmd_wait_set:1;
@@ -444,11 +455,14 @@ struct se_cmd {
444 struct kref cmd_kref; 455 struct kref cmd_kref;
445 struct target_core_fabric_ops *se_tfo; 456 struct target_core_fabric_ops *se_tfo;
446 sense_reason_t (*execute_cmd)(struct se_cmd *); 457 sense_reason_t (*execute_cmd)(struct se_cmd *);
447 void (*transport_complete_callback)(struct se_cmd *); 458 sense_reason_t (*execute_rw)(struct se_cmd *, struct scatterlist *,
459 u32, enum dma_data_direction);
460 sense_reason_t (*transport_complete_callback)(struct se_cmd *);
448 461
449 unsigned char *t_task_cdb; 462 unsigned char *t_task_cdb;
450 unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE]; 463 unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
451 unsigned long long t_task_lba; 464 unsigned long long t_task_lba;
465 unsigned int t_task_nolb;
452 unsigned int transport_state; 466 unsigned int transport_state;
453#define CMD_T_ABORTED (1 << 0) 467#define CMD_T_ABORTED (1 << 0)
454#define CMD_T_ACTIVE (1 << 1) 468#define CMD_T_ACTIVE (1 << 1)
@@ -469,7 +483,9 @@ struct se_cmd {
469 struct work_struct work; 483 struct work_struct work;
470 484
471 struct scatterlist *t_data_sg; 485 struct scatterlist *t_data_sg;
486 struct scatterlist *t_data_sg_orig;
472 unsigned int t_data_nents; 487 unsigned int t_data_nents;
488 unsigned int t_data_nents_orig;
473 void *t_data_vmap; 489 void *t_data_vmap;
474 struct scatterlist *t_bidi_data_sg; 490 struct scatterlist *t_bidi_data_sg;
475 unsigned int t_bidi_data_nents; 491 unsigned int t_bidi_data_nents;
@@ -536,6 +552,8 @@ struct se_session {
536 struct list_head sess_wait_list; 552 struct list_head sess_wait_list;
537 spinlock_t sess_cmd_lock; 553 spinlock_t sess_cmd_lock;
538 struct kref sess_kref; 554 struct kref sess_kref;
555 void *sess_cmd_map;
556 struct percpu_ida sess_tag_pool;
539}; 557};
540 558
541struct se_device; 559struct se_device;
@@ -589,6 +607,8 @@ struct se_dev_attrib {
589 int emulate_tas; 607 int emulate_tas;
590 int emulate_tpu; 608 int emulate_tpu;
591 int emulate_tpws; 609 int emulate_tpws;
610 int emulate_caw;
611 int emulate_3pc;
592 int enforce_pr_isids; 612 int enforce_pr_isids;
593 int is_nonrot; 613 int is_nonrot;
594 int emulate_rest_reord; 614 int emulate_rest_reord;
@@ -656,6 +676,7 @@ struct se_device {
656 spinlock_t se_port_lock; 676 spinlock_t se_port_lock;
657 spinlock_t se_tmr_lock; 677 spinlock_t se_tmr_lock;
658 spinlock_t qf_cmd_lock; 678 spinlock_t qf_cmd_lock;
679 struct semaphore caw_sem;
659 /* Used for legacy SPC-2 reservations */ 680 /* Used for legacy SPC-2 reservations */
660 struct se_node_acl *dev_reserved_node_acl; 681 struct se_node_acl *dev_reserved_node_acl;
661 /* Used for ALUA Logical Unit Group membership */ 682 /* Used for ALUA Logical Unit Group membership */
@@ -669,6 +690,7 @@ struct se_device {
669 struct list_head delayed_cmd_list; 690 struct list_head delayed_cmd_list;
670 struct list_head state_list; 691 struct list_head state_list;
671 struct list_head qf_cmd_list; 692 struct list_head qf_cmd_list;
693 struct list_head g_dev_node;
672 /* Pointer to associated SE HBA */ 694 /* Pointer to associated SE HBA */
673 struct se_hba *se_hba; 695 struct se_hba *se_hba;
674 /* T10 Inquiry and VPD WWN Information */ 696 /* T10 Inquiry and VPD WWN Information */
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 7a16178424f9..882b650e32be 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -84,6 +84,9 @@ struct target_core_fabric_ops {
84}; 84};
85 85
86struct se_session *transport_init_session(void); 86struct se_session *transport_init_session(void);
87int transport_alloc_session_tags(struct se_session *, unsigned int,
88 unsigned int);
89struct se_session *transport_init_session_tags(unsigned int, unsigned int);
87void __transport_register_session(struct se_portal_group *, 90void __transport_register_session(struct se_portal_group *,
88 struct se_node_acl *, struct se_session *, void *); 91 struct se_node_acl *, struct se_session *, void *);
89void transport_register_session(struct se_portal_group *, 92void transport_register_session(struct se_portal_group *,
@@ -131,6 +134,7 @@ int core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
131void core_tmr_release_req(struct se_tmr_req *); 134void core_tmr_release_req(struct se_tmr_req *);
132int transport_generic_handle_tmr(struct se_cmd *); 135int transport_generic_handle_tmr(struct se_cmd *);
133void transport_generic_request_failure(struct se_cmd *, sense_reason_t); 136void transport_generic_request_failure(struct se_cmd *, sense_reason_t);
137void __target_execute_cmd(struct se_cmd *);
134int transport_lookup_tmr_lun(struct se_cmd *, u32); 138int transport_lookup_tmr_lun(struct se_cmd *, u32);
135 139
136struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *, 140struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *,
@@ -175,4 +179,30 @@ u32 iscsi_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *
175char *iscsi_parse_pr_out_transport_id(struct se_portal_group *, const char *, 179char *iscsi_parse_pr_out_transport_id(struct se_portal_group *, const char *,
176 u32 *, char **); 180 u32 *, char **);
177 181
+/*
+ * The LIO target core uses DMA_TO_DEVICE to mean that data is going
+ * to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean
+ * that data is coming from the target (eg handling a READ). However,
+ * this is just the opposite of what we have to tell the DMA mapping
+ * layer -- eg when handling a READ, the HBA will have to DMA the data
+ * out of memory so it can send it to the initiator, which means we
+ * need to use DMA_TO_DEVICE when we map the data.
+ */
+static inline enum dma_data_direction
+target_reverse_dma_direction(struct se_cmd *se_cmd)
+{
+	if (se_cmd->se_cmd_flags & SCF_BIDI)
+		return DMA_BIDIRECTIONAL;
+
+	switch (se_cmd->data_direction) {
+	case DMA_TO_DEVICE:
+		return DMA_FROM_DEVICE;
+	case DMA_FROM_DEVICE:
+		return DMA_TO_DEVICE;
+	case DMA_NONE:
+	default:
+		return DMA_NONE;
+	}
+}
+
 #endif /* TARGET_CORE_FABRICH */
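The target_reverse_dma_direction() helper added above matters to any fabric driver that DMA-maps a command's buffers itself: the direction handed to the DMA layer must be the opposite of the one the target core tracks. A minimal sketch of that call pattern, assuming an RDMA fabric along the lines of isert (the function name below is illustrative):

/*
 * Illustrative sketch only: map a command's scatterlist for RDMA,
 * reversing the target-core direction for the DMA mapping layer.
 */
static int example_map_cmd_sg(struct ib_device *ib_dev, struct se_cmd *se_cmd)
{
	enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);
	int nents;

	nents = ib_dma_map_sg(ib_dev, se_cmd->t_data_sg,
			      se_cmd->t_data_nents, dir);
	return nents ? nents : -EIO;
}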
diff --git a/lib/Makefile b/lib/Makefile
index f2cb3082697c..f3bb2cb98adf 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -13,7 +13,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \
 	 proportions.o flex_proportions.o prio_heap.o ratelimit.o show_mem.o \
 	 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
-	 earlycpio.o percpu-refcount.o
+	 earlycpio.o percpu-refcount.o percpu_ida.o
 
 obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o
 lib-$(CONFIG_MMU) += ioremap.o
@@ -25,7 +25,8 @@ obj-y += lockref.o
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
 	gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \
-	bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o
+	bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
+	percpu_ida.o
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
 obj-y += kstrtox.o
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
new file mode 100644
index 000000000000..bab1ba2a4c71
--- /dev/null
+++ b/lib/percpu_ida.c
@@ -0,0 +1,335 @@
+/*
+ * Percpu IDA library
+ *
+ * Copyright (C) 2013 Datera, Inc. Kent Overstreet
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/hardirq.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/percpu.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/percpu_ida.h>
+
+/*
+ * Number of tags we move between the percpu freelist and the global freelist at
+ * a time
+ */
+#define IDA_PCPU_BATCH_MOVE	32U
+
+/* Max size of percpu freelist, */
+#define IDA_PCPU_SIZE		((IDA_PCPU_BATCH_MOVE * 3) / 2)
+
+struct percpu_ida_cpu {
+	/*
+	 * Even though this is percpu, we need a lock for tag stealing by remote
+	 * CPUs:
+	 */
+	spinlock_t		lock;
+
+	/* nr_free/freelist form a stack of free IDs */
+	unsigned		nr_free;
+	unsigned		freelist[];
+};
+
+static inline void move_tags(unsigned *dst, unsigned *dst_nr,
+			     unsigned *src, unsigned *src_nr,
+			     unsigned nr)
+{
+	*src_nr -= nr;
+	memcpy(dst + *dst_nr, src + *src_nr, sizeof(unsigned) * nr);
+	*dst_nr += nr;
+}
+
+/*
+ * Try to steal tags from a remote cpu's percpu freelist.
+ *
+ * We first check how many percpu freelists have tags - we don't steal tags
+ * unless enough percpu freelists have tags on them that it's possible more than
+ * half the total tags could be stuck on remote percpu freelists.
+ *
+ * Then we iterate through the cpus until we find some tags - we don't attempt
+ * to find the "best" cpu to steal from, to keep cacheline bouncing to a
+ * minimum.
+ */
+static inline void steal_tags(struct percpu_ida *pool,
+			      struct percpu_ida_cpu *tags)
+{
+	unsigned cpus_have_tags, cpu = pool->cpu_last_stolen;
+	struct percpu_ida_cpu *remote;
+
+	for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags);
+	     cpus_have_tags * IDA_PCPU_SIZE > pool->nr_tags / 2;
+	     cpus_have_tags--) {
+		cpu = cpumask_next(cpu, &pool->cpus_have_tags);
+
+		if (cpu >= nr_cpu_ids) {
+			cpu = cpumask_first(&pool->cpus_have_tags);
+			if (cpu >= nr_cpu_ids)
+				BUG();
+		}
+
+		pool->cpu_last_stolen = cpu;
+		remote = per_cpu_ptr(pool->tag_cpu, cpu);
+
+		cpumask_clear_cpu(cpu, &pool->cpus_have_tags);
+
+		if (remote == tags)
+			continue;
+
+		spin_lock(&remote->lock);
+
+		if (remote->nr_free) {
+			memcpy(tags->freelist,
+			       remote->freelist,
+			       sizeof(unsigned) * remote->nr_free);
+
+			tags->nr_free = remote->nr_free;
+			remote->nr_free = 0;
+		}
+
+		spin_unlock(&remote->lock);
+
+		if (tags->nr_free)
+			break;
+	}
+}
+
+/*
+ * Pop up to IDA_PCPU_BATCH_MOVE IDs off the global freelist, and push them onto
+ * our percpu freelist:
+ */
+static inline void alloc_global_tags(struct percpu_ida *pool,
+				     struct percpu_ida_cpu *tags)
+{
+	move_tags(tags->freelist, &tags->nr_free,
+		  pool->freelist, &pool->nr_free,
+		  min(pool->nr_free, IDA_PCPU_BATCH_MOVE));
+}
+
+static inline unsigned alloc_local_tag(struct percpu_ida *pool,
+				       struct percpu_ida_cpu *tags)
+{
+	int tag = -ENOSPC;
+
+	spin_lock(&tags->lock);
+	if (tags->nr_free)
+		tag = tags->freelist[--tags->nr_free];
+	spin_unlock(&tags->lock);
+
+	return tag;
+}
+
+/**
+ * percpu_ida_alloc - allocate a tag
+ * @pool: pool to allocate from
+ * @gfp: gfp flags
+ *
+ * Returns a tag - an integer in the range [0..nr_tags) (passed to
+ * tag_pool_init()), or otherwise -ENOSPC on allocation failure.
+ *
+ * Safe to be called from interrupt context (assuming it isn't passed
+ * __GFP_WAIT, of course).
+ *
+ * @gfp indicates whether or not to wait until a free id is available (it's not
+ * used for internal memory allocations); thus if passed __GFP_WAIT we may sleep
+ * however long it takes until another thread frees an id (same semantics as a
+ * mempool).
+ *
+ * Will not fail if passed __GFP_WAIT.
+ */
+int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp)
+{
+	DEFINE_WAIT(wait);
+	struct percpu_ida_cpu *tags;
+	unsigned long flags;
+	int tag;
+
+	local_irq_save(flags);
+	tags = this_cpu_ptr(pool->tag_cpu);
+
+	/* Fastpath */
+	tag = alloc_local_tag(pool, tags);
+	if (likely(tag >= 0)) {
+		local_irq_restore(flags);
+		return tag;
+	}
+
+	while (1) {
+		spin_lock(&pool->lock);
+
+		/*
+		 * prepare_to_wait() must come before steal_tags(), in case
+		 * percpu_ida_free() on another cpu flips a bit in
+		 * cpus_have_tags
+		 *
+		 * global lock held and irqs disabled, don't need percpu lock
+		 */
+		prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
+
+		if (!tags->nr_free)
+			alloc_global_tags(pool, tags);
+		if (!tags->nr_free)
+			steal_tags(pool, tags);
+
+		if (tags->nr_free) {
+			tag = tags->freelist[--tags->nr_free];
+			if (tags->nr_free)
+				cpumask_set_cpu(smp_processor_id(),
+						&pool->cpus_have_tags);
+		}
+
+		spin_unlock(&pool->lock);
+		local_irq_restore(flags);
+
+		if (tag >= 0 || !(gfp & __GFP_WAIT))
+			break;
+
+		schedule();
+
+		local_irq_save(flags);
+		tags = this_cpu_ptr(pool->tag_cpu);
+	}
+
+	finish_wait(&pool->wait, &wait);
+	return tag;
+}
+EXPORT_SYMBOL_GPL(percpu_ida_alloc);
+
+/**
+ * percpu_ida_free - free a tag
+ * @pool: pool @tag was allocated from
+ * @tag: a tag previously allocated with percpu_ida_alloc()
+ *
+ * Safe to be called from interrupt context.
+ */
+void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
+{
+	struct percpu_ida_cpu *tags;
+	unsigned long flags;
+	unsigned nr_free;
+
+	BUG_ON(tag >= pool->nr_tags);
+
+	local_irq_save(flags);
+	tags = this_cpu_ptr(pool->tag_cpu);
+
+	spin_lock(&tags->lock);
+	tags->freelist[tags->nr_free++] = tag;
+
+	nr_free = tags->nr_free;
+	spin_unlock(&tags->lock);
+
+	if (nr_free == 1) {
+		cpumask_set_cpu(smp_processor_id(),
+				&pool->cpus_have_tags);
+		wake_up(&pool->wait);
+	}
+
+	if (nr_free == IDA_PCPU_SIZE) {
+		spin_lock(&pool->lock);
+
+		/*
+		 * Global lock held and irqs disabled, don't need percpu
+		 * lock
+		 */
+		if (tags->nr_free == IDA_PCPU_SIZE) {
+			move_tags(pool->freelist, &pool->nr_free,
+				  tags->freelist, &tags->nr_free,
+				  IDA_PCPU_BATCH_MOVE);
+
+			wake_up(&pool->wait);
+		}
+		spin_unlock(&pool->lock);
+	}
+
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(percpu_ida_free);
+
+/**
+ * percpu_ida_destroy - release a tag pool's resources
+ * @pool: pool to free
+ *
+ * Frees the resources allocated by percpu_ida_init().
+ */
+void percpu_ida_destroy(struct percpu_ida *pool)
+{
+	free_percpu(pool->tag_cpu);
+	free_pages((unsigned long) pool->freelist,
+		   get_order(pool->nr_tags * sizeof(unsigned)));
+}
+EXPORT_SYMBOL_GPL(percpu_ida_destroy);
+
+/**
+ * percpu_ida_init - initialize a percpu tag pool
+ * @pool: pool to initialize
+ * @nr_tags: number of tags that will be available for allocation
+ *
+ * Initializes @pool so that it can be used to allocate tags - integers in the
+ * range [0, nr_tags). Typically, they'll be used by driver code to refer to a
+ * preallocated array of tag structures.
+ *
+ * Allocation is percpu, but sharding is limited by nr_tags - for best
+ * performance, the workload should not span more cpus than nr_tags / 128.
+ */
+int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags)
+{
+	unsigned i, cpu, order;
+
+	memset(pool, 0, sizeof(*pool));
+
+	init_waitqueue_head(&pool->wait);
+	spin_lock_init(&pool->lock);
+	pool->nr_tags = nr_tags;
+
+	/* Guard against overflow */
+	if (nr_tags > (unsigned) INT_MAX + 1) {
+		pr_err("percpu_ida_init(): nr_tags too large\n");
+		return -EINVAL;
+	}
+
+	order = get_order(nr_tags * sizeof(unsigned));
+	pool->freelist = (void *) __get_free_pages(GFP_KERNEL, order);
+	if (!pool->freelist)
+		return -ENOMEM;
+
+	for (i = 0; i < nr_tags; i++)
+		pool->freelist[i] = i;
+
+	pool->nr_free = nr_tags;
+
+	pool->tag_cpu = __alloc_percpu(sizeof(struct percpu_ida_cpu) +
+				       IDA_PCPU_SIZE * sizeof(unsigned),
+				       sizeof(unsigned));
+	if (!pool->tag_cpu)
+		goto err;
+
+	for_each_possible_cpu(cpu)
+		spin_lock_init(&per_cpu_ptr(pool->tag_cpu, cpu)->lock);
+
+	return 0;
+err:
+	percpu_ida_destroy(pool);
+	return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(percpu_ida_init);
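Taken together, the API above gives a consumer a fixed-depth tag space with percpu fast paths. A hedged sketch of the full pool lifecycle, in the spirit of how the target and vhost conversions in this pull use it for per-command descriptors (the names and tag depth below are illustrative, not taken from those drivers):

#include <linux/percpu_ida.h>

#define EXAMPLE_NR_TAGS	1024		/* illustrative pool depth */

static struct percpu_ida example_pool;

static int example_setup(void)
{
	/* Pre-populates tags [0, EXAMPLE_NR_TAGS) on the global freelist */
	return percpu_ida_init(&example_pool, EXAMPLE_NR_TAGS);
}

static void example_io(void)
{
	/* GFP_KERNEL includes __GFP_WAIT, so this sleeps instead of failing */
	int tag = percpu_ida_alloc(&example_pool, GFP_KERNEL);

	/* ... use 'tag' to index a preallocated array of command state ... */

	percpu_ida_free(&example_pool, tag);
}

static void example_teardown(void)
{
	percpu_ida_destroy(&example_pool);
}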