author		Linus Torvalds <torvalds@linux-foundation.org>	2016-01-20 20:20:53 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-01-20 20:20:53 -0500
commit		71e4634e00119b2fb8dd0da99b3f5ebbb49cc872 (patch)
tree		c42a37a98e99aa9ce8a2af79710e295704782a16
parent		19a3dd7621af01b7e44a70d16beab59326c38824 (diff)
parent		fab683eb12e71ac6057dc42dc7d1e5e71e5cba5e (diff)
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
Pull SCSI target updates from Nicholas Bellinger:
 "The highlights this round include:

   - Introduce configfs support for unlocked configfs_depend_item()
     (krzysztof + andrzej)

   - Conversion of usb-gadget target driver to new function registration
     interface (andrzej + sebastian)

   - Enable qla2xxx FC target mode support for Extended Logins
     (himanshu + giridhar)

   - Enable qla2xxx FC target mode support for Exchange Offload
     (himanshu + giridhar)

   - Add qla2xxx FC target mode irq affinity notification + selective
     command queuing (quinn + himanshu)

   - Fix iscsi-target deadlock in se_node_acl configfs deletion
     (sagi + nab)

   - Convert se_node_acl configfs deletion + se_node_acl->queue_depth to
     proper se_session->sess_kref + target_get_session() usage
     (hch + sagi + nab)

   - Fix long-standing race between se_node_acl->acl_kref get and
     get_initiator_node_acl() lookup (hch + nab)

   - Fix target/user block-size handling, and make sure netlink reaches
     all network namespaces (sheng + andy)

  Note that an outstanding bug-fix series for remote I_T nexus port TMR
  LUN_RESET has been posted and is still being tested, and will likely
  become post -rc1 material at this point"

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (56 commits)
  scsi: qla2xxxx: avoid type mismatch in comparison
  target/user: Make sure netlink would reach all network namespaces
  target: Obtain se_node_acl->acl_kref during get_initiator_node_acl
  target: Convert ACL change queue_depth se_session reference usage
  iscsi-target: Fix potential dead-lock during node acl delete
  ib_srpt: Convert acl lookup to modern get_initiator_node_acl usage
  tcm_fc: Convert acl lookup to modern get_initiator_node_acl usage
  tcm_fc: Wait for command completion before freeing a session
  target: Fix a memory leak in target_dev_lba_map_store()
  target: Support aborting tasks with a 64-bit tag
  usb/gadget: Remove set-but-not-used variables
  target: Remove an unused variable
  target: Fix indentation in target_core_configfs.c
  target/user: Allow user to set block size before enabling device
  iser-target: Fix non negative ERR_PTR isert_device_get usage
  target/fcoe: Add tag support to tcm_fc
  qla2xxx: Check for online flag instead of active reset when transmitting responses
  qla2xxx: Set all queues to 4k
  qla2xxx: Disable ZIO at start time.
  qla2xxx: Move atioq to a different lock to reduce lock contention
  ...
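For readers unfamiliar with the configfs change called out in the first highlight: this merge adds an "unlocked" variant of the configfs dependency API. A rough usage sketch follows (illustrative only, not taken from this commit; assumes <linux/configfs.h> and valid `subsys` and `target` pointers are in scope):

	/*
	 * Sketch: pin a configfs item from a context where the older
	 * locked configfs_depend_item() could deadlock against a
	 * concurrent rmdir of the item.
	 */
	int ret;

	ret = configfs_depend_item_unlocked(subsys, target);
	if (ret)
		return ret;
	/* ... target cannot be rmdir'ed while the dependency is held ... */
	configfs_undepend_item_unlocked(target);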
-rw-r--r--	Documentation/ABI/testing/configfs-usb-gadget-tcm	6
-rw-r--r--	drivers/infiniband/ulp/isert/ib_isert.c	7
-rw-r--r--	drivers/infiniband/ulp/srpt/ib_srpt.c	103
-rw-r--r--	drivers/infiniband/ulp/srpt/ib_srpt.h	8
-rw-r--r--	drivers/scsi/qla2xxx/qla_attr.c	36
-rw-r--r--	drivers/scsi/qla2xxx/qla_dbg.c	19
-rw-r--r--	drivers/scsi/qla2xxx/qla_def.h	83
-rw-r--r--	drivers/scsi/qla2xxx/qla_dfs.c	106
-rw-r--r--	drivers/scsi/qla2xxx/qla_gbl.h	18
-rw-r--r--	drivers/scsi/qla2xxx/qla_init.c	58
-rw-r--r--	drivers/scsi/qla2xxx/qla_inline.h	2
-rw-r--r--	drivers/scsi/qla2xxx/qla_iocb.c	188
-rw-r--r--	drivers/scsi/qla2xxx/qla_isr.c	126
-rw-r--r--	drivers/scsi/qla2xxx/qla_mbx.c	265
-rw-r--r--	drivers/scsi/qla2xxx/qla_os.c	165
-rw-r--r--	drivers/scsi/qla2xxx/qla_target.c	645
-rw-r--r--	drivers/scsi/qla2xxx/qla_target.h	34
-rw-r--r--	drivers/scsi/qla2xxx/tcm_qla2xxx.c	38
-rw-r--r--	drivers/target/iscsi/iscsi_target.c	7
-rw-r--r--	drivers/target/iscsi/iscsi_target_configfs.c	17
-rw-r--r--	drivers/target/iscsi/iscsi_target_erl1.c	7
-rw-r--r--	drivers/target/iscsi/iscsi_target_parameters.c	2
-rw-r--r--	drivers/target/iscsi/iscsi_target_tmr.c	2
-rw-r--r--	drivers/target/iscsi/iscsi_target_tpg.c	10
-rw-r--r--	drivers/target/iscsi/iscsi_target_tpg.h	2
-rw-r--r--	drivers/target/loopback/tcm_loop.c	14
-rw-r--r--	drivers/target/sbp/sbp_target.c	2
-rw-r--r--	drivers/target/target_core_configfs.c	47
-rw-r--r--	drivers/target/target_core_device.c	2
-rw-r--r--	drivers/target/target_core_iblock.c	2
-rw-r--r--	drivers/target/target_core_pr.c	11
-rw-r--r--	drivers/target/target_core_sbc.c	10
-rw-r--r--	drivers/target/target_core_spc.c	12
-rw-r--r--	drivers/target/target_core_tmr.c	2
-rw-r--r--	drivers/target/target_core_tpg.c	197
-rw-r--r--	drivers/target/target_core_transport.c	33
-rw-r--r--	drivers/target/target_core_user.c	9
-rw-r--r--	drivers/target/tcm_fc/tcm_fc.h	1
-rw-r--r--	drivers/target/tcm_fc/tfc_conf.c	47
-rw-r--r--	drivers/target/tcm_fc/tfc_io.c	8
-rw-r--r--	drivers/target/tcm_fc/tfc_sess.c	42
-rw-r--r--	drivers/usb/gadget/Kconfig	17
-rw-r--r--	drivers/usb/gadget/function/Makefile	2
-rw-r--r--	drivers/usb/gadget/function/f_tcm.c	2381
-rw-r--r--	drivers/usb/gadget/function/tcm.h (renamed from drivers/usb/gadget/legacy/tcm_usb_gadget.h)	9
-rw-r--r--	drivers/usb/gadget/function/u_tcm.h	50
-rw-r--r--	drivers/usb/gadget/legacy/Kconfig	1
-rw-r--r--	drivers/usb/gadget/legacy/tcm_usb_gadget.c	2165
-rw-r--r--	fs/configfs/dir.c	150
-rw-r--r--	fs/ocfs2/cluster/nodemanager.c	2
-rw-r--r--	include/linux/configfs.h	21
-rw-r--r--	include/target/target_core_base.h	6
-rw-r--r--	include/target/target_core_fabric.h	9
53 files changed, 4507 insertions(+), 2699 deletions(-)
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-tcm b/Documentation/ABI/testing/configfs-usb-gadget-tcm
new file mode 100644
index 000000000000..a29ed2dd6173
--- /dev/null
+++ b/Documentation/ABI/testing/configfs-usb-gadget-tcm
@@ -0,0 +1,6 @@
+What:		/config/usb-gadget/gadget/functions/tcm.name
+Date:		Dec 2015
+KernelVersion:	4.5
+Description:
+		There are no attributes because all the configuration
+		is performed in the "target" subsystem of configfs.
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 8a51c3b5d657..468c5e132563 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -350,7 +350,7 @@ isert_create_device_ib_res(struct isert_device *device)
 	dev_attr = &device->dev_attr;
 	ret = isert_query_device(device->ib_device, dev_attr);
 	if (ret)
-		return ret;
+		goto out;
 
 	/* asign function handlers */
 	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
@@ -366,7 +366,7 @@ isert_create_device_ib_res(struct isert_device *device)
 
 	ret = isert_alloc_comps(device, dev_attr);
 	if (ret)
-		return ret;
+		goto out;
 
 	device->pd = ib_alloc_pd(device->ib_device);
 	if (IS_ERR(device->pd)) {
@@ -384,6 +384,9 @@ isert_create_device_ib_res(struct isert_device *device)
 
 out_cq:
 	isert_free_comps(device);
+out:
+	if (ret > 0)
+		ret = -EINVAL;
 	return ret;
 }
 
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 2e2fe818ca9f..bc5470c43d26 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -2370,31 +2370,6 @@ static void srpt_release_channel_work(struct work_struct *w)
 	kfree(ch);
 }
 
-static struct srpt_node_acl *__srpt_lookup_acl(struct srpt_port *sport,
-			u8 i_port_id[16])
-{
-	struct srpt_node_acl *nacl;
-
-	list_for_each_entry(nacl, &sport->port_acl_list, list)
-		if (memcmp(nacl->i_port_id, i_port_id,
-			   sizeof(nacl->i_port_id)) == 0)
-			return nacl;
-
-	return NULL;
-}
-
-static struct srpt_node_acl *srpt_lookup_acl(struct srpt_port *sport,
-			u8 i_port_id[16])
-{
-	struct srpt_node_acl *nacl;
-
-	spin_lock_irq(&sport->port_acl_lock);
-	nacl = __srpt_lookup_acl(sport, i_port_id);
-	spin_unlock_irq(&sport->port_acl_lock);
-
-	return nacl;
-}
-
 /**
  * srpt_cm_req_recv() - Process the event IB_CM_REQ_RECEIVED.
  *
@@ -2412,10 +2387,10 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
 	struct srp_login_rej *rej;
 	struct ib_cm_rep_param *rep_param;
 	struct srpt_rdma_ch *ch, *tmp_ch;
-	struct srpt_node_acl *nacl;
+	struct se_node_acl *se_acl;
 	u32 it_iu_len;
-	int i;
-	int ret = 0;
+	int i, ret = 0;
+	unsigned char *p;
 
 	WARN_ON_ONCE(irqs_disabled());
 
@@ -2565,33 +2540,47 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
 		       " RTR failed (error code = %d)\n", ret);
 		goto destroy_ib;
 	}
+
 	/*
-	 * Use the initator port identifier as the session name.
+	 * Use the initator port identifier as the session name, when
+	 * checking against se_node_acl->initiatorname[] this can be
+	 * with or without preceeding '0x'.
 	 */
 	snprintf(ch->sess_name, sizeof(ch->sess_name), "0x%016llx%016llx",
 		 be64_to_cpu(*(__be64 *)ch->i_port_id),
 		 be64_to_cpu(*(__be64 *)(ch->i_port_id + 8)));
 
 	pr_debug("registering session %s\n", ch->sess_name);
+	p = &ch->sess_name[0];
 
-	nacl = srpt_lookup_acl(sport, ch->i_port_id);
-	if (!nacl) {
-		pr_info("Rejected login because no ACL has been"
-			" configured yet for initiator %s.\n", ch->sess_name);
+	ch->sess = transport_init_session(TARGET_PROT_NORMAL);
+	if (IS_ERR(ch->sess)) {
 		rej->reason = cpu_to_be32(
-				SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
+				SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+		pr_debug("Failed to create session\n");
 		goto destroy_ib;
 	}
 
-	ch->sess = transport_init_session(TARGET_PROT_NORMAL);
-	if (IS_ERR(ch->sess)) {
+try_again:
+	se_acl = core_tpg_get_initiator_node_acl(&sport->port_tpg_1, p);
+	if (!se_acl) {
+		pr_info("Rejected login because no ACL has been"
+			" configured yet for initiator %s.\n", ch->sess_name);
+		/*
+		 * XXX: Hack to retry of ch->i_port_id without leading '0x'
+		 */
+		if (p == &ch->sess_name[0]) {
+			p += 2;
+			goto try_again;
+		}
 		rej->reason = cpu_to_be32(
-				SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
-		pr_debug("Failed to create session\n");
-		goto deregister_session;
+				SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
+		transport_free_session(ch->sess);
+		goto destroy_ib;
 	}
-	ch->sess->se_node_acl = &nacl->nacl;
-	transport_register_session(&sport->port_tpg_1, &nacl->nacl, ch->sess, ch);
+	ch->sess->se_node_acl = se_acl;
+
+	transport_register_session(&sport->port_tpg_1, se_acl, ch->sess, ch);
 
 	pr_debug("Establish connection sess=%p name=%s cm_id=%p\n", ch->sess,
 		 ch->sess_name, ch->cm_id);
@@ -2635,8 +2624,6 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
 release_channel:
 	srpt_set_ch_state(ch, CH_RELEASING);
 	transport_deregister_session_configfs(ch->sess);
-
-deregister_session:
 	transport_deregister_session(ch->sess);
 	ch->sess = NULL;
 
@@ -3273,8 +3260,6 @@ static void srpt_add_one(struct ib_device *device)
 		sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE;
 		sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
 		INIT_WORK(&sport->work, srpt_refresh_port_work);
-		INIT_LIST_HEAD(&sport->port_acl_list);
-		spin_lock_init(&sport->port_acl_lock);
 
 		if (srpt_refresh_port(sport)) {
 			pr_err("MAD registration failed for %s-%d.\n",
@@ -3508,42 +3493,15 @@ out:
  */
 static int srpt_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
 {
-	struct srpt_port *sport =
-		container_of(se_nacl->se_tpg, struct srpt_port, port_tpg_1);
-	struct srpt_node_acl *nacl =
-		container_of(se_nacl, struct srpt_node_acl, nacl);
 	u8 i_port_id[16];
 
 	if (srpt_parse_i_port_id(i_port_id, name) < 0) {
 		pr_err("invalid initiator port ID %s\n", name);
 		return -EINVAL;
 	}
-
-	memcpy(&nacl->i_port_id[0], &i_port_id[0], 16);
-	nacl->sport = sport;
-
-	spin_lock_irq(&sport->port_acl_lock);
-	list_add_tail(&nacl->list, &sport->port_acl_list);
-	spin_unlock_irq(&sport->port_acl_lock);
-
 	return 0;
 }
 
-/*
- * configfs callback function invoked for
- * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
- */
-static void srpt_cleanup_nodeacl(struct se_node_acl *se_nacl)
-{
-	struct srpt_node_acl *nacl =
-		container_of(se_nacl, struct srpt_node_acl, nacl);
-	struct srpt_port *sport = nacl->sport;
-
-	spin_lock_irq(&sport->port_acl_lock);
-	list_del(&nacl->list);
-	spin_unlock_irq(&sport->port_acl_lock);
-}
-
 static ssize_t srpt_tpg_attrib_srp_max_rdma_size_show(struct config_item *item,
 		char *page)
 {
@@ -3820,7 +3778,6 @@ static const struct target_core_fabric_ops srpt_template = {
 	.fabric_make_tpg		= srpt_make_tpg,
 	.fabric_drop_tpg		= srpt_drop_tpg,
 	.fabric_init_nodeacl		= srpt_init_nodeacl,
-	.fabric_cleanup_nodeacl		= srpt_cleanup_nodeacl,
 
 	.tfc_wwn_attrs			= srpt_wwn_attrs,
 	.tfc_tpg_base_attrs		= srpt_tpg_attrs,
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index 5faad8acd789..5366e0a9fd6d 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -364,11 +364,9 @@ struct srpt_port {
 	u16			sm_lid;
 	u16			lid;
 	union ib_gid		gid;
-	spinlock_t		port_acl_lock;
 	struct work_struct	work;
 	struct se_portal_group	port_tpg_1;
 	struct se_wwn		port_wwn;
-	struct list_head	port_acl_list;
 	struct srpt_port_attrib port_attrib;
 };
 
@@ -409,15 +407,9 @@ struct srpt_device {
 /**
  * struct srpt_node_acl - Per-initiator ACL data (managed via configfs).
  * @nacl:      Target core node ACL information.
- * @i_port_id: 128-bit SRP initiator port ID.
- * @sport:     port information.
- * @list:      Element of the per-HCA ACL list.
  */
 struct srpt_node_acl {
 	struct se_node_acl	nacl;
-	u8			i_port_id[16];
-	struct srpt_port	*sport;
-	struct list_head	list;
 };
 
 #endif				/* IB_SRPT_H */
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 6b942d9e5b74..6992ebc50c87 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -824,6 +824,41 @@ static struct bin_attribute sysfs_reset_attr = {
 };
 
 static ssize_t
+qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
+    struct bin_attribute *bin_attr,
+    char *buf, loff_t off, size_t count)
+{
+	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+	    struct device, kobj)));
+	int type;
+	int rval = 0;
+	port_id_t did;
+
+	type = simple_strtol(buf, NULL, 10);
+
+	did.b.domain = (type & 0x00ff0000) >> 16;
+	did.b.area = (type & 0x0000ff00) >> 8;
+	did.b.al_pa = (type & 0x000000ff);
+
+	ql_log(ql_log_info, vha, 0x70e3, "portid=%02x%02x%02x done\n",
+	    did.b.domain, did.b.area, did.b.al_pa);
+
+	ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type);
+
+	rval = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);
+	return count;
+}
+
+static struct bin_attribute sysfs_issue_logo_attr = {
+	.attr = {
+		.name = "issue_logo",
+		.mode = S_IWUSR,
+	},
+	.size = 0,
+	.write = qla2x00_issue_logo,
+};
+
+static ssize_t
 qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
     struct bin_attribute *bin_attr,
     char *buf, loff_t off, size_t count)
@@ -937,6 +972,7 @@ static struct sysfs_entry {
 	{ "vpd", &sysfs_vpd_attr, 1 },
 	{ "sfp", &sysfs_sfp_attr, 1 },
 	{ "reset", &sysfs_reset_attr, },
+	{ "issue_logo", &sysfs_issue_logo_attr, },
 	{ "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
 	{ "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
 	{ NULL },
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 34dc9a35670b..cd0d94ea7f74 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -14,25 +14,24 @@
  * | Module Init and Probe        |       0x017f       | 0x0146         |
  * |                              |                    | 0x015b-0x0160  |
  * |                              |                    | 0x016e-0x0170  |
- * | Mailbox commands             |       0x118d       | 0x1115-0x1116  |
- * |                              |                    | 0x111a-0x111b  |
+ * | Mailbox commands             |       0x1192       |                |
+ * |                              |                    |                |
  * | Device Discovery             |       0x2016       | 0x2020-0x2022, |
  * |                              |                    | 0x2011-0x2012, |
  * |                              |                    | 0x2099-0x20a4  |
- * | Queue Command and IO tracing |       0x3075       | 0x300b         |
+ * | Queue Command and IO tracing |       0x3074       | 0x300b         |
  * |                              |                    | 0x3027-0x3028  |
  * |                              |                    | 0x303d-0x3041  |
  * |                              |                    | 0x302d,0x3033  |
  * |                              |                    | 0x3036,0x3038  |
  * |                              |                    | 0x303a         |
 * | DPC Thread                   |       0x4023       | 0x4002,0x4013  |
- * | Async Events                 |       0x508a       | 0x502b-0x502f  |
- * |                              |                    | 0x5047         |
+ * | Async Events                 |       0x5089       | 0x502b-0x502f  |
  * |                              |                    | 0x5084,0x5075  |
  * |                              |                    | 0x503d,0x5044  |
  * |                              |                    | 0x507b,0x505f  |
 * | Timer Routines               |       0x6012       |                |
- * | User Space Interactions      |       0x70e2       | 0x7018,0x702e  |
+ * | User Space Interactions      |       0x70e65      | 0x7018,0x702e  |
  * |                              |                    | 0x7020,0x7024  |
  * |                              |                    | 0x7039,0x7045  |
  * |                              |                    | 0x7073-0x7075  |
@@ -60,15 +59,11 @@
  * |                              |                    | 0xb13c-0xb140  |
  * |                              |                    | 0xb149         |
  * | MultiQ                       |       0xc00c       |                |
- * | Misc                         |       0xd300       | 0xd016-0xd017  |
- * |                              |                    | 0xd021,0xd024  |
- * |                              |                    | 0xd025,0xd029  |
- * |                              |                    | 0xd02a,0xd02e  |
- * |                              |                    | 0xd031-0xd0ff  |
+ * | Misc                         |       0xd301       | 0xd031-0xd0ff  |
  * |                              |                    | 0xd101-0xd1fe  |
  * |                              |                    | 0xd214-0xd2fe  |
  * | Target Mode                  |       0xe080       |                |
- * | Target Mode Management       |       0xf096       | 0xf002         |
+ * | Target Mode Management       |       0xf09b       | 0xf002         |
  * |                              |                    | 0xf046-0xf049  |
  * | Target Mode Task Management  |      0x1000d       |                |
  * ----------------------------------------------------------------------
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 388d79088b59..9872f3429e53 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -259,7 +259,7 @@
 #define LOOP_DOWN_TIME			255	/* 240 */
 #define LOOP_DOWN_RESET			(LOOP_DOWN_TIME - 30)
 
-#define DEFAULT_OUTSTANDING_COMMANDS	1024
+#define DEFAULT_OUTSTANDING_COMMANDS	4096
 #define MIN_OUTSTANDING_COMMANDS	128
 
 /* ISP request and response entry counts (37-65535) */
@@ -267,11 +267,13 @@
 #define REQUEST_ENTRY_CNT_2200		2048	/* Number of request entries. */
 #define REQUEST_ENTRY_CNT_24XX		2048	/* Number of request entries. */
 #define REQUEST_ENTRY_CNT_83XX		8192	/* Number of request entries. */
+#define RESPONSE_ENTRY_CNT_83XX		4096	/* Number of response entries.*/
 #define RESPONSE_ENTRY_CNT_2100		64	/* Number of response entries.*/
 #define RESPONSE_ENTRY_CNT_2300		512	/* Number of response entries.*/
 #define RESPONSE_ENTRY_CNT_MQ		128	/* Number of response entries.*/
 #define ATIO_ENTRY_CNT_24XX		4096	/* Number of ATIO entries. */
 #define RESPONSE_ENTRY_CNT_FX00		256	/* Number of response entries.*/
+#define EXTENDED_EXCH_ENTRY_CNT		32768	/* Entries for offload case */
 
 struct req_que;
 struct qla_tgt_sess;
@@ -309,6 +311,14 @@ struct srb_cmd {
 /* To identify if a srb is of T10-CRC type. @sp => srb_t pointer */
 #define IS_PROT_IO(sp)	(sp->flags & SRB_CRC_CTX_DSD_VALID)
 
+struct els_logo_payload {
+	uint8_t opcode;
+	uint8_t rsvd[3];
+	uint8_t s_id[3];
+	uint8_t rsvd1[1];
+	uint8_t wwpn[WWN_SIZE];
+};
+
 /*
  * SRB extensions.
  */
@@ -322,6 +332,15 @@ struct srb_iocb {
 		uint16_t data[2];
 	} logio;
 	struct {
+#define ELS_DCMD_TIMEOUT 20
+#define ELS_DCMD_LOGO 0x5
+		uint32_t flags;
+		uint32_t els_cmd;
+		struct completion comp;
+		struct els_logo_payload *els_logo_pyld;
+		dma_addr_t els_logo_pyld_dma;
+	} els_logo;
+	struct {
 		/*
 		 * Values for flags field below are as
 		 * defined in tsk_mgmt_entry struct
@@ -382,7 +401,7 @@ struct srb_iocb {
 #define SRB_FXIOCB_DCMD	10
 #define SRB_FXIOCB_BCMD	11
 #define SRB_ABT_CMD	12
-
+#define SRB_ELS_DCMD	13
 
 typedef struct srb {
 	atomic_t ref_count;
@@ -891,6 +910,7 @@ struct mbx_cmd_32 {
 #define MBC_DISABLE_VI			0x24	/* Disable VI operation. */
 #define MBC_ENABLE_VI			0x25	/* Enable VI operation. */
 #define MBC_GET_FIRMWARE_OPTION		0x28	/* Get Firmware Options. */
+#define MBC_GET_MEM_OFFLOAD_CNTRL_STAT	0x34	/* Memory Offload ctrl/Stat*/
 #define MBC_SET_FIRMWARE_OPTION		0x38	/* Set Firmware Options. */
 #define MBC_LOOP_PORT_BYPASS		0x40	/* Loop Port Bypass. */
 #define MBC_LOOP_PORT_ENABLE		0x41	/* Loop Port Enable. */
@@ -2695,11 +2715,16 @@ struct isp_operations {
 
 struct scsi_qla_host;
 
+
+#define QLA83XX_RSPQ_MSIX_ENTRY_NUMBER 1 /* refer to qla83xx_msix_entries */
+
 struct qla_msix_entry {
 	int have_irq;
 	uint32_t vector;
 	uint16_t entry;
 	struct rsp_que *rsp;
+	struct irq_affinity_notify irq_notify;
+	int cpuid;
 };
 
 #define	WATCH_INTERVAL		1       /* number of seconds */
@@ -2910,12 +2935,15 @@ struct qlt_hw_data {
 	uint32_t num_qfull_cmds_dropped;
 	spinlock_t q_full_lock;
 	uint32_t leak_exchg_thresh_hold;
+	spinlock_t sess_lock;
+	int rspq_vector_cpuid;
+	spinlock_t atio_lock ____cacheline_aligned;
 };
 
 #define MAX_QFULL_CMDS_ALLOC	8192
 #define Q_FULL_THRESH_HOLD_PERCENT 90
 #define Q_FULL_THRESH_HOLD(ha) \
-	((ha->fw_xcb_count/100) * Q_FULL_THRESH_HOLD_PERCENT)
+	((ha->cur_fw_xcb_count/100) * Q_FULL_THRESH_HOLD_PERCENT)
 
 #define LEAK_EXCHG_THRESH_HOLD_PERCENT 75	/* 75 percent */
 
@@ -2962,10 +2990,12 @@ struct qla_hw_data {
 		uint32_t	isp82xx_no_md_cap:1;
 		uint32_t	host_shutting_down:1;
 		uint32_t	idc_compl_status:1;
-
 		uint32_t	mr_reset_hdlr_active:1;
 		uint32_t	mr_intr_valid:1;
+
 		uint32_t	fawwpn_enabled:1;
+		uint32_t	exlogins_enabled:1;
+		uint32_t	exchoffld_enabled:1;
 		/* 35 bits */
 	} flags;
 
@@ -3237,6 +3267,21 @@ struct qla_hw_data {
 	void		*async_pd;
 	dma_addr_t	async_pd_dma;
 
+#define ENABLE_EXTENDED_LOGIN	BIT_7
+
+	/* Extended Logins  */
+	void		*exlogin_buf;
+	dma_addr_t	exlogin_buf_dma;
+	int		exlogin_size;
+
+#define ENABLE_EXCHANGE_OFFLD	BIT_2
+
+	/* Exchange Offload */
+	void		*exchoffld_buf;
+	dma_addr_t	exchoffld_buf_dma;
+	int		exchoffld_size;
+	int		exchoffld_count;
+
 	void		*swl;
 
 	/* These are used by mailbox operations. */
3242 /* These are used by mailbox operations. */ 3287 /* These are used by mailbox operations. */
@@ -3279,8 +3324,14 @@ struct qla_hw_data {
3279#define RISC_START_ADDRESS_2100 0x1000 3324#define RISC_START_ADDRESS_2100 0x1000
3280#define RISC_START_ADDRESS_2300 0x800 3325#define RISC_START_ADDRESS_2300 0x800
3281#define RISC_START_ADDRESS_2400 0x100000 3326#define RISC_START_ADDRESS_2400 0x100000
3282 uint16_t fw_xcb_count; 3327
3283 uint16_t fw_iocb_count; 3328 uint16_t orig_fw_tgt_xcb_count;
3329 uint16_t cur_fw_tgt_xcb_count;
3330 uint16_t orig_fw_xcb_count;
3331 uint16_t cur_fw_xcb_count;
3332 uint16_t orig_fw_iocb_count;
3333 uint16_t cur_fw_iocb_count;
3334 uint16_t fw_max_fcf_count;
3284 3335
3285 uint32_t fw_shared_ram_start; 3336 uint32_t fw_shared_ram_start;
3286 uint32_t fw_shared_ram_end; 3337 uint32_t fw_shared_ram_end;
@@ -3323,6 +3374,9 @@ struct qla_hw_data {
 	uint32_t	chain_offset;
 	struct dentry *dfs_dir;
 	struct dentry *dfs_fce;
+	struct dentry *dfs_tgt_counters;
+	struct dentry *dfs_fw_resource_cnt;
+
 	dma_addr_t	fce_dma;
 	void		*fce;
 	uint32_t	fce_bufs;
@@ -3480,6 +3534,18 @@ struct qla_hw_data {
 	int		allow_cna_fw_dump;
 };
 
+struct qla_tgt_counters {
+	uint64_t qla_core_sbt_cmd;
+	uint64_t core_qla_que_buf;
+	uint64_t qla_core_ret_ctio;
+	uint64_t core_qla_snd_status;
+	uint64_t qla_core_ret_sta_ctio;
+	uint64_t core_qla_free_cmd;
+	uint64_t num_q_full_sent;
+	uint64_t num_alloc_iocb_failed;
+	uint64_t num_term_xchg_sent;
+};
+
 /*
  * Qlogic scsi host structure
  */
@@ -3595,6 +3661,10 @@ typedef struct scsi_qla_host {
 	atomic_t	generation_tick;
 	/* Time when global fcport update has been scheduled */
 	int		total_fcport_update_gen;
+	/* List of pending LOGOs, protected by tgt_mutex */
+	struct list_head	logo_list;
+	/* List of pending PLOGI acks, protected by hw lock */
+	struct list_head	plogi_ack_list;
 
 	uint32_t	vp_abort_cnt;
 
@@ -3632,6 +3702,7 @@ typedef struct scsi_qla_host {
 
 	atomic_t	vref_count;
 	struct qla8044_reset_template reset_tmplt;
+	struct qla_tgt_counters tgt_counters;
 } scsi_qla_host_t;
 
 #define SET_VP_IDX	1
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 15cf074ffa3c..cd8b96a4b0dd 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -13,6 +13,85 @@ static struct dentry *qla2x00_dfs_root;
 static atomic_t qla2x00_dfs_root_count;
 
 static int
+qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
+{
+	struct scsi_qla_host *vha = s->private;
+	struct qla_hw_data *ha = vha->hw;
+
+	seq_puts(s, "FW Resource count\n\n");
+	seq_printf(s, "Original TGT exchg count[%d]\n",
+	    ha->orig_fw_tgt_xcb_count);
+	seq_printf(s, "current TGT exchg count[%d]\n",
+	    ha->cur_fw_tgt_xcb_count);
+	seq_printf(s, "original Initiator Exchange count[%d]\n",
+	    ha->orig_fw_xcb_count);
+	seq_printf(s, "Current Initiator Exchange count[%d]\n",
+	    ha->cur_fw_xcb_count);
+	seq_printf(s, "Original IOCB count[%d]\n", ha->orig_fw_iocb_count);
+	seq_printf(s, "Current IOCB count[%d]\n", ha->cur_fw_iocb_count);
+	seq_printf(s, "MAX VP count[%d]\n", ha->max_npiv_vports);
+	seq_printf(s, "MAX FCF count[%d]\n", ha->fw_max_fcf_count);
+
+	return 0;
+}
+
+static int
+qla_dfs_fw_resource_cnt_open(struct inode *inode, struct file *file)
+{
+	struct scsi_qla_host *vha = inode->i_private;
+	return single_open(file, qla_dfs_fw_resource_cnt_show, vha);
+}
+
+static const struct file_operations dfs_fw_resource_cnt_ops = {
+	.open           = qla_dfs_fw_resource_cnt_open,
+	.read           = seq_read,
+	.llseek         = seq_lseek,
+	.release        = single_release,
+};
+
+static int
+qla_dfs_tgt_counters_show(struct seq_file *s, void *unused)
+{
+	struct scsi_qla_host *vha = s->private;
+
+	seq_puts(s, "Target Counters\n");
+	seq_printf(s, "qla_core_sbt_cmd = %lld\n",
+	    vha->tgt_counters.qla_core_sbt_cmd);
+	seq_printf(s, "qla_core_ret_sta_ctio = %lld\n",
+	    vha->tgt_counters.qla_core_ret_sta_ctio);
+	seq_printf(s, "qla_core_ret_ctio = %lld\n",
+	    vha->tgt_counters.qla_core_ret_ctio);
+	seq_printf(s, "core_qla_que_buf = %lld\n",
+	    vha->tgt_counters.core_qla_que_buf);
+	seq_printf(s, "core_qla_snd_status = %lld\n",
+	    vha->tgt_counters.core_qla_snd_status);
+	seq_printf(s, "core_qla_free_cmd = %lld\n",
+	    vha->tgt_counters.core_qla_free_cmd);
+	seq_printf(s, "num alloc iocb failed = %lld\n",
+	    vha->tgt_counters.num_alloc_iocb_failed);
+	seq_printf(s, "num term exchange sent = %lld\n",
+	    vha->tgt_counters.num_term_xchg_sent);
+	seq_printf(s, "num Q full sent = %lld\n",
+	    vha->tgt_counters.num_q_full_sent);
+
+	return 0;
+}
+
+static int
+qla_dfs_tgt_counters_open(struct inode *inode, struct file *file)
+{
+	struct scsi_qla_host *vha = inode->i_private;
+	return single_open(file, qla_dfs_tgt_counters_show, vha);
+}
+
+static const struct file_operations dfs_tgt_counters_ops = {
+	.open           = qla_dfs_tgt_counters_open,
+	.read           = seq_read,
+	.llseek         = seq_lseek,
+	.release        = single_release,
+};
+
+static int
 qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
 {
 	scsi_qla_host_t *vha = s->private;
@@ -146,6 +225,22 @@ create_dir:
 	atomic_inc(&qla2x00_dfs_root_count);
 
 create_nodes:
+	ha->dfs_fw_resource_cnt = debugfs_create_file("fw_resource_count",
+	    S_IRUSR, ha->dfs_dir, vha, &dfs_fw_resource_cnt_ops);
+	if (!ha->dfs_fw_resource_cnt) {
+		ql_log(ql_log_warn, vha, 0x00fd,
+		    "Unable to create debugFS fw_resource_count node.\n");
+		goto out;
+	}
+
+	ha->dfs_tgt_counters = debugfs_create_file("tgt_counters", S_IRUSR,
+	    ha->dfs_dir, vha, &dfs_tgt_counters_ops);
+	if (!ha->dfs_tgt_counters) {
+		ql_log(ql_log_warn, vha, 0xd301,
+		    "Unable to create debugFS tgt_counters node.\n");
+		goto out;
+	}
+
 	ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha,
 	    &dfs_fce_ops);
 	if (!ha->dfs_fce) {
@@ -161,6 +256,17 @@ int
 qla2x00_dfs_remove(scsi_qla_host_t *vha)
 {
 	struct qla_hw_data *ha = vha->hw;
+
+	if (ha->dfs_fw_resource_cnt) {
+		debugfs_remove(ha->dfs_fw_resource_cnt);
+		ha->dfs_fw_resource_cnt = NULL;
+	}
+
+	if (ha->dfs_tgt_counters) {
+		debugfs_remove(ha->dfs_tgt_counters);
+		ha->dfs_tgt_counters = NULL;
+	}
+
 	if (ha->dfs_fce) {
 		debugfs_remove(ha->dfs_fce);
 		ha->dfs_fce = NULL;
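Both new debugfs nodes above use the stock seq_file single_open() idiom. A minimal, self-contained sketch of that idiom follows (hypothetical names, not part of this commit):

	#include <linux/debugfs.h>
	#include <linux/seq_file.h>

	static int demo_show(struct seq_file *s, void *unused)
	{
		/* s->private is the pointer passed to debugfs_create_file() */
		seq_printf(s, "value = %d\n", *(int *)s->private);
		return 0;
	}

	static int demo_open(struct inode *inode, struct file *file)
	{
		return single_open(file, demo_show, inode->i_private);
	}

	static const struct file_operations demo_fops = {
		.open           = demo_open,
		.read           = seq_read,
		.llseek         = seq_lseek,
		.release        = single_release,
	};

	/* registration, e.g. from probe:
	 * debugfs_create_file("demo", S_IRUSR, parent_dentry, &some_int,
	 *     &demo_fops);
	 */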
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 7686bfe9a4a9..0103e468e357 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -44,6 +44,8 @@ extern int qla2x00_find_new_loop_id(scsi_qla_host_t *, fc_port_t *);
 extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *);
 extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *);
 
+extern int qla24xx_els_dcmd_iocb(scsi_qla_host_t *, int, port_id_t);
+
 extern void qla2x00_update_fcports(scsi_qla_host_t *);
 
 extern int qla2x00_abort_isp(scsi_qla_host_t *);
@@ -117,6 +119,8 @@ extern int ql2xdontresethba;
 extern uint64_t ql2xmaxlun;
 extern int ql2xmdcapmask;
 extern int ql2xmdenable;
+extern int ql2xexlogins;
+extern int ql2xexchoffld;
 
 extern int qla2x00_loop_reset(scsi_qla_host_t *);
 extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -135,6 +139,10 @@ extern int qla2x00_post_async_adisc_work(struct scsi_qla_host *, fc_port_t *,
     uint16_t *);
 extern int qla2x00_post_async_adisc_done_work(struct scsi_qla_host *,
     fc_port_t *, uint16_t *);
+extern int qla2x00_set_exlogins_buffer(struct scsi_qla_host *);
+extern void qla2x00_free_exlogin_buffer(struct qla_hw_data *);
+extern int qla2x00_set_exchoffld_buffer(struct scsi_qla_host *);
+extern void qla2x00_free_exchoffld_buffer(struct qla_hw_data *);
 
 extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *);
 
@@ -323,8 +331,7 @@ extern int
 qla2x00_get_id_list(scsi_qla_host_t *, void *, dma_addr_t, uint16_t *);
 
 extern int
-qla2x00_get_resource_cnts(scsi_qla_host_t *, uint16_t *, uint16_t *,
-    uint16_t *, uint16_t *, uint16_t *, uint16_t *);
+qla2x00_get_resource_cnts(scsi_qla_host_t *);
 
 extern int
 qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map);
@@ -766,4 +773,11 @@ extern int qla8044_abort_isp(scsi_qla_host_t *);
 extern int qla8044_check_fw_alive(struct scsi_qla_host *);
 
 extern void qlt_host_reset_handler(struct qla_hw_data *ha);
+extern int qla_get_exlogin_status(scsi_qla_host_t *, uint16_t *,
+    uint16_t *);
+extern int qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr);
+extern int qla_get_exchoffld_status(scsi_qla_host_t *, uint16_t *, uint16_t *);
+extern int qla_set_exchoffld_mem_cfg(scsi_qla_host_t *, dma_addr_t);
+extern void qlt_handle_abts_recv(struct scsi_qla_host *, response_t *);
+
 #endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 16a1935cc9c1..52a87657c7dd 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1766,10 +1766,10 @@ qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
 	    (ql2xmultique_tag || ql2xmaxqueues > 1)))
 		req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
 	else {
-		if (ha->fw_xcb_count <= ha->fw_iocb_count)
-			req->num_outstanding_cmds = ha->fw_xcb_count;
+		if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count)
+			req->num_outstanding_cmds = ha->cur_fw_xcb_count;
 		else
-			req->num_outstanding_cmds = ha->fw_iocb_count;
+			req->num_outstanding_cmds = ha->cur_fw_iocb_count;
 	}
 
 	req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
@@ -1843,9 +1843,23 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
 	ql_dbg(ql_dbg_init, vha, 0x00ca,
 	    "Starting firmware.\n");
 
+	if (ql2xexlogins)
+		ha->flags.exlogins_enabled = 1;
+
+	if (ql2xexchoffld)
+		ha->flags.exchoffld_enabled = 1;
+
 	rval = qla2x00_execute_fw(vha, srisc_address);
 	/* Retrieve firmware information. */
 	if (rval == QLA_SUCCESS) {
+		rval = qla2x00_set_exlogins_buffer(vha);
+		if (rval != QLA_SUCCESS)
+			goto failed;
+
+		rval = qla2x00_set_exchoffld_buffer(vha);
+		if (rval != QLA_SUCCESS)
+			goto failed;
+
 enable_82xx_npiv:
 		fw_major_version = ha->fw_major_version;
 		if (IS_P3P_TYPE(ha))
@@ -1864,9 +1878,7 @@ enable_82xx_npiv:
 				ha->max_npiv_vports =
 				    MIN_MULTI_ID_FABRIC - 1;
 			}
-			qla2x00_get_resource_cnts(vha, NULL,
-			    &ha->fw_xcb_count, NULL, &ha->fw_iocb_count,
-			    &ha->max_npiv_vports, NULL);
+			qla2x00_get_resource_cnts(vha);
 
 			/*
 			 * Allocate the array of outstanding commands
@@ -2248,7 +2260,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
 	if (IS_FWI2_CAPABLE(ha)) {
 		mid_init_cb->options = cpu_to_le16(BIT_1);
 		mid_init_cb->init_cb.execution_throttle =
-		    cpu_to_le16(ha->fw_xcb_count);
+		    cpu_to_le16(ha->cur_fw_xcb_count);
 		/* D-Port Status */
 		if (IS_DPORT_CAPABLE(ha))
 			mid_init_cb->init_cb.firmware_options_1 |=
@@ -3053,6 +3065,26 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
 			atomic_set(&vha->loop_state, LOOP_READY);
 			ql_dbg(ql_dbg_disc, vha, 0x2069,
 			    "LOOP READY.\n");
+
+			/*
+			 * Process any ATIO queue entries that came in
+			 * while we weren't online.
+			 */
+			if (qla_tgt_mode_enabled(vha)) {
+				if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) {
+					spin_lock_irqsave(&ha->tgt.atio_lock,
+					    flags);
+					qlt_24xx_process_atio_queue(vha, 0);
+					spin_unlock_irqrestore(
+					    &ha->tgt.atio_lock, flags);
+				} else {
+					spin_lock_irqsave(&ha->hardware_lock,
+					    flags);
+					qlt_24xx_process_atio_queue(vha, 1);
+					spin_unlock_irqrestore(
+					    &ha->hardware_lock, flags);
+				}
+			}
 		}
 	}
 
@@ -4907,7 +4939,6 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req = ha->req_q_map[0];
 	struct rsp_que *rsp = ha->rsp_q_map[0];
-	unsigned long flags;
 
 	/* If firmware needs to be loaded */
 	if (qla2x00_isp_firmware(vha)) {
@@ -4929,17 +4960,6 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
 		/* Issue a marker after FW becomes ready. */
 		qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
 
-		vha->flags.online = 1;
-
-		/*
-		 * Process any ATIO queue entries that came in
-		 * while we weren't online.
-		 */
-		spin_lock_irqsave(&ha->hardware_lock, flags);
-		if (qla_tgt_mode_enabled(vha))
-			qlt_24xx_process_atio_queue(vha);
-		spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
 		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
 	}
 
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index fee9eb7c8a60..a6b7f1588aa4 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -258,6 +258,8 @@ qla2x00_init_timer(srb_t *sp, unsigned long tmo)
 	if ((IS_QLAFX00(sp->fcport->vha->hw)) &&
 	    (sp->type == SRB_FXIOCB_DCMD))
 		init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
+	if (sp->type == SRB_ELS_DCMD)
+		init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
 }
 
 static inline int
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index c49df34e9b35..b41265a75ed5 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1868,6 +1868,7 @@ skip_cmd_array:
 	}
 
 queuing_error:
+	vha->tgt_counters.num_alloc_iocb_failed++;
 	return pkt;
 }
 
@@ -2010,6 +2011,190 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
 }
 
 static void
+qla2x00_els_dcmd_sp_free(void *ptr, void *data)
+{
+	struct scsi_qla_host *vha = (scsi_qla_host_t *)ptr;
+	struct qla_hw_data *ha = vha->hw;
+	srb_t *sp = (srb_t *)data;
+	struct srb_iocb *elsio = &sp->u.iocb_cmd;
+
+	kfree(sp->fcport);
+
+	if (elsio->u.els_logo.els_logo_pyld)
+		dma_free_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
+		    elsio->u.els_logo.els_logo_pyld,
+		    elsio->u.els_logo.els_logo_pyld_dma);
+
+	del_timer(&elsio->timer);
+	qla2x00_rel_sp(vha, sp);
+}
+
+static void
+qla2x00_els_dcmd_iocb_timeout(void *data)
+{
+	srb_t *sp = (srb_t *)data;
+	struct srb_iocb *lio = &sp->u.iocb_cmd;
+	fc_port_t *fcport = sp->fcport;
+	struct scsi_qla_host *vha = fcport->vha;
+	struct qla_hw_data *ha = vha->hw;
+	unsigned long flags = 0;
+
+	ql_dbg(ql_dbg_io, vha, 0x3069,
+	    "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
+	    sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
+	    fcport->d_id.b.al_pa);
+
+	/* Abort the exchange */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	if (ha->isp_ops->abort_command(sp)) {
+		ql_dbg(ql_dbg_io, vha, 0x3070,
+		    "mbx abort_command failed.\n");
+	} else {
+		ql_dbg(ql_dbg_io, vha, 0x3071,
+		    "mbx abort_command success.\n");
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	complete(&lio->u.els_logo.comp);
+}
+
+static void
+qla2x00_els_dcmd_sp_done(void *data, void *ptr, int res)
+{
+	srb_t *sp = (srb_t *)ptr;
+	fc_port_t *fcport = sp->fcport;
+	struct srb_iocb *lio = &sp->u.iocb_cmd;
+	struct scsi_qla_host *vha = fcport->vha;
+
+	ql_dbg(ql_dbg_io, vha, 0x3072,
+	    "%s hdl=%x, portid=%02x%02x%02x done\n",
+	    sp->name, sp->handle, fcport->d_id.b.domain,
+	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
+
+	complete(&lio->u.els_logo.comp);
+}
+
+int
+qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
+    port_id_t remote_did)
+{
+	srb_t *sp;
+	fc_port_t *fcport = NULL;
+	struct srb_iocb *elsio = NULL;
+	struct qla_hw_data *ha = vha->hw;
+	struct els_logo_payload logo_pyld;
+	int rval = QLA_SUCCESS;
+
+	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+	if (!fcport) {
+		ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
+		return -ENOMEM;
+	}
+
+	/* Alloc SRB structure */
+	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+	if (!sp) {
+		kfree(fcport);
+		ql_log(ql_log_info, vha, 0x70e6,
+		    "SRB allocation failed\n");
+		return -ENOMEM;
+	}
+
+	elsio = &sp->u.iocb_cmd;
+	fcport->loop_id = 0xFFFF;
+	fcport->d_id.b.domain = remote_did.b.domain;
+	fcport->d_id.b.area = remote_did.b.area;
+	fcport->d_id.b.al_pa = remote_did.b.al_pa;
+
+	ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
+	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
+
+	sp->type = SRB_ELS_DCMD;
+	sp->name = "ELS_DCMD";
+	sp->fcport = fcport;
+	qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
+	elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
+	sp->done = qla2x00_els_dcmd_sp_done;
+	sp->free = qla2x00_els_dcmd_sp_free;
+
+	elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
+	    DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
+	    GFP_KERNEL);
+
+	if (!elsio->u.els_logo.els_logo_pyld) {
+		sp->free(vha, sp);
+		return QLA_FUNCTION_FAILED;
+	}
+
+	memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
+
+	elsio->u.els_logo.els_cmd = els_opcode;
+	logo_pyld.opcode = els_opcode;
+	logo_pyld.s_id[0] = vha->d_id.b.al_pa;
+	logo_pyld.s_id[1] = vha->d_id.b.area;
+	logo_pyld.s_id[2] = vha->d_id.b.domain;
+	host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
+	memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
+
+	memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
+	    sizeof(struct els_logo_payload));
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS) {
+		sp->free(vha, sp);
+		return QLA_FUNCTION_FAILED;
+	}
+
+	ql_dbg(ql_dbg_io, vha, 0x3074,
+	    "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
+	    sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
+	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
+
+	wait_for_completion(&elsio->u.els_logo.comp);
+
+	sp->free(vha, sp);
+	return rval;
+}
+
+static void
+qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
+{
+	scsi_qla_host_t *vha = sp->fcport->vha;
+	struct srb_iocb *elsio = &sp->u.iocb_cmd;
+
+	els_iocb->entry_type = ELS_IOCB_TYPE;
+	els_iocb->entry_count = 1;
+	els_iocb->sys_define = 0;
+	els_iocb->entry_status = 0;
+	els_iocb->handle = sp->handle;
+	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+	els_iocb->tx_dsd_count = 1;
+	els_iocb->vp_index = vha->vp_idx;
+	els_iocb->sof_type = EST_SOFI3;
+	els_iocb->rx_dsd_count = 0;
+	els_iocb->opcode = elsio->u.els_logo.els_cmd;
+
+	els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
+	els_iocb->port_id[1] = sp->fcport->d_id.b.area;
+	els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
+	els_iocb->control_flags = 0;
+
+	els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
+	els_iocb->tx_address[0] =
+	    cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
+	els_iocb->tx_address[1] =
+	    cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
+	els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
+
+	els_iocb->rx_byte_count = 0;
+	els_iocb->rx_address[0] = 0;
+	els_iocb->rx_address[1] = 0;
+	els_iocb->rx_len = 0;
+
+	sp->fcport->vha->qla_stats.control_requests++;
+}
+
+static void
 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
 {
 	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
@@ -2623,6 +2808,9 @@ qla2x00_start_sp(srb_t *sp)
 		    qlafx00_abort_iocb(sp, pkt) :
 		    qla24xx_abort_iocb(sp, pkt);
 		break;
+	case SRB_ELS_DCMD:
+		qla24xx_els_logo_iocb(sp, pkt);
+		break;
 	default:
 		break;
 	}
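The new qla24xx_els_dcmd_iocb() above is synchronous: it queues the LOGO IOCB and then blocks on a struct completion that the done and timeout callbacks signal. A stripped-down sketch of that completion idiom (illustrative names only, not from the driver):

	#include <linux/completion.h>

	struct demo_req {
		struct completion comp;	/* signalled from the done callback */
		int result;
	};

	static void demo_done(struct demo_req *req, int res)
	{
		req->result = res;
		complete(&req->comp);		/* wake the submitter */
	}

	static int demo_submit_and_wait(struct demo_req *req)
	{
		init_completion(&req->comp);
		/* ... hand req to the hardware; demo_done() runs later,
		 * typically from interrupt context ...
		 */
		wait_for_completion(&req->comp);
		return req->result;
	}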
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index ccf6a7f99024..d4d65eb0e9b4 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -18,6 +18,10 @@ static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
 static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
 	sts_entry_t *);
+static void qla_irq_affinity_notify(struct irq_affinity_notify *,
+    const cpumask_t *);
+static void qla_irq_affinity_release(struct kref *);
+
 
 /**
  * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
@@ -1418,6 +1422,12 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
 	case SRB_CT_CMD:
 		type = "ct pass-through";
 		break;
+	case SRB_ELS_DCMD:
+		type = "Driver ELS logo";
+		ql_dbg(ql_dbg_user, vha, 0x5047,
+		    "Completing %s: (%p) type=%d.\n", type, sp, sp->type);
+		sp->done(vha, sp, 0);
+		return;
 	default:
 		ql_dbg(ql_dbg_user, vha, 0x503e,
 		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
@@ -2542,6 +2552,14 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
 	if (!vha->flags.online)
 		return;
 
+	if (rsp->msix->cpuid != smp_processor_id()) {
+		/* if kernel does not notify qla of IRQ's CPU change,
+		 * then set it here.
+		 */
+		rsp->msix->cpuid = smp_processor_id();
+		ha->tgt.rspq_vector_cpuid = rsp->msix->cpuid;
+	}
+
 	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
 		pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
 
@@ -2587,8 +2605,14 @@ process_err:
 			qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
 			break;
 		case ABTS_RECV_24XX:
-			/* ensure that the ATIO queue is empty */
-			qlt_24xx_process_atio_queue(vha);
+			if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+				/* ensure that the ATIO queue is empty */
+				qlt_handle_abts_recv(vha, (response_t *)pkt);
+				break;
+			} else {
+				/* drop through */
+				qlt_24xx_process_atio_queue(vha, 1);
+			}
 		case ABTS_RESP_24XX:
 		case CTIO_TYPE7:
 		case NOTIFY_ACK_TYPE:
@@ -2755,13 +2779,22 @@ qla24xx_intr_handler(int irq, void *dev_id)
 		case INTR_RSP_QUE_UPDATE_83XX:
 			qla24xx_process_response_queue(vha, rsp);
 			break;
-		case INTR_ATIO_QUE_UPDATE:
-			qlt_24xx_process_atio_queue(vha);
+		case INTR_ATIO_QUE_UPDATE:{
+			unsigned long flags2;
+			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
+			qlt_24xx_process_atio_queue(vha, 1);
+			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
 			break;
-		case INTR_ATIO_RSP_QUE_UPDATE:
-			qlt_24xx_process_atio_queue(vha);
+		}
+		case INTR_ATIO_RSP_QUE_UPDATE: {
+			unsigned long flags2;
+			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
+			qlt_24xx_process_atio_queue(vha, 1);
+			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
+
 			qla24xx_process_response_queue(vha, rsp);
 			break;
+		}
 		default:
 			ql_dbg(ql_dbg_async, vha, 0x504f,
 			    "Unrecognized interrupt type (%d).\n", stat * 0xff);
@@ -2920,13 +2953,22 @@ qla24xx_msix_default(int irq, void *dev_id)
2920 case INTR_RSP_QUE_UPDATE_83XX: 2953 case INTR_RSP_QUE_UPDATE_83XX:
2921 qla24xx_process_response_queue(vha, rsp); 2954 qla24xx_process_response_queue(vha, rsp);
2922 break; 2955 break;
2923 case INTR_ATIO_QUE_UPDATE: 2956 case INTR_ATIO_QUE_UPDATE:{
2924 qlt_24xx_process_atio_queue(vha); 2957 unsigned long flags2;
2958 spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
2959 qlt_24xx_process_atio_queue(vha, 1);
2960 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
2925 break; 2961 break;
2926 case INTR_ATIO_RSP_QUE_UPDATE: 2962 }
2927 qlt_24xx_process_atio_queue(vha); 2963 case INTR_ATIO_RSP_QUE_UPDATE: {
2964 unsigned long flags2;
2965 spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
2966 qlt_24xx_process_atio_queue(vha, 1);
2967 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
2968
2928 qla24xx_process_response_queue(vha, rsp); 2969 qla24xx_process_response_queue(vha, rsp);
2929 break; 2970 break;
2971 }
2930 default: 2972 default:
2931 ql_dbg(ql_dbg_async, vha, 0x5051, 2973 ql_dbg(ql_dbg_async, vha, 0x5051,
2932 "Unrecognized interrupt type (%d).\n", stat & 0xff); 2974 "Unrecognized interrupt type (%d).\n", stat & 0xff);
@@ -2973,8 +3015,11 @@ qla24xx_disable_msix(struct qla_hw_data *ha)
2973 3015
2974 for (i = 0; i < ha->msix_count; i++) { 3016 for (i = 0; i < ha->msix_count; i++) {
2975 qentry = &ha->msix_entries[i]; 3017 qentry = &ha->msix_entries[i];
2976 if (qentry->have_irq) 3018 if (qentry->have_irq) {
3019 /* unregister irq cpu affinity notification */
3020 irq_set_affinity_notifier(qentry->vector, NULL);
2977 free_irq(qentry->vector, qentry->rsp); 3021 free_irq(qentry->vector, qentry->rsp);
3022 }
2978 } 3023 }
2979 pci_disable_msix(ha->pdev); 3024 pci_disable_msix(ha->pdev);
2980 kfree(ha->msix_entries); 3025 kfree(ha->msix_entries);
@@ -3037,6 +3082,9 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3037 qentry->entry = entries[i].entry; 3082 qentry->entry = entries[i].entry;
3038 qentry->have_irq = 0; 3083 qentry->have_irq = 0;
3039 qentry->rsp = NULL; 3084 qentry->rsp = NULL;
3085 qentry->irq_notify.notify = qla_irq_affinity_notify;
3086 qentry->irq_notify.release = qla_irq_affinity_release;
3087 qentry->cpuid = -1;
3040 } 3088 }
3041 3089
3042 /* Enable MSI-X vectors for the base queue */ 3090 /* Enable MSI-X vectors for the base queue */
@@ -3055,6 +3103,18 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3055 qentry->have_irq = 1; 3103 qentry->have_irq = 1;
3056 qentry->rsp = rsp; 3104 qentry->rsp = rsp;
3057 rsp->msix = qentry; 3105 rsp->msix = qentry;
3106
3107 /* Register for CPU affinity notification. */
3108 irq_set_affinity_notifier(qentry->vector, &qentry->irq_notify);
3109
3110 /* Schedule work (i.e. trigger a notification) to read the cpu
3111 * mask for this specific irq.
3112 * kref_get is required because
3113 * irq_affinity_notify() will do
3114 * kref_put().
3115 */
3116 kref_get(&qentry->irq_notify.kref);
3117 schedule_work(&qentry->irq_notify.work);
3058 } 3118 }
3059 3119
3060 /* 3120 /*
@@ -3234,3 +3294,47 @@ int qla25xx_request_irq(struct rsp_que *rsp)
3234 msix->rsp = rsp; 3294 msix->rsp = rsp;
3235 return ret; 3295 return ret;
3236} 3296}
3297
3298
3299/* irq_set_affinity/irqbalance will trigger notification of cpu mask update */
3300static void qla_irq_affinity_notify(struct irq_affinity_notify *notify,
3301 const cpumask_t *mask)
3302{
3303 struct qla_msix_entry *e =
3304 container_of(notify, struct qla_msix_entry, irq_notify);
3305 struct qla_hw_data *ha;
3306 struct scsi_qla_host *base_vha;
3307
3308 /* users are advised to set the mask to a single cpu */
3309 e->cpuid = cpumask_first(mask);
3310
3311 ha = e->rsp->hw;
3312 base_vha = pci_get_drvdata(ha->pdev);
3313
3314 ql_dbg(ql_dbg_init, base_vha, 0xffff,
3315 "%s: host %ld : vector %d cpu %d \n", __func__,
3316 base_vha->host_no, e->vector, e->cpuid);
3317
3318 if (e->have_irq) {
3319 if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) &&
3320 (e->entry == QLA83XX_RSPQ_MSIX_ENTRY_NUMBER)) {
3321 ha->tgt.rspq_vector_cpuid = e->cpuid;
3322 ql_dbg(ql_dbg_init, base_vha, 0xffff,
3323 "%s: host%ld: rspq vector %d cpu %d runtime change\n",
3324 __func__, base_vha->host_no, e->vector, e->cpuid);
3325 }
3326 }
3327}
3328
3329static void qla_irq_affinity_release(struct kref *ref)
3330{
3331 struct irq_affinity_notify *notify =
3332 container_of(ref, struct irq_affinity_notify, kref);
3333 struct qla_msix_entry *e =
3334 container_of(notify, struct qla_msix_entry, irq_notify);
3335 struct scsi_qla_host *base_vha = pci_get_drvdata(e->rsp->hw->pdev);
3336
3337 ql_dbg(ql_dbg_init, base_vha, 0xffff,
3338 "%s: host%ld: vector %d cpu %d \n", __func__,
3339 base_vha->host_no, e->vector, e->cpuid);
3340}
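These two callbacks complete the notifier plumbing wired up in qla24xx_enable_msix()/qla24xx_disable_msix() above. A condensed sketch of the register/notify/unregister lifecycle using only the core irq_affinity_notify API; struct my_msix_entry and its fields are illustrative stand-ins for qla_msix_entry:

#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/kref.h>

struct my_msix_entry {
	unsigned int vector;
	int cpuid;
	struct irq_affinity_notify irq_notify;
};

static void my_affinity_notify(struct irq_affinity_notify *notify,
			       const cpumask_t *mask)
{
	struct my_msix_entry *e =
		container_of(notify, struct my_msix_entry, irq_notify);

	e->cpuid = cpumask_first(mask);	/* remember where the vector landed */
}

static void my_affinity_release(struct kref *kref)
{
	/* nothing to free: the entry is embedded in a driver-owned array */
}

static void my_register(struct my_msix_entry *e)
{
	e->irq_notify.notify = my_affinity_notify;
	e->irq_notify.release = my_affinity_release;
	irq_set_affinity_notifier(e->vector, &e->irq_notify);
}

static void my_unregister(struct my_msix_entry *e)
{
	irq_set_affinity_notifier(e->vector, NULL);	/* NULL unregisters */
}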
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index cb11e04be568..87e6758302f6 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -489,6 +489,13 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
489 EXTENDED_BB_CREDITS); 489 EXTENDED_BB_CREDITS);
490 } else 490 } else
491 mcp->mb[4] = 0; 491 mcp->mb[4] = 0;
492
493 if (ha->flags.exlogins_enabled)
494 mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;
495
496 if (ha->flags.exchoffld_enabled)
497 mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;
498
492 mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1; 499 mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1;
493 mcp->in_mb |= MBX_1; 500 mcp->in_mb |= MBX_1;
494 } else { 501 } else {
@@ -521,6 +528,226 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
521} 528}
522 529
523/* 530/*
531 * qla_get_exlogin_status
532 * Get extended login status
533 * uses the memory offload control/status Mailbox
534 *
535 * Input:
536 * vha: adapter state pointer.
537 * buf_sz/ex_logins_cnt: returned buffer size and login count.
538 *
539 * Returns:
540 * qla2x00 local function status
541 *
542 * Context:
543 * Kernel context.
544 */
545#define FETCH_XLOGINS_STAT 0x8
546int
547qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
548 uint16_t *ex_logins_cnt)
549{
550 int rval;
551 mbx_cmd_t mc;
552 mbx_cmd_t *mcp = &mc;
553
554 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
555 "Entered %s\n", __func__);
556
557 memset(mcp->mb, 0, sizeof(mcp->mb));
558 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
559 mcp->mb[1] = FETCH_XLOGINS_STAT;
560 mcp->out_mb = MBX_1|MBX_0;
561 mcp->in_mb = MBX_10|MBX_4|MBX_0;
562 mcp->tov = MBX_TOV_SECONDS;
563 mcp->flags = 0;
564
565 rval = qla2x00_mailbox_command(vha, mcp);
566 if (rval != QLA_SUCCESS) {
567 ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
568 } else {
569 *buf_sz = mcp->mb[4];
570 *ex_logins_cnt = mcp->mb[10];
571
572 ql_log(ql_log_info, vha, 0x1190,
573 "buffer size 0x%x, exchange login count=%d\n",
574 mcp->mb[4], mcp->mb[10]);
575
576 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
577 "Done %s.\n", __func__);
578 }
579
580 return rval;
581}
582
583/*
584 * qla_set_exlogin_mem_cfg
585 * set extended login memory configuration
586 * Mbx needs to be issued before init_cb is set
587 *
588 * Input:
589 * vha: adapter state pointer.
590 * phys_addr: physical address of the extended login
591 * buffer; its size comes from ha->exlogin_size.
592 *
593 * TARGET_QUEUE_LOCK must be released
594 * ADAPTER_STATE_LOCK must be released
595 *
596 * Returns:
597 * qla2x00 local function status code.
598 *
599 * Context:
600 * Kernel context.
601 */
602#define CONFIG_XLOGINS_MEM 0x3
603int
604qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
605{
606 int rval;
607 mbx_cmd_t mc;
608 mbx_cmd_t *mcp = &mc;
609 struct qla_hw_data *ha = vha->hw;
610 int configured_count;
611
612 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
613 "Entered %s.\n", __func__);
614
615 memset(mcp->mb, 0, sizeof(mcp->mb));
616 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
617 mcp->mb[1] = CONFIG_XLOGINS_MEM;
618 mcp->mb[2] = MSW(phys_addr);
619 mcp->mb[3] = LSW(phys_addr);
620 mcp->mb[6] = MSW(MSD(phys_addr));
621 mcp->mb[7] = LSW(MSD(phys_addr));
622 mcp->mb[8] = MSW(ha->exlogin_size);
623 mcp->mb[9] = LSW(ha->exlogin_size);
624 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
625 mcp->in_mb = MBX_11|MBX_0;
626 mcp->tov = MBX_TOV_SECONDS;
627 mcp->flags = 0;
628 rval = qla2x00_mailbox_command(vha, mcp);
629 if (rval != QLA_SUCCESS) {
630 /*EMPTY*/
631 ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval);
632 } else {
633 configured_count = mcp->mb[11];
634 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
635 "Done %s.\n", __func__);
636 }
637
638 return rval;
639}
640
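Both *_mem_cfg routines split the 64-bit buffer address across four 16-bit mailbox registers with the driver's word macros. A sketch of the split, assuming MSW/LSW select the high/low 16 bits of a 32-bit value and MSD the high 32 bits of a 64-bit value (as in qla_def.h); the SK_ names are local to this sketch:

#include <linux/types.h>

#define SK_LSW(x)	((uint16_t)(x))
#define SK_MSW(x)	((uint16_t)((uint32_t)(x) >> 16))
#define SK_MSD(x)	((uint32_t)((uint64_t)(x) >> 32))

static void load_dma_addr(uint16_t mb[16], uint64_t phys_addr)
{
	mb[2] = SK_MSW(phys_addr);	   /* bits 31..16 */
	mb[3] = SK_LSW(phys_addr);	   /* bits 15..0  */
	mb[6] = SK_MSW(SK_MSD(phys_addr)); /* bits 63..48 */
	mb[7] = SK_LSW(SK_MSD(phys_addr)); /* bits 47..32 */
}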
641/*
642 * qla_get_exchoffld_status
643 * Get exchange offload status
644 * uses the memory offload control/status Mailbox
645 *
646 * Input:
647 * vha: adapter state pointer.
648 * buf_sz/ex_logins_cnt: returned buffer size and exchange count.
649 *
650 * Returns:
651 * qla2x00 local function status
652 *
653 * Context:
654 * Kernel context.
655 */
656#define FETCH_XCHOFFLD_STAT 0x2
657int
658qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
659 uint16_t *ex_logins_cnt)
660{
661 int rval;
662 mbx_cmd_t mc;
663 mbx_cmd_t *mcp = &mc;
664
665 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
666 "Entered %s\n", __func__);
667
668 memset(mcp->mb, 0, sizeof(mcp->mb));
669 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
670 mcp->mb[1] = FETCH_XCHOFFLD_STAT;
671 mcp->out_mb = MBX_1|MBX_0;
672 mcp->in_mb = MBX_10|MBX_4|MBX_0;
673 mcp->tov = MBX_TOV_SECONDS;
674 mcp->flags = 0;
675
676 rval = qla2x00_mailbox_command(vha, mcp);
677 if (rval != QLA_SUCCESS) {
678 ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
679 } else {
680 *buf_sz = mcp->mb[4];
681 *ex_logins_cnt = mcp->mb[10];
682
683 ql_log(ql_log_info, vha, 0x118e,
684 "buffer size 0x%x, exchange offload count=%d\n",
685 mcp->mb[4], mcp->mb[10]);
686
687 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
688 "Done %s.\n", __func__);
689 }
690
691 return rval;
692}
693
694/*
695 * qla_set_exchoffld_mem_cfg
696 * Set exchange offload memory configuration
697 * Mbx needs to be issued before init_cb is set
698 *
699 * Input:
700 * vha: adapter state pointer.
701 * phys_addr: physical address of the exchange offload
702 * buffer; its size comes from ha->exchoffld_size.
703 *
704 * TARGET_QUEUE_LOCK must be released
705 * ADAPTER_STATE_LOCK must be released
706 *
707 * Returns:
708 * qla2x00 local function status code.
709 *
710 * Context:
711 * Kernel context.
712 */
713#define CONFIG_XCHOFFLD_MEM 0x3
714int
715qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
716{
717 int rval;
718 mbx_cmd_t mc;
719 mbx_cmd_t *mcp = &mc;
720 struct qla_hw_data *ha = vha->hw;
721
722 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
723 "Entered %s.\n", __func__);
724
725 memset(mcp->mb, 0, sizeof(mcp->mb));
726 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
727 mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
728 mcp->mb[2] = MSW(phys_addr);
729 mcp->mb[3] = LSW(phys_addr);
730 mcp->mb[6] = MSW(MSD(phys_addr));
731 mcp->mb[7] = LSW(MSD(phys_addr));
732 mcp->mb[8] = MSW(ha->exchoffld_size);
733 mcp->mb[9] = LSW(ha->exchoffld_size);
734 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
735 mcp->in_mb = MBX_11|MBX_0;
736 mcp->tov = MBX_TOV_SECONDS;
737 mcp->flags = 0;
738 rval = qla2x00_mailbox_command(vha, mcp);
739 if (rval != QLA_SUCCESS) {
740 /*EMPTY*/
741 ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
742 } else {
743 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
744 "Done %s.\n", __func__);
745 }
746
747 return rval;
748}
749
750/*
524 * qla2x00_get_fw_version 751 * qla2x00_get_fw_version
525 * Get firmware version. 752 * Get firmware version.
526 * 753 *
@@ -594,6 +821,16 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
594 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f, 821 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
595 "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n", 822 "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
596 __func__, mcp->mb[17], mcp->mb[16]); 823 __func__, mcp->mb[17], mcp->mb[16]);
824
825 if (ha->fw_attributes_h & 0x4)
826 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
827 "%s: Firmware supports Extended Login 0x%x\n",
828 __func__, ha->fw_attributes_h);
829
830 if (ha->fw_attributes_h & 0x8)
831 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
832 "%s: Firmware supports Exchange Offload 0x%x\n",
833 __func__, ha->fw_attributes_h);
597 } 834 }
598 835
599 if (IS_QLA27XX(ha)) { 836 if (IS_QLA27XX(ha)) {
@@ -2383,10 +2620,9 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2383 * Kernel context. 2620 * Kernel context.
2384 */ 2621 */
2385int 2622int
2386qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt, 2623qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
2387 uint16_t *orig_xchg_cnt, uint16_t *cur_iocb_cnt,
2388 uint16_t *orig_iocb_cnt, uint16_t *max_npiv_vports, uint16_t *max_fcfs)
2389{ 2624{
2625 struct qla_hw_data *ha = vha->hw;
2390 int rval; 2626 int rval;
2391 mbx_cmd_t mc; 2627 mbx_cmd_t mc;
2392 mbx_cmd_t *mcp = &mc; 2628 mbx_cmd_t *mcp = &mc;
@@ -2414,19 +2650,16 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
2414 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10], 2650 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
2415 mcp->mb[11], mcp->mb[12]); 2651 mcp->mb[11], mcp->mb[12]);
2416 2652
2417 if (cur_xchg_cnt) 2653 ha->orig_fw_tgt_xcb_count = mcp->mb[1];
2418 *cur_xchg_cnt = mcp->mb[3]; 2654 ha->cur_fw_tgt_xcb_count = mcp->mb[2];
2419 if (orig_xchg_cnt) 2655 ha->cur_fw_xcb_count = mcp->mb[3];
2420 *orig_xchg_cnt = mcp->mb[6]; 2656 ha->orig_fw_xcb_count = mcp->mb[6];
2421 if (cur_iocb_cnt) 2657 ha->cur_fw_iocb_count = mcp->mb[7];
2422 *cur_iocb_cnt = mcp->mb[7]; 2658 ha->orig_fw_iocb_count = mcp->mb[10];
2423 if (orig_iocb_cnt) 2659 if (ha->flags.npiv_supported)
2424 *orig_iocb_cnt = mcp->mb[10]; 2660 ha->max_npiv_vports = mcp->mb[11];
2425 if (vha->hw->flags.npiv_supported && max_npiv_vports) 2661 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
2426 *max_npiv_vports = mcp->mb[11]; 2662 ha->fw_max_fcf_count = mcp->mb[12];
2427 if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) ||
2428 IS_QLA27XX(vha->hw)) && max_fcfs)
2429 *max_fcfs = mcp->mb[12];
2430 } 2663 }
2431 2664
2432 return (rval); 2665 return (rval);
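The hunk above trades six out-pointers for fields cached on qla_hw_data, so later consumers can read the counts without re-issuing the mailbox command or passing rows of NULLs. The refactor in miniature, with the struct trimmed to two of the new fields:

#include <linux/types.h>

struct hw_counts {
	uint16_t cur_fw_xcb_count;	/* mb[3]: current exchanges */
	uint16_t orig_fw_xcb_count;	/* mb[6]: original exchanges */
};

static void cache_resource_cnts(struct hw_counts *hw, const uint16_t mb[16])
{
	hw->cur_fw_xcb_count  = mb[3];
	hw->orig_fw_xcb_count = mb[6];
}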
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 6be32fdab365..f1788db43195 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -221,6 +221,18 @@ MODULE_PARM_DESC(ql2xmdenable,
221 "0 - MiniDump disabled. " 221 "0 - MiniDump disabled. "
222 "1 (Default) - MiniDump enabled."); 222 "1 (Default) - MiniDump enabled.");
223 223
224int ql2xexlogins = 0;
225module_param(ql2xexlogins, int, S_IRUGO|S_IWUSR);
226MODULE_PARM_DESC(ql2xexlogins,
227 "Number of extended logins. "
228 "0 (Default) - Disabled.");
229
230int ql2xexchoffld = 0;
231module_param(ql2xexchoffld, int, S_IRUGO|S_IWUSR);
232MODULE_PARM_DESC(ql2xexchoffld,
233 "Number of exchanges to offload. "
234 "0 (Default) - Disabled.");
235
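Both knobs default to 0, so the features stay disabled unless sized explicitly at load time (for example, modprobe qla2xxx ql2xexlogins=128 ql2xexchoffld=1024). A hedged sketch of a call site that gates on them; the qla2x00_set_*_buffer() helpers are the ones this patch adds in qla_os.c below, but the wrapper shown here is illustrative, not the driver's exact call site:

extern int ql2xexlogins;
extern int ql2xexchoffld;

static int setup_offload_buffers(scsi_qla_host_t *vha)
{
	int rval;

	/* each helper returns QLA_SUCCESS at once when its knob is 0 */
	rval = qla2x00_set_exlogins_buffer(vha);
	if (rval != QLA_SUCCESS)
		return rval;
	return qla2x00_set_exchoffld_buffer(vha);
}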
224/* 236/*
225 * SCSI host template entry points 237 * SCSI host template entry points
226 */ 238 */
@@ -2324,6 +2336,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2324 ha->tgt.enable_class_2 = ql2xenableclass2; 2336 ha->tgt.enable_class_2 = ql2xenableclass2;
2325 INIT_LIST_HEAD(&ha->tgt.q_full_list); 2337 INIT_LIST_HEAD(&ha->tgt.q_full_list);
2326 spin_lock_init(&ha->tgt.q_full_lock); 2338 spin_lock_init(&ha->tgt.q_full_lock);
2339 spin_lock_init(&ha->tgt.sess_lock);
2340 spin_lock_init(&ha->tgt.atio_lock);
2341
2327 2342
2328 /* Clear our data area */ 2343 /* Clear our data area */
2329 ha->bars = bars; 2344 ha->bars = bars;
@@ -2468,7 +2483,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2468 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 2483 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
2469 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2484 ha->mbx_count = MAILBOX_REGISTER_COUNT;
2470 req_length = REQUEST_ENTRY_CNT_83XX; 2485 req_length = REQUEST_ENTRY_CNT_83XX;
2471 rsp_length = RESPONSE_ENTRY_CNT_2300; 2486 rsp_length = RESPONSE_ENTRY_CNT_83XX;
2472 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 2487 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
2473 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2488 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2474 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 2489 ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
@@ -2498,8 +2513,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2498 ha->portnum = PCI_FUNC(ha->pdev->devfn); 2513 ha->portnum = PCI_FUNC(ha->pdev->devfn);
2499 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 2514 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
2500 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2515 ha->mbx_count = MAILBOX_REGISTER_COUNT;
2501 req_length = REQUEST_ENTRY_CNT_24XX; 2516 req_length = REQUEST_ENTRY_CNT_83XX;
2502 rsp_length = RESPONSE_ENTRY_CNT_2300; 2517 rsp_length = RESPONSE_ENTRY_CNT_83XX;
2503 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 2518 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
2504 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2519 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2505 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 2520 ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
@@ -3128,6 +3143,14 @@ qla2x00_remove_one(struct pci_dev *pdev)
3128 3143
3129 base_vha->flags.online = 0; 3144 base_vha->flags.online = 0;
3130 3145
3146 /* free DMA memory */
3147 if (ha->exlogin_buf)
3148 qla2x00_free_exlogin_buffer(ha);
3149
3150 /* free DMA memory */
3151 if (ha->exchoffld_buf)
3152 qla2x00_free_exchoffld_buffer(ha);
3153
3131 qla2x00_destroy_deferred_work(ha); 3154 qla2x00_destroy_deferred_work(ha);
3132 3155
3133 qlt_remove_target(ha, base_vha); 3156 qlt_remove_target(ha, base_vha);
@@ -3587,6 +3610,140 @@ fail:
3587 return -ENOMEM; 3610 return -ENOMEM;
3588} 3611}
3589 3612
3613int
3614qla2x00_set_exlogins_buffer(scsi_qla_host_t *vha)
3615{
3616 int rval;
3617 uint16_t size, max_cnt, temp;
3618 struct qla_hw_data *ha = vha->hw;
3619
3620 /* Return if we don't need to allocate any extended logins */
3621 if (!ql2xexlogins)
3622 return QLA_SUCCESS;
3623
3624 ql_log(ql_log_info, vha, 0xd021, "EXLOGIN count: %d.\n", ql2xexlogins);
3625 max_cnt = 0;
3626 rval = qla_get_exlogin_status(vha, &size, &max_cnt);
3627 if (rval != QLA_SUCCESS) {
3628 ql_log_pci(ql_log_fatal, ha->pdev, 0xd029,
3629 "Failed to get exlogin status.\n");
3630 return rval;
3631 }
3632
3633 temp = (ql2xexlogins > max_cnt) ? max_cnt : ql2xexlogins;
3634 ha->exlogin_size = (size * temp);
3635 ql_log(ql_log_info, vha, 0xd024,
3636 "EXLOGIN: max_logins=%d, portdb=0x%x, total=%d.\n",
3637 max_cnt, size, temp);
3638
3639 ql_log(ql_log_info, vha, 0xd025, "EXLOGIN: requested size=0x%x\n",
3640 ha->exlogin_size);
3641
3642 /* Get consistent memory for extended logins */
3643 ha->exlogin_buf = dma_alloc_coherent(&ha->pdev->dev,
3644 ha->exlogin_size, &ha->exlogin_buf_dma, GFP_KERNEL);
3645 if (!ha->exlogin_buf) {
3646 ql_log_pci(ql_log_fatal, ha->pdev, 0xd02a,
3647 "Failed to allocate memory for exlogin_buf_dma.\n");
3648 return -ENOMEM;
3649 }
3650
3651 /* Now configure the dma buffer */
3652 rval = qla_set_exlogin_mem_cfg(vha, ha->exlogin_buf_dma);
3653 if (rval) {
3654 ql_log(ql_log_fatal, vha, 0x00cf,
3655 "Setup extended login buffer ****FAILED****.\n");
3656 qla2x00_free_exlogin_buffer(ha);
3657 }
3658
3659 return rval;
3660}
3661
3662/*
3663* qla2x00_free_exlogin_buffer
3664*
3665* Input:
3666* ha = adapter block pointer
3667*/
3668void
3669qla2x00_free_exlogin_buffer(struct qla_hw_data *ha)
3670{
3671 if (ha->exlogin_buf) {
3672 dma_free_coherent(&ha->pdev->dev, ha->exlogin_size,
3673 ha->exlogin_buf, ha->exlogin_buf_dma);
3674 ha->exlogin_buf = NULL;
3675 ha->exlogin_size = 0;
3676 }
3677}
3678
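qla2x00_set_exlogins_buffer() and its free routine above follow the usual coherent-DMA lifecycle: query the per-login size from firmware, dma_alloc_coherent() the product, hand the bus address to firmware via the mailbox, and free with the same size on teardown. A generic sketch of the alloc/free pairing; the struct and names are placeholders:

#include <linux/dma-mapping.h>

struct offload_buf {
	void *cpu;	/* kernel virtual address */
	dma_addr_t bus;	/* address the HBA DMAs to */
	size_t size;
};

static int buf_alloc(struct device *dev, struct offload_buf *b, size_t size)
{
	b->cpu = dma_alloc_coherent(dev, size, &b->bus, GFP_KERNEL);
	if (!b->cpu)
		return -ENOMEM;
	b->size = size;
	return 0;
}

static void buf_free(struct device *dev, struct offload_buf *b)
{
	if (b->cpu) {
		dma_free_coherent(dev, b->size, b->cpu, b->bus);
		b->cpu = NULL;	/* make the free idempotent, as the driver does */
		b->size = 0;
	}
}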
3679int
3680qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
3681{
3682 int rval;
3683 uint16_t size, max_cnt, temp;
3684 struct qla_hw_data *ha = vha->hw;
3685
3686 /* Return if we don't need to allocate any exchange offload buffers */
3687 if (!ql2xexchoffld)
3688 return QLA_SUCCESS;
3689
3690 ql_log(ql_log_info, vha, 0xd014,
3691 "Exchange offload count: %d.\n", ql2xexlogins);
3692
3693 max_cnt = 0;
3694 rval = qla_get_exchoffld_status(vha, &size, &max_cnt);
3695 if (rval != QLA_SUCCESS) {
3696 ql_log_pci(ql_log_fatal, ha->pdev, 0xd012,
3697 "Failed to get exlogin status.\n");
3698 return rval;
3699 }
3700
3701 temp = (ql2xexchoffld > max_cnt) ? max_cnt : ql2xexchoffld;
3702 ha->exchoffld_size = (size * temp);
3703 ql_log(ql_log_info, vha, 0xd016,
3704 "Exchange offload: max_count=%d, buffers=0x%x, total=%d.\n",
3705 max_cnt, size, temp);
3706
3707 ql_log(ql_log_info, vha, 0xd017,
3708 "Exchange Buffers requested size = 0x%x\n", ha->exchoffld_size);
3709
3710 /* Get consistent memory for exchange offload */
3711 ha->exchoffld_buf = dma_alloc_coherent(&ha->pdev->dev,
3712 ha->exchoffld_size, &ha->exchoffld_buf_dma, GFP_KERNEL);
3713 if (!ha->exchoffld_buf) {
3714 ql_log_pci(ql_log_fatal, ha->pdev, 0xd013,
3715 "Failed to allocate memory for exchoffld_buf_dma.\n");
3716 return -ENOMEM;
3717 }
3718
3719 /* Now configure the dma buffer */
3720 rval = qla_set_exchoffld_mem_cfg(vha, ha->exchoffld_buf_dma);
3721 if (rval) {
3722 ql_log(ql_log_fatal, vha, 0xd02e,
3723 "Setup exchange offload buffer ****FAILED****.\n");
3724 qla2x00_free_exchoffld_buffer(ha);
3725 }
3726
3727 return rval;
3728}
3729
3730/*
3731* qla2x00_free_exchoffld_buffer
3732*
3733* Input:
3734* ha = adapter block pointer
3735*/
3736void
3737qla2x00_free_exchoffld_buffer(struct qla_hw_data *ha)
3738{
3739 if (ha->exchoffld_buf) {
3740 dma_free_coherent(&ha->pdev->dev, ha->exchoffld_size,
3741 ha->exchoffld_buf, ha->exchoffld_buf_dma);
3742 ha->exchoffld_buf = NULL;
3743 ha->exchoffld_size = 0;
3744 }
3745}
3746
3590/* 3747/*
3591* qla2x00_free_fw_dump 3748* qla2x00_free_fw_dump
3592* Frees fw dump stuff. 3749* Frees fw dump stuff.
@@ -3766,6 +3923,8 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
3766 INIT_LIST_HEAD(&vha->list); 3923 INIT_LIST_HEAD(&vha->list);
3767 INIT_LIST_HEAD(&vha->qla_cmd_list); 3924 INIT_LIST_HEAD(&vha->qla_cmd_list);
3768 INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list); 3925 INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list);
3926 INIT_LIST_HEAD(&vha->logo_list);
3927 INIT_LIST_HEAD(&vha->plogi_ack_list);
3769 3928
3770 spin_lock_init(&vha->work_lock); 3929 spin_lock_init(&vha->work_lock);
3771 spin_lock_init(&vha->cmd_list_lock); 3930 spin_lock_init(&vha->cmd_list_lock);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 75514a15bea0..8075a4cdb45c 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -100,7 +100,7 @@ enum fcp_resp_rsp_codes {
100 */ 100 */
101/* Predefs for callbacks handed to qla2xxx LLD */ 101/* Predefs for callbacks handed to qla2xxx LLD */
102static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha, 102static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
103 struct atio_from_isp *pkt); 103 struct atio_from_isp *pkt, uint8_t);
104static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt); 104static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
105static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun, 105static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
106 int fn, void *iocb, int flags); 106 int fn, void *iocb, int flags);
@@ -118,10 +118,13 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha,
118 struct imm_ntfy_from_isp *ntfy, 118 struct imm_ntfy_from_isp *ntfy,
119 uint32_t add_flags, uint16_t resp_code, int resp_code_valid, 119 uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
120 uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan); 120 uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
121static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
122 struct imm_ntfy_from_isp *imm, int ha_locked);
121/* 123/*
122 * Global Variables 124 * Global Variables
123 */ 125 */
124static struct kmem_cache *qla_tgt_mgmt_cmd_cachep; 126static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
127static struct kmem_cache *qla_tgt_plogi_cachep;
125static mempool_t *qla_tgt_mgmt_cmd_mempool; 128static mempool_t *qla_tgt_mgmt_cmd_mempool;
126static struct workqueue_struct *qla_tgt_wq; 129static struct workqueue_struct *qla_tgt_wq;
127static DEFINE_MUTEX(qla_tgt_mutex); 130static DEFINE_MUTEX(qla_tgt_mutex);
@@ -226,8 +229,8 @@ static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
226 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); 229 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
227} 230}
228 231
229static void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, 232static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
230 struct atio_from_isp *atio) 233 struct atio_from_isp *atio, uint8_t ha_locked)
231{ 234{
232 ql_dbg(ql_dbg_tgt, vha, 0xe072, 235 ql_dbg(ql_dbg_tgt, vha, 0xe072,
233 "%s: qla_target(%d): type %x ox_id %04x\n", 236 "%s: qla_target(%d): type %x ox_id %04x\n",
@@ -248,7 +251,7 @@ static void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
248 atio->u.isp24.fcp_hdr.d_id[2]); 251 atio->u.isp24.fcp_hdr.d_id[2]);
249 break; 252 break;
250 } 253 }
251 qlt_24xx_atio_pkt(host, atio); 254 qlt_24xx_atio_pkt(host, atio, ha_locked);
252 break; 255 break;
253 } 256 }
254 257
@@ -271,7 +274,7 @@ static void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
271 break; 274 break;
272 } 275 }
273 } 276 }
274 qlt_24xx_atio_pkt(host, atio); 277 qlt_24xx_atio_pkt(host, atio, ha_locked);
275 break; 278 break;
276 } 279 }
277 280
@@ -282,7 +285,7 @@ static void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
282 break; 285 break;
283 } 286 }
284 287
285 return; 288 return false;
286} 289}
287 290
288void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt) 291void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
@@ -389,6 +392,131 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
389 392
390} 393}
391 394
395/*
396 * All qlt_plogi_ack_t operations are protected by hardware_lock
397 */
398
399/*
400 * This is a zero-based ref-counting solution, since hardware_lock
401 * guarantees that ref_count is not modified concurrently.
402 * Upon successful return, the content of *iocb is undefined.
403 */
404static qlt_plogi_ack_t *
405qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
406 struct imm_ntfy_from_isp *iocb)
407{
408 qlt_plogi_ack_t *pla;
409
410 list_for_each_entry(pla, &vha->plogi_ack_list, list) {
411 if (pla->id.b24 == id->b24) {
412 qlt_send_term_imm_notif(vha, &pla->iocb, 1);
413 pla->iocb = *iocb;
414 return pla;
415 }
416 }
417
418 pla = kmem_cache_zalloc(qla_tgt_plogi_cachep, GFP_ATOMIC);
419 if (!pla) {
420 ql_dbg(ql_dbg_async, vha, 0x5088,
421 "qla_target(%d): Allocation of plogi_ack failed\n",
422 vha->vp_idx);
423 return NULL;
424 }
425
426 pla->iocb = *iocb;
427 pla->id = *id;
428 list_add_tail(&pla->list, &vha->plogi_ack_list);
429
430 return pla;
431}
432
433static void qlt_plogi_ack_unref(struct scsi_qla_host *vha, qlt_plogi_ack_t *pla)
434{
435 BUG_ON(!pla->ref_count);
436 pla->ref_count--;
437
438 if (pla->ref_count)
439 return;
440
441 ql_dbg(ql_dbg_async, vha, 0x5089,
442 "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x"
443 " exch %#x ox_id %#x\n", pla->iocb.u.isp24.port_name,
444 pla->iocb.u.isp24.port_id[2], pla->iocb.u.isp24.port_id[1],
445 pla->iocb.u.isp24.port_id[0],
446 le16_to_cpu(pla->iocb.u.isp24.nport_handle),
447 pla->iocb.u.isp24.exchange_address, pla->iocb.ox_id);
448 qlt_send_notify_ack(vha, &pla->iocb, 0, 0, 0, 0, 0, 0);
449
450 list_del(&pla->list);
451 kmem_cache_free(qla_tgt_plogi_cachep, pla);
452}
453
454static void
455qlt_plogi_ack_link(struct scsi_qla_host *vha, qlt_plogi_ack_t *pla,
456 struct qla_tgt_sess *sess, qlt_plogi_link_t link)
457{
458 /* Inc ref_count first because link might already be pointing at pla */
459 pla->ref_count++;
460
461 if (sess->plogi_link[link])
462 qlt_plogi_ack_unref(vha, sess->plogi_link[link]);
463
464 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097,
465 "Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC"
466 " s_id %02x:%02x:%02x, ref=%d\n", sess, link, sess->port_name,
467 pla->iocb.u.isp24.port_name, pla->iocb.u.isp24.port_id[2],
468 pla->iocb.u.isp24.port_id[1], pla->iocb.u.isp24.port_id[0],
469 pla->ref_count);
470
471 sess->plogi_link[link] = pla;
472}
473
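qlt_plogi_ack_find_add(), qlt_plogi_ack_unref() and qlt_plogi_ack_link() form a small intrusive refcount: ref_count tracks how many session slots point at an entry, and the deferred PLOGI ACK goes out only when the count drops to zero. Because every caller holds hardware_lock, a plain int suffices instead of atomics. The same shape, stripped of the qla specifics:

#include <linux/list.h>
#include <linux/slab.h>

struct ack_entry {
	struct list_head list;
	int ref_count;		/* session slots pointing at this entry */
};

/* all callers hold the one lock that serializes ack_entry access */
static void ack_get(struct ack_entry *e)
{
	e->ref_count++;
}

static void ack_put(struct ack_entry *e)
{
	if (--e->ref_count)
		return;
	/* last reference: the real driver sends the deferred
	 * PLOGI ACK here before freeing the entry */
	list_del(&e->list);
	kfree(e);
}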
474typedef struct {
475 /* These fields must be initialized by the caller */
476 port_id_t id;
477 /*
478 * number of cmds dropped while we were waiting for the
479 * initiator to ack LOGO; initialize to 1 if the LOGO is
480 * triggered by a command, otherwise to 0
481 */
482 int cmd_count;
483
484 /* These fields are used by the callee */
485 struct list_head list;
486} qlt_port_logo_t;
487
488static void
489qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
490{
491 qlt_port_logo_t *tmp;
492 int res;
493
494 mutex_lock(&vha->vha_tgt.tgt_mutex);
495
496 list_for_each_entry(tmp, &vha->logo_list, list) {
497 if (tmp->id.b24 == logo->id.b24) {
498 tmp->cmd_count += logo->cmd_count;
499 mutex_unlock(&vha->vha_tgt.tgt_mutex);
500 return;
501 }
502 }
503
504 list_add_tail(&logo->list, &vha->logo_list);
505
506 mutex_unlock(&vha->vha_tgt.tgt_mutex);
507
508 res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id);
509
510 mutex_lock(&vha->vha_tgt.tgt_mutex);
511 list_del(&logo->list);
512 mutex_unlock(&vha->vha_tgt.tgt_mutex);
513
514 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098,
515 "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n",
516 logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa,
517 logo->cmd_count, res);
518}
519
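qlt_send_first_logo() coalesces concurrent LOGO attempts for one port: under tgt_mutex it either folds the request into an entry already in flight (summing cmd_count) or queues its own and issues the ELS. The guarded find-or-insert in isolation; types are reduced, and the real key is a port_id_t:

#include <linux/list.h>
#include <linux/mutex.h>

struct logo_req {
	struct list_head list;
	u32 id;		/* port id being logged out */
	int cmd_count;	/* commands dropped while waiting */
};

static bool logo_merge_or_queue(struct mutex *lock, struct list_head *head,
				struct logo_req *req)
{
	struct logo_req *tmp;

	mutex_lock(lock);
	list_for_each_entry(tmp, head, list) {
		if (tmp->id == req->id) {
			tmp->cmd_count += req->cmd_count;
			mutex_unlock(lock);
			return false;	/* merged; caller must not send */
		}
	}
	list_add_tail(&req->list, head);
	mutex_unlock(lock);
	return true;		/* queued; caller sends the ELS LOGO */
}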
392static void qlt_free_session_done(struct work_struct *work) 520static void qlt_free_session_done(struct work_struct *work)
393{ 521{
394 struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess, 522 struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess,
@@ -402,14 +530,21 @@ static void qlt_free_session_done(struct work_struct *work)
402 530
403 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084, 531 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
404 "%s: se_sess %p / sess %p from port %8phC loop_id %#04x" 532 "%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
405 " s_id %02x:%02x:%02x logout %d keep %d plogi %d\n", 533 " s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n",
406 __func__, sess->se_sess, sess, sess->port_name, sess->loop_id, 534 __func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
407 sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa, 535 sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
408 sess->logout_on_delete, sess->keep_nport_handle, 536 sess->logout_on_delete, sess->keep_nport_handle,
409 sess->plogi_ack_needed); 537 sess->send_els_logo);
410 538
411 BUG_ON(!tgt); 539 BUG_ON(!tgt);
412 540
541 if (sess->send_els_logo) {
542 qlt_port_logo_t logo;
543 logo.id = sess->s_id;
544 logo.cmd_count = 0;
545 qlt_send_first_logo(vha, &logo);
546 }
547
413 if (sess->logout_on_delete) { 548 if (sess->logout_on_delete) {
414 int rc; 549 int rc;
415 550
@@ -455,9 +590,34 @@ static void qlt_free_session_done(struct work_struct *work)
455 590
456 spin_lock_irqsave(&ha->hardware_lock, flags); 591 spin_lock_irqsave(&ha->hardware_lock, flags);
457 592
458 if (sess->plogi_ack_needed) 593 {
459 qlt_send_notify_ack(vha, &sess->tm_iocb, 594 qlt_plogi_ack_t *own =
460 0, 0, 0, 0, 0, 0); 595 sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
596 qlt_plogi_ack_t *con =
597 sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
598
599 if (con) {
600 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099,
601 "se_sess %p / sess %p port %8phC is gone,"
602 " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n",
603 sess->se_sess, sess, sess->port_name,
604 own ? "releasing own PLOGI" :
605 "no own PLOGI pending",
606 own ? own->ref_count : -1,
607 con->iocb.u.isp24.port_name, con->ref_count);
608 qlt_plogi_ack_unref(vha, con);
609 } else {
610 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a,
611 "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n",
612 sess->se_sess, sess, sess->port_name,
613 own ? "releasing own PLOGI" :
614 "no own PLOGI pending",
615 own ? own->ref_count : -1);
616 }
617
618 if (own)
619 qlt_plogi_ack_unref(vha, own);
620 }
461 621
462 list_del(&sess->sess_list_entry); 622 list_del(&sess->sess_list_entry);
463 623
@@ -476,7 +636,7 @@ static void qlt_free_session_done(struct work_struct *work)
476 wake_up_all(&tgt->waitQ); 636 wake_up_all(&tgt->waitQ);
477} 637}
478 638
479/* ha->hardware_lock supposed to be held on entry */ 639/* ha->tgt.sess_lock supposed to be held on entry */
480void qlt_unreg_sess(struct qla_tgt_sess *sess) 640void qlt_unreg_sess(struct qla_tgt_sess *sess)
481{ 641{
482 struct scsi_qla_host *vha = sess->vha; 642 struct scsi_qla_host *vha = sess->vha;
@@ -492,7 +652,7 @@ void qlt_unreg_sess(struct qla_tgt_sess *sess)
492} 652}
493EXPORT_SYMBOL(qlt_unreg_sess); 653EXPORT_SYMBOL(qlt_unreg_sess);
494 654
495/* ha->hardware_lock supposed to be held on entry */ 655
496static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) 656static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
497{ 657{
498 struct qla_hw_data *ha = vha->hw; 658 struct qla_hw_data *ha = vha->hw;
@@ -502,12 +662,15 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
502 int res = 0; 662 int res = 0;
503 struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb; 663 struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
504 struct atio_from_isp *a = (struct atio_from_isp *)iocb; 664 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
665 unsigned long flags;
505 666
506 loop_id = le16_to_cpu(n->u.isp24.nport_handle); 667 loop_id = le16_to_cpu(n->u.isp24.nport_handle);
507 if (loop_id == 0xFFFF) { 668 if (loop_id == 0xFFFF) {
508 /* Global event */ 669 /* Global event */
509 atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count); 670 atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
671 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
510 qlt_clear_tgt_db(vha->vha_tgt.qla_tgt); 672 qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
673 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
511#if 0 /* FIXME: do we need to choose a session here? */ 674#if 0 /* FIXME: do we need to choose a session here? */
512 if (!list_empty(&ha->tgt.qla_tgt->sess_list)) { 675 if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
513 sess = list_entry(ha->tgt.qla_tgt->sess_list.next, 676 sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
@@ -534,7 +697,9 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
534 sess = NULL; 697 sess = NULL;
535#endif 698#endif
536 } else { 699 } else {
700 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
537 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id); 701 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
702 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
538 } 703 }
539 704
540 ql_dbg(ql_dbg_tgt, vha, 0xe000, 705 ql_dbg(ql_dbg_tgt, vha, 0xe000,
@@ -556,7 +721,7 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
556 iocb, QLA24XX_MGMT_SEND_NACK); 721 iocb, QLA24XX_MGMT_SEND_NACK);
557} 722}
558 723
559/* ha->hardware_lock supposed to be held on entry */ 724/* ha->tgt.sess_lock supposed to be held on entry */
560static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess, 725static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
561 bool immediate) 726 bool immediate)
562{ 727{
@@ -600,7 +765,7 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
600 sess->expires - jiffies); 765 sess->expires - jiffies);
601} 766}
602 767
603/* ha->hardware_lock supposed to be held on entry */ 768/* ha->tgt.sess_lock supposed to be held on entry */
604static void qlt_clear_tgt_db(struct qla_tgt *tgt) 769static void qlt_clear_tgt_db(struct qla_tgt *tgt)
605{ 770{
606 struct qla_tgt_sess *sess; 771 struct qla_tgt_sess *sess;
@@ -636,12 +801,12 @@ static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
636 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045, 801 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
637 "qla_target(%d): get_id_list() failed: %x\n", 802 "qla_target(%d): get_id_list() failed: %x\n",
638 vha->vp_idx, rc); 803 vha->vp_idx, rc);
639 res = -1; 804 res = -EBUSY;
640 goto out_free_id_list; 805 goto out_free_id_list;
641 } 806 }
642 807
643 id_iter = (char *)gid_list; 808 id_iter = (char *)gid_list;
644 res = -1; 809 res = -ENOENT;
645 for (i = 0; i < entries; i++) { 810 for (i = 0; i < entries; i++) {
646 struct gid_list_info *gid = (struct gid_list_info *)id_iter; 811 struct gid_list_info *gid = (struct gid_list_info *)id_iter;
647 if ((gid->al_pa == s_id[2]) && 812 if ((gid->al_pa == s_id[2]) &&
@@ -660,7 +825,7 @@ out_free_id_list:
660 return res; 825 return res;
661} 826}
662 827
663/* ha->hardware_lock supposed to be held on entry */ 828/* ha->tgt.sess_lock supposed to be held on entry */
664static void qlt_undelete_sess(struct qla_tgt_sess *sess) 829static void qlt_undelete_sess(struct qla_tgt_sess *sess)
665{ 830{
666 BUG_ON(sess->deleted != QLA_SESS_DELETION_PENDING); 831 BUG_ON(sess->deleted != QLA_SESS_DELETION_PENDING);
@@ -678,7 +843,7 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
678 struct qla_tgt_sess *sess; 843 struct qla_tgt_sess *sess;
679 unsigned long flags, elapsed; 844 unsigned long flags, elapsed;
680 845
681 spin_lock_irqsave(&ha->hardware_lock, flags); 846 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
682 while (!list_empty(&tgt->del_sess_list)) { 847 while (!list_empty(&tgt->del_sess_list)) {
683 sess = list_entry(tgt->del_sess_list.next, typeof(*sess), 848 sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
684 del_list_entry); 849 del_list_entry);
@@ -699,7 +864,7 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
699 break; 864 break;
700 } 865 }
701 } 866 }
702 spin_unlock_irqrestore(&ha->hardware_lock, flags); 867 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
703} 868}
704 869
705/* 870/*
@@ -717,7 +882,7 @@ static struct qla_tgt_sess *qlt_create_sess(
717 unsigned char be_sid[3]; 882 unsigned char be_sid[3];
718 883
719 /* Check to avoid double sessions */ 884 /* Check to avoid double sessions */
720 spin_lock_irqsave(&ha->hardware_lock, flags); 885 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
721 list_for_each_entry(sess, &vha->vha_tgt.qla_tgt->sess_list, 886 list_for_each_entry(sess, &vha->vha_tgt.qla_tgt->sess_list,
722 sess_list_entry) { 887 sess_list_entry) {
723 if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) { 888 if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
@@ -732,7 +897,7 @@ static struct qla_tgt_sess *qlt_create_sess(
732 897
733 /* Cannot undelete at this point */ 898 /* Cannot undelete at this point */
734 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { 899 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
735 spin_unlock_irqrestore(&ha->hardware_lock, 900 spin_unlock_irqrestore(&ha->tgt.sess_lock,
736 flags); 901 flags);
737 return NULL; 902 return NULL;
738 } 903 }
@@ -749,12 +914,12 @@ static struct qla_tgt_sess *qlt_create_sess(
749 914
750 qlt_do_generation_tick(vha, &sess->generation); 915 qlt_do_generation_tick(vha, &sess->generation);
751 916
752 spin_unlock_irqrestore(&ha->hardware_lock, flags); 917 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
753 918
754 return sess; 919 return sess;
755 } 920 }
756 } 921 }
757 spin_unlock_irqrestore(&ha->hardware_lock, flags); 922 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
758 923
759 sess = kzalloc(sizeof(*sess), GFP_KERNEL); 924 sess = kzalloc(sizeof(*sess), GFP_KERNEL);
760 if (!sess) { 925 if (!sess) {
@@ -799,7 +964,7 @@ static struct qla_tgt_sess *qlt_create_sess(
799 } 964 }
800 /* 965 /*
801 * Take an extra reference to ->sess_kref here to handle qla_tgt_sess 966 * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
802 * access across ->hardware_lock reacquire. 967 * access across ->tgt.sess_lock reacquire.
803 */ 968 */
804 kref_get(&sess->se_sess->sess_kref); 969 kref_get(&sess->se_sess->sess_kref);
805 970
@@ -807,11 +972,11 @@ static struct qla_tgt_sess *qlt_create_sess(
807 BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name)); 972 BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
808 memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name)); 973 memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));
809 974
810 spin_lock_irqsave(&ha->hardware_lock, flags); 975 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
811 list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list); 976 list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
812 vha->vha_tgt.qla_tgt->sess_count++; 977 vha->vha_tgt.qla_tgt->sess_count++;
813 qlt_do_generation_tick(vha, &sess->generation); 978 qlt_do_generation_tick(vha, &sess->generation);
814 spin_unlock_irqrestore(&ha->hardware_lock, flags); 979 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
815 980
816 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b, 981 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
817 "qla_target(%d): %ssession for wwn %8phC (loop_id %d, " 982 "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
@@ -842,23 +1007,23 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
842 if (qla_ini_mode_enabled(vha)) 1007 if (qla_ini_mode_enabled(vha))
843 return; 1008 return;
844 1009
845 spin_lock_irqsave(&ha->hardware_lock, flags); 1010 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
846 if (tgt->tgt_stop) { 1011 if (tgt->tgt_stop) {
847 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1012 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
848 return; 1013 return;
849 } 1014 }
850 sess = qlt_find_sess_by_port_name(tgt, fcport->port_name); 1015 sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
851 if (!sess) { 1016 if (!sess) {
852 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1017 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
853 1018
854 mutex_lock(&vha->vha_tgt.tgt_mutex); 1019 mutex_lock(&vha->vha_tgt.tgt_mutex);
855 sess = qlt_create_sess(vha, fcport, false); 1020 sess = qlt_create_sess(vha, fcport, false);
856 mutex_unlock(&vha->vha_tgt.tgt_mutex); 1021 mutex_unlock(&vha->vha_tgt.tgt_mutex);
857 1022
858 spin_lock_irqsave(&ha->hardware_lock, flags); 1023 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
859 } else if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { 1024 } else if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
860 /* Point of no return */ 1025 /* Point of no return */
861 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1026 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
862 return; 1027 return;
863 } else { 1028 } else {
864 kref_get(&sess->se_sess->sess_kref); 1029 kref_get(&sess->se_sess->sess_kref);
@@ -887,7 +1052,7 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
887 sess->local = 0; 1052 sess->local = 0;
888 } 1053 }
889 ha->tgt.tgt_ops->put_sess(sess); 1054 ha->tgt.tgt_ops->put_sess(sess);
890 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1055 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
891} 1056}
892 1057
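This hunk and its neighbors move every session-list access from the coarse ha->hardware_lock to the new, finer-grained ha->tgt.sess_lock (initialized in qla_os.c above), so session add/delete no longer contends with I/O-path users of hardware_lock. The conversion in miniature:

#include <linux/list.h>
#include <linux/spinlock.h>

struct sess_db {
	spinlock_t sess_lock;		/* protects sess_list only */
	struct list_head sess_list;
};

static void sess_db_add(struct sess_db *db, struct list_head *entry)
{
	unsigned long flags;

	/* was: spin_lock_irqsave(&ha->hardware_lock, flags) */
	spin_lock_irqsave(&db->sess_lock, flags);
	list_add_tail(entry, &db->sess_list);
	spin_unlock_irqrestore(&db->sess_lock, flags);
}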
893/* 1058/*
@@ -899,6 +1064,7 @@ qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
899{ 1064{
900 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 1065 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
901 struct qla_tgt_sess *sess; 1066 struct qla_tgt_sess *sess;
1067 unsigned long flags;
902 1068
903 if (!vha->hw->tgt.tgt_ops) 1069 if (!vha->hw->tgt.tgt_ops)
904 return; 1070 return;
@@ -906,15 +1072,19 @@ qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
906 if (!tgt) 1072 if (!tgt)
907 return; 1073 return;
908 1074
1075 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
909 if (tgt->tgt_stop) { 1076 if (tgt->tgt_stop) {
1077 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
910 return; 1078 return;
911 } 1079 }
912 sess = qlt_find_sess_by_port_name(tgt, fcport->port_name); 1080 sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
913 if (!sess) { 1081 if (!sess) {
1082 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
914 return; 1083 return;
915 } 1084 }
916 1085
917 if (max_gen - sess->generation < 0) { 1086 if (max_gen - sess->generation < 0) {
1087 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
918 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092, 1088 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
919 "Ignoring stale deletion request for se_sess %p / sess %p" 1089 "Ignoring stale deletion request for se_sess %p / sess %p"
920 " for port %8phC, req_gen %d, sess_gen %d\n", 1090 " for port %8phC, req_gen %d, sess_gen %d\n",
@@ -927,6 +1097,7 @@ qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
927 1097
928 sess->local = 1; 1098 sess->local = 1;
929 qlt_schedule_sess_for_deletion(sess, false); 1099 qlt_schedule_sess_for_deletion(sess, false);
1100 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
930} 1101}
931 1102
932static inline int test_tgt_sess_count(struct qla_tgt *tgt) 1103static inline int test_tgt_sess_count(struct qla_tgt *tgt)
@@ -984,10 +1155,10 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
984 * Lock is needed, because we still can get an incoming packet. 1155 * Lock is needed, because we still can get an incoming packet.
985 */ 1156 */
986 mutex_lock(&vha->vha_tgt.tgt_mutex); 1157 mutex_lock(&vha->vha_tgt.tgt_mutex);
987 spin_lock_irqsave(&ha->hardware_lock, flags); 1158 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
988 tgt->tgt_stop = 1; 1159 tgt->tgt_stop = 1;
989 qlt_clear_tgt_db(tgt); 1160 qlt_clear_tgt_db(tgt);
990 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1161 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
991 mutex_unlock(&vha->vha_tgt.tgt_mutex); 1162 mutex_unlock(&vha->vha_tgt.tgt_mutex);
992 mutex_unlock(&qla_tgt_mutex); 1163 mutex_unlock(&qla_tgt_mutex);
993 1164
@@ -1040,7 +1211,7 @@ void qlt_stop_phase2(struct qla_tgt *tgt)
1040 1211
1041 mutex_lock(&vha->vha_tgt.tgt_mutex); 1212 mutex_lock(&vha->vha_tgt.tgt_mutex);
1042 spin_lock_irqsave(&ha->hardware_lock, flags); 1213 spin_lock_irqsave(&ha->hardware_lock, flags);
1043 while (tgt->irq_cmd_count != 0) { 1214 while ((tgt->irq_cmd_count != 0) || (tgt->atio_irq_cmd_count != 0)) {
1044 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1215 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1045 udelay(2); 1216 udelay(2);
1046 spin_lock_irqsave(&ha->hardware_lock, flags); 1217 spin_lock_irqsave(&ha->hardware_lock, flags);
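qlt_stop_phase2() now also waits for atio_irq_cmd_count, the counter incremented by the new hardware-locked ATIO path, so teardown cannot overtake an ATIO still being processed. The drop-lock/udelay/retake polling idiom it relies on, in isolation; counter names follow the hunk, the struct is trimmed:

#include <linux/delay.h>
#include <linux/spinlock.h>

struct drainer {
	spinlock_t *lock;
	int irq_cmd_count;
	int atio_irq_cmd_count;
};

static void wait_for_quiesce(struct drainer *d)
{
	unsigned long flags;

	spin_lock_irqsave(d->lock, flags);
	while (d->irq_cmd_count != 0 || d->atio_irq_cmd_count != 0) {
		/* drop the lock so the IRQ path can finish and decrement */
		spin_unlock_irqrestore(d->lock, flags);
		udelay(2);
		spin_lock_irqsave(d->lock, flags);
	}
	spin_unlock_irqrestore(d->lock, flags);
}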
@@ -1309,7 +1480,7 @@ static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
1309 1480
1310 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { 1481 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
1311 if (tag == cmd->atio.u.isp24.exchange_addr) { 1482 if (tag == cmd->atio.u.isp24.exchange_addr) {
1312 cmd->state = QLA_TGT_STATE_ABORTED; 1483 cmd->aborted = 1;
1313 spin_unlock(&vha->cmd_list_lock); 1484 spin_unlock(&vha->cmd_list_lock);
1314 return 1; 1485 return 1;
1315 } 1486 }
@@ -1351,7 +1522,7 @@ static void abort_cmds_for_lun(struct scsi_qla_host *vha,
1351 cmd_lun = scsilun_to_int( 1522 cmd_lun = scsilun_to_int(
1352 (struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun); 1523 (struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
1353 if (cmd_key == key && cmd_lun == lun) 1524 if (cmd_key == key && cmd_lun == lun)
1354 cmd->state = QLA_TGT_STATE_ABORTED; 1525 cmd->aborted = 1;
1355 } 1526 }
1356 spin_unlock(&vha->cmd_list_lock); 1527 spin_unlock(&vha->cmd_list_lock);
1357} 1528}
@@ -1435,6 +1606,7 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
1435 uint32_t tag = abts->exchange_addr_to_abort; 1606 uint32_t tag = abts->exchange_addr_to_abort;
1436 uint8_t s_id[3]; 1607 uint8_t s_id[3];
1437 int rc; 1608 int rc;
1609 unsigned long flags;
1438 1610
1439 if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) { 1611 if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
1440 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053, 1612 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
@@ -1462,6 +1634,7 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
1462 s_id[1] = abts->fcp_hdr_le.s_id[1]; 1634 s_id[1] = abts->fcp_hdr_le.s_id[1];
1463 s_id[2] = abts->fcp_hdr_le.s_id[0]; 1635 s_id[2] = abts->fcp_hdr_le.s_id[0];
1464 1636
1637 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1465 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); 1638 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
1466 if (!sess) { 1639 if (!sess) {
1467 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012, 1640 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
@@ -1469,12 +1642,17 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
1469 vha->vp_idx); 1642 vha->vp_idx);
1470 rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt, 1643 rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
1471 QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts)); 1644 QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
1645
1646 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1647
1472 if (rc != 0) { 1648 if (rc != 0) {
1473 qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, 1649 qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
1474 false); 1650 false);
1475 } 1651 }
1476 return; 1652 return;
1477 } 1653 }
1654 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1655
1478 1656
1479 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { 1657 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
1480 qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false); 1658 qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
@@ -1560,15 +1738,15 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
1560 1738
1561 spin_lock_irqsave(&ha->hardware_lock, flags); 1739 spin_lock_irqsave(&ha->hardware_lock, flags);
1562 1740
1563 if (qla2x00_reset_active(vha) || mcmd->reset_count != ha->chip_reset) { 1741 if (!vha->flags.online || mcmd->reset_count != ha->chip_reset) {
1564 /* 1742 /*
1565 * Either a chip reset is active or this request was from 1743 * Either the port is not online or this request was from
1566 * previous life, just abort the processing. 1744 * previous life, just abort the processing.
1567 */ 1745 */
1568 ql_dbg(ql_dbg_async, vha, 0xe100, 1746 ql_dbg(ql_dbg_async, vha, 0xe100,
1569 "RESET-TMR active/old-count/new-count = %d/%d/%d.\n", 1747 "RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n",
1570 qla2x00_reset_active(vha), mcmd->reset_count, 1748 vha->flags.online, qla2x00_reset_active(vha),
1571 ha->chip_reset); 1749 mcmd->reset_count, ha->chip_reset);
1572 ha->tgt.tgt_ops->free_mcmd(mcmd); 1750 ha->tgt.tgt_ops->free_mcmd(mcmd);
1573 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1751 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1574 return; 1752 return;
@@ -2510,17 +2688,22 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
2510 2688
2511 spin_lock_irqsave(&ha->hardware_lock, flags); 2689 spin_lock_irqsave(&ha->hardware_lock, flags);
2512 2690
2513 if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) { 2691 if (xmit_type == QLA_TGT_XMIT_STATUS)
2692 vha->tgt_counters.core_qla_snd_status++;
2693 else
2694 vha->tgt_counters.core_qla_que_buf++;
2695
2696 if (!vha->flags.online || cmd->reset_count != ha->chip_reset) {
2514 /* 2697 /*
2515 * Either a chip reset is active or this request was from 2698 * Either the port is not online or this request was from
2516 * previous life, just abort the processing. 2699 * previous life, just abort the processing.
2517 */ 2700 */
2518 cmd->state = QLA_TGT_STATE_PROCESSED; 2701 cmd->state = QLA_TGT_STATE_PROCESSED;
2519 qlt_abort_cmd_on_host_reset(cmd->vha, cmd); 2702 qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
2520 ql_dbg(ql_dbg_async, vha, 0xe101, 2703 ql_dbg(ql_dbg_async, vha, 0xe101,
2521 "RESET-RSP active/old-count/new-count = %d/%d/%d.\n", 2704 "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
2522 qla2x00_reset_active(vha), cmd->reset_count, 2705 vha->flags.online, qla2x00_reset_active(vha),
2523 ha->chip_reset); 2706 cmd->reset_count, ha->chip_reset);
2524 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2707 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2525 return 0; 2708 return 0;
2526 } 2709 }
@@ -2651,18 +2834,18 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
2651 2834
2652 spin_lock_irqsave(&ha->hardware_lock, flags); 2835 spin_lock_irqsave(&ha->hardware_lock, flags);
2653 2836
2654 if (qla2x00_reset_active(vha) || (cmd->reset_count != ha->chip_reset) || 2837 if (!vha->flags.online || (cmd->reset_count != ha->chip_reset) ||
2655 (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)) { 2838 (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)) {
2656 /* 2839 /*
2657 * Either a chip reset is active or this request was from 2840 * Either the port is not online or this request was from
2658 * previous life, just abort the processing. 2841 * previous life, just abort the processing.
2659 */ 2842 */
2660 cmd->state = QLA_TGT_STATE_NEED_DATA; 2843 cmd->state = QLA_TGT_STATE_NEED_DATA;
2661 qlt_abort_cmd_on_host_reset(cmd->vha, cmd); 2844 qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
2662 ql_dbg(ql_dbg_async, vha, 0xe102, 2845 ql_dbg(ql_dbg_async, vha, 0xe102,
2663 "RESET-XFR active/old-count/new-count = %d/%d/%d.\n", 2846 "RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n",
2664 qla2x00_reset_active(vha), cmd->reset_count, 2847 vha->flags.online, qla2x00_reset_active(vha),
2665 ha->chip_reset); 2848 cmd->reset_count, ha->chip_reset);
2666 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2849 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2667 return 0; 2850 return 0;
2668 } 2851 }
@@ -2957,12 +3140,13 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
2957 ret = 1; 3140 ret = 1;
2958 } 3141 }
2959 3142
3143 vha->tgt_counters.num_term_xchg_sent++;
2960 pkt->entry_count = 1; 3144 pkt->entry_count = 1;
2961 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; 3145 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
2962 3146
2963 ctio24 = (struct ctio7_to_24xx *)pkt; 3147 ctio24 = (struct ctio7_to_24xx *)pkt;
2964 ctio24->entry_type = CTIO_TYPE7; 3148 ctio24->entry_type = CTIO_TYPE7;
2965 ctio24->nport_handle = cmd ? cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED; 3149 ctio24->nport_handle = CTIO7_NHANDLE_UNRECOGNIZED;
2966 ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 3150 ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
2967 ctio24->vp_index = vha->vp_idx; 3151 ctio24->vp_index = vha->vp_idx;
2968 ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 3152 ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
@@ -3009,7 +3193,7 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha,
3009 qlt_alloc_qfull_cmd(vha, atio, 0, 0); 3193 qlt_alloc_qfull_cmd(vha, atio, 0, 0);
3010 3194
3011done: 3195done:
3012 if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) || 3196 if (cmd && (!cmd->aborted ||
3013 !cmd->cmd_sent_to_fw)) { 3197 !cmd->cmd_sent_to_fw)) {
3014 if (cmd->sg_mapped) 3198 if (cmd->sg_mapped)
3015 qlt_unmap_sg(vha, cmd); 3199 qlt_unmap_sg(vha, cmd);
@@ -3028,7 +3212,7 @@ static void qlt_init_term_exchange(struct scsi_qla_host *vha)
3028 struct qla_tgt_cmd *cmd, *tcmd; 3212 struct qla_tgt_cmd *cmd, *tcmd;
3029 3213
3030 vha->hw->tgt.leak_exchg_thresh_hold = 3214 vha->hw->tgt.leak_exchg_thresh_hold =
3031 (vha->hw->fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT; 3215 (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;
3032 3216
3033 cmd = tcmd = NULL; 3217 cmd = tcmd = NULL;
3034 if (!list_empty(&vha->hw->tgt.q_full_list)) { 3218 if (!list_empty(&vha->hw->tgt.q_full_list)) {
@@ -3058,7 +3242,7 @@ static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
3058 3242
3059 ql_dbg(ql_dbg_tgt, vha, 0xe079, 3243 ql_dbg(ql_dbg_tgt, vha, 0xe079,
3060 "Chip reset due to exchange starvation: %d/%d.\n", 3244 "Chip reset due to exchange starvation: %d/%d.\n",
3061 total_leaked, vha->hw->fw_xcb_count); 3245 total_leaked, vha->hw->cur_fw_xcb_count);
3062 3246
3063 if (IS_P3P_TYPE(vha->hw)) 3247 if (IS_P3P_TYPE(vha->hw))
3064 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 3248 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
@@ -3080,7 +3264,7 @@ void qlt_abort_cmd(struct qla_tgt_cmd *cmd)
3080 "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd, 3264 "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
3081 se_cmd->tag); 3265 se_cmd->tag);
3082 3266
3083 cmd->state = QLA_TGT_STATE_ABORTED; 3267 cmd->aborted = 1;
3084 cmd->cmd_flags |= BIT_6; 3268 cmd->cmd_flags |= BIT_6;
3085 3269
3086 qlt_send_term_exchange(vha, cmd, &cmd->atio, 0); 3270 qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
@@ -3300,9 +3484,6 @@ qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
3300 3484
3301 ha->tgt.tgt_ops->handle_data(cmd); 3485 ha->tgt.tgt_ops->handle_data(cmd);
3302 return; 3486 return;
3303 } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
3304 ql_dbg(ql_dbg_io, vha, 0xff02,
3305 "HOST-ABORT: handle=%d, state=ABORTED.\n", handle);
3306 } else { 3487 } else {
3307 ql_dbg(ql_dbg_io, vha, 0xff03, 3488 ql_dbg(ql_dbg_io, vha, 0xff03,
3308 "HOST-ABORT: handle=%d, state=BAD(%d).\n", handle, 3489 "HOST-ABORT: handle=%d, state=BAD(%d).\n", handle,
@@ -3398,13 +3579,26 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
3398 3579
3399 case CTIO_PORT_LOGGED_OUT: 3580 case CTIO_PORT_LOGGED_OUT:
3400 case CTIO_PORT_UNAVAILABLE: 3581 case CTIO_PORT_UNAVAILABLE:
3582 {
3583 int logged_out = (status & 0xFFFF);
3401 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059, 3584 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
3402 "qla_target(%d): CTIO with PORT LOGGED " 3585 "qla_target(%d): CTIO with %s status %x "
3403 "OUT (29) or PORT UNAVAILABLE (28) status %x "
3404 "received (state %x, se_cmd %p)\n", vha->vp_idx, 3586 "received (state %x, se_cmd %p)\n", vha->vp_idx,
3587 (logged_out == CTIO_PORT_LOGGED_OUT) ?
3588 "PORT LOGGED OUT" : "PORT UNAVAILABLE",
3405 status, cmd->state, se_cmd); 3589 status, cmd->state, se_cmd);
3406 break;
3407 3590
3591 if (logged_out && cmd->sess) {
3592 /*
3593 * Session is already logged out, but we need
3594 * to notify the initiator, which is not aware of this
3595 */
3596 cmd->sess->logout_on_delete = 0;
3597 cmd->sess->send_els_logo = 1;
3598 qlt_schedule_sess_for_deletion(cmd->sess, true);
3599 }
3600 break;
3601 }
3408 case CTIO_SRR_RECEIVED: 3602 case CTIO_SRR_RECEIVED:
3409 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a, 3603 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a,
3410 "qla_target(%d): CTIO with SRR_RECEIVED" 3604 "qla_target(%d): CTIO with SRR_RECEIVED"
@@ -3454,14 +3648,14 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
3454 } 3648 }
3455 3649
3456 3650
3457 /* "cmd->state == QLA_TGT_STATE_ABORTED" means 3651 /* "cmd->aborted" means
3458 * cmd is already aborted/terminated, we don't 3652 * cmd is already aborted/terminated, we don't
3459 * need to terminate again. The exchange is already 3653 * need to terminate again. The exchange is already
3460 * cleaned up/freed at FW level. Just cleanup at driver 3654 * cleaned up/freed at FW level. Just cleanup at driver
3461 * level. 3655 * level.
3462 */ 3656 */
3463 if ((cmd->state != QLA_TGT_STATE_NEED_DATA) && 3657 if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
3464 (cmd->state != QLA_TGT_STATE_ABORTED)) { 3658 (!cmd->aborted)) {
3465 cmd->cmd_flags |= BIT_13; 3659 cmd->cmd_flags |= BIT_13;
3466 if (qlt_term_ctio_exchange(vha, ctio, cmd, status)) 3660 if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
3467 return; 3661 return;
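The conversion above is part of a series-wide change: QLA_TGT_STATE_ABORTED disappears (see the qla_target.h hunk later in this diff) in favor of a dedicated cmd->aborted bit, so an abort no longer overwrites the command's transfer-progress state. A small sketch of why the split matters, using simplified types:

#include <stdbool.h>
#include <stdio.h>

enum cmd_state { STATE_NEW, STATE_NEED_DATA, STATE_PROCESSED };

struct cmd {
        enum cmd_state state;   /* data-transfer progress */
        bool aborted;           /* orthogonal abort flag (new scheme) */
};

/* mirrors the converted check: terminate the exchange only if the
 * command is neither waiting for data nor already aborted */
static bool need_term_exchange(const struct cmd *c)
{
        return c->state != STATE_NEED_DATA && !c->aborted;
}

int main(void)
{
        /* under the old scheme, setting state = ABORTED would have
         * destroyed the fact that this command still needed data */
        struct cmd c = { .state = STATE_NEED_DATA, .aborted = true };

        printf("state=%d aborted=%d term=%d\n",
               (int)c.state, (int)c.aborted, (int)need_term_exchange(&c));
        return 0;
}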
@@ -3479,7 +3673,7 @@ skip_term:
3479 3673
3480 ha->tgt.tgt_ops->handle_data(cmd); 3674 ha->tgt.tgt_ops->handle_data(cmd);
3481 return; 3675 return;
3482 } else if (cmd->state == QLA_TGT_STATE_ABORTED) { 3676 } else if (cmd->aborted) {
3483 cmd->cmd_flags |= BIT_18; 3677 cmd->cmd_flags |= BIT_18;
3484 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e, 3678 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
3485 "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag); 3679 "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
@@ -3491,7 +3685,7 @@ skip_term:
3491 } 3685 }
3492 3686
3493 if (unlikely(status != CTIO_SUCCESS) && 3687 if (unlikely(status != CTIO_SUCCESS) &&
3494 (cmd->state != QLA_TGT_STATE_ABORTED)) { 3688 !cmd->aborted) {
3495 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n"); 3689 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
3496 dump_stack(); 3690 dump_stack();
3497 } 3691 }
@@ -3553,7 +3747,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
3553 if (tgt->tgt_stop) 3747 if (tgt->tgt_stop)
3554 goto out_term; 3748 goto out_term;
3555 3749
3556 if (cmd->state == QLA_TGT_STATE_ABORTED) { 3750 if (cmd->aborted) {
3557 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082, 3751 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
3558 "cmd with tag %u is aborted\n", 3752 "cmd with tag %u is aborted\n",
3559 cmd->atio.u.isp24.exchange_addr); 3753 cmd->atio.u.isp24.exchange_addr);
@@ -3589,9 +3783,9 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
3589 /* 3783 /*
3590 * Drop extra session reference from qlt_handle_cmd_for_atio(). 3784 * Drop extra session reference from qlt_handle_cmd_for_atio().
3591 */ 3785 */
3592 spin_lock_irqsave(&ha->hardware_lock, flags); 3786 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
3593 ha->tgt.tgt_ops->put_sess(sess); 3787 ha->tgt.tgt_ops->put_sess(sess);
3594 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3788 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
3595 return; 3789 return;
3596 3790
3597out_term: 3791out_term:
@@ -3606,8 +3800,11 @@ out_term:
3606 3800
3607 qlt_decr_num_pend_cmds(vha); 3801 qlt_decr_num_pend_cmds(vha);
3608 percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); 3802 percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
3609 ha->tgt.tgt_ops->put_sess(sess);
3610 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3803 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3804
3805 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
3806 ha->tgt.tgt_ops->put_sess(sess);
3807 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
3611} 3808}
3612 3809
3613static void qlt_do_work(struct work_struct *work) 3810static void qlt_do_work(struct work_struct *work)
@@ -3692,10 +3889,8 @@ static void qlt_create_sess_from_atio(struct work_struct *work)
3692 goto out_term; 3889 goto out_term;
3693 } 3890 }
3694 3891
3695 mutex_lock(&vha->vha_tgt.tgt_mutex);
3696 sess = qlt_make_local_sess(vha, s_id); 3892 sess = qlt_make_local_sess(vha, s_id);
3697 /* sess has an extra creation ref. */ 3893 /* sess has an extra creation ref. */
3698 mutex_unlock(&vha->vha_tgt.tgt_mutex);
3699 3894
3700 if (!sess) 3895 if (!sess)
3701 goto out_term; 3896 goto out_term;
@@ -3787,13 +3982,24 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
3787 3982
3788 cmd->cmd_in_wq = 1; 3983 cmd->cmd_in_wq = 1;
3789 cmd->cmd_flags |= BIT_0; 3984 cmd->cmd_flags |= BIT_0;
3985 cmd->se_cmd.cpuid = -1;
3790 3986
3791 spin_lock(&vha->cmd_list_lock); 3987 spin_lock(&vha->cmd_list_lock);
3792 list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list); 3988 list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
3793 spin_unlock(&vha->cmd_list_lock); 3989 spin_unlock(&vha->cmd_list_lock);
3794 3990
3795 INIT_WORK(&cmd->work, qlt_do_work); 3991 INIT_WORK(&cmd->work, qlt_do_work);
3796 queue_work(qla_tgt_wq, &cmd->work); 3992 if (ha->msix_count) {
3993 cmd->se_cmd.cpuid = ha->tgt.rspq_vector_cpuid;
3994 if (cmd->atio.u.isp24.fcp_cmnd.rddata)
3995 queue_work_on(smp_processor_id(), qla_tgt_wq,
3996 &cmd->work);
3997 else
3998 queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
3999 &cmd->work);
4000 } else {
4001 queue_work(qla_tgt_wq, &cmd->work);
4002 }
3797 return 0; 4003 return 0;
3798 4004
3799} 4005}
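The queueing change above is the irq-affinity piece from the merge note: reads stay on the CPU that received the ATIO, everything else follows the response queue's MSI-X vector. A standalone sketch of the selection policy; current_cpu() stands in for smp_processor_id(), and -1 means "let the workqueue pick":

#include <stdio.h>

struct cmd { int is_read; };

static int current_cpu(void) { return 2; }  /* smp_processor_id() stand-in */

static int pick_work_cpu(const struct cmd *c, int rspq_vector_cpuid,
                         int have_msix)
{
        if (!have_msix)
                return -1;      /* no vector affinity known, any CPU will do */
        /* reads ride the receiving CPU; everything else stays
         * cache-local to the response-queue vector's CPU */
        return c->is_read ? current_cpu() : rspq_vector_cpuid;
}

int main(void)
{
        struct cmd rd = { 1 }, wr = { 0 };
        printf("read  -> cpu %d\n", pick_work_cpu(&rd, 5, 1));
        printf("write -> cpu %d\n", pick_work_cpu(&wr, 5, 1));
        return 0;
}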
@@ -3917,13 +4123,18 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
3917 struct qla_tgt_sess *sess; 4123 struct qla_tgt_sess *sess;
3918 uint32_t lun, unpacked_lun; 4124 uint32_t lun, unpacked_lun;
3919 int fn; 4125 int fn;
4126 unsigned long flags;
3920 4127
3921 tgt = vha->vha_tgt.qla_tgt; 4128 tgt = vha->vha_tgt.qla_tgt;
3922 4129
3923 lun = a->u.isp24.fcp_cmnd.lun; 4130 lun = a->u.isp24.fcp_cmnd.lun;
3924 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags; 4131 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
4132
4133 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
3925 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, 4134 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
3926 a->u.isp24.fcp_hdr.s_id); 4135 a->u.isp24.fcp_hdr.s_id);
4136 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4137
3927 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); 4138 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
3928 4139
3929 if (!sess) { 4140 if (!sess) {
@@ -3987,10 +4198,14 @@ static int qlt_abort_task(struct scsi_qla_host *vha,
3987 struct qla_hw_data *ha = vha->hw; 4198 struct qla_hw_data *ha = vha->hw;
3988 struct qla_tgt_sess *sess; 4199 struct qla_tgt_sess *sess;
3989 int loop_id; 4200 int loop_id;
4201 unsigned long flags;
3990 4202
3991 loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb); 4203 loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);
3992 4204
4205 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
3993 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id); 4206 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
4207 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4208
3994 if (sess == NULL) { 4209 if (sess == NULL) {
3995 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025, 4210 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
3996 "qla_target(%d): task abort for unexisting " 4211 "qla_target(%d): task abort for unexisting "
@@ -4022,15 +4237,6 @@ void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
4022 } 4237 }
4023} 4238}
4024 4239
4025static void qlt_swap_imm_ntfy_iocb(struct imm_ntfy_from_isp *a,
4026 struct imm_ntfy_from_isp *b)
4027{
4028 struct imm_ntfy_from_isp tmp;
4029 memcpy(&tmp, a, sizeof(struct imm_ntfy_from_isp));
4030 memcpy(a, b, sizeof(struct imm_ntfy_from_isp));
4031 memcpy(b, &tmp, sizeof(struct imm_ntfy_from_isp));
4032}
4033
4034/* 4240/*
4035* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) 4241* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
4036* 4242*
@@ -4040,11 +4246,13 @@ static void qlt_swap_imm_ntfy_iocb(struct imm_ntfy_from_isp *a,
4040*/ 4246*/
4041static struct qla_tgt_sess * 4247static struct qla_tgt_sess *
4042qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn, 4248qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn,
4043 port_id_t port_id, uint16_t loop_id) 4249 port_id_t port_id, uint16_t loop_id, struct qla_tgt_sess **conflict_sess)
4044{ 4250{
4045 struct qla_tgt_sess *sess = NULL, *other_sess; 4251 struct qla_tgt_sess *sess = NULL, *other_sess;
4046 uint64_t other_wwn; 4252 uint64_t other_wwn;
4047 4253
4254 *conflict_sess = NULL;
4255
4048 list_for_each_entry(other_sess, &tgt->sess_list, sess_list_entry) { 4256 list_for_each_entry(other_sess, &tgt->sess_list, sess_list_entry) {
4049 4257
4050 other_wwn = wwn_to_u64(other_sess->port_name); 4258 other_wwn = wwn_to_u64(other_sess->port_name);
@@ -4072,9 +4280,10 @@ qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn,
4072 } else { 4280 } else {
4073 /* 4281 /*
4074 * Another wwn used to have our s_id/loop_id 4282 * Another wwn used to have our s_id/loop_id
4075 * combo - kill the session, but don't log out 4283 * kill the session, but don't free the loop_id
4076 */ 4284 */
4077 sess->logout_on_delete = 0; 4285 other_sess->keep_nport_handle = 1;
4286 *conflict_sess = other_sess;
4078 qlt_schedule_sess_for_deletion(other_sess, 4287 qlt_schedule_sess_for_deletion(other_sess,
4079 true); 4288 true);
4080 } 4289 }
@@ -4119,7 +4328,7 @@ static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
4119 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { 4328 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
4120 uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id); 4329 uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
4121 if (cmd_key == key) { 4330 if (cmd_key == key) {
4122 cmd->state = QLA_TGT_STATE_ABORTED; 4331 cmd->aborted = 1;
4123 count++; 4332 count++;
4124 } 4333 }
4125 } 4334 }
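abort_cmds_for_s_id() above compares 24-bit S_ID keys rather than three bytes at a time. A sketch of the comparison; the packing order in sid_to_key() below is an assumption, since the helper's body is only partially visible in this diff:

#include <stdint.h>
#include <stdio.h>

/* assumed packing: domain, area, al_pa into one comparable value */
static uint32_t sid_to_key(const uint8_t s_id[3])
{
        return ((uint32_t)s_id[0] << 16) |
               ((uint32_t)s_id[1] << 8)  |
                (uint32_t)s_id[2];
}

int main(void)
{
        uint8_t logo[3] = { 0x01, 0x02, 0x03 };  /* port being logged out */
        uint8_t cmd[3]  = { 0x01, 0x02, 0x03 };  /* stale command's S_ID */

        if (sid_to_key(cmd) == sid_to_key(logo))
                puts("mark command aborted");    /* cmd->aborted = 1 */
        return 0;
}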
@@ -4136,12 +4345,14 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
4136{ 4345{
4137 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 4346 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4138 struct qla_hw_data *ha = vha->hw; 4347 struct qla_hw_data *ha = vha->hw;
4139 struct qla_tgt_sess *sess = NULL; 4348 struct qla_tgt_sess *sess = NULL, *conflict_sess = NULL;
4140 uint64_t wwn; 4349 uint64_t wwn;
4141 port_id_t port_id; 4350 port_id_t port_id;
4142 uint16_t loop_id; 4351 uint16_t loop_id;
4143 uint16_t wd3_lo; 4352 uint16_t wd3_lo;
4144 int res = 0; 4353 int res = 0;
4354 qlt_plogi_ack_t *pla;
4355 unsigned long flags;
4145 4356
4146 wwn = wwn_to_u64(iocb->u.isp24.port_name); 4357 wwn = wwn_to_u64(iocb->u.isp24.port_name);
4147 4358
@@ -4165,27 +4376,20 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
4165 /* Mark all stale commands in qla_tgt_wq for deletion */ 4376 /* Mark all stale commands in qla_tgt_wq for deletion */
4166 abort_cmds_for_s_id(vha, &port_id); 4377 abort_cmds_for_s_id(vha, &port_id);
4167 4378
4168 if (wwn) 4379 if (wwn) {
4380 spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
4169 sess = qlt_find_sess_invalidate_other(tgt, wwn, 4381 sess = qlt_find_sess_invalidate_other(tgt, wwn,
4170 port_id, loop_id); 4382 port_id, loop_id, &conflict_sess);
4383 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
4384 }
4171 4385
4172 if (!sess || IS_SW_RESV_ADDR(sess->s_id)) { 4386 if (IS_SW_RESV_ADDR(port_id) || (!sess && !conflict_sess)) {
4173 res = 1; 4387 res = 1;
4174 break; 4388 break;
4175 } 4389 }
4176 4390
4177 if (sess->plogi_ack_needed) { 4391 pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
4178 /* 4392 if (!pla) {
4179 * Initiator sent another PLOGI before last PLOGI could
4180 * finish. Swap plogi iocbs and terminate old one
4181 * without acking, new one will get acked when session
4182 * deletion completes.
4183 */
4184 ql_log(ql_log_warn, sess->vha, 0xf094,
4185 "sess %p received double plogi.\n", sess);
4186
4187 qlt_swap_imm_ntfy_iocb(iocb, &sess->tm_iocb);
4188
4189 qlt_send_term_imm_notif(vha, iocb, 1); 4393 qlt_send_term_imm_notif(vha, iocb, 1);
4190 4394
4191 res = 0; 4395 res = 0;
@@ -4194,13 +4398,14 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
4194 4398
4195 res = 0; 4399 res = 0;
4196 4400
4197 /* 4401 if (conflict_sess)
4198 * Save immediate Notif IOCB for Ack when sess is done 4402 qlt_plogi_ack_link(vha, pla, conflict_sess,
4199 * and being deleted. 4403 QLT_PLOGI_LINK_CONFLICT);
4200 */ 4404
4201 memcpy(&sess->tm_iocb, iocb, sizeof(sess->tm_iocb)); 4405 if (!sess)
4202 sess->plogi_ack_needed = 1; 4406 break;
4203 4407
4408 qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
4204 /* 4409 /*
4205 * Under normal circumstances we want to release nport handle 4410 * Under normal circumstances we want to release nport handle
4206 * during LOGO process to avoid nport handle leaks inside FW. 4411 * during LOGO process to avoid nport handle leaks inside FW.
@@ -4227,9 +4432,21 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
4227 case ELS_PRLI: 4432 case ELS_PRLI:
4228 wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo); 4433 wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
4229 4434
4230 if (wwn) 4435 if (wwn) {
4436 spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
4231 sess = qlt_find_sess_invalidate_other(tgt, wwn, port_id, 4437 sess = qlt_find_sess_invalidate_other(tgt, wwn, port_id,
4232 loop_id); 4438 loop_id, &conflict_sess);
4439 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
4440 }
4441
4442 if (conflict_sess) {
4443 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
4444 "PRLI with conflicting sess %p port %8phC\n",
4445 conflict_sess, conflict_sess->port_name);
4446 qlt_send_term_imm_notif(vha, iocb, 1);
4447 res = 0;
4448 break;
4449 }
4233 4450
4234 if (sess != NULL) { 4451 if (sess != NULL) {
4235 if (sess->deleted) { 4452 if (sess->deleted) {
@@ -4899,9 +5116,12 @@ static int __qlt_send_busy(struct scsi_qla_host *vha,
4899 struct qla_hw_data *ha = vha->hw; 5116 struct qla_hw_data *ha = vha->hw;
4900 request_t *pkt; 5117 request_t *pkt;
4901 struct qla_tgt_sess *sess = NULL; 5118 struct qla_tgt_sess *sess = NULL;
5119 unsigned long flags;
4902 5120
5121 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
4903 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, 5122 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
4904 atio->u.isp24.fcp_hdr.s_id); 5123 atio->u.isp24.fcp_hdr.s_id);
5124 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4905 if (!sess) { 5125 if (!sess) {
4906 qlt_send_term_exchange(vha, NULL, atio, 1); 5126 qlt_send_term_exchange(vha, NULL, atio, 1);
4907 return 0; 5127 return 0;
@@ -4916,6 +5136,7 @@ static int __qlt_send_busy(struct scsi_qla_host *vha,
4916 return -ENOMEM; 5136 return -ENOMEM;
4917 } 5137 }
4918 5138
5139 vha->tgt_counters.num_q_full_sent++;
4919 pkt->entry_count = 1; 5140 pkt->entry_count = 1;
4920 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; 5141 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
4921 5142
@@ -5129,11 +5350,12 @@ qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
5129/* ha->hardware_lock supposed to be held on entry */ 5350/* ha->hardware_lock supposed to be held on entry */
5130/* called via callback from qla2xxx */ 5351/* called via callback from qla2xxx */
5131static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, 5352static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
5132 struct atio_from_isp *atio) 5353 struct atio_from_isp *atio, uint8_t ha_locked)
5133{ 5354{
5134 struct qla_hw_data *ha = vha->hw; 5355 struct qla_hw_data *ha = vha->hw;
5135 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5356 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5136 int rc; 5357 int rc;
5358 unsigned long flags;
5137 5359
5138 if (unlikely(tgt == NULL)) { 5360 if (unlikely(tgt == NULL)) {
5139 ql_dbg(ql_dbg_io, vha, 0x3064, 5361 ql_dbg(ql_dbg_io, vha, 0x3064,
@@ -5145,7 +5367,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
5145 * Otherwise, some commands can get stuck. 5367
5146 */ 5368 */
5147 5369
5148 tgt->irq_cmd_count++; 5370 tgt->atio_irq_cmd_count++;
5149 5371
5150 switch (atio->u.raw.entry_type) { 5372 switch (atio->u.raw.entry_type) {
5151 case ATIO_TYPE7: 5373 case ATIO_TYPE7:
@@ -5155,7 +5377,11 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
5155 "qla_target(%d): ATIO_TYPE7 " 5377 "qla_target(%d): ATIO_TYPE7 "
5156 "received with UNKNOWN exchange address, " 5378 "received with UNKNOWN exchange address, "
5157 "sending QUEUE_FULL\n", vha->vp_idx); 5379 "sending QUEUE_FULL\n", vha->vp_idx);
5380 if (!ha_locked)
5381 spin_lock_irqsave(&ha->hardware_lock, flags);
5158 qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL); 5382 qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
5383 if (!ha_locked)
5384 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5159 break; 5385 break;
5160 } 5386 }
5161 5387
@@ -5164,7 +5390,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
5164 if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) { 5390 if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
5165 rc = qlt_chk_qfull_thresh_hold(vha, atio); 5391 rc = qlt_chk_qfull_thresh_hold(vha, atio);
5166 if (rc != 0) { 5392 if (rc != 0) {
5167 tgt->irq_cmd_count--; 5393 tgt->atio_irq_cmd_count--;
5168 return; 5394 return;
5169 } 5395 }
5170 rc = qlt_handle_cmd_for_atio(vha, atio); 5396 rc = qlt_handle_cmd_for_atio(vha, atio);
@@ -5173,11 +5399,20 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
5173 } 5399 }
5174 if (unlikely(rc != 0)) { 5400 if (unlikely(rc != 0)) {
5175 if (rc == -ESRCH) { 5401 if (rc == -ESRCH) {
5402 if (!ha_locked)
5403 spin_lock_irqsave
5404 (&ha->hardware_lock, flags);
5405
5176#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */ 5406#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
5177 qlt_send_busy(vha, atio, SAM_STAT_BUSY); 5407 qlt_send_busy(vha, atio, SAM_STAT_BUSY);
5178#else 5408#else
5179 qlt_send_term_exchange(vha, NULL, atio, 1); 5409 qlt_send_term_exchange(vha, NULL, atio, 1);
5180#endif 5410#endif
5411
5412 if (!ha_locked)
5413 spin_unlock_irqrestore
5414 (&ha->hardware_lock, flags);
5415
5181 } else { 5416 } else {
5182 if (tgt->tgt_stop) { 5417 if (tgt->tgt_stop) {
5183 ql_dbg(ql_dbg_tgt, vha, 0xe059, 5418 ql_dbg(ql_dbg_tgt, vha, 0xe059,
@@ -5189,7 +5424,13 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
5189 "qla_target(%d): Unable to send " 5424 "qla_target(%d): Unable to send "
5190 "command to target, sending BUSY " 5425 "command to target, sending BUSY "
5191 "status.\n", vha->vp_idx); 5426 "status.\n", vha->vp_idx);
5427 if (!ha_locked)
5428 spin_lock_irqsave(
5429 &ha->hardware_lock, flags);
5192 qlt_send_busy(vha, atio, SAM_STAT_BUSY); 5430 qlt_send_busy(vha, atio, SAM_STAT_BUSY);
5431 if (!ha_locked)
5432 spin_unlock_irqrestore(
5433 &ha->hardware_lock, flags);
5193 } 5434 }
5194 } 5435 }
5195 } 5436 }
@@ -5206,7 +5447,12 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
5206 break; 5447 break;
5207 } 5448 }
5208 ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO"); 5449 ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
5450
5451 if (!ha_locked)
5452 spin_lock_irqsave(&ha->hardware_lock, flags);
5209 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio); 5453 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
5454 if (!ha_locked)
5455 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5210 break; 5456 break;
5211 } 5457 }
5212 5458
@@ -5217,7 +5463,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
5217 break; 5463 break;
5218 } 5464 }
5219 5465
5220 tgt->irq_cmd_count--; 5466 tgt->atio_irq_cmd_count--;
5221} 5467}
5222 5468
5223/* ha->hardware_lock supposed to be held on entry */ 5469/* ha->hardware_lock supposed to be held on entry */
@@ -5534,12 +5780,16 @@ static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
5534 int rc, global_resets; 5780 int rc, global_resets;
5535 uint16_t loop_id = 0; 5781 uint16_t loop_id = 0;
5536 5782
5783 mutex_lock(&vha->vha_tgt.tgt_mutex);
5784
5537retry: 5785retry:
5538 global_resets = 5786 global_resets =
5539 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count); 5787 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
5540 5788
5541 rc = qla24xx_get_loop_id(vha, s_id, &loop_id); 5789 rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
5542 if (rc != 0) { 5790 if (rc != 0) {
5791 mutex_unlock(&vha->vha_tgt.tgt_mutex);
5792
5543 if ((s_id[0] == 0xFF) && 5793 if ((s_id[0] == 0xFF) &&
5544 (s_id[1] == 0xFC)) { 5794 (s_id[1] == 0xFC)) {
5545 /* 5795 /*
@@ -5550,17 +5800,27 @@ retry:
5550 "Unable to find initiator with S_ID %x:%x:%x", 5800 "Unable to find initiator with S_ID %x:%x:%x",
5551 s_id[0], s_id[1], s_id[2]); 5801 s_id[0], s_id[1], s_id[2]);
5552 } else 5802 } else
5553 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf071, 5803 ql_log(ql_log_info, vha, 0xf071,
5554 "qla_target(%d): Unable to find " 5804 "qla_target(%d): Unable to find "
5555 "initiator with S_ID %x:%x:%x", 5805 "initiator with S_ID %x:%x:%x",
5556 vha->vp_idx, s_id[0], s_id[1], 5806 vha->vp_idx, s_id[0], s_id[1],
5557 s_id[2]); 5807 s_id[2]);
5808
5809 if (rc == -ENOENT) {
5810 qlt_port_logo_t logo;
5811 sid_to_portid(s_id, &logo.id);
5812 logo.cmd_count = 1;
5813 qlt_send_first_logo(vha, &logo);
5814 }
5815
5558 return NULL; 5816 return NULL;
5559 } 5817 }
5560 5818
5561 fcport = qlt_get_port_database(vha, loop_id); 5819 fcport = qlt_get_port_database(vha, loop_id);
5562 if (!fcport) 5820 if (!fcport) {
5821 mutex_unlock(&vha->vha_tgt.tgt_mutex);
5563 return NULL; 5822 return NULL;
5823 }
5564 5824
5565 if (global_resets != 5825 if (global_resets !=
5566 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) { 5826 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
@@ -5575,6 +5835,8 @@ retry:
5575 5835
5576 sess = qlt_create_sess(vha, fcport, true); 5836 sess = qlt_create_sess(vha, fcport, true);
5577 5837
5838 mutex_unlock(&vha->vha_tgt.tgt_mutex);
5839
5578 kfree(fcport); 5840 kfree(fcport);
5579 return sess; 5841 return sess;
5580} 5842}
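qlt_make_local_sess() now owns tgt_mutex itself (note that every early return above unlocks it), and it keeps the existing reset-generation retry. The shape of that retry reduced to standalone C; lookup() is a hypothetical stand-in for the loop-id/port-database work:

#include <stdio.h>

static int global_resets;       /* stand-in for the atomic reset counter */

static int lookup(int key) { return key * 10; } /* hypothetical lookup */

static int lookup_stable(int key)
{
        int gen, res;
retry:
        gen = global_resets;
        res = lookup(key);
        if (gen != global_resets) {
                /* a chip reset ran while we looked - results are stale */
                goto retry;
        }
        return res;
}

int main(void)
{
        printf("%d\n", lookup_stable(4));
        return 0;
}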
@@ -5585,15 +5847,15 @@ static void qlt_abort_work(struct qla_tgt *tgt,
5585 struct scsi_qla_host *vha = tgt->vha; 5847 struct scsi_qla_host *vha = tgt->vha;
5586 struct qla_hw_data *ha = vha->hw; 5848 struct qla_hw_data *ha = vha->hw;
5587 struct qla_tgt_sess *sess = NULL; 5849 struct qla_tgt_sess *sess = NULL;
5588 unsigned long flags; 5850 unsigned long flags = 0, flags2 = 0;
5589 uint32_t be_s_id; 5851 uint32_t be_s_id;
5590 uint8_t s_id[3]; 5852 uint8_t s_id[3];
5591 int rc; 5853 int rc;
5592 5854
5593 spin_lock_irqsave(&ha->hardware_lock, flags); 5855 spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
5594 5856
5595 if (tgt->tgt_stop) 5857 if (tgt->tgt_stop)
5596 goto out_term; 5858 goto out_term2;
5597 5859
5598 s_id[0] = prm->abts.fcp_hdr_le.s_id[2]; 5860 s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
5599 s_id[1] = prm->abts.fcp_hdr_le.s_id[1]; 5861 s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
@@ -5602,41 +5864,47 @@ static void qlt_abort_work(struct qla_tgt *tgt,
5602 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, 5864 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
5603 (unsigned char *)&be_s_id); 5865 (unsigned char *)&be_s_id);
5604 if (!sess) { 5866 if (!sess) {
5605 spin_unlock_irqrestore(&ha->hardware_lock, flags); 5867 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
5606 5868
5607 mutex_lock(&vha->vha_tgt.tgt_mutex);
5608 sess = qlt_make_local_sess(vha, s_id); 5869 sess = qlt_make_local_sess(vha, s_id);
5609 /* sess has got an extra creation ref */ 5870 /* sess has got an extra creation ref */
5610 mutex_unlock(&vha->vha_tgt.tgt_mutex);
5611 5871
5612 spin_lock_irqsave(&ha->hardware_lock, flags); 5872 spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
5613 if (!sess) 5873 if (!sess)
5614 goto out_term; 5874 goto out_term2;
5615 } else { 5875 } else {
5616 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { 5876 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
5617 sess = NULL; 5877 sess = NULL;
5618 goto out_term; 5878 goto out_term2;
5619 } 5879 }
5620 5880
5621 kref_get(&sess->se_sess->sess_kref); 5881 kref_get(&sess->se_sess->sess_kref);
5622 } 5882 }
5623 5883
5884 spin_lock_irqsave(&ha->hardware_lock, flags);
5885
5624 if (tgt->tgt_stop) 5886 if (tgt->tgt_stop)
5625 goto out_term; 5887 goto out_term;
5626 5888
5627 rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess); 5889 rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
5628 if (rc != 0) 5890 if (rc != 0)
5629 goto out_term; 5891 goto out_term;
5892 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5630 5893
5631 ha->tgt.tgt_ops->put_sess(sess); 5894 ha->tgt.tgt_ops->put_sess(sess);
5632 spin_unlock_irqrestore(&ha->hardware_lock, flags); 5895 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
5633 return; 5896 return;
5634 5897
5898out_term2:
5899 spin_lock_irqsave(&ha->hardware_lock, flags);
5900
5635out_term: 5901out_term:
5636 qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false); 5902 qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
5903 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5904
5637 if (sess) 5905 if (sess)
5638 ha->tgt.tgt_ops->put_sess(sess); 5906 ha->tgt.tgt_ops->put_sess(sess);
5639 spin_unlock_irqrestore(&ha->hardware_lock, flags); 5907 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
5640} 5908}
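The rework above splits one big hardware_lock region into two locks with a fixed nesting order: sess_lock (flags2) around the whole session lookup and refcount, hardware_lock (flags) only around the ABTS IOCB itself. A pthread-mutex model of that ordering (irq flags and error paths omitted; build with -lpthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sess_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t hardware_lock = PTHREAD_MUTEX_INITIALIZER;
static int sess_refs;

static void abort_work(void)
{
        pthread_mutex_lock(&sess_lock);         /* outer: session state */
        sess_refs++;                            /* find session, take a ref */

        pthread_mutex_lock(&hardware_lock);     /* inner: hardware access */
        puts("post ABTS response IOCB");
        pthread_mutex_unlock(&hardware_lock);

        sess_refs--;                    /* put_sess, still under sess_lock */
        pthread_mutex_unlock(&sess_lock);
}

int main(void)
{
        abort_work();
        return 0;
}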
5641 5909
5642static void qlt_tmr_work(struct qla_tgt *tgt, 5910static void qlt_tmr_work(struct qla_tgt *tgt,
@@ -5653,7 +5921,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
5653 int fn; 5921 int fn;
5654 void *iocb; 5922 void *iocb;
5655 5923
5656 spin_lock_irqsave(&ha->hardware_lock, flags); 5924 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
5657 5925
5658 if (tgt->tgt_stop) 5926 if (tgt->tgt_stop)
5659 goto out_term; 5927 goto out_term;
@@ -5661,14 +5929,12 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
5661 s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id; 5929 s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
5662 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); 5930 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
5663 if (!sess) { 5931 if (!sess) {
5664 spin_unlock_irqrestore(&ha->hardware_lock, flags); 5932 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5665 5933
5666 mutex_lock(&vha->vha_tgt.tgt_mutex);
5667 sess = qlt_make_local_sess(vha, s_id); 5934 sess = qlt_make_local_sess(vha, s_id);
5668 /* sess has got an extra creation ref */ 5935 /* sess has got an extra creation ref */
5669 mutex_unlock(&vha->vha_tgt.tgt_mutex);
5670 5936
5671 spin_lock_irqsave(&ha->hardware_lock, flags); 5937 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
5672 if (!sess) 5938 if (!sess)
5673 goto out_term; 5939 goto out_term;
5674 } else { 5940 } else {
@@ -5690,14 +5956,14 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
5690 goto out_term; 5956 goto out_term;
5691 5957
5692 ha->tgt.tgt_ops->put_sess(sess); 5958 ha->tgt.tgt_ops->put_sess(sess);
5693 spin_unlock_irqrestore(&ha->hardware_lock, flags); 5959 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5694 return; 5960 return;
5695 5961
5696out_term: 5962out_term:
5697 qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1); 5963 qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 0);
5698 if (sess) 5964 if (sess)
5699 ha->tgt.tgt_ops->put_sess(sess); 5965 ha->tgt.tgt_ops->put_sess(sess);
5700 spin_unlock_irqrestore(&ha->hardware_lock, flags); 5966 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5701} 5967}
5702 5968
5703static void qlt_sess_work_fn(struct work_struct *work) 5969static void qlt_sess_work_fn(struct work_struct *work)
@@ -6002,6 +6268,7 @@ qlt_enable_vha(struct scsi_qla_host *vha)
6002 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 6268 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6003 unsigned long flags; 6269 unsigned long flags;
6004 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 6270 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
6271 int rspq_ent = QLA83XX_RSPQ_MSIX_ENTRY_NUMBER;
6005 6272
6006 if (!tgt) { 6273 if (!tgt) {
6007 ql_dbg(ql_dbg_tgt, vha, 0xe069, 6274 ql_dbg(ql_dbg_tgt, vha, 0xe069,
@@ -6020,6 +6287,17 @@ qlt_enable_vha(struct scsi_qla_host *vha)
6020 qla24xx_disable_vp(vha); 6287 qla24xx_disable_vp(vha);
6021 qla24xx_enable_vp(vha); 6288 qla24xx_enable_vp(vha);
6022 } else { 6289 } else {
6290 if (ha->msix_entries) {
6291 ql_dbg(ql_dbg_tgt, vha, 0xffff,
6292 "%s: host%ld : vector %d cpu %d\n",
6293 __func__, vha->host_no,
6294 ha->msix_entries[rspq_ent].vector,
6295 ha->msix_entries[rspq_ent].cpuid);
6296
6297 ha->tgt.rspq_vector_cpuid =
6298 ha->msix_entries[rspq_ent].cpuid;
6299 }
6300
6023 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); 6301 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
6024 qla2xxx_wake_dpc(base_vha); 6302 qla2xxx_wake_dpc(base_vha);
6025 qla2x00_wait_for_hba_online(base_vha); 6303 qla2x00_wait_for_hba_online(base_vha);
@@ -6131,7 +6409,7 @@ qlt_init_atio_q_entries(struct scsi_qla_host *vha)
6131 * @ha: SCSI driver HA context 6409 * @ha: SCSI driver HA context
6132 */ 6410 */
6133void 6411void
6134qlt_24xx_process_atio_queue(struct scsi_qla_host *vha) 6412qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
6135{ 6413{
6136 struct qla_hw_data *ha = vha->hw; 6414 struct qla_hw_data *ha = vha->hw;
6137 struct atio_from_isp *pkt; 6415 struct atio_from_isp *pkt;
@@ -6144,7 +6422,8 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
6144 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; 6422 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
6145 cnt = pkt->u.raw.entry_count; 6423 cnt = pkt->u.raw.entry_count;
6146 6424
6147 qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt); 6425 qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt,
6426 ha_locked);
6148 6427
6149 for (i = 0; i < cnt; i++) { 6428 for (i = 0; i < cnt; i++) {
6150 ha->tgt.atio_ring_index++; 6429 ha->tgt.atio_ring_index++;
@@ -6265,10 +6544,21 @@ qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
6265{ 6544{
6266 struct qla_hw_data *ha = vha->hw; 6545 struct qla_hw_data *ha = vha->hw;
6267 6546
6547 if (!QLA_TGT_MODE_ENABLED())
6548 return;
6549
6268 if (ha->tgt.node_name_set) { 6550 if (ha->tgt.node_name_set) {
6269 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE); 6551 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
6270 icb->firmware_options_1 |= cpu_to_le32(BIT_14); 6552 icb->firmware_options_1 |= cpu_to_le32(BIT_14);
6271 } 6553 }
6554
6555 /* disable ZIO at start time. */
6556 if (!vha->flags.init_done) {
6557 uint32_t tmp;
6558 tmp = le32_to_cpu(icb->firmware_options_2);
6559 tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
6560 icb->firmware_options_2 = cpu_to_le32(tmp);
6561 }
6272} 6562}
6273 6563
6274void 6564void
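"Disable ZIO at start time" above means clearing the four ZIO-mode bits of firmware_options_2 once, before init_done is set. The bit manipulation in isolation (the le32 conversions from the hunk are dropped here for brevity):

#include <stdint.h>
#include <stdio.h>

#define ZIO_MODE_MASK 0x0000000fu       /* BIT_3 | BIT_2 | BIT_1 | BIT_0 */

static uint32_t disable_zio(uint32_t fw_options_2)
{
        return fw_options_2 & ~ZIO_MODE_MASK;   /* mode 0 = ZIO off */
}

int main(void)
{
        printf("0x%08x\n", disable_zio(0x0000c506u));   /* -> 0x0000c500 */
        return 0;
}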
@@ -6359,6 +6649,15 @@ qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
6359 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE); 6649 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
6360 icb->firmware_options_1 |= cpu_to_le32(BIT_14); 6650 icb->firmware_options_1 |= cpu_to_le32(BIT_14);
6361 } 6651 }
6652
6653 /* disable ZIO at start time. */
6654 if (!vha->flags.init_done) {
6655 uint32_t tmp;
6656 tmp = le32_to_cpu(icb->firmware_options_2);
6657 tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
6658 icb->firmware_options_2 = cpu_to_le32(tmp);
6659 }
6660
6362} 6661}
6363 6662
6364void 6663void
@@ -6428,16 +6727,59 @@ qla83xx_msix_atio_q(int irq, void *dev_id)
6428 ha = rsp->hw; 6727 ha = rsp->hw;
6429 vha = pci_get_drvdata(ha->pdev); 6728 vha = pci_get_drvdata(ha->pdev);
6430 6729
6431 spin_lock_irqsave(&ha->hardware_lock, flags); 6730 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
6432 6731
6433 qlt_24xx_process_atio_queue(vha); 6732 qlt_24xx_process_atio_queue(vha, 0);
6434 qla24xx_process_response_queue(vha, rsp);
6435 6733
6436 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6734 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
6437 6735
6438 return IRQ_HANDLED; 6736 return IRQ_HANDLED;
6439} 6737}
6440 6738
6739static void
6740qlt_handle_abts_recv_work(struct work_struct *work)
6741{
6742 struct qla_tgt_sess_op *op = container_of(work,
6743 struct qla_tgt_sess_op, work);
6744 scsi_qla_host_t *vha = op->vha;
6745 struct qla_hw_data *ha = vha->hw;
6746 unsigned long flags;
6747
6748 if (qla2x00_reset_active(vha) || (op->chip_reset != ha->chip_reset))
6749 return;
6750
6751 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
6752 qlt_24xx_process_atio_queue(vha, 0);
6753 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
6754
6755 spin_lock_irqsave(&ha->hardware_lock, flags);
6756 qlt_response_pkt_all_vps(vha, (response_t *)&op->atio);
6757 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6758}
6759
6760void
6761qlt_handle_abts_recv(struct scsi_qla_host *vha, response_t *pkt)
6762{
6763 struct qla_tgt_sess_op *op;
6764
6765 op = kzalloc(sizeof(*op), GFP_ATOMIC);
6766
6767 if (!op) {
6768 /* do not touch the ATIO queue here; this is best-effort
6769 * error recovery at this point.
6770 */
6771 qlt_response_pkt_all_vps(vha, pkt);
6772 return;
6773 }
6774
6775 memcpy(&op->atio, pkt, sizeof(*pkt));
6776 op->vha = vha;
6777 op->chip_reset = vha->hw->chip_reset;
6778 INIT_WORK(&op->work, qlt_handle_abts_recv_work);
6779 queue_work(qla_tgt_wq, &op->work);
6780 return;
6781}
6782
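qlt_handle_abts_recv() above defers the ABTS to a workqueue and snapshots chip_reset so stale work can be dropped after an adapter reset. The defer-and-validate pattern on its own:

#include <stdio.h>

struct hw { unsigned chip_reset; };

struct deferred_op {
        struct hw *hw;
        unsigned chip_reset;    /* generation recorded at queue time */
};

static void op_work(struct deferred_op *op)
{
        if (op->chip_reset != op->hw->chip_reset)
                return;         /* adapter was reset after queuing - drop */
        printf("process deferred ABTS (gen %u)\n", op->chip_reset);
}

int main(void)
{
        struct hw hw = { .chip_reset = 7 };
        struct deferred_op op = { .hw = &hw, .chip_reset = hw.chip_reset };

        op_work(&op);           /* runs: generation unchanged */
        hw.chip_reset++;        /* simulate a chip reset */
        op_work(&op);           /* silently dropped */
        return 0;
}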
6441int 6783int
6442qlt_mem_alloc(struct qla_hw_data *ha) 6784qlt_mem_alloc(struct qla_hw_data *ha)
6443{ 6785{
@@ -6532,13 +6874,25 @@ int __init qlt_init(void)
6532 return -ENOMEM; 6874 return -ENOMEM;
6533 } 6875 }
6534 6876
6877 qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep",
6878 sizeof(qlt_plogi_ack_t),
6879 __alignof__(qlt_plogi_ack_t),
6880 0, NULL);
6881
6882 if (!qla_tgt_plogi_cachep) {
6883 ql_log(ql_log_fatal, NULL, 0xe06d,
6884 "kmem_cache_create for qla_tgt_plogi_cachep failed\n");
6885 ret = -ENOMEM;
6886 goto out_mgmt_cmd_cachep;
6887 }
6888
6535 qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab, 6889 qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
6536 mempool_free_slab, qla_tgt_mgmt_cmd_cachep); 6890 mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
6537 if (!qla_tgt_mgmt_cmd_mempool) { 6891 if (!qla_tgt_mgmt_cmd_mempool) {
6538 ql_log(ql_log_fatal, NULL, 0xe06e, 6892 ql_log(ql_log_fatal, NULL, 0xe06e,
6539 "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n"); 6893 "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
6540 ret = -ENOMEM; 6894 ret = -ENOMEM;
6541 goto out_mgmt_cmd_cachep; 6895 goto out_plogi_cachep;
6542 } 6896 }
6543 6897
6544 qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0); 6898 qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
@@ -6555,6 +6909,8 @@ int __init qlt_init(void)
6555 6909
6556out_cmd_mempool: 6910out_cmd_mempool:
6557 mempool_destroy(qla_tgt_mgmt_cmd_mempool); 6911 mempool_destroy(qla_tgt_mgmt_cmd_mempool);
6912out_plogi_cachep:
6913 kmem_cache_destroy(qla_tgt_plogi_cachep);
6558out_mgmt_cmd_cachep: 6914out_mgmt_cmd_cachep:
6559 kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); 6915 kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
6560 return ret; 6916 return ret;
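The qlt_init() hunk above slots the new plogi cache between the existing allocations, and the unwind labels move with it: each failure jumps to the label that frees exactly what was allocated before it, in reverse order. The ladder in miniature, with malloc standing in for the kmem_cache/mempool calls:

#include <stdlib.h>

static int init_caches(void)
{
        void *cmd_cachep, *plogi_cachep, *mempool;

        cmd_cachep = malloc(16);
        if (!cmd_cachep)
                return -1;

        plogi_cachep = malloc(16);      /* the newly added cache */
        if (!plogi_cachep)
                goto out_cmd_cachep;

        mempool = malloc(16);
        if (!mempool)
                goto out_plogi_cachep;  /* new label slots in between */

        free(mempool);
        free(plogi_cachep);
        free(cmd_cachep);
        return 0;

out_plogi_cachep:
        free(plogi_cachep);
out_cmd_cachep:
        free(cmd_cachep);
        return -1;
}

int main(void) { return init_caches() ? 1 : 0; }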
@@ -6567,5 +6923,6 @@ void qlt_exit(void)
6567 6923
6568 destroy_workqueue(qla_tgt_wq); 6924 destroy_workqueue(qla_tgt_wq);
6569 mempool_destroy(qla_tgt_mgmt_cmd_mempool); 6925 mempool_destroy(qla_tgt_mgmt_cmd_mempool);
6926 kmem_cache_destroy(qla_tgt_plogi_cachep);
6570 kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); 6927 kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
6571} 6928}
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index bca584ae45b7..71b2865ba3c8 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -787,7 +787,7 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
787#define QLA_TGT_STATE_NEED_DATA 1 /* target needs data to continue */ 787#define QLA_TGT_STATE_NEED_DATA 1 /* target needs data to continue */
788#define QLA_TGT_STATE_DATA_IN 2 /* Data arrived + target processing */ 788#define QLA_TGT_STATE_DATA_IN 2 /* Data arrived + target processing */
789#define QLA_TGT_STATE_PROCESSED 3 /* target done processing */ 789#define QLA_TGT_STATE_PROCESSED 3 /* target done processing */
790#define QLA_TGT_STATE_ABORTED 4 /* Command aborted */ 790
791 791
792/* Special handles */ 792/* Special handles */
793#define QLA_TGT_NULL_HANDLE 0 793#define QLA_TGT_NULL_HANDLE 0
@@ -835,6 +835,7 @@ struct qla_tgt {
835 * HW lock. 835 * HW lock.
836 */ 836 */
837 int irq_cmd_count; 837 int irq_cmd_count;
838 int atio_irq_cmd_count;
838 839
839 int datasegs_per_cmd, datasegs_per_cont, sg_tablesize; 840 int datasegs_per_cmd, datasegs_per_cont, sg_tablesize;
840 841
@@ -883,6 +884,7 @@ struct qla_tgt {
883 884
884struct qla_tgt_sess_op { 885struct qla_tgt_sess_op {
885 struct scsi_qla_host *vha; 886 struct scsi_qla_host *vha;
887 uint32_t chip_reset;
886 struct atio_from_isp atio; 888 struct atio_from_isp atio;
887 struct work_struct work; 889 struct work_struct work;
888 struct list_head cmd_list; 890 struct list_head cmd_list;
@@ -896,6 +898,19 @@ enum qla_sess_deletion {
896 QLA_SESS_DELETION_IN_PROGRESS = 2, 898 QLA_SESS_DELETION_IN_PROGRESS = 2,
897}; 899};
898 900
901typedef enum {
902 QLT_PLOGI_LINK_SAME_WWN,
903 QLT_PLOGI_LINK_CONFLICT,
904 QLT_PLOGI_LINK_MAX
905} qlt_plogi_link_t;
906
907typedef struct {
908 struct list_head list;
909 struct imm_ntfy_from_isp iocb;
910 port_id_t id;
911 int ref_count;
912} qlt_plogi_ack_t;
913
899/* 914/*
900 * Equivalent to IT Nexus (Initiator-Target) 915
901 */ 916 */
@@ -907,8 +922,8 @@ struct qla_tgt_sess {
907 unsigned int deleted:2; 922 unsigned int deleted:2;
908 unsigned int local:1; 923 unsigned int local:1;
909 unsigned int logout_on_delete:1; 924 unsigned int logout_on_delete:1;
910 unsigned int plogi_ack_needed:1;
911 unsigned int keep_nport_handle:1; 925 unsigned int keep_nport_handle:1;
926 unsigned int send_els_logo:1;
912 927
913 unsigned char logout_completed; 928 unsigned char logout_completed;
914 929
@@ -925,9 +940,7 @@ struct qla_tgt_sess {
925 uint8_t port_name[WWN_SIZE]; 940 uint8_t port_name[WWN_SIZE];
926 struct work_struct free_work; 941 struct work_struct free_work;
927 942
928 union { 943 qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX];
929 struct imm_ntfy_from_isp tm_iocb;
930 };
931}; 944};
932 945
933struct qla_tgt_cmd { 946struct qla_tgt_cmd {
@@ -949,6 +962,7 @@ struct qla_tgt_cmd {
949 unsigned int term_exchg:1; 962 unsigned int term_exchg:1;
950 unsigned int cmd_sent_to_fw:1; 963 unsigned int cmd_sent_to_fw:1;
951 unsigned int cmd_in_wq:1; 964 unsigned int cmd_in_wq:1;
965 unsigned int aborted:1;
952 966
953 struct scatterlist *sg; /* cmd data buffer SG vector */ 967 struct scatterlist *sg; /* cmd data buffer SG vector */
954 int sg_cnt; /* SG segments count */ 968 int sg_cnt; /* SG segments count */
@@ -1120,6 +1134,14 @@ static inline uint32_t sid_to_key(const uint8_t *s_id)
1120 return key; 1134 return key;
1121} 1135}
1122 1136
1137static inline void sid_to_portid(const uint8_t *s_id, port_id_t *p)
1138{
1139 memset(p, 0, sizeof(*p));
1140 p->b.domain = s_id[0];
1141 p->b.area = s_id[1];
1142 p->b.al_pa = s_id[2];
1143}
1144
1123/* 1145/*
1124 * Exported symbols from qla_target.c LLD logic used by qla2xxx code. 1146
1125 */ 1147 */
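A quick exercise of the new sid_to_portid() helper above, with the kernel's union-of-bitfields port_id_t flattened to a plain struct for the sake of a standalone build:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct { uint8_t domain, area, al_pa; } port_id_t; /* simplified */

static void sid_to_portid(const uint8_t *s_id, port_id_t *p)
{
        memset(p, 0, sizeof(*p));
        p->domain = s_id[0];    /* S_ID arrives big-endian on the wire */
        p->area   = s_id[1];
        p->al_pa  = s_id[2];
}

int main(void)
{
        uint8_t s_id[3] = { 0x01, 0x02, 0xef };
        port_id_t id;

        sid_to_portid(s_id, &id);
        printf("%02x:%02x:%02x\n", id.domain, id.area, id.al_pa);
        return 0;
}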
@@ -1135,7 +1157,7 @@ extern void qlt_enable_vha(struct scsi_qla_host *);
1135extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *); 1157extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *);
1136extern void qlt_rff_id(struct scsi_qla_host *, struct ct_sns_req *); 1158extern void qlt_rff_id(struct scsi_qla_host *, struct ct_sns_req *);
1137extern void qlt_init_atio_q_entries(struct scsi_qla_host *); 1159extern void qlt_init_atio_q_entries(struct scsi_qla_host *);
1138extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *); 1160extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *, uint8_t);
1139extern void qlt_24xx_config_rings(struct scsi_qla_host *); 1161extern void qlt_24xx_config_rings(struct scsi_qla_host *);
1140extern void qlt_24xx_config_nvram_stage1(struct scsi_qla_host *, 1162extern void qlt_24xx_config_nvram_stage1(struct scsi_qla_host *,
1141 struct nvram_24xx *); 1163 struct nvram_24xx *);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 81af294f15a7..faf0a126627f 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -284,6 +284,7 @@ static void tcm_qla2xxx_complete_free(struct work_struct *work)
284 284
285 WARN_ON(cmd->cmd_flags & BIT_16); 285 WARN_ON(cmd->cmd_flags & BIT_16);
286 286
287 cmd->vha->tgt_counters.qla_core_ret_sta_ctio++;
287 cmd->cmd_flags |= BIT_16; 288 cmd->cmd_flags |= BIT_16;
288 transport_generic_free_cmd(&cmd->se_cmd, 0); 289 transport_generic_free_cmd(&cmd->se_cmd, 0);
289} 290}
@@ -295,9 +296,10 @@ static void tcm_qla2xxx_complete_free(struct work_struct *work)
295 */ 296 */
296static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd) 297static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
297{ 298{
299 cmd->vha->tgt_counters.core_qla_free_cmd++;
298 cmd->cmd_in_wq = 1; 300 cmd->cmd_in_wq = 1;
299 INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free); 301 INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
300 queue_work(tcm_qla2xxx_free_wq, &cmd->work); 302 queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
301} 303}
302 304
303/* 305/*
@@ -342,9 +344,9 @@ static int tcm_qla2xxx_shutdown_session(struct se_session *se_sess)
342 BUG_ON(!sess); 344 BUG_ON(!sess);
343 vha = sess->vha; 345 vha = sess->vha;
344 346
345 spin_lock_irqsave(&vha->hw->hardware_lock, flags); 347 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
346 target_sess_cmd_list_set_waiting(se_sess); 348 target_sess_cmd_list_set_waiting(se_sess);
347 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); 349 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
348 350
349 return 1; 351 return 1;
350} 352}
@@ -358,9 +360,9 @@ static void tcm_qla2xxx_close_session(struct se_session *se_sess)
358 BUG_ON(!sess); 360 BUG_ON(!sess);
359 vha = sess->vha; 361 vha = sess->vha;
360 362
361 spin_lock_irqsave(&vha->hw->hardware_lock, flags); 363 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
362 qlt_unreg_sess(sess); 364 qlt_unreg_sess(sess);
363 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); 365 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
364} 366}
365 367
366static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess) 368static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess)
@@ -454,6 +456,7 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
454 return -EINVAL; 456 return -EINVAL;
455 } 457 }
456 458
459 cmd->vha->tgt_counters.qla_core_sbt_cmd++;
457 return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0], 460 return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
458 cmd->unpacked_lun, data_length, fcp_task_attr, 461 cmd->unpacked_lun, data_length, fcp_task_attr,
459 data_dir, flags); 462 data_dir, flags);
@@ -469,6 +472,7 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
469 */ 472 */
470 cmd->cmd_in_wq = 0; 473 cmd->cmd_in_wq = 0;
471 cmd->cmd_flags |= BIT_11; 474 cmd->cmd_flags |= BIT_11;
475 cmd->vha->tgt_counters.qla_core_ret_ctio++;
472 if (!cmd->write_data_transferred) { 476 if (!cmd->write_data_transferred) {
473 /* 477 /*
474 * Check if se_cmd has already been aborted via LUN_RESET, and 478 * Check if se_cmd has already been aborted via LUN_RESET, and
@@ -500,7 +504,7 @@ static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
500 cmd->cmd_flags |= BIT_10; 504 cmd->cmd_flags |= BIT_10;
501 cmd->cmd_in_wq = 1; 505 cmd->cmd_in_wq = 1;
502 INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work); 506 INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work);
503 queue_work(tcm_qla2xxx_free_wq, &cmd->work); 507 queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
504} 508}
505 509
506static void tcm_qla2xxx_handle_dif_work(struct work_struct *work) 510static void tcm_qla2xxx_handle_dif_work(struct work_struct *work)
@@ -643,7 +647,7 @@ static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
643static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *, 647static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
644 struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *); 648 struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *);
645/* 649/*
646 * Expected to be called with struct qla_hw_data->hardware_lock held 650 * Expected to be called with struct qla_hw_data->tgt.sess_lock held
647 */ 651 */
648static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess) 652static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
649{ 653{
@@ -697,13 +701,13 @@ static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess)
697 if (!sess) 701 if (!sess)
698 return; 702 return;
699 703
700 assert_spin_locked(&sess->vha->hw->hardware_lock); 704 assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
701 kref_put(&sess->se_sess->sess_kref, tcm_qla2xxx_release_session); 705 kref_put(&sess->se_sess->sess_kref, tcm_qla2xxx_release_session);
702} 706}
703 707
704static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess) 708static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess)
705{ 709{
706 assert_spin_locked(&sess->vha->hw->hardware_lock); 710 assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
707 target_sess_cmd_list_set_waiting(sess->se_sess); 711 target_sess_cmd_list_set_waiting(sess->se_sess);
708} 712}
709 713
@@ -1077,7 +1081,7 @@ static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
1077} 1081}
1078 1082
1079/* 1083/*
1080 * Expected to be called with struct qla_hw_data->hardware_lock held 1084 * Expected to be called with struct qla_hw_data->tgt.sess_lock held
1081 */ 1085 */
1082static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id( 1086static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
1083 scsi_qla_host_t *vha, 1087 scsi_qla_host_t *vha,
@@ -1116,7 +1120,7 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
1116} 1120}
1117 1121
1118/* 1122/*
1119 * Expected to be called with struct qla_hw_data->hardware_lock held 1123 * Expected to be called with struct qla_hw_data->tgt.sess_lock held
1120 */ 1124 */
1121static void tcm_qla2xxx_set_sess_by_s_id( 1125static void tcm_qla2xxx_set_sess_by_s_id(
1122 struct tcm_qla2xxx_lport *lport, 1126 struct tcm_qla2xxx_lport *lport,
@@ -1182,7 +1186,7 @@ static void tcm_qla2xxx_set_sess_by_s_id(
1182} 1186}
1183 1187
1184/* 1188/*
1185 * Expected to be called with struct qla_hw_data->hardware_lock held 1189 * Expected to be called with struct qla_hw_data->tgt.sess_lock held
1186 */ 1190 */
1187static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id( 1191static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id(
1188 scsi_qla_host_t *vha, 1192 scsi_qla_host_t *vha,
@@ -1221,7 +1225,7 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id(
1221} 1225}
1222 1226
1223/* 1227/*
1224 * Expected to be called with struct qla_hw_data->hardware_lock held 1228 * Expected to be called with struct qla_hw_data->tgt.sess_lock held
1225 */ 1229 */
1226static void tcm_qla2xxx_set_sess_by_loop_id( 1230static void tcm_qla2xxx_set_sess_by_loop_id(
1227 struct tcm_qla2xxx_lport *lport, 1231 struct tcm_qla2xxx_lport *lport,
@@ -1285,7 +1289,7 @@ static void tcm_qla2xxx_set_sess_by_loop_id(
1285} 1289}
1286 1290
1287/* 1291/*
1288 * Should always be called with qla_hw_data->hardware_lock held. 1292 * Should always be called with qla_hw_data->tgt.sess_lock held.
1289 */ 1293 */
1290static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport, 1294static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport,
1291 struct tcm_qla2xxx_nacl *nacl, struct qla_tgt_sess *sess) 1295 struct tcm_qla2xxx_nacl *nacl, struct qla_tgt_sess *sess)
@@ -1353,7 +1357,7 @@ static int tcm_qla2xxx_check_initiator_node_acl(
1353 struct qla_tgt_sess *sess = qla_tgt_sess; 1357 struct qla_tgt_sess *sess = qla_tgt_sess;
1354 unsigned char port_name[36]; 1358 unsigned char port_name[36];
1355 unsigned long flags; 1359 unsigned long flags;
1356 int num_tags = (ha->fw_xcb_count) ? ha->fw_xcb_count : 1360 int num_tags = (ha->cur_fw_xcb_count) ? ha->cur_fw_xcb_count :
1357 TCM_QLA2XXX_DEFAULT_TAGS; 1361 TCM_QLA2XXX_DEFAULT_TAGS;
1358 1362
1359 lport = vha->vha_tgt.target_lport_ptr; 1363 lport = vha->vha_tgt.target_lport_ptr;
@@ -1401,12 +1405,12 @@ static int tcm_qla2xxx_check_initiator_node_acl(
1401 * And now setup the new se_nacl and session pointers into our HW lport 1405 * And now setup the new se_nacl and session pointers into our HW lport
1402 * mappings for fabric S_ID and LOOP_ID. 1406 * mappings for fabric S_ID and LOOP_ID.
1403 */ 1407 */
1404 spin_lock_irqsave(&ha->hardware_lock, flags); 1408 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1405 tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess, 1409 tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess,
1406 qla_tgt_sess, s_id); 1410 qla_tgt_sess, s_id);
1407 tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl, se_sess, 1411 tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl, se_sess,
1408 qla_tgt_sess, loop_id); 1412 qla_tgt_sess, loop_id);
1409 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1413 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1410 /* 1414 /*
1411 * Finally register the new FC Nexus with TCM 1415 * Finally register the new FC Nexus with TCM
1412 */ 1416 */
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 72204fbf2bb1..576a7a43470c 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1333,7 +1333,7 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
1333 /* 1333 /*
1334 * Check if a delayed TASK_ABORTED status needs to 1334 * Check if a delayed TASK_ABORTED status needs to
1335 * be sent now if the ISCSI_FLAG_CMD_FINAL has been 1335 * be sent now if the ISCSI_FLAG_CMD_FINAL has been
1336 * received with the unsolicitied data out. 1336 * received with the unsolicited data out.
1337 */ 1337 */
1338 if (hdr->flags & ISCSI_FLAG_CMD_FINAL) 1338 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
1339 iscsit_stop_dataout_timer(cmd); 1339 iscsit_stop_dataout_timer(cmd);
@@ -3435,7 +3435,7 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
3435 3435
3436 if ((tpg->tpg_attrib.generate_node_acls == 0) && 3436 if ((tpg->tpg_attrib.generate_node_acls == 0) &&
3437 (tpg->tpg_attrib.demo_mode_discovery == 0) && 3437 (tpg->tpg_attrib.demo_mode_discovery == 0) &&
3438 (!core_tpg_get_initiator_node_acl(&tpg->tpg_se_tpg, 3438 (!target_tpg_has_node_acl(&tpg->tpg_se_tpg,
3439 cmd->conn->sess->sess_ops->InitiatorName))) { 3439 cmd->conn->sess->sess_ops->InitiatorName))) {
3440 continue; 3440 continue;
3441 } 3441 }
@@ -4459,9 +4459,6 @@ int iscsit_close_connection(
4459 4459
4460 return 0; 4460 return 0;
4461 } 4461 }
4462 spin_unlock_bh(&sess->conn_lock);
4463
4464 return 0;
4465} 4462}
4466 4463
4467int iscsit_close_session(struct iscsi_session *sess) 4464int iscsit_close_session(struct iscsi_session *sess)
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 255204cc43e6..2f821de63049 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -725,11 +725,8 @@ static ssize_t lio_target_nacl_cmdsn_depth_store(struct config_item *item,
725 725
726 if (iscsit_get_tpg(tpg) < 0) 726 if (iscsit_get_tpg(tpg) < 0)
727 return -EINVAL; 727 return -EINVAL;
728 /* 728
729 * iscsit_tpg_set_initiator_node_queue_depth() assumes force=1 729 ret = core_tpg_set_initiator_node_queue_depth(se_nacl, cmdsn_depth);
730 */
731 ret = iscsit_tpg_set_initiator_node_queue_depth(tpg,
732 config_item_name(acl_ci), cmdsn_depth, 1);
733 730
734 pr_debug("LIO_Target_ConfigFS: %s/%s Set CmdSN Window: %u for" 731 pr_debug("LIO_Target_ConfigFS: %s/%s Set CmdSN Window: %u for"
735 "InitiatorName: %s\n", config_item_name(wwn_ci), 732 "InitiatorName: %s\n", config_item_name(wwn_ci),
@@ -1593,28 +1590,30 @@ static int lio_tpg_check_prot_fabric_only(
1593} 1590}
1594 1591
1595/* 1592/*
1596 * Called with spin_lock_bh(struct se_portal_group->session_lock) held.. 1593 * This function calls iscsit_inc_session_usage_count() on the
1597 *
1598 * Also, this function calls iscsit_inc_session_usage_count() on the
1599 * struct iscsi_session in question. 1594 * struct iscsi_session in question.
1600 */ 1595 */
1601static int lio_tpg_shutdown_session(struct se_session *se_sess) 1596static int lio_tpg_shutdown_session(struct se_session *se_sess)
1602{ 1597{
1603 struct iscsi_session *sess = se_sess->fabric_sess_ptr; 1598 struct iscsi_session *sess = se_sess->fabric_sess_ptr;
1599 struct se_portal_group *se_tpg = &sess->tpg->tpg_se_tpg;
1604 1600
1601 spin_lock_bh(&se_tpg->session_lock);
1605 spin_lock(&sess->conn_lock); 1602 spin_lock(&sess->conn_lock);
1606 if (atomic_read(&sess->session_fall_back_to_erl0) || 1603 if (atomic_read(&sess->session_fall_back_to_erl0) ||
1607 atomic_read(&sess->session_logout) || 1604 atomic_read(&sess->session_logout) ||
1608 (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) { 1605 (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
1609 spin_unlock(&sess->conn_lock); 1606 spin_unlock(&sess->conn_lock);
1607 spin_unlock_bh(&se_tpg->session_lock);
1610 return 0; 1608 return 0;
1611 } 1609 }
1612 atomic_set(&sess->session_reinstatement, 1); 1610 atomic_set(&sess->session_reinstatement, 1);
1613 spin_unlock(&sess->conn_lock); 1611 spin_unlock(&sess->conn_lock);
1614 1612
1615 iscsit_stop_time2retain_timer(sess); 1613 iscsit_stop_time2retain_timer(sess);
1616 iscsit_stop_session(sess, 1, 1); 1614 spin_unlock_bh(&se_tpg->session_lock);
1617 1615
1616 iscsit_stop_session(sess, 1, 1);
1618 return 1; 1617 return 1;
1619} 1618}
1620 1619
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 2e561deb30a2..9214c9dafa2b 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -160,8 +160,7 @@ static int iscsit_handle_r2t_snack(
160 " protocol error.\n", cmd->init_task_tag, begrun, 160 " protocol error.\n", cmd->init_task_tag, begrun,
161 (begrun + runlength), cmd->acked_data_sn); 161 (begrun + runlength), cmd->acked_data_sn);
162 162
163 return iscsit_reject_cmd(cmd, 163 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
164 ISCSI_REASON_PROTOCOL_ERROR, buf);
165 } 164 }
166 165
167 if (runlength) { 166 if (runlength) {
@@ -628,8 +627,8 @@ int iscsit_dataout_datapduinorder_no_fbit(
628 if (cmd->pdu_list[i].seq_no == pdu->seq_no) { 627 if (cmd->pdu_list[i].seq_no == pdu->seq_no) {
629 if (!first_pdu) 628 if (!first_pdu)
630 first_pdu = &cmd->pdu_list[i]; 629 first_pdu = &cmd->pdu_list[i];
631 xfer_len += cmd->pdu_list[i].length; 630 xfer_len += cmd->pdu_list[i].length;
632 pdu_count++; 631 pdu_count++;
633 } else if (pdu_count) 632 } else if (pdu_count)
634 break; 633 break;
635 } 634 }
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 2cbea2af7cd0..3a1f9a7e6bb6 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -1668,7 +1668,7 @@ void iscsi_set_session_parameters(
1668 param->value); 1668 param->value);
1669 } else if (!strcmp(param->name, INITIALR2T)) { 1669 } else if (!strcmp(param->name, INITIALR2T)) {
1670 ops->InitialR2T = !strcmp(param->value, YES); 1670 ops->InitialR2T = !strcmp(param->value, YES);
1671 pr_debug("InitialR2T: %s\n", 1671 pr_debug("InitialR2T: %s\n",
1672 param->value); 1672 param->value);
1673 } else if (!strcmp(param->name, IMMEDIATEDATA)) { 1673 } else if (!strcmp(param->name, IMMEDIATEDATA)) {
1674 ops->ImmediateData = !strcmp(param->value, YES); 1674 ops->ImmediateData = !strcmp(param->value, YES);
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
index 11320df939f7..3d637055c36f 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.c
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -82,7 +82,7 @@ int iscsit_tmr_task_warm_reset(
82 pr_err("TMR Opcode TARGET_WARM_RESET authorization" 82 pr_err("TMR Opcode TARGET_WARM_RESET authorization"
83 " failed for Initiator Node: %s\n", 83 " failed for Initiator Node: %s\n",
84 sess->se_sess->se_node_acl->initiatorname); 84 sess->se_sess->se_node_acl->initiatorname);
85 return -1; 85 return -1;
86 } 86 }
87 /* 87 /*
88 * Do the real work in transport_generic_do_tmr(). 88 * Do the real work in transport_generic_do_tmr().
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 23c95cd14167..0814e5894a96 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -590,16 +590,6 @@ int iscsit_tpg_del_network_portal(
590 return iscsit_tpg_release_np(tpg_np, tpg, np); 590 return iscsit_tpg_release_np(tpg_np, tpg, np);
591} 591}
592 592
593int iscsit_tpg_set_initiator_node_queue_depth(
594 struct iscsi_portal_group *tpg,
595 unsigned char *initiatorname,
596 u32 queue_depth,
597 int force)
598{
599 return core_tpg_set_initiator_node_queue_depth(&tpg->tpg_se_tpg,
600 initiatorname, queue_depth, force);
601}
602
603int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication) 593int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication)
604{ 594{
605 unsigned char buf1[256], buf2[256], *none = NULL; 595 unsigned char buf1[256], buf2[256], *none = NULL;
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
index 9db32bd24cd4..2da211920c18 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -26,8 +26,6 @@ extern struct iscsi_tpg_np *iscsit_tpg_add_network_portal(struct iscsi_portal_gr
26 int); 26 int);
27extern int iscsit_tpg_del_network_portal(struct iscsi_portal_group *, 27extern int iscsit_tpg_del_network_portal(struct iscsi_portal_group *,
28 struct iscsi_tpg_np *); 28 struct iscsi_tpg_np *);
29extern int iscsit_tpg_set_initiator_node_queue_depth(struct iscsi_portal_group *,
30 unsigned char *, u32, int);
31extern int iscsit_ta_authentication(struct iscsi_portal_group *, u32); 29extern int iscsit_ta_authentication(struct iscsi_portal_group *, u32);
32extern int iscsit_ta_login_timeout(struct iscsi_portal_group *, u32); 30extern int iscsit_ta_login_timeout(struct iscsi_portal_group *, u32);
33extern int iscsit_ta_netif_timeout(struct iscsi_portal_group *, u32); 31extern int iscsit_ta_netif_timeout(struct iscsi_portal_group *, u32);
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 4fb0eca86857..d41a5c300e31 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -1036,12 +1036,26 @@ static ssize_t tcm_loop_tpg_transport_status_store(struct config_item *item,
1036 return -EINVAL; 1036 return -EINVAL;
1037} 1037}
1038 1038
1039static ssize_t tcm_loop_tpg_address_show(struct config_item *item,
1040 char *page)
1041{
1042 struct se_portal_group *se_tpg = to_tpg(item);
1043 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1044 struct tcm_loop_tpg, tl_se_tpg);
1045 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
1046
1047 return snprintf(page, PAGE_SIZE, "%d:0:%d\n",
1048 tl_hba->sh->host_no, tl_tpg->tl_tpgt);
1049}
1050
1039CONFIGFS_ATTR(tcm_loop_tpg_, nexus); 1051CONFIGFS_ATTR(tcm_loop_tpg_, nexus);
1040CONFIGFS_ATTR(tcm_loop_tpg_, transport_status); 1052CONFIGFS_ATTR(tcm_loop_tpg_, transport_status);
1053CONFIGFS_ATTR_RO(tcm_loop_tpg_, address);
1041 1054
1042static struct configfs_attribute *tcm_loop_tpg_attrs[] = { 1055static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
1043 &tcm_loop_tpg_attr_nexus, 1056 &tcm_loop_tpg_attr_nexus,
1044 &tcm_loop_tpg_attr_transport_status, 1057 &tcm_loop_tpg_attr_transport_status,
1058 &tcm_loop_tpg_attr_address,
1045 NULL, 1059 NULL,
1046}; 1060};
1047 1061
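The new read-only attribute above follows the stock configfs pattern:
CONFIGFS_ATTR_RO(prefix_, name) generates a prefix_attr_name descriptor
wired to prefix_name_show(). A minimal self-contained sketch with
hypothetical demo_* names, not part of the commit:

#include <linux/kernel.h>
#include <linux/configfs.h>

static ssize_t demo_tpg_version_show(struct config_item *item, char *page)
{
        return snprintf(page, PAGE_SIZE, "1\n");
}
CONFIGFS_ATTR_RO(demo_tpg_, version);   /* emits demo_tpg_attr_version */

static struct configfs_attribute *demo_tpg_attrs[] = {
        &demo_tpg_attr_version,
        NULL,
};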
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 35f7d31b29d2..3072f1aca8ec 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -39,8 +39,6 @@
39 39
40#include "sbp_target.h" 40#include "sbp_target.h"
41 41
42static const struct target_core_fabric_ops sbp_ops;
43
44/* FireWire address region for management and command block address handlers */ 42/* FireWire address region for management and command block address handlers */
45static const struct fw_address_region sbp_register_region = { 43static const struct fw_address_region sbp_register_region = {
46 .start = CSR_REGISTER_BASE + 0x10000, 44 .start = CSR_REGISTER_BASE + 0x10000,
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index b9b9ffde4c7a..3327c49674d3 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -278,7 +278,7 @@ EXPORT_SYMBOL(target_depend_item);
278 278
279void target_undepend_item(struct config_item *item) 279void target_undepend_item(struct config_item *item)
280{ 280{
281 return configfs_undepend_item(&target_core_fabrics, item); 281 return configfs_undepend_item(item);
282} 282}
283EXPORT_SYMBOL(target_undepend_item); 283EXPORT_SYMBOL(target_undepend_item);
284 284
@@ -499,6 +499,7 @@ DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_lba_count);
499DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_block_desc_count); 499DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_block_desc_count);
500DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity); 500DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity);
501DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity_alignment); 501DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity_alignment);
502DEF_CONFIGFS_ATTRIB_SHOW(unmap_zeroes_data);
502DEF_CONFIGFS_ATTRIB_SHOW(max_write_same_len); 503DEF_CONFIGFS_ATTRIB_SHOW(max_write_same_len);
503 504
504#define DEF_CONFIGFS_ATTRIB_STORE_U32(_name) \ 505#define DEF_CONFIGFS_ATTRIB_STORE_U32(_name) \
@@ -548,7 +549,8 @@ static ssize_t _name##_store(struct config_item *item, const char *page,\
548 size_t count) \ 549 size_t count) \
549{ \ 550{ \
550 printk_once(KERN_WARNING \ 551 printk_once(KERN_WARNING \
551 "ignoring deprecated ##_name## attribute\n"); \ 552 "ignoring deprecated %s attribute\n", \
553 __stringify(_name)); \
552 return count; \ 554 return count; \
553} 555}
554 556
@@ -866,6 +868,39 @@ static ssize_t emulate_rest_reord_store(struct config_item *item,
866 return count; 868 return count;
867} 869}
868 870
871static ssize_t unmap_zeroes_data_store(struct config_item *item,
872 const char *page, size_t count)
873{
874 struct se_dev_attrib *da = to_attrib(item);
875 bool flag;
876 int ret;
877
878 ret = strtobool(page, &flag);
879 if (ret < 0)
880 return ret;
881
882 if (da->da_dev->export_count) {
883 pr_err("dev[%p]: Unable to change SE Device"
884 " unmap_zeroes_data while export_count is %d\n",
885 da->da_dev, da->da_dev->export_count);
886 return -EINVAL;
887 }
888 /*
889 * We expect this value to be non-zero when generic Block Layer
 890	 * Discard support is detected in iblock_configure_device().
891 */
892 if (flag && !da->max_unmap_block_desc_count) {
893 pr_err("dev[%p]: Thin Provisioning LBPRZ will not be set"
894 " because max_unmap_block_desc_count is zero\n",
895 da->da_dev);
896 return -ENOSYS;
897 }
898 da->unmap_zeroes_data = flag;
899 pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n",
900 da->da_dev, flag);
901 return 0;
902}
903
869/* 904/*
870 * Note, this can only be called on unexported SE Device Object. 905 * Note, this can only be called on unexported SE Device Object.
871 */ 906 */
@@ -998,6 +1033,7 @@ CONFIGFS_ATTR(, max_unmap_lba_count);
998CONFIGFS_ATTR(, max_unmap_block_desc_count); 1033CONFIGFS_ATTR(, max_unmap_block_desc_count);
999CONFIGFS_ATTR(, unmap_granularity); 1034CONFIGFS_ATTR(, unmap_granularity);
1000CONFIGFS_ATTR(, unmap_granularity_alignment); 1035CONFIGFS_ATTR(, unmap_granularity_alignment);
1036CONFIGFS_ATTR(, unmap_zeroes_data);
1001CONFIGFS_ATTR(, max_write_same_len); 1037CONFIGFS_ATTR(, max_write_same_len);
1002 1038
1003/* 1039/*
@@ -1034,6 +1070,7 @@ struct configfs_attribute *sbc_attrib_attrs[] = {
1034 &attr_max_unmap_block_desc_count, 1070 &attr_max_unmap_block_desc_count,
1035 &attr_unmap_granularity, 1071 &attr_unmap_granularity,
1036 &attr_unmap_granularity_alignment, 1072 &attr_unmap_granularity_alignment,
1073 &attr_unmap_zeroes_data,
1037 &attr_max_write_same_len, 1074 &attr_max_write_same_len,
1038 NULL, 1075 NULL,
1039}; 1076};
@@ -1980,14 +2017,14 @@ static ssize_t target_dev_lba_map_store(struct config_item *item,
1980 struct se_device *dev = to_device(item); 2017 struct se_device *dev = to_device(item);
1981 struct t10_alua_lba_map *lba_map = NULL; 2018 struct t10_alua_lba_map *lba_map = NULL;
1982 struct list_head lba_list; 2019 struct list_head lba_list;
1983 char *map_entries, *ptr; 2020 char *map_entries, *orig, *ptr;
1984 char state; 2021 char state;
1985 int pg_num = -1, pg; 2022 int pg_num = -1, pg;
1986 int ret = 0, num = 0, pg_id, alua_state; 2023 int ret = 0, num = 0, pg_id, alua_state;
1987 unsigned long start_lba = -1, end_lba = -1; 2024 unsigned long start_lba = -1, end_lba = -1;
1988 unsigned long segment_size = -1, segment_mult = -1; 2025 unsigned long segment_size = -1, segment_mult = -1;
1989 2026
1990 map_entries = kstrdup(page, GFP_KERNEL); 2027 orig = map_entries = kstrdup(page, GFP_KERNEL);
1991 if (!map_entries) 2028 if (!map_entries)
1992 return -ENOMEM; 2029 return -ENOMEM;
1993 2030
@@ -2085,7 +2122,7 @@ out:
2085 } else 2122 } else
2086 core_alua_set_lba_map(dev, &lba_list, 2123 core_alua_set_lba_map(dev, &lba_list,
2087 segment_size, segment_mult); 2124 segment_size, segment_mult);
2088 kfree(map_entries); 2125 kfree(orig);
2089 return count; 2126 return count;
2090} 2127}
2091 2128
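Two of the fixes above deserve a note. The deprecated-attribute macro has to
use __stringify(_name) because "##_name##" inside a string literal is never
expanded by the preprocessor, so the old warning printed it verbatim. And
target_dev_lba_map_store() leaked memory because strsep() advances its
cursor argument; kfree() must get the original kstrdup() pointer. A sketch
of the latter idiom, with hypothetical demo names:

#include <linux/slab.h>
#include <linux/string.h>

static int demo_parse(const char *page)
{
        char *buf, *orig, *tok;

        orig = buf = kstrdup(page, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        while ((tok = strsep(&buf, "\n")) != NULL)
                pr_debug("token: %s\n", tok);

        kfree(orig);    /* kfree(buf) would free NULL or a mid-string pointer */
        return 0;
}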
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 88ea4e4f124b..cacd97a8cbd0 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -813,6 +813,8 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
813 dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT; 813 dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
814 dev->dev_attrib.unmap_granularity_alignment = 814 dev->dev_attrib.unmap_granularity_alignment =
815 DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT; 815 DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
816 dev->dev_attrib.unmap_zeroes_data =
817 DA_UNMAP_ZEROES_DATA_DEFAULT;
816 dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN; 818 dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
817 819
818 xcopy_lun = &dev->xcopy_lun; 820 xcopy_lun = &dev->xcopy_lun;
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index f29c69120054..e77d15000caa 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -138,6 +138,8 @@ static int iblock_configure_device(struct se_device *dev)
138 q->limits.discard_granularity >> 9; 138 q->limits.discard_granularity >> 9;
139 dev->dev_attrib.unmap_granularity_alignment = 139 dev->dev_attrib.unmap_granularity_alignment =
140 q->limits.discard_alignment; 140 q->limits.discard_alignment;
141 dev->dev_attrib.unmap_zeroes_data =
142 q->limits.discard_zeroes_data;
141 143
142 pr_debug("IBLOCK: BLOCK Discard support available," 144 pr_debug("IBLOCK: BLOCK Discard support available,"
143 " disabled by default\n"); 145 " disabled by default\n");
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index e7933115087a..b1795735eafc 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -1457,8 +1457,7 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
1457static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve) 1457static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
1458{ 1458{
1459 struct se_lun_acl *lun_acl; 1459 struct se_lun_acl *lun_acl;
1460 struct se_node_acl *nacl; 1460
1461 struct se_portal_group *tpg;
1462 /* 1461 /*
1463 * For nacl->dynamic_node_acl=1 1462 * For nacl->dynamic_node_acl=1
1464 */ 1463 */
@@ -1467,17 +1466,13 @@ static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
1467 if (!lun_acl) 1466 if (!lun_acl)
1468 return 0; 1467 return 0;
1469 1468
1470 nacl = lun_acl->se_lun_nacl;
1471 tpg = nacl->se_tpg;
1472
1473 return target_depend_item(&lun_acl->se_lun_group.cg_item); 1469 return target_depend_item(&lun_acl->se_lun_group.cg_item);
1474} 1470}
1475 1471
1476static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve) 1472static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
1477{ 1473{
1478 struct se_lun_acl *lun_acl; 1474 struct se_lun_acl *lun_acl;
1479 struct se_node_acl *nacl; 1475
1480 struct se_portal_group *tpg;
1481 /* 1476 /*
1482 * For nacl->dynamic_node_acl=1 1477 * For nacl->dynamic_node_acl=1
1483 */ 1478 */
@@ -1487,8 +1482,6 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
1487 kref_put(&se_deve->pr_kref, target_pr_kref_release); 1482 kref_put(&se_deve->pr_kref, target_pr_kref_release);
1488 return; 1483 return;
1489 } 1484 }
1490 nacl = lun_acl->se_lun_nacl;
1491 tpg = nacl->se_tpg;
1492 1485
1493 target_undepend_item(&lun_acl->se_lun_group.cg_item); 1486 target_undepend_item(&lun_acl->se_lun_group.cg_item);
1494 kref_put(&se_deve->pr_kref, target_pr_kref_release); 1487 kref_put(&se_deve->pr_kref, target_pr_kref_release);
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 98698d875742..a9057aa07176 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -141,9 +141,17 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
141 * Set Thin Provisioning Enable bit following sbc3r22 in section 141 * Set Thin Provisioning Enable bit following sbc3r22 in section
142 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled. 142 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
143 */ 143 */
144 if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws) 144 if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws) {
145 buf[14] |= 0x80; 145 buf[14] |= 0x80;
146 146
147 /*
148 * LBPRZ signifies that zeroes will be read back from an LBA after
149 * an UNMAP or WRITE SAME w/ unmap bit (sbc3r36 5.16.2)
150 */
151 if (dev->dev_attrib.unmap_zeroes_data)
152 buf[14] |= 0x40;
153 }
154
147 rbuf = transport_kmap_data_sg(cmd); 155 rbuf = transport_kmap_data_sg(cmd);
148 if (rbuf) { 156 if (rbuf) {
149 memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); 157 memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
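Seen from the initiator, both provisioning bits set above live in READ
CAPACITY (16) parameter data byte 14. A hedged decoding sketch (demo name,
not from the commit):

#include <linux/types.h>
#include <linux/printk.h>

static void demo_decode_rc16(const unsigned char *buf)
{
        bool lbpme = buf[14] & 0x80;    /* thin provisioning enabled */
        bool lbprz = buf[14] & 0x40;    /* unmapped blocks read as zeroes */

        pr_debug("LBPME=%d LBPRZ=%d\n", lbpme, lbprz);
}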
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 9413e1a949e5..0aa47babd16c 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -635,6 +635,18 @@ spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
635 if (dev->dev_attrib.emulate_tpws != 0) 635 if (dev->dev_attrib.emulate_tpws != 0)
636 buf[5] |= 0x40 | 0x20; 636 buf[5] |= 0x40 | 0x20;
637 637
638 /*
 639	 * unmap_zeroes_data being set means that the underlying device supports
 640	 * REQ_DISCARD and has the discard_zeroes_data bit set. This satisfies
 641	 * the SBC requirements for LBPRZ, meaning that a subsequent read
 642	 * will return zeroes after an UNMAP or WRITE SAME (16) to an LBA.
 643	 * See sbc4r36 6.6.4.
644 */
645 if (((dev->dev_attrib.emulate_tpu != 0) ||
646 (dev->dev_attrib.emulate_tpws != 0)) &&
647 (dev->dev_attrib.unmap_zeroes_data != 0))
648 buf[5] |= 0x04;
649
638 return 0; 650 return 0;
639} 651}
640 652
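The matching initiator-visible flag for this VPD change sits in the Logical
Block Provisioning page (B2h), byte 5. A small decode sketch (demo name;
bit positions as defined in SBC-3):

#include <linux/types.h>
#include <linux/printk.h>

static void demo_decode_vpd_b2(const unsigned char *buf)
{
        bool lbpu  = buf[5] & 0x80;     /* UNMAP supported */
        bool lbpws = buf[5] & 0x40;     /* WRITE SAME (16) w/ UNMAP supported */
        bool lbprz = buf[5] & 0x04;     /* unmapped LBAs read back as zeroes */

        pr_debug("LBPU=%d LBPWS=%d LBPRZ=%d\n", lbpu, lbpws, lbprz);
}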
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 28fb3016370f..fcdcb117c60d 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -201,7 +201,7 @@ static void core_tmr_drain_tmr_list(
201 /* 201 /*
202 * If this function was called with a valid pr_res_key 202 * If this function was called with a valid pr_res_key
203 * parameter (eg: for PROUT PREEMPT_AND_ABORT service action 203 * parameter (eg: for PROUT PREEMPT_AND_ABORT service action
204 * skip non regisration key matching TMRs. 204 * skip non registration key matching TMRs.
205 */ 205 */
206 if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd)) 206 if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
207 continue; 207 continue;
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 5fb9dd7f08bb..3608b1b5ecf7 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -75,9 +75,21 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
75 unsigned char *initiatorname) 75 unsigned char *initiatorname)
76{ 76{
77 struct se_node_acl *acl; 77 struct se_node_acl *acl;
78 78 /*
79 * Obtain se_node_acl->acl_kref using fabric driver provided
80 * initiatorname[] during node acl endpoint lookup driven by
81 * new se_session login.
82 *
83 * The reference is held until se_session shutdown -> release
84 * occurs via fabric driver invoked transport_deregister_session()
85 * or transport_free_session() code.
86 */
79 mutex_lock(&tpg->acl_node_mutex); 87 mutex_lock(&tpg->acl_node_mutex);
80 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); 88 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
89 if (acl) {
90 if (!kref_get_unless_zero(&acl->acl_kref))
91 acl = NULL;
92 }
81 mutex_unlock(&tpg->acl_node_mutex); 93 mutex_unlock(&tpg->acl_node_mutex);
82 94
83 return acl; 95 return acl;
@@ -157,28 +169,25 @@ void core_tpg_add_node_to_devs(
157 mutex_unlock(&tpg->tpg_lun_mutex); 169 mutex_unlock(&tpg->tpg_lun_mutex);
158} 170}
159 171
160/* core_set_queue_depth_for_node(): 172static void
161 * 173target_set_nacl_queue_depth(struct se_portal_group *tpg,
162 * 174 struct se_node_acl *acl, u32 queue_depth)
163 */
164static int core_set_queue_depth_for_node(
165 struct se_portal_group *tpg,
166 struct se_node_acl *acl)
167{ 175{
176 acl->queue_depth = queue_depth;
177
168 if (!acl->queue_depth) { 178 if (!acl->queue_depth) {
169 pr_err("Queue depth for %s Initiator Node: %s is 0," 179 pr_warn("Queue depth for %s Initiator Node: %s is 0,"
170 "defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(), 180 "defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
171 acl->initiatorname); 181 acl->initiatorname);
172 acl->queue_depth = 1; 182 acl->queue_depth = 1;
173 } 183 }
174
175 return 0;
176} 184}
177 185
178static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg, 186static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
179 const unsigned char *initiatorname) 187 const unsigned char *initiatorname)
180{ 188{
181 struct se_node_acl *acl; 189 struct se_node_acl *acl;
190 u32 queue_depth;
182 191
183 acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size), 192 acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
184 GFP_KERNEL); 193 GFP_KERNEL);
@@ -193,24 +202,20 @@ static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
193 spin_lock_init(&acl->nacl_sess_lock); 202 spin_lock_init(&acl->nacl_sess_lock);
194 mutex_init(&acl->lun_entry_mutex); 203 mutex_init(&acl->lun_entry_mutex);
195 atomic_set(&acl->acl_pr_ref_count, 0); 204 atomic_set(&acl->acl_pr_ref_count, 0);
205
196 if (tpg->se_tpg_tfo->tpg_get_default_depth) 206 if (tpg->se_tpg_tfo->tpg_get_default_depth)
197 acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg); 207 queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
198 else 208 else
199 acl->queue_depth = 1; 209 queue_depth = 1;
210 target_set_nacl_queue_depth(tpg, acl, queue_depth);
211
200 snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); 212 snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
201 acl->se_tpg = tpg; 213 acl->se_tpg = tpg;
202 acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); 214 acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
203 215
204 tpg->se_tpg_tfo->set_default_node_attributes(acl); 216 tpg->se_tpg_tfo->set_default_node_attributes(acl);
205 217
206 if (core_set_queue_depth_for_node(tpg, acl) < 0)
207 goto out_free_acl;
208
209 return acl; 218 return acl;
210
211out_free_acl:
212 kfree(acl);
213 return NULL;
214} 219}
215 220
216static void target_add_node_acl(struct se_node_acl *acl) 221static void target_add_node_acl(struct se_node_acl *acl)
@@ -219,7 +224,6 @@ static void target_add_node_acl(struct se_node_acl *acl)
219 224
220 mutex_lock(&tpg->acl_node_mutex); 225 mutex_lock(&tpg->acl_node_mutex);
221 list_add_tail(&acl->acl_list, &tpg->acl_node_list); 226 list_add_tail(&acl->acl_list, &tpg->acl_node_list);
222 tpg->num_node_acls++;
223 mutex_unlock(&tpg->acl_node_mutex); 227 mutex_unlock(&tpg->acl_node_mutex);
224 228
225 pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s" 229 pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
@@ -232,6 +236,25 @@ static void target_add_node_acl(struct se_node_acl *acl)
232 acl->initiatorname); 236 acl->initiatorname);
233} 237}
234 238
239bool target_tpg_has_node_acl(struct se_portal_group *tpg,
240 const char *initiatorname)
241{
242 struct se_node_acl *acl;
243 bool found = false;
244
245 mutex_lock(&tpg->acl_node_mutex);
246 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
247 if (!strcmp(acl->initiatorname, initiatorname)) {
248 found = true;
249 break;
250 }
251 }
252 mutex_unlock(&tpg->acl_node_mutex);
253
254 return found;
255}
256EXPORT_SYMBOL(target_tpg_has_node_acl);
257
235struct se_node_acl *core_tpg_check_initiator_node_acl( 258struct se_node_acl *core_tpg_check_initiator_node_acl(
236 struct se_portal_group *tpg, 259 struct se_portal_group *tpg,
237 unsigned char *initiatorname) 260 unsigned char *initiatorname)
@@ -248,6 +271,15 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
248 acl = target_alloc_node_acl(tpg, initiatorname); 271 acl = target_alloc_node_acl(tpg, initiatorname);
249 if (!acl) 272 if (!acl)
250 return NULL; 273 return NULL;
274 /*
275 * When allocating a dynamically generated node_acl, go ahead
276 * and take the extra kref now before returning to the fabric
277 * driver caller.
278 *
279 * Note this reference will be released at session shutdown
280 * time within transport_free_session() code.
281 */
282 kref_get(&acl->acl_kref);
251 acl->dynamic_node_acl = 1; 283 acl->dynamic_node_acl = 1;
252 284
253 /* 285 /*
@@ -318,7 +350,6 @@ void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
318 acl->dynamic_node_acl = 0; 350 acl->dynamic_node_acl = 0;
319 } 351 }
320 list_del(&acl->acl_list); 352 list_del(&acl->acl_list);
321 tpg->num_node_acls--;
322 mutex_unlock(&tpg->acl_node_mutex); 353 mutex_unlock(&tpg->acl_node_mutex);
323 354
324 spin_lock_irqsave(&acl->nacl_sess_lock, flags); 355 spin_lock_irqsave(&acl->nacl_sess_lock, flags);
@@ -329,7 +360,8 @@ void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
329 if (sess->sess_tearing_down != 0) 360 if (sess->sess_tearing_down != 0)
330 continue; 361 continue;
331 362
332 target_get_session(sess); 363 if (!target_get_session(sess))
364 continue;
333 list_move(&sess->sess_acl_list, &sess_list); 365 list_move(&sess->sess_acl_list, &sess_list);
334 } 366 }
335 spin_unlock_irqrestore(&acl->nacl_sess_lock, flags); 367 spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
@@ -366,108 +398,52 @@ void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
366 * 398 *
367 */ 399 */
368int core_tpg_set_initiator_node_queue_depth( 400int core_tpg_set_initiator_node_queue_depth(
369 struct se_portal_group *tpg, 401 struct se_node_acl *acl,
370 unsigned char *initiatorname, 402 u32 queue_depth)
371 u32 queue_depth,
372 int force)
373{ 403{
374 struct se_session *sess, *init_sess = NULL; 404 LIST_HEAD(sess_list);
375 struct se_node_acl *acl; 405 struct se_portal_group *tpg = acl->se_tpg;
406 struct se_session *sess, *sess_tmp;
376 unsigned long flags; 407 unsigned long flags;
377 int dynamic_acl = 0; 408 int rc;
378
379 mutex_lock(&tpg->acl_node_mutex);
380 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
381 if (!acl) {
382 pr_err("Access Control List entry for %s Initiator"
383 " Node %s does not exists for TPG %hu, ignoring"
384 " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
385 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
386 mutex_unlock(&tpg->acl_node_mutex);
387 return -ENODEV;
388 }
389 if (acl->dynamic_node_acl) {
390 acl->dynamic_node_acl = 0;
391 dynamic_acl = 1;
392 }
393 mutex_unlock(&tpg->acl_node_mutex);
394
395 spin_lock_irqsave(&tpg->session_lock, flags);
396 list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
397 if (sess->se_node_acl != acl)
398 continue;
399
400 if (!force) {
401 pr_err("Unable to change queue depth for %s"
402 " Initiator Node: %s while session is"
403 " operational. To forcefully change the queue"
404 " depth and force session reinstatement"
405 " use the \"force=1\" parameter.\n",
406 tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
407 spin_unlock_irqrestore(&tpg->session_lock, flags);
408
409 mutex_lock(&tpg->acl_node_mutex);
410 if (dynamic_acl)
411 acl->dynamic_node_acl = 1;
412 mutex_unlock(&tpg->acl_node_mutex);
413 return -EEXIST;
414 }
415 /*
416 * Determine if the session needs to be closed by our context.
417 */
418 if (!tpg->se_tpg_tfo->shutdown_session(sess))
419 continue;
420
421 init_sess = sess;
422 break;
423 }
424 409
425 /* 410 /*
 426	 * User has requested to change the queue depth for an Initiator Node. 411	 * User has requested to change the queue depth for an Initiator Node.
427 * Change the value in the Node's struct se_node_acl, and call 412 * Change the value in the Node's struct se_node_acl, and call
428 * core_set_queue_depth_for_node() to add the requested queue depth. 413 * target_set_nacl_queue_depth() to set the new queue depth.
429 *
430 * Finally call tpg->se_tpg_tfo->close_session() to force session
431 * reinstatement to occur if there is an active session for the
432 * $FABRIC_MOD Initiator Node in question.
433 */ 414 */
434 acl->queue_depth = queue_depth; 415 target_set_nacl_queue_depth(tpg, acl, queue_depth);
416
417 spin_lock_irqsave(&acl->nacl_sess_lock, flags);
418 list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
419 sess_acl_list) {
420 if (sess->sess_tearing_down != 0)
421 continue;
422 if (!target_get_session(sess))
423 continue;
424 spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
435 425
436 if (core_set_queue_depth_for_node(tpg, acl) < 0) {
437 spin_unlock_irqrestore(&tpg->session_lock, flags);
438 /* 426 /*
439 * Force session reinstatement if 427 * Finally call tpg->se_tpg_tfo->close_session() to force session
440 * core_set_queue_depth_for_node() failed, because we assume 428 * reinstatement to occur if there is an active session for the
441 * the $FABRIC_MOD has already the set session reinstatement 429 * $FABRIC_MOD Initiator Node in question.
442 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
443 */ 430 */
444 if (init_sess) 431 rc = tpg->se_tpg_tfo->shutdown_session(sess);
445 tpg->se_tpg_tfo->close_session(init_sess); 432 target_put_session(sess);
446 433 if (!rc) {
447 mutex_lock(&tpg->acl_node_mutex); 434 spin_lock_irqsave(&acl->nacl_sess_lock, flags);
448 if (dynamic_acl) 435 continue;
449 acl->dynamic_node_acl = 1; 436 }
450 mutex_unlock(&tpg->acl_node_mutex); 437 target_put_session(sess);
451 return -EINVAL; 438 spin_lock_irqsave(&acl->nacl_sess_lock, flags);
452 } 439 }
453 spin_unlock_irqrestore(&tpg->session_lock, flags); 440 spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
454 /*
455 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
456 * forcefully shutdown the $FABRIC_MOD session/nexus.
457 */
458 if (init_sess)
459 tpg->se_tpg_tfo->close_session(init_sess);
460 441
461 pr_debug("Successfully changed queue depth to: %d for Initiator" 442 pr_debug("Successfully changed queue depth to: %d for Initiator"
462 " Node: %s on %s Target Portal Group: %u\n", queue_depth, 443 " Node: %s on %s Target Portal Group: %u\n", acl->queue_depth,
463 initiatorname, tpg->se_tpg_tfo->get_fabric_name(), 444 acl->initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
464 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 445 tpg->se_tpg_tfo->tpg_get_tag(tpg));
465 446
466 mutex_lock(&tpg->acl_node_mutex);
467 if (dynamic_acl)
468 acl->dynamic_node_acl = 1;
469 mutex_unlock(&tpg->acl_node_mutex);
470
471 return 0; 447 return 0;
472} 448}
473EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth); 449EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
@@ -595,7 +571,6 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
595 */ 571 */
596 list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) { 572 list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
597 list_del(&nacl->acl_list); 573 list_del(&nacl->acl_list);
598 se_tpg->num_node_acls--;
599 574
600 core_tpg_wait_for_nacl_pr_ref(nacl); 575 core_tpg_wait_for_nacl_pr_ref(nacl);
601 core_free_device_list_for_node(nacl, se_tpg); 576 core_free_device_list_for_node(nacl, se_tpg);
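The heart of the acl_kref race fix above is that the lookup only hands out
an ACL whose refcount has not already dropped to zero, and that this check
happens under the same acl_node_mutex that serializes teardown. A minimal
sketch with hypothetical demo_* types:

#include <linux/kref.h>
#include <linux/mutex.h>

struct demo_acl { struct kref ref; };

static struct demo_acl *demo_get_acl(struct mutex *lock, struct demo_acl *cand)
{
        struct demo_acl *acl = NULL;

        mutex_lock(lock);
        if (cand && kref_get_unless_zero(&cand->ref))
                acl = cand;     /* caller now owns a reference */
        mutex_unlock(lock);

        return acl;             /* NULL if the ACL was already dying */
}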
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 4fdcee2006d1..9f3608e10f25 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -341,7 +341,6 @@ void __transport_register_session(
341 &buf[0], PR_REG_ISID_LEN); 341 &buf[0], PR_REG_ISID_LEN);
342 se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); 342 se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
343 } 343 }
344 kref_get(&se_nacl->acl_kref);
345 344
346 spin_lock_irq(&se_nacl->nacl_sess_lock); 345 spin_lock_irq(&se_nacl->nacl_sess_lock);
347 /* 346 /*
@@ -384,9 +383,9 @@ static void target_release_session(struct kref *kref)
384 se_tpg->se_tpg_tfo->close_session(se_sess); 383 se_tpg->se_tpg_tfo->close_session(se_sess);
385} 384}
386 385
387void target_get_session(struct se_session *se_sess) 386int target_get_session(struct se_session *se_sess)
388{ 387{
389 kref_get(&se_sess->sess_kref); 388 return kref_get_unless_zero(&se_sess->sess_kref);
390} 389}
391EXPORT_SYMBOL(target_get_session); 390EXPORT_SYMBOL(target_get_session);
392 391
@@ -432,6 +431,7 @@ void target_put_nacl(struct se_node_acl *nacl)
432{ 431{
433 kref_put(&nacl->acl_kref, target_complete_nacl); 432 kref_put(&nacl->acl_kref, target_complete_nacl);
434} 433}
434EXPORT_SYMBOL(target_put_nacl);
435 435
436void transport_deregister_session_configfs(struct se_session *se_sess) 436void transport_deregister_session_configfs(struct se_session *se_sess)
437{ 437{
@@ -464,6 +464,15 @@ EXPORT_SYMBOL(transport_deregister_session_configfs);
464 464
465void transport_free_session(struct se_session *se_sess) 465void transport_free_session(struct se_session *se_sess)
466{ 466{
467 struct se_node_acl *se_nacl = se_sess->se_node_acl;
468 /*
469 * Drop the se_node_acl->nacl_kref obtained from within
470 * core_tpg_get_initiator_node_acl().
471 */
472 if (se_nacl) {
473 se_sess->se_node_acl = NULL;
474 target_put_nacl(se_nacl);
475 }
467 if (se_sess->sess_cmd_map) { 476 if (se_sess->sess_cmd_map) {
468 percpu_ida_destroy(&se_sess->sess_tag_pool); 477 percpu_ida_destroy(&se_sess->sess_tag_pool);
469 kvfree(se_sess->sess_cmd_map); 478 kvfree(se_sess->sess_cmd_map);
@@ -478,7 +487,7 @@ void transport_deregister_session(struct se_session *se_sess)
478 const struct target_core_fabric_ops *se_tfo; 487 const struct target_core_fabric_ops *se_tfo;
479 struct se_node_acl *se_nacl; 488 struct se_node_acl *se_nacl;
480 unsigned long flags; 489 unsigned long flags;
481 bool comp_nacl = true, drop_nacl = false; 490 bool drop_nacl = false;
482 491
483 if (!se_tpg) { 492 if (!se_tpg) {
484 transport_free_session(se_sess); 493 transport_free_session(se_sess);
@@ -502,7 +511,6 @@ void transport_deregister_session(struct se_session *se_sess)
502 if (se_nacl && se_nacl->dynamic_node_acl) { 511 if (se_nacl && se_nacl->dynamic_node_acl) {
503 if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) { 512 if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
504 list_del(&se_nacl->acl_list); 513 list_del(&se_nacl->acl_list);
505 se_tpg->num_node_acls--;
506 drop_nacl = true; 514 drop_nacl = true;
507 } 515 }
508 } 516 }
@@ -511,18 +519,16 @@ void transport_deregister_session(struct se_session *se_sess)
511 if (drop_nacl) { 519 if (drop_nacl) {
512 core_tpg_wait_for_nacl_pr_ref(se_nacl); 520 core_tpg_wait_for_nacl_pr_ref(se_nacl);
513 core_free_device_list_for_node(se_nacl, se_tpg); 521 core_free_device_list_for_node(se_nacl, se_tpg);
522 se_sess->se_node_acl = NULL;
514 kfree(se_nacl); 523 kfree(se_nacl);
515 comp_nacl = false;
516 } 524 }
517 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", 525 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
518 se_tpg->se_tpg_tfo->get_fabric_name()); 526 se_tpg->se_tpg_tfo->get_fabric_name());
519 /* 527 /*
520 * If last kref is dropping now for an explicit NodeACL, awake sleeping 528 * If last kref is dropping now for an explicit NodeACL, awake sleeping
521 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group 529 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
522 * removal context. 530 * removal context from within transport_free_session() code.
523 */ 531 */
524 if (se_nacl && comp_nacl)
525 target_put_nacl(se_nacl);
526 532
527 transport_free_session(se_sess); 533 transport_free_session(se_sess);
528} 534}
@@ -715,7 +721,10 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
715 cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE); 721 cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
716 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 722 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
717 723
718 queue_work(target_completion_wq, &cmd->work); 724 if (cmd->cpuid == -1)
725 queue_work(target_completion_wq, &cmd->work);
726 else
727 queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
719} 728}
720EXPORT_SYMBOL(target_complete_cmd); 729EXPORT_SYMBOL(target_complete_cmd);
721 730
@@ -1309,7 +1318,7 @@ EXPORT_SYMBOL(target_setup_cmd_from_cdb);
1309 1318
1310/* 1319/*
1311 * Used by fabric module frontends to queue tasks directly. 1320 * Used by fabric module frontends to queue tasks directly.
1312 * Many only be used from process context only 1321 * May only be used from process context.
1313 */ 1322 */
1314int transport_handle_cdb_direct( 1323int transport_handle_cdb_direct(
1315 struct se_cmd *cmd) 1324 struct se_cmd *cmd)
@@ -1582,7 +1591,7 @@ static void target_complete_tmr_failure(struct work_struct *work)
1582int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, 1591int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
1583 unsigned char *sense, u64 unpacked_lun, 1592 unsigned char *sense, u64 unpacked_lun,
1584 void *fabric_tmr_ptr, unsigned char tm_type, 1593 void *fabric_tmr_ptr, unsigned char tm_type,
1585 gfp_t gfp, unsigned int tag, int flags) 1594 gfp_t gfp, u64 tag, int flags)
1586{ 1595{
1587 struct se_portal_group *se_tpg; 1596 struct se_portal_group *se_tpg;
1588 int ret; 1597 int ret;
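target_complete_cmd() above gains CPU-affine completion: a cpuid of -1 keeps
the old behaviour, anything else pins the completion work to the CPU the
fabric driver recorded at submission time. A sketch of the dispatch choice
(demo name, not from the commit):

#include <linux/workqueue.h>

static void demo_queue_completion(struct workqueue_struct *wq,
                                  struct work_struct *work, int cpuid)
{
        if (cpuid == -1)
                queue_work(wq, work);           /* any CPU */
        else
                queue_work_on(cpuid, wq, work); /* stay on the chosen CPU */
}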
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 5e6d6cb348fc..dd600e5ead71 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -152,6 +152,7 @@ static struct genl_family tcmu_genl_family = {
152 .maxattr = TCMU_ATTR_MAX, 152 .maxattr = TCMU_ATTR_MAX,
153 .mcgrps = tcmu_mcgrps, 153 .mcgrps = tcmu_mcgrps,
154 .n_mcgrps = ARRAY_SIZE(tcmu_mcgrps), 154 .n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
155 .netnsok = true,
155}; 156};
156 157
157static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) 158static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
@@ -194,7 +195,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
194 195
195static inline void tcmu_flush_dcache_range(void *vaddr, size_t size) 196static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
196{ 197{
197 unsigned long offset = (unsigned long) vaddr & ~PAGE_MASK; 198 unsigned long offset = offset_in_page(vaddr);
198 199
199 size = round_up(size+offset, PAGE_SIZE); 200 size = round_up(size+offset, PAGE_SIZE);
200 vaddr -= offset; 201 vaddr -= offset;
@@ -840,7 +841,7 @@ static int tcmu_netlink_event(enum tcmu_genl_cmd cmd, const char *name, int mino
840 841
841 genlmsg_end(skb, msg_header); 842 genlmsg_end(skb, msg_header);
842 843
843 ret = genlmsg_multicast(&tcmu_genl_family, skb, 0, 844 ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
844 TCMU_MCGRP_CONFIG, GFP_KERNEL); 845 TCMU_MCGRP_CONFIG, GFP_KERNEL);
845 846
846 /* We don't care if no one is listening */ 847 /* We don't care if no one is listening */
@@ -917,8 +918,10 @@ static int tcmu_configure_device(struct se_device *dev)
917 if (ret) 918 if (ret)
918 goto err_register; 919 goto err_register;
919 920
 921	/* The user can set hw_block_size before enabling the device */
922 if (dev->dev_attrib.hw_block_size == 0)
923 dev->dev_attrib.hw_block_size = 512;
920 /* Other attributes can be configured in userspace */ 924 /* Other attributes can be configured in userspace */
921 dev->dev_attrib.hw_block_size = 512;
922 dev->dev_attrib.hw_max_sectors = 128; 925 dev->dev_attrib.hw_max_sectors = 128;
923 dev->dev_attrib.hw_queue_depth = 128; 926 dev->dev_attrib.hw_queue_depth = 128;
924 927
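Besides marking the genl family netns-aware and switching to
genlmsg_multicast_allns(), the hunk above stops clobbering a block size the
user configured before enabling the device. A sketch of that
default-only-if-unset idiom (demo type, not from the commit):

#include <linux/types.h>

struct demo_attrib { u32 hw_block_size; };

static void demo_apply_defaults(struct demo_attrib *a)
{
        if (a->hw_block_size == 0)      /* never set from userspace */
                a->hw_block_size = 512; /* conservative fallback */
}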
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index 39909dadef3e..c30003bd4ff0 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -166,7 +166,6 @@ void ft_aborted_task(struct se_cmd *);
166 */ 166 */
167void ft_recv_req(struct ft_sess *, struct fc_frame *); 167void ft_recv_req(struct ft_sess *, struct fc_frame *);
168struct ft_tpg *ft_lport_find_tpg(struct fc_lport *); 168struct ft_tpg *ft_lport_find_tpg(struct fc_lport *);
169struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *);
170 169
171void ft_recv_write_data(struct ft_cmd *, struct fc_frame *); 170void ft_recv_write_data(struct ft_cmd *, struct fc_frame *);
172void ft_dump_cmd(struct ft_cmd *, const char *caller); 171void ft_dump_cmd(struct ft_cmd *, const char *caller);
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 85aeaa0ad303..4d375e95841b 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -171,9 +171,31 @@ static ssize_t ft_nacl_node_name_store(struct config_item *item,
171CONFIGFS_ATTR(ft_nacl_, node_name); 171CONFIGFS_ATTR(ft_nacl_, node_name);
172CONFIGFS_ATTR(ft_nacl_, port_name); 172CONFIGFS_ATTR(ft_nacl_, port_name);
173 173
174static ssize_t ft_nacl_tag_show(struct config_item *item,
175 char *page)
176{
177 return snprintf(page, PAGE_SIZE, "%s", acl_to_nacl(item)->acl_tag);
178}
179
180static ssize_t ft_nacl_tag_store(struct config_item *item,
181 const char *page, size_t count)
182{
183 struct se_node_acl *se_nacl = acl_to_nacl(item);
184 int ret;
185
186 ret = core_tpg_set_initiator_node_tag(se_nacl->se_tpg, se_nacl, page);
187
188 if (ret < 0)
189 return ret;
190 return count;
191}
192
193CONFIGFS_ATTR(ft_nacl_, tag);
194
174static struct configfs_attribute *ft_nacl_base_attrs[] = { 195static struct configfs_attribute *ft_nacl_base_attrs[] = {
175 &ft_nacl_attr_port_name, 196 &ft_nacl_attr_port_name,
176 &ft_nacl_attr_node_name, 197 &ft_nacl_attr_node_name,
198 &ft_nacl_attr_tag,
177 NULL, 199 NULL,
178}; 200};
179 201
@@ -198,31 +220,6 @@ static int ft_init_nodeacl(struct se_node_acl *nacl, const char *name)
198 return 0; 220 return 0;
199} 221}
200 222
201struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
202{
203 struct ft_node_acl *found = NULL;
204 struct ft_node_acl *acl;
205 struct se_portal_group *se_tpg = &tpg->se_tpg;
206 struct se_node_acl *se_acl;
207
208 mutex_lock(&se_tpg->acl_node_mutex);
209 list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) {
210 acl = container_of(se_acl, struct ft_node_acl, se_node_acl);
211 pr_debug("acl %p port_name %llx\n",
212 acl, (unsigned long long)acl->node_auth.port_name);
213 if (acl->node_auth.port_name == rdata->ids.port_name ||
214 acl->node_auth.node_name == rdata->ids.node_name) {
215 pr_debug("acl %p port_name %llx matched\n", acl,
216 (unsigned long long)rdata->ids.port_name);
217 found = acl;
218 /* XXX need to hold onto ACL */
219 break;
220 }
221 }
222 mutex_unlock(&se_tpg->acl_node_mutex);
223 return found;
224}
225
226/* 223/*
227 * local_port port_group (tpg) ops. 224 * local_port port_group (tpg) ops.
228 */ 225 */
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 847c1aa6fbf4..6f7c65abfe2a 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -154,9 +154,9 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
154 BUG_ON(!page); 154 BUG_ON(!page);
155 from = kmap_atomic(page + (mem_off >> PAGE_SHIFT)); 155 from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
156 page_addr = from; 156 page_addr = from;
157 from += mem_off & ~PAGE_MASK; 157 from += offset_in_page(mem_off);
158 tlen = min(tlen, (size_t)(PAGE_SIZE - 158 tlen = min(tlen, (size_t)(PAGE_SIZE -
159 (mem_off & ~PAGE_MASK))); 159 offset_in_page(mem_off)));
160 memcpy(to, from, tlen); 160 memcpy(to, from, tlen);
161 kunmap_atomic(page_addr); 161 kunmap_atomic(page_addr);
162 to += tlen; 162 to += tlen;
@@ -314,9 +314,9 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
314 314
315 to = kmap_atomic(page + (mem_off >> PAGE_SHIFT)); 315 to = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
316 page_addr = to; 316 page_addr = to;
317 to += mem_off & ~PAGE_MASK; 317 to += offset_in_page(mem_off);
318 tlen = min(tlen, (size_t)(PAGE_SIZE - 318 tlen = min(tlen, (size_t)(PAGE_SIZE -
319 (mem_off & ~PAGE_MASK))); 319 offset_in_page(mem_off)));
320 memcpy(to, from, tlen); 320 memcpy(to, from, tlen);
321 kunmap_atomic(page_addr); 321 kunmap_atomic(page_addr);
322 322
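The offset_in_page() conversions above are behaviour-preserving: the helper
expands to exactly the open-coded mask. A sketch of the equivalence (demo
name only):

#include <linux/mm.h>

static unsigned long demo_page_offset(const void *vaddr)
{
        unsigned long open_coded = (unsigned long)vaddr & ~PAGE_MASK;

        WARN_ON(open_coded != offset_in_page(vaddr));
        return offset_in_page(vaddr);
}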
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 7b934eac995d..e19f4c58c6fa 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -191,10 +191,15 @@ out:
191 * Caller holds ft_lport_lock. 191 * Caller holds ft_lport_lock.
192 */ 192 */
193static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id, 193static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
194 struct ft_node_acl *acl) 194 struct fc_rport_priv *rdata)
195{ 195{
196 struct se_portal_group *se_tpg = &tport->tpg->se_tpg;
197 struct se_node_acl *se_acl;
196 struct ft_sess *sess; 198 struct ft_sess *sess;
197 struct hlist_head *head; 199 struct hlist_head *head;
200 unsigned char initiatorname[TRANSPORT_IQN_LEN];
201
202 ft_format_wwn(&initiatorname[0], TRANSPORT_IQN_LEN, rdata->ids.port_name);
198 203
199 head = &tport->hash[ft_sess_hash(port_id)]; 204 head = &tport->hash[ft_sess_hash(port_id)];
200 hlist_for_each_entry_rcu(sess, head, hash) 205 hlist_for_each_entry_rcu(sess, head, hash)
@@ -212,7 +217,14 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
212 kfree(sess); 217 kfree(sess);
213 return NULL; 218 return NULL;
214 } 219 }
215 sess->se_sess->se_node_acl = &acl->se_node_acl; 220
221 se_acl = core_tpg_get_initiator_node_acl(se_tpg, &initiatorname[0]);
222 if (!se_acl) {
223 transport_free_session(sess->se_sess);
224 kfree(sess);
225 return NULL;
226 }
227 sess->se_sess->se_node_acl = se_acl;
216 sess->tport = tport; 228 sess->tport = tport;
217 sess->port_id = port_id; 229 sess->port_id = port_id;
218 kref_init(&sess->kref); /* ref for table entry */ 230 kref_init(&sess->kref); /* ref for table entry */
@@ -221,7 +233,7 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
221 233
222 pr_debug("port_id %x sess %p\n", port_id, sess); 234 pr_debug("port_id %x sess %p\n", port_id, sess);
223 235
224 transport_register_session(&tport->tpg->se_tpg, &acl->se_node_acl, 236 transport_register_session(&tport->tpg->se_tpg, se_acl,
225 sess->se_sess, sess); 237 sess->se_sess, sess);
226 return sess; 238 return sess;
227} 239}
@@ -260,6 +272,14 @@ static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id)
260 return NULL; 272 return NULL;
261} 273}
262 274
275static void ft_close_sess(struct ft_sess *sess)
276{
277 transport_deregister_session_configfs(sess->se_sess);
278 target_sess_cmd_list_set_waiting(sess->se_sess);
279 target_wait_for_sess_cmds(sess->se_sess);
280 ft_sess_put(sess);
281}
282
263/* 283/*
264 * Delete all sessions from tport. 284 * Delete all sessions from tport.
265 * Caller holds ft_lport_lock. 285 * Caller holds ft_lport_lock.
@@ -273,8 +293,7 @@ static void ft_sess_delete_all(struct ft_tport *tport)
273 head < &tport->hash[FT_SESS_HASH_SIZE]; head++) { 293 head < &tport->hash[FT_SESS_HASH_SIZE]; head++) {
274 hlist_for_each_entry_rcu(sess, head, hash) { 294 hlist_for_each_entry_rcu(sess, head, hash) {
275 ft_sess_unhash(sess); 295 ft_sess_unhash(sess);
276 transport_deregister_session_configfs(sess->se_sess); 296 ft_close_sess(sess); /* release from table */
277 ft_sess_put(sess); /* release from table */
278 } 297 }
279 } 298 }
280} 299}
@@ -313,8 +332,7 @@ void ft_sess_close(struct se_session *se_sess)
313 pr_debug("port_id %x\n", port_id); 332 pr_debug("port_id %x\n", port_id);
314 ft_sess_unhash(sess); 333 ft_sess_unhash(sess);
315 mutex_unlock(&ft_lport_lock); 334 mutex_unlock(&ft_lport_lock);
316 transport_deregister_session_configfs(se_sess); 335 ft_close_sess(sess);
317 ft_sess_put(sess);
318 /* XXX Send LOGO or PRLO */ 336 /* XXX Send LOGO or PRLO */
319 synchronize_rcu(); /* let transport deregister happen */ 337 synchronize_rcu(); /* let transport deregister happen */
320} 338}
@@ -343,17 +361,12 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
343{ 361{
344 struct ft_tport *tport; 362 struct ft_tport *tport;
345 struct ft_sess *sess; 363 struct ft_sess *sess;
346 struct ft_node_acl *acl;
347 u32 fcp_parm; 364 u32 fcp_parm;
348 365
349 tport = ft_tport_get(rdata->local_port); 366 tport = ft_tport_get(rdata->local_port);
350 if (!tport) 367 if (!tport)
351 goto not_target; /* not a target for this local port */ 368 goto not_target; /* not a target for this local port */
352 369
353 acl = ft_acl_get(tport->tpg, rdata);
354 if (!acl)
355 goto not_target; /* no target for this remote */
356
357 if (!rspp) 370 if (!rspp)
358 goto fill; 371 goto fill;
359 372
@@ -375,7 +388,7 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
375 spp->spp_flags |= FC_SPP_EST_IMG_PAIR; 388 spp->spp_flags |= FC_SPP_EST_IMG_PAIR;
376 if (!(fcp_parm & FCP_SPPF_INIT_FCN)) 389 if (!(fcp_parm & FCP_SPPF_INIT_FCN))
377 return FC_SPP_RESP_CONF; 390 return FC_SPP_RESP_CONF;
378 sess = ft_sess_create(tport, rdata->ids.port_id, acl); 391 sess = ft_sess_create(tport, rdata->ids.port_id, rdata);
379 if (!sess) 392 if (!sess)
380 return FC_SPP_RESP_RES; 393 return FC_SPP_RESP_RES;
381 if (!sess->params) 394 if (!sess->params)
@@ -460,8 +473,7 @@ static void ft_prlo(struct fc_rport_priv *rdata)
460 return; 473 return;
461 } 474 }
462 mutex_unlock(&ft_lport_lock); 475 mutex_unlock(&ft_lport_lock);
463 transport_deregister_session_configfs(sess->se_sess); 476 ft_close_sess(sess); /* release from table */
464 ft_sess_put(sess); /* release from table */
465 rdata->prli_count--; 477 rdata->prli_count--;
466 /* XXX TBD - clearing actions. unit attn, see 4.10 */ 478 /* XXX TBD - clearing actions. unit attn, see 4.10 */
467} 479}
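ft_close_sess() gives the three call sites above a single quiesce-then-put
ordering: detach from configfs, flag every in-flight command, wait for them
to drain, then drop the table reference. A sketch of the same shape using
the real TCM helpers and a hypothetical release function:

#include <linux/kref.h>
#include <target/target_core_fabric.h>

static void demo_close_sess(struct se_session *se_sess, struct kref *ref,
                            void (*release)(struct kref *))
{
        transport_deregister_session_configfs(se_sess); /* no new configfs refs */
        target_sess_cmd_list_set_waiting(se_sess);      /* flag in-flight cmds */
        target_wait_for_sess_cmds(se_sess);             /* block until drained */
        kref_put(ref, release);                         /* drop table reference */
}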
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index be5aab9c13f2..af5d922a8f5d 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -205,6 +205,9 @@ config USB_F_HID
205config USB_F_PRINTER 205config USB_F_PRINTER
206 tristate 206 tristate
207 207
208config USB_F_TCM
209 tristate
210
208choice 211choice
209 tristate "USB Gadget Drivers" 212 tristate "USB Gadget Drivers"
210 default USB_ETH 213 default USB_ETH
@@ -457,6 +460,20 @@ config USB_CONFIGFS_F_PRINTER
457 For more information, see Documentation/usb/gadget_printer.txt 460 For more information, see Documentation/usb/gadget_printer.txt
458 which includes sample code for accessing the device file. 461 which includes sample code for accessing the device file.
459 462
463config USB_CONFIGFS_F_TCM
464 bool "USB Gadget Target Fabric"
465 depends on TARGET_CORE
466 depends on USB_CONFIGFS
467 select USB_LIBCOMPOSITE
468 select USB_F_TCM
469 help
470	  This fabric is a USB gadget component. Two USB protocols are
471	  supported, namely BBB or BOT (Bulk Only Transport) and UAS
472	  (USB Attached SCSI). BOT is advertised on alternative
473	  interface 0 (primary) and UAS on alternative interface 1.
474	  Both protocols can work on USB 2.0 and USB 3.0.
475	  UAS utilizes the USB 3.0 streams feature.
476
460source "drivers/usb/gadget/legacy/Kconfig" 477source "drivers/usb/gadget/legacy/Kconfig"
461 478
462endchoice 479endchoice
diff --git a/drivers/usb/gadget/function/Makefile b/drivers/usb/gadget/function/Makefile
index bd7def576955..cb8c225e8549 100644
--- a/drivers/usb/gadget/function/Makefile
+++ b/drivers/usb/gadget/function/Makefile
@@ -44,3 +44,5 @@ usb_f_hid-y := f_hid.o
44obj-$(CONFIG_USB_F_HID) += usb_f_hid.o 44obj-$(CONFIG_USB_F_HID) += usb_f_hid.o
45usb_f_printer-y := f_printer.o 45usb_f_printer-y := f_printer.o
46obj-$(CONFIG_USB_F_PRINTER) += usb_f_printer.o 46obj-$(CONFIG_USB_F_PRINTER) += usb_f_printer.o
47usb_f_tcm-y := f_tcm.o
48obj-$(CONFIG_USB_F_TCM) += usb_f_tcm.o
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
new file mode 100644
index 000000000000..bad007b5a190
--- /dev/null
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -0,0 +1,2381 @@
1/* Target based USB-Gadget
2 *
3 * UAS protocol handling, target callbacks, configfs handling,
   4 * BBB (USB Mass Storage Class Bulk-Only) transport protocol handling.
5 *
6 * Author: Sebastian Andrzej Siewior <bigeasy at linutronix dot de>
7 * License: GPLv2 as published by FSF.
8 */
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/types.h>
12#include <linux/string.h>
13#include <linux/configfs.h>
14#include <linux/ctype.h>
15#include <linux/usb/ch9.h>
16#include <linux/usb/composite.h>
17#include <linux/usb/gadget.h>
18#include <linux/usb/storage.h>
19#include <scsi/scsi_tcq.h>
20#include <target/target_core_base.h>
21#include <target/target_core_fabric.h>
22#include <asm/unaligned.h>
23
24#include "tcm.h"
25#include "u_tcm.h"
26#include "configfs.h"
27
28#define TPG_INSTANCES 1
29
30struct tpg_instance {
31 struct usb_function_instance *func_inst;
32 struct usbg_tpg *tpg;
33};
34
35static struct tpg_instance tpg_instances[TPG_INSTANCES];
36
37static DEFINE_MUTEX(tpg_instances_lock);
38
39static inline struct f_uas *to_f_uas(struct usb_function *f)
40{
41 return container_of(f, struct f_uas, function);
42}
43
44static void usbg_cmd_release(struct kref *);
45
46static inline void usbg_cleanup_cmd(struct usbg_cmd *cmd)
47{
48 kref_put(&cmd->ref, usbg_cmd_release);
49}
50
51/* Start bot.c code */
52
53static int bot_enqueue_cmd_cbw(struct f_uas *fu)
54{
55 int ret;
56
57 if (fu->flags & USBG_BOT_CMD_PEND)
58 return 0;
59
60 ret = usb_ep_queue(fu->ep_out, fu->cmd.req, GFP_ATOMIC);
61 if (!ret)
62 fu->flags |= USBG_BOT_CMD_PEND;
63 return ret;
64}
65
66static void bot_status_complete(struct usb_ep *ep, struct usb_request *req)
67{
68 struct usbg_cmd *cmd = req->context;
69 struct f_uas *fu = cmd->fu;
70
71 usbg_cleanup_cmd(cmd);
72 if (req->status < 0) {
73 pr_err("ERR %s(%d)\n", __func__, __LINE__);
74 return;
75 }
76
77 /* CSW completed, wait for next CBW */
78 bot_enqueue_cmd_cbw(fu);
79}
80
81static void bot_enqueue_sense_code(struct f_uas *fu, struct usbg_cmd *cmd)
82{
83 struct bulk_cs_wrap *csw = &fu->bot_status.csw;
84 int ret;
85 unsigned int csw_stat;
86
87 csw_stat = cmd->csw_code;
88 csw->Tag = cmd->bot_tag;
89 csw->Status = csw_stat;
90 fu->bot_status.req->context = cmd;
91 ret = usb_ep_queue(fu->ep_in, fu->bot_status.req, GFP_ATOMIC);
92 if (ret)
93 pr_err("%s(%d) ERR: %d\n", __func__, __LINE__, ret);
94}
95
96static void bot_err_compl(struct usb_ep *ep, struct usb_request *req)
97{
98 struct usbg_cmd *cmd = req->context;
99 struct f_uas *fu = cmd->fu;
100
101 if (req->status < 0)
102 pr_err("ERR %s(%d)\n", __func__, __LINE__);
103
104 if (cmd->data_len) {
105 if (cmd->data_len > ep->maxpacket) {
106 req->length = ep->maxpacket;
107 cmd->data_len -= ep->maxpacket;
108 } else {
109 req->length = cmd->data_len;
110 cmd->data_len = 0;
111 }
112
113 usb_ep_queue(ep, req, GFP_ATOMIC);
114 return;
115 }
116 bot_enqueue_sense_code(fu, cmd);
117}
118
119static void bot_send_bad_status(struct usbg_cmd *cmd)
120{
121 struct f_uas *fu = cmd->fu;
122 struct bulk_cs_wrap *csw = &fu->bot_status.csw;
123 struct usb_request *req;
124 struct usb_ep *ep;
125
126 csw->Residue = cpu_to_le32(cmd->data_len);
127
128 if (cmd->data_len) {
129 if (cmd->is_read) {
130 ep = fu->ep_in;
131 req = fu->bot_req_in;
132 } else {
133 ep = fu->ep_out;
134 req = fu->bot_req_out;
135 }
136
137 if (cmd->data_len > fu->ep_in->maxpacket) {
138 req->length = ep->maxpacket;
139 cmd->data_len -= ep->maxpacket;
140 } else {
141 req->length = cmd->data_len;
142 cmd->data_len = 0;
143 }
144 req->complete = bot_err_compl;
145 req->context = cmd;
146 req->buf = fu->cmd.buf;
147 usb_ep_queue(ep, req, GFP_KERNEL);
148 } else {
149 bot_enqueue_sense_code(fu, cmd);
150 }
151}
152
153static int bot_send_status(struct usbg_cmd *cmd, bool moved_data)
154{
155 struct f_uas *fu = cmd->fu;
156 struct bulk_cs_wrap *csw = &fu->bot_status.csw;
157 int ret;
158
159 if (cmd->se_cmd.scsi_status == SAM_STAT_GOOD) {
160 if (!moved_data && cmd->data_len) {
161 /*
 162			 * The host wants to move data, we don't. Fill / empty
 163			 * the pipe and then send the CSW with the residue set.
164 */
165 cmd->csw_code = US_BULK_STAT_OK;
166 bot_send_bad_status(cmd);
167 return 0;
168 }
169
170 csw->Tag = cmd->bot_tag;
171 csw->Residue = cpu_to_le32(0);
172 csw->Status = US_BULK_STAT_OK;
173 fu->bot_status.req->context = cmd;
174
175 ret = usb_ep_queue(fu->ep_in, fu->bot_status.req, GFP_KERNEL);
176 if (ret)
177 pr_err("%s(%d) ERR: %d\n", __func__, __LINE__, ret);
178 } else {
179 cmd->csw_code = US_BULK_STAT_FAIL;
180 bot_send_bad_status(cmd);
181 }
182 return 0;
183}
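/*
 * Note: the CSW built above follows Bulk-Only Transport framing -- Tag
 * echoes the CBW tag, Residue counts the bytes the device did not move,
 * and Status is US_BULK_STAT_OK, US_BULK_STAT_FAIL or US_BULK_STAT_PHASE.
 */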
184
185/*
186 * Called after command (no data transfer) or after the write (to device)
187 * operation is completed
188 */
189static int bot_send_status_response(struct usbg_cmd *cmd)
190{
191 bool moved_data = false;
192
193 if (!cmd->is_read)
194 moved_data = true;
195 return bot_send_status(cmd, moved_data);
196}
197
198/* Read request completed, now we have to send the CSW */
199static void bot_read_compl(struct usb_ep *ep, struct usb_request *req)
200{
201 struct usbg_cmd *cmd = req->context;
202
203 if (req->status < 0)
204 pr_err("ERR %s(%d)\n", __func__, __LINE__);
205
206 bot_send_status(cmd, true);
207}
208
209static int bot_send_read_response(struct usbg_cmd *cmd)
210{
211 struct f_uas *fu = cmd->fu;
212 struct se_cmd *se_cmd = &cmd->se_cmd;
213 struct usb_gadget *gadget = fuas_to_gadget(fu);
214 int ret;
215
216 if (!cmd->data_len) {
217 cmd->csw_code = US_BULK_STAT_PHASE;
218 bot_send_bad_status(cmd);
219 return 0;
220 }
221
222 if (!gadget->sg_supported) {
223 cmd->data_buf = kmalloc(se_cmd->data_length, GFP_ATOMIC);
224 if (!cmd->data_buf)
225 return -ENOMEM;
226
227 sg_copy_to_buffer(se_cmd->t_data_sg,
228 se_cmd->t_data_nents,
229 cmd->data_buf,
230 se_cmd->data_length);
231
232 fu->bot_req_in->buf = cmd->data_buf;
233 } else {
234 fu->bot_req_in->buf = NULL;
235 fu->bot_req_in->num_sgs = se_cmd->t_data_nents;
236 fu->bot_req_in->sg = se_cmd->t_data_sg;
237 }
238
239 fu->bot_req_in->complete = bot_read_compl;
240 fu->bot_req_in->length = se_cmd->data_length;
241 fu->bot_req_in->context = cmd;
242 ret = usb_ep_queue(fu->ep_in, fu->bot_req_in, GFP_ATOMIC);
243 if (ret)
244 pr_err("%s(%d)\n", __func__, __LINE__);
245 return 0;
246}
247
248static void usbg_data_write_cmpl(struct usb_ep *, struct usb_request *);
249static int usbg_prepare_w_request(struct usbg_cmd *, struct usb_request *);
250
251static int bot_send_write_request(struct usbg_cmd *cmd)
252{
253 struct f_uas *fu = cmd->fu;
254 struct se_cmd *se_cmd = &cmd->se_cmd;
255 struct usb_gadget *gadget = fuas_to_gadget(fu);
256 int ret;
257
258 init_completion(&cmd->write_complete);
259 cmd->fu = fu;
260
261 if (!cmd->data_len) {
262 cmd->csw_code = US_BULK_STAT_PHASE;
263 return -EINVAL;
264 }
265
266 if (!gadget->sg_supported) {
267 cmd->data_buf = kmalloc(se_cmd->data_length, GFP_KERNEL);
268 if (!cmd->data_buf)
269 return -ENOMEM;
270
271 fu->bot_req_out->buf = cmd->data_buf;
272 } else {
273 fu->bot_req_out->buf = NULL;
274 fu->bot_req_out->num_sgs = se_cmd->t_data_nents;
275 fu->bot_req_out->sg = se_cmd->t_data_sg;
276 }
277
278 fu->bot_req_out->complete = usbg_data_write_cmpl;
279 fu->bot_req_out->length = se_cmd->data_length;
280 fu->bot_req_out->context = cmd;
281
282 ret = usbg_prepare_w_request(cmd, fu->bot_req_out);
283 if (ret)
284 goto cleanup;
285 ret = usb_ep_queue(fu->ep_out, fu->bot_req_out, GFP_KERNEL);
286 if (ret)
287 pr_err("%s(%d)\n", __func__, __LINE__);
288
289 wait_for_completion(&cmd->write_complete);
290 target_execute_cmd(se_cmd);
291cleanup:
292 return ret;
293}
294
295static int bot_submit_command(struct f_uas *, void *, unsigned int);
296
297static void bot_cmd_complete(struct usb_ep *ep, struct usb_request *req)
298{
299 struct f_uas *fu = req->context;
300 int ret;
301
302 fu->flags &= ~USBG_BOT_CMD_PEND;
303
304 if (req->status < 0)
305 return;
306
307 ret = bot_submit_command(fu, req->buf, req->actual);
308 if (ret)
309 pr_err("%s(%d): %d\n", __func__, __LINE__, ret);
310}
311
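/*
 * Allocate the four requests the BOT alternate setting uses (data IN,
 * data OUT, the CBW command request and the CSW status request), pre-fill
 * the constant CSW fields and queue the first CBW. The error labels below
 * unwind in reverse allocation order.
 */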
312static int bot_prepare_reqs(struct f_uas *fu)
313{
314 int ret;
315
316 fu->bot_req_in = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
317 if (!fu->bot_req_in)
318 goto err;
319
320 fu->bot_req_out = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
321 if (!fu->bot_req_out)
322 goto err_out;
323
324 fu->cmd.req = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
325 if (!fu->cmd.req)
326 goto err_cmd;
327
328 fu->bot_status.req = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
329 if (!fu->bot_status.req)
330 goto err_sts;
331
332 fu->bot_status.req->buf = &fu->bot_status.csw;
333 fu->bot_status.req->length = US_BULK_CS_WRAP_LEN;
334 fu->bot_status.req->complete = bot_status_complete;
335 fu->bot_status.csw.Signature = cpu_to_le32(US_BULK_CS_SIGN);
336
337 fu->cmd.buf = kmalloc(fu->ep_out->maxpacket, GFP_KERNEL);
338 if (!fu->cmd.buf)
339 goto err_buf;
340
341 fu->cmd.req->complete = bot_cmd_complete;
342 fu->cmd.req->buf = fu->cmd.buf;
343 fu->cmd.req->length = fu->ep_out->maxpacket;
344 fu->cmd.req->context = fu;
345
346 ret = bot_enqueue_cmd_cbw(fu);
347 if (ret)
348 goto err_queue;
349 return 0;
350err_queue:
351 kfree(fu->cmd.buf);
352 fu->cmd.buf = NULL;
353err_buf:
354 usb_ep_free_request(fu->ep_in, fu->bot_status.req);
355err_sts:
356 usb_ep_free_request(fu->ep_out, fu->cmd.req);
357 fu->cmd.req = NULL;
358err_cmd:
359 usb_ep_free_request(fu->ep_out, fu->bot_req_out);
360 fu->bot_req_out = NULL;
361err_out:
362 usb_ep_free_request(fu->ep_in, fu->bot_req_in);
363 fu->bot_req_in = NULL;
364err:
365 pr_err("BOT: endpoint setup failed\n");
366 return -ENOMEM;
367}
368
369static void bot_cleanup_old_alt(struct f_uas *fu)
370{
371 if (!(fu->flags & USBG_ENABLED))
372 return;
373
374 usb_ep_disable(fu->ep_in);
375 usb_ep_disable(fu->ep_out);
376
377 if (!fu->bot_req_in)
378 return;
379
380 usb_ep_free_request(fu->ep_in, fu->bot_req_in);
381 usb_ep_free_request(fu->ep_out, fu->bot_req_out);
382 usb_ep_free_request(fu->ep_out, fu->cmd.req);
383 usb_ep_free_request(fu->ep_out, fu->bot_status.req);
384
385 kfree(fu->cmd.buf);
386
387 fu->bot_req_in = NULL;
388 fu->bot_req_out = NULL;
389 fu->cmd.req = NULL;
390 fu->bot_status.req = NULL;
391 fu->cmd.buf = NULL;
392}
393
394static void bot_set_alt(struct f_uas *fu)
395{
396 struct usb_function *f = &fu->function;
397 struct usb_gadget *gadget = f->config->cdev->gadget;
398 int ret;
399
400 fu->flags = USBG_IS_BOT;
401
402 config_ep_by_speed(gadget, f, fu->ep_in);
403 ret = usb_ep_enable(fu->ep_in);
404 if (ret)
405 goto err_b_in;
406
407 config_ep_by_speed(gadget, f, fu->ep_out);
408 ret = usb_ep_enable(fu->ep_out);
409 if (ret)
410 goto err_b_out;
411
412 ret = bot_prepare_reqs(fu);
413 if (ret)
414 goto err_wq;
415 fu->flags |= USBG_ENABLED;
416 pr_info("Using the BOT protocol\n");
417 return;
418err_wq:
419 usb_ep_disable(fu->ep_out);
420err_b_out:
421 usb_ep_disable(fu->ep_in);
422err_b_in:
423 fu->flags = USBG_IS_BOT;
424}
425
426static int usbg_bot_setup(struct usb_function *f,
427 const struct usb_ctrlrequest *ctrl)
428{
429 struct f_uas *fu = to_f_uas(f);
430 struct usb_composite_dev *cdev = f->config->cdev;
431 u16 w_value = le16_to_cpu(ctrl->wValue);
432 u16 w_length = le16_to_cpu(ctrl->wLength);
433 int luns;
434 u8 *ret_lun;
435
436 switch (ctrl->bRequest) {
437 case US_BULK_GET_MAX_LUN:
438 if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_CLASS |
439 USB_RECIP_INTERFACE))
440 return -ENOTSUPP;
441
442 if (w_length < 1)
443 return -EINVAL;
444 if (w_value != 0)
445 return -EINVAL;
446 luns = atomic_read(&fu->tpg->tpg_port_count);
447 if (!luns) {
448 pr_err("No LUNs configured?\n");
449 return -EINVAL;
450 }
451		/*
452		 * If 4 LUNs are present, we return 3, i.e. LUNs 0..3 can be
453		 * accessed. The upper limit is 0xf.
454		 */
455 luns--;
456 if (luns > 0xf) {
457			pr_info_once("Limiting the number of LUNs to 16\n");
458 luns = 0xf;
459 }
460 ret_lun = cdev->req->buf;
461 *ret_lun = luns;
462 cdev->req->length = 1;
463 return usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
464
465 case US_BULK_RESET_REQUEST:
466 /* XXX maybe we should remove previous requests for IN + OUT */
467 bot_enqueue_cmd_cbw(fu);
468 return 0;
469 }
470 return -ENOTSUPP;
471}
472
473/* Start uas.c code */
474
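/*
 * UAS side: four bulk pipes (command, status, data IN, data OUT). Each
 * stream gets its own req_in/req_out/req_status triple so that, on
 * SuperSpeed, several tagged commands can be in flight at once.
 */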
475static void uasp_cleanup_one_stream(struct f_uas *fu, struct uas_stream *stream)
476{
477 /* We have either all three allocated or none */
478 if (!stream->req_in)
479 return;
480
481 usb_ep_free_request(fu->ep_in, stream->req_in);
482 usb_ep_free_request(fu->ep_out, stream->req_out);
483 usb_ep_free_request(fu->ep_status, stream->req_status);
484
485 stream->req_in = NULL;
486 stream->req_out = NULL;
487 stream->req_status = NULL;
488}
489
490static void uasp_free_cmdreq(struct f_uas *fu)
491{
492 usb_ep_free_request(fu->ep_cmd, fu->cmd.req);
493 kfree(fu->cmd.buf);
494 fu->cmd.req = NULL;
495 fu->cmd.buf = NULL;
496}
497
498static void uasp_cleanup_old_alt(struct f_uas *fu)
499{
500 int i;
501
502 if (!(fu->flags & USBG_ENABLED))
503 return;
504
505 usb_ep_disable(fu->ep_in);
506 usb_ep_disable(fu->ep_out);
507 usb_ep_disable(fu->ep_status);
508 usb_ep_disable(fu->ep_cmd);
509
510 for (i = 0; i < UASP_SS_EP_COMP_NUM_STREAMS; i++)
511 uasp_cleanup_one_stream(fu, &fu->stream[i]);
512 uasp_free_cmdreq(fu);
513}
514
515static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req);
516
517static int uasp_prepare_r_request(struct usbg_cmd *cmd)
518{
519 struct se_cmd *se_cmd = &cmd->se_cmd;
520 struct f_uas *fu = cmd->fu;
521 struct usb_gadget *gadget = fuas_to_gadget(fu);
522 struct uas_stream *stream = cmd->stream;
523
524 if (!gadget->sg_supported) {
525 cmd->data_buf = kmalloc(se_cmd->data_length, GFP_ATOMIC);
526 if (!cmd->data_buf)
527 return -ENOMEM;
528
529 sg_copy_to_buffer(se_cmd->t_data_sg,
530 se_cmd->t_data_nents,
531 cmd->data_buf,
532 se_cmd->data_length);
533
534 stream->req_in->buf = cmd->data_buf;
535 } else {
536 stream->req_in->buf = NULL;
537 stream->req_in->num_sgs = se_cmd->t_data_nents;
538 stream->req_in->sg = se_cmd->t_data_sg;
539 }
540
541 stream->req_in->complete = uasp_status_data_cmpl;
542 stream->req_in->length = se_cmd->data_length;
543 stream->req_in->context = cmd;
544
545 cmd->state = UASP_SEND_STATUS;
546 return 0;
547}
548
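/*
 * Build the status (sense) IU. The "+ 16" on the request length below
 * accounts for the fixed 16-byte sense IU header that precedes the sense
 * data.
 */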
549static void uasp_prepare_status(struct usbg_cmd *cmd)
550{
551 struct se_cmd *se_cmd = &cmd->se_cmd;
552 struct sense_iu *iu = &cmd->sense_iu;
553 struct uas_stream *stream = cmd->stream;
554
555 cmd->state = UASP_QUEUE_COMMAND;
556 iu->iu_id = IU_ID_STATUS;
557 iu->tag = cpu_to_be16(cmd->tag);
558
559	/*
560	 * TODO: set iu->status_qual to the SAM-4 STATUS QUALIFIER when available.
561	 */
562 iu->len = cpu_to_be16(se_cmd->scsi_sense_length);
563 iu->status = se_cmd->scsi_status;
564 stream->req_status->context = cmd;
565 stream->req_status->length = se_cmd->scsi_sense_length + 16;
566 stream->req_status->buf = iu;
567 stream->req_status->complete = uasp_status_data_cmpl;
568}
569
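/*
 * Completion handler that drives the per-command state machine:
 * UASP_SEND_DATA queues data IN, UASP_RECEIVE_DATA queues data OUT,
 * UASP_SEND_STATUS queues the status IU, and UASP_QUEUE_COMMAND frees the
 * command and re-arms the command endpoint for the next IU.
 */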
570static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req)
571{
572 struct usbg_cmd *cmd = req->context;
573 struct uas_stream *stream = cmd->stream;
574 struct f_uas *fu = cmd->fu;
575 int ret;
576
577 if (req->status < 0)
578 goto cleanup;
579
580 switch (cmd->state) {
581 case UASP_SEND_DATA:
582 ret = uasp_prepare_r_request(cmd);
583 if (ret)
584 goto cleanup;
585 ret = usb_ep_queue(fu->ep_in, stream->req_in, GFP_ATOMIC);
586 if (ret)
587 pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
588 break;
589
590 case UASP_RECEIVE_DATA:
591 ret = usbg_prepare_w_request(cmd, stream->req_out);
592 if (ret)
593 goto cleanup;
594 ret = usb_ep_queue(fu->ep_out, stream->req_out, GFP_ATOMIC);
595 if (ret)
596 pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
597 break;
598
599 case UASP_SEND_STATUS:
600 uasp_prepare_status(cmd);
601 ret = usb_ep_queue(fu->ep_status, stream->req_status,
602 GFP_ATOMIC);
603 if (ret)
604 pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
605 break;
606
607 case UASP_QUEUE_COMMAND:
608 usbg_cleanup_cmd(cmd);
609 usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
610 break;
611
612 default:
613 BUG();
614 }
615 return;
616
617cleanup:
618 usbg_cleanup_cmd(cmd);
619}
620
621static int uasp_send_status_response(struct usbg_cmd *cmd)
622{
623 struct f_uas *fu = cmd->fu;
624 struct uas_stream *stream = cmd->stream;
625 struct sense_iu *iu = &cmd->sense_iu;
626
627 iu->tag = cpu_to_be16(cmd->tag);
628 stream->req_status->complete = uasp_status_data_cmpl;
629 stream->req_status->context = cmd;
630 cmd->fu = fu;
631 uasp_prepare_status(cmd);
632 return usb_ep_queue(fu->ep_status, stream->req_status, GFP_ATOMIC);
633}
634
635static int uasp_send_read_response(struct usbg_cmd *cmd)
636{
637 struct f_uas *fu = cmd->fu;
638 struct uas_stream *stream = cmd->stream;
639 struct sense_iu *iu = &cmd->sense_iu;
640 int ret;
641
642 cmd->fu = fu;
643
644 iu->tag = cpu_to_be16(cmd->tag);
645 if (fu->flags & USBG_USE_STREAMS) {
646
647 ret = uasp_prepare_r_request(cmd);
648 if (ret)
649 goto out;
650 ret = usb_ep_queue(fu->ep_in, stream->req_in, GFP_ATOMIC);
651 if (ret) {
652 pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
653 kfree(cmd->data_buf);
654 cmd->data_buf = NULL;
655 }
656
657 } else {
658
659 iu->iu_id = IU_ID_READ_READY;
660 iu->tag = cpu_to_be16(cmd->tag);
661
662 stream->req_status->complete = uasp_status_data_cmpl;
663 stream->req_status->context = cmd;
664
665 cmd->state = UASP_SEND_DATA;
666 stream->req_status->buf = iu;
667 stream->req_status->length = sizeof(struct iu);
668
669 ret = usb_ep_queue(fu->ep_status, stream->req_status,
670 GFP_ATOMIC);
671 if (ret)
672 pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
673 }
674out:
675 return ret;
676}
677
678static int uasp_send_write_request(struct usbg_cmd *cmd)
679{
680 struct f_uas *fu = cmd->fu;
681 struct se_cmd *se_cmd = &cmd->se_cmd;
682 struct uas_stream *stream = cmd->stream;
683 struct sense_iu *iu = &cmd->sense_iu;
684 int ret;
685
686 init_completion(&cmd->write_complete);
687 cmd->fu = fu;
688
689 iu->tag = cpu_to_be16(cmd->tag);
690
691 if (fu->flags & USBG_USE_STREAMS) {
692
693 ret = usbg_prepare_w_request(cmd, stream->req_out);
694 if (ret)
695 goto cleanup;
696 ret = usb_ep_queue(fu->ep_out, stream->req_out, GFP_ATOMIC);
697 if (ret)
698 pr_err("%s(%d)\n", __func__, __LINE__);
699
700 } else {
701
702 iu->iu_id = IU_ID_WRITE_READY;
703 iu->tag = cpu_to_be16(cmd->tag);
704
705 stream->req_status->complete = uasp_status_data_cmpl;
706 stream->req_status->context = cmd;
707
708 cmd->state = UASP_RECEIVE_DATA;
709 stream->req_status->buf = iu;
710 stream->req_status->length = sizeof(struct iu);
711
712 ret = usb_ep_queue(fu->ep_status, stream->req_status,
713 GFP_ATOMIC);
714 if (ret)
715 pr_err("%s(%d)\n", __func__, __LINE__);
716 }
717
718 wait_for_completion(&cmd->write_complete);
719 target_execute_cmd(se_cmd);
720cleanup:
721 return ret;
722}
723
724static int usbg_submit_command(struct f_uas *, void *, unsigned int);
725
726static void uasp_cmd_complete(struct usb_ep *ep, struct usb_request *req)
727{
728 struct f_uas *fu = req->context;
729 int ret;
730
731 if (req->status < 0)
732 return;
733
734 ret = usbg_submit_command(fu, req->buf, req->actual);
735	/*
736	 * Once we tune for performance enqueue the command req here again so
737	 * we can receive a second command while we are processing this one.
738	 * Pay attention to properly sync the STATUS endpoint with DATA IN +
739	 * OUT so you don't break HS.
740	 */
741 if (!ret)
742 return;
743 usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
744}
745
746static int uasp_alloc_stream_res(struct f_uas *fu, struct uas_stream *stream)
747{
748 stream->req_in = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
749 if (!stream->req_in)
750 goto out;
751
752 stream->req_out = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
753 if (!stream->req_out)
754 goto err_out;
755
756 stream->req_status = usb_ep_alloc_request(fu->ep_status, GFP_KERNEL);
757 if (!stream->req_status)
758 goto err_sts;
759
760 return 0;
761err_sts:
762	usb_ep_free_request(fu->ep_out, stream->req_out);
763	stream->req_out = NULL;
764err_out:
765	usb_ep_free_request(fu->ep_in, stream->req_in);
766	stream->req_in = NULL;
767out:
768 return -ENOMEM;
769}
770
771static int uasp_alloc_cmd(struct f_uas *fu)
772{
773 fu->cmd.req = usb_ep_alloc_request(fu->ep_cmd, GFP_KERNEL);
774 if (!fu->cmd.req)
775 goto err;
776
777 fu->cmd.buf = kmalloc(fu->ep_cmd->maxpacket, GFP_KERNEL);
778 if (!fu->cmd.buf)
779 goto err_buf;
780
781 fu->cmd.req->complete = uasp_cmd_complete;
782 fu->cmd.req->buf = fu->cmd.buf;
783 fu->cmd.req->length = fu->ep_cmd->maxpacket;
784 fu->cmd.req->context = fu;
785 return 0;
786
787err_buf:
788 usb_ep_free_request(fu->ep_cmd, fu->cmd.req);
789err:
790 return -ENOMEM;
791}
792
793static void uasp_setup_stream_res(struct f_uas *fu, int max_streams)
794{
795 int i;
796
797 for (i = 0; i < max_streams; i++) {
798 struct uas_stream *s = &fu->stream[i];
799
800 s->req_in->stream_id = i + 1;
801 s->req_out->stream_id = i + 1;
802 s->req_status->stream_id = i + 1;
803 }
804}
805
806static int uasp_prepare_reqs(struct f_uas *fu)
807{
808 int ret;
809 int i;
810 int max_streams;
811
812 if (fu->flags & USBG_USE_STREAMS)
813 max_streams = UASP_SS_EP_COMP_NUM_STREAMS;
814 else
815 max_streams = 1;
816
817 for (i = 0; i < max_streams; i++) {
818 ret = uasp_alloc_stream_res(fu, &fu->stream[i]);
819 if (ret)
820 goto err_cleanup;
821 }
822
823 ret = uasp_alloc_cmd(fu);
824 if (ret)
825		goto err_cleanup;
826 uasp_setup_stream_res(fu, max_streams);
827
828 ret = usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
829 if (ret)
830 goto err_free_stream;
831
832 return 0;
833
834err_free_stream:
835 uasp_free_cmdreq(fu);
836
837err_cleanup:
838 if (i) {
839 do {
840 uasp_cleanup_one_stream(fu, &fu->stream[i - 1]);
841 i--;
842 } while (i);
843 }
844 pr_err("UASP: endpoint setup failed\n");
845 return ret;
846}
847
848static void uasp_set_alt(struct f_uas *fu)
849{
850 struct usb_function *f = &fu->function;
851 struct usb_gadget *gadget = f->config->cdev->gadget;
852 int ret;
853
854 fu->flags = USBG_IS_UAS;
855
856 if (gadget->speed == USB_SPEED_SUPER)
857 fu->flags |= USBG_USE_STREAMS;
858
859 config_ep_by_speed(gadget, f, fu->ep_in);
860 ret = usb_ep_enable(fu->ep_in);
861 if (ret)
862 goto err_b_in;
863
864 config_ep_by_speed(gadget, f, fu->ep_out);
865 ret = usb_ep_enable(fu->ep_out);
866 if (ret)
867 goto err_b_out;
868
869 config_ep_by_speed(gadget, f, fu->ep_cmd);
870 ret = usb_ep_enable(fu->ep_cmd);
871 if (ret)
872 goto err_cmd;
873 config_ep_by_speed(gadget, f, fu->ep_status);
874 ret = usb_ep_enable(fu->ep_status);
875 if (ret)
876 goto err_status;
877
878 ret = uasp_prepare_reqs(fu);
879 if (ret)
880 goto err_wq;
881 fu->flags |= USBG_ENABLED;
882
883 pr_info("Using the UAS protocol\n");
884 return;
885err_wq:
886 usb_ep_disable(fu->ep_status);
887err_status:
888 usb_ep_disable(fu->ep_cmd);
889err_cmd:
890 usb_ep_disable(fu->ep_out);
891err_b_out:
892 usb_ep_disable(fu->ep_in);
893err_b_in:
894 fu->flags = 0;
895}
896
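/*
 * The UAS command IU carries no data-direction bit, so the direction is
 * derived from the CDB opcode, e.g. READ_10 (0x28) -> DMA_FROM_DEVICE and
 * WRITE_10 (0x2a) -> DMA_TO_DEVICE. Unknown opcodes yield -EINVAL and the
 * callers answer them with a check condition.
 */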
897static int get_cmd_dir(const unsigned char *cdb)
898{
899 int ret;
900
901 switch (cdb[0]) {
902 case READ_6:
903 case READ_10:
904 case READ_12:
905 case READ_16:
906 case INQUIRY:
907 case MODE_SENSE:
908 case MODE_SENSE_10:
909 case SERVICE_ACTION_IN_16:
910 case MAINTENANCE_IN:
911 case PERSISTENT_RESERVE_IN:
912 case SECURITY_PROTOCOL_IN:
913 case ACCESS_CONTROL_IN:
914 case REPORT_LUNS:
915 case READ_BLOCK_LIMITS:
916 case READ_POSITION:
917 case READ_CAPACITY:
918 case READ_TOC:
919 case READ_FORMAT_CAPACITIES:
920 case REQUEST_SENSE:
921 ret = DMA_FROM_DEVICE;
922 break;
923
924 case WRITE_6:
925 case WRITE_10:
926 case WRITE_12:
927 case WRITE_16:
928 case MODE_SELECT:
929 case MODE_SELECT_10:
930 case WRITE_VERIFY:
931 case WRITE_VERIFY_12:
932 case PERSISTENT_RESERVE_OUT:
933 case MAINTENANCE_OUT:
934 case SECURITY_PROTOCOL_OUT:
935 case ACCESS_CONTROL_OUT:
936 ret = DMA_TO_DEVICE;
937 break;
938 case ALLOW_MEDIUM_REMOVAL:
939 case TEST_UNIT_READY:
940 case SYNCHRONIZE_CACHE:
941 case START_STOP:
942 case ERASE:
943 case REZERO_UNIT:
944 case SEEK_10:
945 case SPACE:
946 case VERIFY:
947 case WRITE_FILEMARKS:
948 ret = DMA_NONE;
949 break;
950 default:
951#define CMD_DIR_MSG "target: Unknown data direction for SCSI Opcode 0x%02x\n"
952 pr_warn(CMD_DIR_MSG, cdb[0]);
953#undef CMD_DIR_MSG
954 ret = -EINVAL;
955 }
956 return ret;
957}
958
959static void usbg_data_write_cmpl(struct usb_ep *ep, struct usb_request *req)
960{
961 struct usbg_cmd *cmd = req->context;
962 struct se_cmd *se_cmd = &cmd->se_cmd;
963
964 if (req->status < 0) {
965 pr_err("%s() state %d transfer failed\n", __func__, cmd->state);
966 goto cleanup;
967 }
968
969 if (req->num_sgs == 0) {
970 sg_copy_from_buffer(se_cmd->t_data_sg,
971 se_cmd->t_data_nents,
972 cmd->data_buf,
973 se_cmd->data_length);
974 }
975
976 complete(&cmd->write_complete);
977 return;
978
979cleanup:
980 usbg_cleanup_cmd(cmd);
981}
982
983static int usbg_prepare_w_request(struct usbg_cmd *cmd, struct usb_request *req)
984{
985 struct se_cmd *se_cmd = &cmd->se_cmd;
986 struct f_uas *fu = cmd->fu;
987 struct usb_gadget *gadget = fuas_to_gadget(fu);
988
989 if (!gadget->sg_supported) {
990 cmd->data_buf = kmalloc(se_cmd->data_length, GFP_ATOMIC);
991 if (!cmd->data_buf)
992 return -ENOMEM;
993
994 req->buf = cmd->data_buf;
995 } else {
996 req->buf = NULL;
997 req->num_sgs = se_cmd->t_data_nents;
998 req->sg = se_cmd->t_data_sg;
999 }
1000
1001 req->complete = usbg_data_write_cmpl;
1002 req->length = se_cmd->data_length;
1003 req->context = cmd;
1004 return 0;
1005}
1006
1007static int usbg_send_status_response(struct se_cmd *se_cmd)
1008{
1009 struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
1010 se_cmd);
1011 struct f_uas *fu = cmd->fu;
1012
1013 if (fu->flags & USBG_IS_BOT)
1014 return bot_send_status_response(cmd);
1015 else
1016 return uasp_send_status_response(cmd);
1017}
1018
1019static int usbg_send_write_request(struct se_cmd *se_cmd)
1020{
1021 struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
1022 se_cmd);
1023 struct f_uas *fu = cmd->fu;
1024
1025 if (fu->flags & USBG_IS_BOT)
1026 return bot_send_write_request(cmd);
1027 else
1028 return uasp_send_write_request(cmd);
1029}
1030
1031static int usbg_send_read_response(struct se_cmd *se_cmd)
1032{
1033 struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
1034 se_cmd);
1035 struct f_uas *fu = cmd->fu;
1036
1037 if (fu->flags & USBG_IS_BOT)
1038 return bot_send_read_response(cmd);
1039 else
1040 return uasp_send_read_response(cmd);
1041}
1042
1043static void usbg_cmd_work(struct work_struct *work)
1044{
1045 struct usbg_cmd *cmd = container_of(work, struct usbg_cmd, work);
1046 struct se_cmd *se_cmd;
1047 struct tcm_usbg_nexus *tv_nexus;
1048 struct usbg_tpg *tpg;
1049 int dir;
1050
1051 se_cmd = &cmd->se_cmd;
1052 tpg = cmd->fu->tpg;
1053 tv_nexus = tpg->tpg_nexus;
1054 dir = get_cmd_dir(cmd->cmd_buf);
1055 if (dir < 0) {
1056 transport_init_se_cmd(se_cmd,
1057 tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
1058 tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
1059 cmd->prio_attr, cmd->sense_iu.sense);
1060 goto out;
1061 }
1062
1063 if (target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
1064 cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun,
1065 0, cmd->prio_attr, dir, TARGET_SCF_UNKNOWN_SIZE) < 0)
1066 goto out;
1067
1068 return;
1069
1070out:
1071 transport_send_check_condition_and_sense(se_cmd,
1072 TCM_UNSUPPORTED_SCSI_OPCODE, 1);
1073 usbg_cleanup_cmd(cmd);
1074}
1075
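/*
 * Parse a UAS command IU: the CDB occupies a fixed 16 bytes plus the IU's
 * additional length masked down to a multiple of four, the big-endian tag
 * selects the stream, and the IU task attribute bits are mapped onto TCM
 * tags before the command is pushed to the workqueue.
 */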
1076static int usbg_submit_command(struct f_uas *fu,
1077 void *cmdbuf, unsigned int len)
1078{
1079 struct command_iu *cmd_iu = cmdbuf;
1080 struct usbg_cmd *cmd;
1081 struct usbg_tpg *tpg;
1082 struct tcm_usbg_nexus *tv_nexus;
1083 u32 cmd_len;
1084
1085 if (cmd_iu->iu_id != IU_ID_COMMAND) {
1086 pr_err("Unsupported type %d\n", cmd_iu->iu_id);
1087 return -EINVAL;
1088 }
1089
1090 cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
1091 if (!cmd)
1092 return -ENOMEM;
1093
1094 cmd->fu = fu;
1095
1096	/* XXX until I figure out why I can't free it on complete */
1097 kref_init(&cmd->ref);
1098 kref_get(&cmd->ref);
1099
1100 tpg = fu->tpg;
1101 cmd_len = (cmd_iu->len & ~0x3) + 16;
1102 if (cmd_len > USBG_MAX_CMD)
1103 goto err;
1104
1105 memcpy(cmd->cmd_buf, cmd_iu->cdb, cmd_len);
1106
1107 cmd->tag = be16_to_cpup(&cmd_iu->tag);
1108 cmd->se_cmd.tag = cmd->tag;
1109 if (fu->flags & USBG_USE_STREAMS) {
1110 if (cmd->tag > UASP_SS_EP_COMP_NUM_STREAMS)
1111 goto err;
1112 if (!cmd->tag)
1113 cmd->stream = &fu->stream[0];
1114 else
1115 cmd->stream = &fu->stream[cmd->tag - 1];
1116 } else {
1117 cmd->stream = &fu->stream[0];
1118 }
1119
1120 tv_nexus = tpg->tpg_nexus;
1121 if (!tv_nexus) {
1122 pr_err("Missing nexus, ignoring command\n");
1123 goto err;
1124 }
1125
1126 switch (cmd_iu->prio_attr & 0x7) {
1127 case UAS_HEAD_TAG:
1128 cmd->prio_attr = TCM_HEAD_TAG;
1129 break;
1130 case UAS_ORDERED_TAG:
1131 cmd->prio_attr = TCM_ORDERED_TAG;
1132 break;
1133 case UAS_ACA:
1134 cmd->prio_attr = TCM_ACA_TAG;
1135 break;
1136 default:
1137 pr_debug_once("Unsupported prio_attr: %02x.\n",
1138 cmd_iu->prio_attr);
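		/* fall through */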
1139 case UAS_SIMPLE_TAG:
1140 cmd->prio_attr = TCM_SIMPLE_TAG;
1141 break;
1142 }
1143
1144 cmd->unpacked_lun = scsilun_to_int(&cmd_iu->lun);
1145
1146 INIT_WORK(&cmd->work, usbg_cmd_work);
1147 queue_work(tpg->workqueue, &cmd->work);
1148
1149 return 0;
1150err:
1151 kfree(cmd);
1152 return -EINVAL;
1153}
1154
1155static void bot_cmd_work(struct work_struct *work)
1156{
1157 struct usbg_cmd *cmd = container_of(work, struct usbg_cmd, work);
1158 struct se_cmd *se_cmd;
1159 struct tcm_usbg_nexus *tv_nexus;
1160 struct usbg_tpg *tpg;
1161 int dir;
1162
1163 se_cmd = &cmd->se_cmd;
1164 tpg = cmd->fu->tpg;
1165 tv_nexus = tpg->tpg_nexus;
1166 dir = get_cmd_dir(cmd->cmd_buf);
1167 if (dir < 0) {
1168 transport_init_se_cmd(se_cmd,
1169 tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
1170 tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
1171 cmd->prio_attr, cmd->sense_iu.sense);
1172 goto out;
1173 }
1174
1175 if (target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
1176 cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun,
1177 cmd->data_len, cmd->prio_attr, dir, 0) < 0)
1178 goto out;
1179
1180 return;
1181
1182out:
1183 transport_send_check_condition_and_sense(se_cmd,
1184 TCM_UNSUPPORTED_SCSI_OPCODE, 1);
1185 usbg_cleanup_cmd(cmd);
1186}
1187
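/*
 * Validate and queue a BOT CBW: the wrapper must be exactly 31 bytes and
 * carry the right signature, and the CDB length must be 1..16. Unlike
 * UAS, direction and transfer length come straight from the CBW itself.
 */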
1188static int bot_submit_command(struct f_uas *fu,
1189 void *cmdbuf, unsigned int len)
1190{
1191 struct bulk_cb_wrap *cbw = cmdbuf;
1192 struct usbg_cmd *cmd;
1193 struct usbg_tpg *tpg;
1194 struct tcm_usbg_nexus *tv_nexus;
1195 u32 cmd_len;
1196
1197 if (cbw->Signature != cpu_to_le32(US_BULK_CB_SIGN)) {
1198 pr_err("Wrong signature on CBW\n");
1199 return -EINVAL;
1200 }
1201	if (len != US_BULK_CB_WRAP_LEN) {
1202 pr_err("Wrong length for CBW\n");
1203 return -EINVAL;
1204 }
1205
1206 cmd_len = cbw->Length;
1207 if (cmd_len < 1 || cmd_len > 16)
1208 return -EINVAL;
1209
1210 cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
1211 if (!cmd)
1212 return -ENOMEM;
1213
1214 cmd->fu = fu;
1215
1216	/* XXX until I figure out why I can't free it on complete */
1217 kref_init(&cmd->ref);
1218 kref_get(&cmd->ref);
1219
1220 tpg = fu->tpg;
1221
1222 memcpy(cmd->cmd_buf, cbw->CDB, cmd_len);
1223
1224 cmd->bot_tag = cbw->Tag;
1225
1226 tv_nexus = tpg->tpg_nexus;
1227 if (!tv_nexus) {
1228 pr_err("Missing nexus, ignoring command\n");
1229 goto err;
1230 }
1231
1232 cmd->prio_attr = TCM_SIMPLE_TAG;
1233 cmd->unpacked_lun = cbw->Lun;
1234 cmd->is_read = cbw->Flags & US_BULK_FLAG_IN ? 1 : 0;
1235 cmd->data_len = le32_to_cpu(cbw->DataTransferLength);
1236 cmd->se_cmd.tag = le32_to_cpu(cmd->bot_tag);
1237
1238 INIT_WORK(&cmd->work, bot_cmd_work);
1239 queue_work(tpg->workqueue, &cmd->work);
1240
1241 return 0;
1242err:
1243 kfree(cmd);
1244 return -EINVAL;
1245}
1246
1247/* Start fabric.c code */
1248
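/*
 * TCM fabric glue; most callbacks are trivial. Commands are refcounted
 * via cmd->ref: check_stop_free() drops the reference taken at submit
 * time, so a command is released only once both TCM and the UDC side are
 * done with it.
 */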
1249static int usbg_check_true(struct se_portal_group *se_tpg)
1250{
1251 return 1;
1252}
1253
1254static int usbg_check_false(struct se_portal_group *se_tpg)
1255{
1256 return 0;
1257}
1258
1259static char *usbg_get_fabric_name(void)
1260{
1261 return "usb_gadget";
1262}
1263
1264static char *usbg_get_fabric_wwn(struct se_portal_group *se_tpg)
1265{
1266 struct usbg_tpg *tpg = container_of(se_tpg,
1267 struct usbg_tpg, se_tpg);
1268 struct usbg_tport *tport = tpg->tport;
1269
1270 return &tport->tport_name[0];
1271}
1272
1273static u16 usbg_get_tag(struct se_portal_group *se_tpg)
1274{
1275 struct usbg_tpg *tpg = container_of(se_tpg,
1276 struct usbg_tpg, se_tpg);
1277 return tpg->tport_tpgt;
1278}
1279
1280static u32 usbg_tpg_get_inst_index(struct se_portal_group *se_tpg)
1281{
1282 return 1;
1283}
1284
1285static void usbg_cmd_release(struct kref *ref)
1286{
1287 struct usbg_cmd *cmd = container_of(ref, struct usbg_cmd,
1288 ref);
1289
1290 transport_generic_free_cmd(&cmd->se_cmd, 0);
1291}
1292
1293static void usbg_release_cmd(struct se_cmd *se_cmd)
1294{
1295 struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
1296 se_cmd);
1297 kfree(cmd->data_buf);
1298 kfree(cmd);
1299}
1300
1301static int usbg_shutdown_session(struct se_session *se_sess)
1302{
1303 return 0;
1304}
1305
1306static void usbg_close_session(struct se_session *se_sess)
1307{
1308}
1309
1310static u32 usbg_sess_get_index(struct se_session *se_sess)
1311{
1312 return 0;
1313}
1314
1315/*
1316 * XXX Error recovery: return != 0 if we expect writes; unclear when that could be.
1317 */
1318static int usbg_write_pending_status(struct se_cmd *se_cmd)
1319{
1320 return 0;
1321}
1322
1323static void usbg_set_default_node_attrs(struct se_node_acl *nacl)
1324{
1325}
1326
1327static int usbg_get_cmd_state(struct se_cmd *se_cmd)
1328{
1329 return 0;
1330}
1331
1332static void usbg_queue_tm_rsp(struct se_cmd *se_cmd)
1333{
1334}
1335
1336static void usbg_aborted_task(struct se_cmd *se_cmd)
1337{
1338}
1339
1340static const char *usbg_check_wwn(const char *name)
1341{
1342 const char *n;
1343 unsigned int len;
1344
1345 n = strstr(name, "naa.");
1346 if (!n)
1347 return NULL;
1348 n += 4;
1349 len = strlen(n);
1350 if (len == 0 || len > USBG_NAMELEN - 1)
1351 return NULL;
1352 return n;
1353}
1354
1355static int usbg_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
1356{
1357 if (!usbg_check_wwn(name))
1358 return -EINVAL;
1359 return 0;
1360}
1361
1362static struct se_portal_group *usbg_make_tpg(
1363 struct se_wwn *wwn,
1364 struct config_group *group,
1365 const char *name)
1366{
1367 struct usbg_tport *tport = container_of(wwn, struct usbg_tport,
1368 tport_wwn);
1369 struct usbg_tpg *tpg;
1370 unsigned long tpgt;
1371 int ret;
1372 struct f_tcm_opts *opts;
1373 unsigned i;
1374
1375 if (strstr(name, "tpgt_") != name)
1376 return ERR_PTR(-EINVAL);
1377 if (kstrtoul(name + 5, 0, &tpgt) || tpgt > UINT_MAX)
1378 return ERR_PTR(-EINVAL);
1379 ret = -ENODEV;
1380 mutex_lock(&tpg_instances_lock);
1381 for (i = 0; i < TPG_INSTANCES; ++i)
1382 if (tpg_instances[i].func_inst && !tpg_instances[i].tpg)
1383 break;
1384 if (i == TPG_INSTANCES)
1385 goto unlock_inst;
1386
1387 opts = container_of(tpg_instances[i].func_inst, struct f_tcm_opts,
1388 func_inst);
1389 mutex_lock(&opts->dep_lock);
1390 if (!opts->ready)
1391 goto unlock_dep;
1392
1393 if (opts->has_dep) {
1394 if (!try_module_get(opts->dependent))
1395 goto unlock_dep;
1396 } else {
1397 ret = configfs_depend_item_unlocked(
1398 group->cg_subsys,
1399 &opts->func_inst.group.cg_item);
1400 if (ret)
1401 goto unlock_dep;
1402 }
1403
1404 tpg = kzalloc(sizeof(struct usbg_tpg), GFP_KERNEL);
1405 ret = -ENOMEM;
1406 if (!tpg)
1407 goto unref_dep;
1408 mutex_init(&tpg->tpg_mutex);
1409 atomic_set(&tpg->tpg_port_count, 0);
1410 tpg->workqueue = alloc_workqueue("tcm_usb_gadget", 0, 1);
1411 if (!tpg->workqueue)
1412 goto free_tpg;
1413
1414 tpg->tport = tport;
1415 tpg->tport_tpgt = tpgt;
1416
1417	/*
1418	 * SPC doesn't assign a protocol identifier for USB-SCSI, so we
1419	 * pretend to be SAS.
1420	 */
1421 ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SAS);
1422 if (ret < 0)
1423 goto free_workqueue;
1424
1425 tpg_instances[i].tpg = tpg;
1426 tpg->fi = tpg_instances[i].func_inst;
1427 mutex_unlock(&opts->dep_lock);
1428 mutex_unlock(&tpg_instances_lock);
1429 return &tpg->se_tpg;
1430
1431free_workqueue:
1432 destroy_workqueue(tpg->workqueue);
1433free_tpg:
1434 kfree(tpg);
1435unref_dep:
1436 if (opts->has_dep)
1437 module_put(opts->dependent);
1438 else
1439 configfs_undepend_item_unlocked(&opts->func_inst.group.cg_item);
1440unlock_dep:
1441 mutex_unlock(&opts->dep_lock);
1442unlock_inst:
1443 mutex_unlock(&tpg_instances_lock);
1444
1445 return ERR_PTR(ret);
1446}
1447
1448static int tcm_usbg_drop_nexus(struct usbg_tpg *);
1449
1450static void usbg_drop_tpg(struct se_portal_group *se_tpg)
1451{
1452 struct usbg_tpg *tpg = container_of(se_tpg,
1453 struct usbg_tpg, se_tpg);
1454 unsigned i;
1455 struct f_tcm_opts *opts;
1456
1457 tcm_usbg_drop_nexus(tpg);
1458 core_tpg_deregister(se_tpg);
1459 destroy_workqueue(tpg->workqueue);
1460
1461 mutex_lock(&tpg_instances_lock);
1462 for (i = 0; i < TPG_INSTANCES; ++i)
1463 if (tpg_instances[i].tpg == tpg)
1464 break;
1465 if (i < TPG_INSTANCES)
1466 tpg_instances[i].tpg = NULL;
1467 opts = container_of(tpg_instances[i].func_inst,
1468 struct f_tcm_opts, func_inst);
1469 mutex_lock(&opts->dep_lock);
1470 if (opts->has_dep)
1471 module_put(opts->dependent);
1472 else
1473 configfs_undepend_item_unlocked(&opts->func_inst.group.cg_item);
1474 mutex_unlock(&opts->dep_lock);
1475 mutex_unlock(&tpg_instances_lock);
1476
1477 kfree(tpg);
1478}
1479
1480static struct se_wwn *usbg_make_tport(
1481 struct target_fabric_configfs *tf,
1482 struct config_group *group,
1483 const char *name)
1484{
1485 struct usbg_tport *tport;
1486	const char *wwn_name;
1487	u64 wwpn = 0;
1488
1489	wwn_name = usbg_check_wwn(name);
1490	if (!wwn_name)
1491		return ERR_PTR(-EINVAL);
1492
1493	tport = kzalloc(sizeof(struct usbg_tport), GFP_KERNEL);
1494	if (!tport)
1495		return ERR_PTR(-ENOMEM);
1496
1497	tport->tport_wwpn = wwpn;
1498	snprintf(tport->tport_name, sizeof(tport->tport_name), "%s", wwn_name);
1499 return &tport->tport_wwn;
1500}
1501
1502static void usbg_drop_tport(struct se_wwn *wwn)
1503{
1504 struct usbg_tport *tport = container_of(wwn,
1505 struct usbg_tport, tport_wwn);
1506 kfree(tport);
1507}
1508
1509/*
1510 * If somebody feels like dropping the version property, go ahead.
1511 */
1512static ssize_t usbg_wwn_version_show(struct config_item *item, char *page)
1513{
1514 return sprintf(page, "usb-gadget fabric module\n");
1515}
1516
1517CONFIGFS_ATTR_RO(usbg_wwn_, version);
1518
1519static struct configfs_attribute *usbg_wwn_attrs[] = {
1520 &usbg_wwn_attr_version,
1521 NULL,
1522};
1523
1524static ssize_t tcm_usbg_tpg_enable_show(struct config_item *item, char *page)
1525{
1526 struct se_portal_group *se_tpg = to_tpg(item);
1527 struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
1528
1529 return snprintf(page, PAGE_SIZE, "%u\n", tpg->gadget_connect);
1530}
1531
1532static int usbg_attach(struct usbg_tpg *);
1533static void usbg_detach(struct usbg_tpg *);
1534
1535static ssize_t tcm_usbg_tpg_enable_store(struct config_item *item,
1536 const char *page, size_t count)
1537{
1538 struct se_portal_group *se_tpg = to_tpg(item);
1539 struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
1540 bool op;
1541 ssize_t ret;
1542
1543 ret = strtobool(page, &op);
1544 if (ret)
1545 return ret;
1546
1547 if ((op && tpg->gadget_connect) || (!op && !tpg->gadget_connect))
1548 return -EINVAL;
1549
1550 if (op)
1551 ret = usbg_attach(tpg);
1552 else
1553 usbg_detach(tpg);
1554 if (ret)
1555 return ret;
1556
1557 tpg->gadget_connect = op;
1558
1559 return count;
1560}
1561
1562static ssize_t tcm_usbg_tpg_nexus_show(struct config_item *item, char *page)
1563{
1564 struct se_portal_group *se_tpg = to_tpg(item);
1565 struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
1566 struct tcm_usbg_nexus *tv_nexus;
1567 ssize_t ret;
1568
1569 mutex_lock(&tpg->tpg_mutex);
1570 tv_nexus = tpg->tpg_nexus;
1571 if (!tv_nexus) {
1572 ret = -ENODEV;
1573 goto out;
1574 }
1575 ret = snprintf(page, PAGE_SIZE, "%s\n",
1576 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1577out:
1578 mutex_unlock(&tpg->tpg_mutex);
1579 return ret;
1580}
1581
1582static int tcm_usbg_make_nexus(struct usbg_tpg *tpg, char *name)
1583{
1584 struct se_portal_group *se_tpg;
1585 struct tcm_usbg_nexus *tv_nexus;
1586 int ret;
1587
1588 mutex_lock(&tpg->tpg_mutex);
1589 if (tpg->tpg_nexus) {
1590 ret = -EEXIST;
1591 pr_debug("tpg->tpg_nexus already exists\n");
1592 goto err_unlock;
1593 }
1594 se_tpg = &tpg->se_tpg;
1595
1596 ret = -ENOMEM;
1597 tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
1598 if (!tv_nexus)
1599 goto err_unlock;
1600 tv_nexus->tvn_se_sess = transport_init_session(TARGET_PROT_NORMAL);
1601 if (IS_ERR(tv_nexus->tvn_se_sess))
1602 goto err_free;
1603
1604	/*
1605	 * Since we are running in 'demo mode' this call will generate a
1606	 * struct se_node_acl for the usb-gadget struct se_portal_group with
1607	 * the SCSI Initiator port name of the passed configfs group 'name'.
1608	 */
1609 tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
1610 se_tpg, name);
1611 if (!tv_nexus->tvn_se_sess->se_node_acl) {
1612#define MAKE_NEXUS_MSG "core_tpg_check_initiator_node_acl() failed for %s\n"
1613 pr_debug(MAKE_NEXUS_MSG, name);
1614#undef MAKE_NEXUS_MSG
1615 goto err_session;
1616 }
1617	/*
1618	 * Now register the TCM usb-gadget virtual I_T Nexus as active.
1619	 */
1620 transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1621 tv_nexus->tvn_se_sess, tv_nexus);
1622 tpg->tpg_nexus = tv_nexus;
1623 mutex_unlock(&tpg->tpg_mutex);
1624 return 0;
1625
1626err_session:
1627 transport_free_session(tv_nexus->tvn_se_sess);
1628err_free:
1629 kfree(tv_nexus);
1630err_unlock:
1631 mutex_unlock(&tpg->tpg_mutex);
1632 return ret;
1633}
1634
1635static int tcm_usbg_drop_nexus(struct usbg_tpg *tpg)
1636{
1637 struct se_session *se_sess;
1638 struct tcm_usbg_nexus *tv_nexus;
1639 int ret = -ENODEV;
1640
1641 mutex_lock(&tpg->tpg_mutex);
1642 tv_nexus = tpg->tpg_nexus;
1643 if (!tv_nexus)
1644 goto out;
1645
1646 se_sess = tv_nexus->tvn_se_sess;
1647 if (!se_sess)
1648 goto out;
1649
1650 if (atomic_read(&tpg->tpg_port_count)) {
1651 ret = -EPERM;
1652#define MSG "Unable to remove Host I_T Nexus with active TPG port count: %d\n"
1653 pr_err(MSG, atomic_read(&tpg->tpg_port_count));
1654#undef MSG
1655 goto out;
1656 }
1657
1658 pr_debug("Removing I_T Nexus to Initiator Port: %s\n",
1659 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1660	/*
1661	 * Release the SCSI I_T Nexus to the emulated USB Target Port.
1662	 */
1663 transport_deregister_session(tv_nexus->tvn_se_sess);
1664 tpg->tpg_nexus = NULL;
1665
1666 kfree(tv_nexus);
1667 ret = 0;
1668out:
1669 mutex_unlock(&tpg->tpg_mutex);
1670 return ret;
1671}
1672
1673static ssize_t tcm_usbg_tpg_nexus_store(struct config_item *item,
1674 const char *page, size_t count)
1675{
1676 struct se_portal_group *se_tpg = to_tpg(item);
1677 struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
1678 unsigned char i_port[USBG_NAMELEN], *ptr;
1679 int ret;
1680
1681 if (!strncmp(page, "NULL", 4)) {
1682 ret = tcm_usbg_drop_nexus(tpg);
1683 return (!ret) ? count : ret;
1684 }
1685 if (strlen(page) >= USBG_NAMELEN) {
1686
1687#define NEXUS_STORE_MSG "Emulated NAA SAS Address: %s, exceeds max: %d\n"
1688 pr_err(NEXUS_STORE_MSG, page, USBG_NAMELEN);
1689#undef NEXUS_STORE_MSG
1690 return -EINVAL;
1691 }
1692 snprintf(i_port, USBG_NAMELEN, "%s", page);
1693
1694 ptr = strstr(i_port, "naa.");
1695 if (!ptr) {
1696 pr_err("Missing 'naa.' prefix\n");
1697 return -EINVAL;
1698 }
1699
1700 if (i_port[strlen(i_port) - 1] == '\n')
1701 i_port[strlen(i_port) - 1] = '\0';
1702
1703 ret = tcm_usbg_make_nexus(tpg, &i_port[0]);
1704 if (ret < 0)
1705 return ret;
1706 return count;
1707}
1708
1709CONFIGFS_ATTR(tcm_usbg_tpg_, enable);
1710CONFIGFS_ATTR(tcm_usbg_tpg_, nexus);
1711
1712static struct configfs_attribute *usbg_base_attrs[] = {
1713 &tcm_usbg_tpg_attr_enable,
1714 &tcm_usbg_tpg_attr_nexus,
1715 NULL,
1716};
1717
1718static int usbg_port_link(struct se_portal_group *se_tpg, struct se_lun *lun)
1719{
1720 struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
1721
1722 atomic_inc(&tpg->tpg_port_count);
1723 smp_mb__after_atomic();
1724 return 0;
1725}
1726
1727static void usbg_port_unlink(struct se_portal_group *se_tpg,
1728 struct se_lun *se_lun)
1729{
1730 struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
1731
1732 atomic_dec(&tpg->tpg_port_count);
1733 smp_mb__after_atomic();
1734}
1735
1736static int usbg_check_stop_free(struct se_cmd *se_cmd)
1737{
1738 struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
1739 se_cmd);
1740
1741 kref_put(&cmd->ref, usbg_cmd_release);
1742 return 1;
1743}
1744
1745static const struct target_core_fabric_ops usbg_ops = {
1746 .module = THIS_MODULE,
1747 .name = "usb_gadget",
1748 .get_fabric_name = usbg_get_fabric_name,
1749 .tpg_get_wwn = usbg_get_fabric_wwn,
1750 .tpg_get_tag = usbg_get_tag,
1751 .tpg_check_demo_mode = usbg_check_true,
1752 .tpg_check_demo_mode_cache = usbg_check_false,
1753 .tpg_check_demo_mode_write_protect = usbg_check_false,
1754 .tpg_check_prod_mode_write_protect = usbg_check_false,
1755 .tpg_get_inst_index = usbg_tpg_get_inst_index,
1756 .release_cmd = usbg_release_cmd,
1757 .shutdown_session = usbg_shutdown_session,
1758 .close_session = usbg_close_session,
1759 .sess_get_index = usbg_sess_get_index,
1760 .sess_get_initiator_sid = NULL,
1761 .write_pending = usbg_send_write_request,
1762 .write_pending_status = usbg_write_pending_status,
1763 .set_default_node_attributes = usbg_set_default_node_attrs,
1764 .get_cmd_state = usbg_get_cmd_state,
1765 .queue_data_in = usbg_send_read_response,
1766 .queue_status = usbg_send_status_response,
1767 .queue_tm_rsp = usbg_queue_tm_rsp,
1768 .aborted_task = usbg_aborted_task,
1769 .check_stop_free = usbg_check_stop_free,
1770
1771 .fabric_make_wwn = usbg_make_tport,
1772 .fabric_drop_wwn = usbg_drop_tport,
1773 .fabric_make_tpg = usbg_make_tpg,
1774 .fabric_drop_tpg = usbg_drop_tpg,
1775 .fabric_post_link = usbg_port_link,
1776 .fabric_pre_unlink = usbg_port_unlink,
1777 .fabric_init_nodeacl = usbg_init_nodeacl,
1778
1779 .tfc_wwn_attrs = usbg_wwn_attrs,
1780 .tfc_tpg_base_attrs = usbg_base_attrs,
1781};
1782
1783/* Start gadget.c code */
1784
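/*
 * Descriptors for the two alternate settings: alt 0 is BOT with a bulk
 * IN/OUT pair, alt 1 is UAS with data IN/OUT plus command and status
 * pipes, each tagged by a pipe usage descriptor. The full-, high- and
 * SuperSpeed variants share endpoint addresses (set up in tcm_bind()).
 */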
1785static struct usb_interface_descriptor bot_intf_desc = {
1786 .bLength = sizeof(bot_intf_desc),
1787 .bDescriptorType = USB_DT_INTERFACE,
1788 .bNumEndpoints = 2,
1789 .bAlternateSetting = USB_G_ALT_INT_BBB,
1790 .bInterfaceClass = USB_CLASS_MASS_STORAGE,
1791 .bInterfaceSubClass = USB_SC_SCSI,
1792 .bInterfaceProtocol = USB_PR_BULK,
1793};
1794
1795static struct usb_interface_descriptor uasp_intf_desc = {
1796 .bLength = sizeof(uasp_intf_desc),
1797 .bDescriptorType = USB_DT_INTERFACE,
1798 .bNumEndpoints = 4,
1799 .bAlternateSetting = USB_G_ALT_INT_UAS,
1800 .bInterfaceClass = USB_CLASS_MASS_STORAGE,
1801 .bInterfaceSubClass = USB_SC_SCSI,
1802 .bInterfaceProtocol = USB_PR_UAS,
1803};
1804
1805static struct usb_endpoint_descriptor uasp_bi_desc = {
1806 .bLength = USB_DT_ENDPOINT_SIZE,
1807 .bDescriptorType = USB_DT_ENDPOINT,
1808 .bEndpointAddress = USB_DIR_IN,
1809 .bmAttributes = USB_ENDPOINT_XFER_BULK,
1810 .wMaxPacketSize = cpu_to_le16(512),
1811};
1812
1813static struct usb_endpoint_descriptor uasp_fs_bi_desc = {
1814 .bLength = USB_DT_ENDPOINT_SIZE,
1815 .bDescriptorType = USB_DT_ENDPOINT,
1816 .bEndpointAddress = USB_DIR_IN,
1817 .bmAttributes = USB_ENDPOINT_XFER_BULK,
1818};
1819
1820static struct usb_pipe_usage_descriptor uasp_bi_pipe_desc = {
1821 .bLength = sizeof(uasp_bi_pipe_desc),
1822 .bDescriptorType = USB_DT_PIPE_USAGE,
1823 .bPipeID = DATA_IN_PIPE_ID,
1824};
1825
1826static struct usb_endpoint_descriptor uasp_ss_bi_desc = {
1827 .bLength = USB_DT_ENDPOINT_SIZE,
1828 .bDescriptorType = USB_DT_ENDPOINT,
1829 .bEndpointAddress = USB_DIR_IN,
1830 .bmAttributes = USB_ENDPOINT_XFER_BULK,
1831 .wMaxPacketSize = cpu_to_le16(1024),
1832};
1833
1834static struct usb_ss_ep_comp_descriptor uasp_bi_ep_comp_desc = {
1835 .bLength = sizeof(uasp_bi_ep_comp_desc),
1836 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
1837 .bMaxBurst = 0,
1838 .bmAttributes = UASP_SS_EP_COMP_LOG_STREAMS,
1839 .wBytesPerInterval = 0,
1840};
1841
1842static struct usb_ss_ep_comp_descriptor bot_bi_ep_comp_desc = {
1843 .bLength = sizeof(bot_bi_ep_comp_desc),
1844 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
1845 .bMaxBurst = 0,
1846};
1847
1848static struct usb_endpoint_descriptor uasp_bo_desc = {
1849 .bLength = USB_DT_ENDPOINT_SIZE,
1850 .bDescriptorType = USB_DT_ENDPOINT,
1851 .bEndpointAddress = USB_DIR_OUT,
1852 .bmAttributes = USB_ENDPOINT_XFER_BULK,
1853 .wMaxPacketSize = cpu_to_le16(512),
1854};
1855
1856static struct usb_endpoint_descriptor uasp_fs_bo_desc = {
1857 .bLength = USB_DT_ENDPOINT_SIZE,
1858 .bDescriptorType = USB_DT_ENDPOINT,
1859 .bEndpointAddress = USB_DIR_OUT,
1860 .bmAttributes = USB_ENDPOINT_XFER_BULK,
1861};
1862
1863static struct usb_pipe_usage_descriptor uasp_bo_pipe_desc = {
1864 .bLength = sizeof(uasp_bo_pipe_desc),
1865 .bDescriptorType = USB_DT_PIPE_USAGE,
1866 .bPipeID = DATA_OUT_PIPE_ID,
1867};
1868
1869static struct usb_endpoint_descriptor uasp_ss_bo_desc = {
1870 .bLength = USB_DT_ENDPOINT_SIZE,
1871 .bDescriptorType = USB_DT_ENDPOINT,
1872 .bEndpointAddress = USB_DIR_OUT,
1873 .bmAttributes = USB_ENDPOINT_XFER_BULK,
1874	.wMaxPacketSize = cpu_to_le16(1024),
1875};
1876
1877static struct usb_ss_ep_comp_descriptor uasp_bo_ep_comp_desc = {
1878 .bLength = sizeof(uasp_bo_ep_comp_desc),
1879 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
1880 .bmAttributes = UASP_SS_EP_COMP_LOG_STREAMS,
1881};
1882
1883static struct usb_ss_ep_comp_descriptor bot_bo_ep_comp_desc = {
1884 .bLength = sizeof(bot_bo_ep_comp_desc),
1885 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
1886};
1887
1888static struct usb_endpoint_descriptor uasp_status_desc = {
1889 .bLength = USB_DT_ENDPOINT_SIZE,
1890 .bDescriptorType = USB_DT_ENDPOINT,
1891 .bEndpointAddress = USB_DIR_IN,
1892 .bmAttributes = USB_ENDPOINT_XFER_BULK,
1893 .wMaxPacketSize = cpu_to_le16(512),
1894};
1895
1896static struct usb_endpoint_descriptor uasp_fs_status_desc = {
1897 .bLength = USB_DT_ENDPOINT_SIZE,
1898 .bDescriptorType = USB_DT_ENDPOINT,
1899 .bEndpointAddress = USB_DIR_IN,
1900 .bmAttributes = USB_ENDPOINT_XFER_BULK,
1901};
1902
1903static struct usb_pipe_usage_descriptor uasp_status_pipe_desc = {
1904 .bLength = sizeof(uasp_status_pipe_desc),
1905 .bDescriptorType = USB_DT_PIPE_USAGE,
1906 .bPipeID = STATUS_PIPE_ID,
1907};
1908
1909static struct usb_endpoint_descriptor uasp_ss_status_desc = {
1910 .bLength = USB_DT_ENDPOINT_SIZE,
1911 .bDescriptorType = USB_DT_ENDPOINT,
1912 .bEndpointAddress = USB_DIR_IN,
1913 .bmAttributes = USB_ENDPOINT_XFER_BULK,
1914 .wMaxPacketSize = cpu_to_le16(1024),
1915};
1916
1917static struct usb_ss_ep_comp_descriptor uasp_status_in_ep_comp_desc = {
1918 .bLength = sizeof(uasp_status_in_ep_comp_desc),
1919 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
1920 .bmAttributes = UASP_SS_EP_COMP_LOG_STREAMS,
1921};
1922
1923static struct usb_endpoint_descriptor uasp_cmd_desc = {
1924 .bLength = USB_DT_ENDPOINT_SIZE,
1925 .bDescriptorType = USB_DT_ENDPOINT,
1926 .bEndpointAddress = USB_DIR_OUT,
1927 .bmAttributes = USB_ENDPOINT_XFER_BULK,
1928 .wMaxPacketSize = cpu_to_le16(512),
1929};
1930
1931static struct usb_endpoint_descriptor uasp_fs_cmd_desc = {
1932 .bLength = USB_DT_ENDPOINT_SIZE,
1933 .bDescriptorType = USB_DT_ENDPOINT,
1934 .bEndpointAddress = USB_DIR_OUT,
1935 .bmAttributes = USB_ENDPOINT_XFER_BULK,
1936};
1937
1938static struct usb_pipe_usage_descriptor uasp_cmd_pipe_desc = {
1939 .bLength = sizeof(uasp_cmd_pipe_desc),
1940 .bDescriptorType = USB_DT_PIPE_USAGE,
1941 .bPipeID = CMD_PIPE_ID,
1942};
1943
1944static struct usb_endpoint_descriptor uasp_ss_cmd_desc = {
1945 .bLength = USB_DT_ENDPOINT_SIZE,
1946 .bDescriptorType = USB_DT_ENDPOINT,
1947 .bEndpointAddress = USB_DIR_OUT,
1948 .bmAttributes = USB_ENDPOINT_XFER_BULK,
1949 .wMaxPacketSize = cpu_to_le16(1024),
1950};
1951
1952static struct usb_ss_ep_comp_descriptor uasp_cmd_comp_desc = {
1953 .bLength = sizeof(uasp_cmd_comp_desc),
1954 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
1955};
1956
1957static struct usb_descriptor_header *uasp_fs_function_desc[] = {
1958 (struct usb_descriptor_header *) &bot_intf_desc,
1959 (struct usb_descriptor_header *) &uasp_fs_bi_desc,
1960 (struct usb_descriptor_header *) &uasp_fs_bo_desc,
1961
1962 (struct usb_descriptor_header *) &uasp_intf_desc,
1963 (struct usb_descriptor_header *) &uasp_fs_bi_desc,
1964 (struct usb_descriptor_header *) &uasp_bi_pipe_desc,
1965 (struct usb_descriptor_header *) &uasp_fs_bo_desc,
1966 (struct usb_descriptor_header *) &uasp_bo_pipe_desc,
1967 (struct usb_descriptor_header *) &uasp_fs_status_desc,
1968 (struct usb_descriptor_header *) &uasp_status_pipe_desc,
1969 (struct usb_descriptor_header *) &uasp_fs_cmd_desc,
1970 (struct usb_descriptor_header *) &uasp_cmd_pipe_desc,
1971 NULL,
1972};
1973
1974static struct usb_descriptor_header *uasp_hs_function_desc[] = {
1975 (struct usb_descriptor_header *) &bot_intf_desc,
1976 (struct usb_descriptor_header *) &uasp_bi_desc,
1977 (struct usb_descriptor_header *) &uasp_bo_desc,
1978
1979 (struct usb_descriptor_header *) &uasp_intf_desc,
1980 (struct usb_descriptor_header *) &uasp_bi_desc,
1981 (struct usb_descriptor_header *) &uasp_bi_pipe_desc,
1982 (struct usb_descriptor_header *) &uasp_bo_desc,
1983 (struct usb_descriptor_header *) &uasp_bo_pipe_desc,
1984 (struct usb_descriptor_header *) &uasp_status_desc,
1985 (struct usb_descriptor_header *) &uasp_status_pipe_desc,
1986 (struct usb_descriptor_header *) &uasp_cmd_desc,
1987 (struct usb_descriptor_header *) &uasp_cmd_pipe_desc,
1988 NULL,
1989};
1990
1991static struct usb_descriptor_header *uasp_ss_function_desc[] = {
1992 (struct usb_descriptor_header *) &bot_intf_desc,
1993 (struct usb_descriptor_header *) &uasp_ss_bi_desc,
1994 (struct usb_descriptor_header *) &bot_bi_ep_comp_desc,
1995 (struct usb_descriptor_header *) &uasp_ss_bo_desc,
1996 (struct usb_descriptor_header *) &bot_bo_ep_comp_desc,
1997
1998 (struct usb_descriptor_header *) &uasp_intf_desc,
1999 (struct usb_descriptor_header *) &uasp_ss_bi_desc,
2000 (struct usb_descriptor_header *) &uasp_bi_ep_comp_desc,
2001 (struct usb_descriptor_header *) &uasp_bi_pipe_desc,
2002 (struct usb_descriptor_header *) &uasp_ss_bo_desc,
2003 (struct usb_descriptor_header *) &uasp_bo_ep_comp_desc,
2004 (struct usb_descriptor_header *) &uasp_bo_pipe_desc,
2005 (struct usb_descriptor_header *) &uasp_ss_status_desc,
2006 (struct usb_descriptor_header *) &uasp_status_in_ep_comp_desc,
2007 (struct usb_descriptor_header *) &uasp_status_pipe_desc,
2008 (struct usb_descriptor_header *) &uasp_ss_cmd_desc,
2009 (struct usb_descriptor_header *) &uasp_cmd_comp_desc,
2010 (struct usb_descriptor_header *) &uasp_cmd_pipe_desc,
2011 NULL,
2012};
2013
2014static struct usb_string tcm_us_strings[] = {
2015 [USB_G_STR_INT_UAS].s = "USB Attached SCSI",
2016 [USB_G_STR_INT_BBB].s = "Bulk Only Transport",
2017 { },
2018};
2019
2020static struct usb_gadget_strings tcm_stringtab = {
2021 .language = 0x0409,
2022 .strings = tcm_us_strings,
2023};
2024
2025static struct usb_gadget_strings *tcm_strings[] = {
2026 &tcm_stringtab,
2027 NULL,
2028};
2029
2030static int tcm_bind(struct usb_configuration *c, struct usb_function *f)
2031{
2032 struct f_uas *fu = to_f_uas(f);
2033 struct usb_string *us;
2034 struct usb_gadget *gadget = c->cdev->gadget;
2035 struct usb_ep *ep;
2036 struct f_tcm_opts *opts;
2037 int iface;
2038 int ret;
2039
2040 opts = container_of(f->fi, struct f_tcm_opts, func_inst);
2041
2042 mutex_lock(&opts->dep_lock);
2043 if (!opts->can_attach) {
2044 mutex_unlock(&opts->dep_lock);
2045 return -ENODEV;
2046 }
2047 mutex_unlock(&opts->dep_lock);
2048 us = usb_gstrings_attach(c->cdev, tcm_strings,
2049 ARRAY_SIZE(tcm_us_strings));
2050 if (IS_ERR(us))
2051 return PTR_ERR(us);
2052 bot_intf_desc.iInterface = us[USB_G_STR_INT_BBB].id;
2053 uasp_intf_desc.iInterface = us[USB_G_STR_INT_UAS].id;
2054
2055 iface = usb_interface_id(c, f);
2056 if (iface < 0)
2057 return iface;
2058
2059 bot_intf_desc.bInterfaceNumber = iface;
2060 uasp_intf_desc.bInterfaceNumber = iface;
2061 fu->iface = iface;
2062 ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_bi_desc,
2063 &uasp_bi_ep_comp_desc);
2064 if (!ep)
2065 goto ep_fail;
2066
2067 fu->ep_in = ep;
2068
2069 ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_bo_desc,
2070 &uasp_bo_ep_comp_desc);
2071 if (!ep)
2072 goto ep_fail;
2073 fu->ep_out = ep;
2074
2075 ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_status_desc,
2076 &uasp_status_in_ep_comp_desc);
2077 if (!ep)
2078 goto ep_fail;
2079 fu->ep_status = ep;
2080
2081 ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_cmd_desc,
2082 &uasp_cmd_comp_desc);
2083 if (!ep)
2084 goto ep_fail;
2085 fu->ep_cmd = ep;
2086
2087 /* Assume endpoint addresses are the same for both speeds */
2088 uasp_bi_desc.bEndpointAddress = uasp_ss_bi_desc.bEndpointAddress;
2089 uasp_bo_desc.bEndpointAddress = uasp_ss_bo_desc.bEndpointAddress;
2090 uasp_status_desc.bEndpointAddress =
2091 uasp_ss_status_desc.bEndpointAddress;
2092 uasp_cmd_desc.bEndpointAddress = uasp_ss_cmd_desc.bEndpointAddress;
2093
2094 uasp_fs_bi_desc.bEndpointAddress = uasp_ss_bi_desc.bEndpointAddress;
2095 uasp_fs_bo_desc.bEndpointAddress = uasp_ss_bo_desc.bEndpointAddress;
2096 uasp_fs_status_desc.bEndpointAddress =
2097 uasp_ss_status_desc.bEndpointAddress;
2098 uasp_fs_cmd_desc.bEndpointAddress = uasp_ss_cmd_desc.bEndpointAddress;
2099
2100 ret = usb_assign_descriptors(f, uasp_fs_function_desc,
2101 uasp_hs_function_desc, uasp_ss_function_desc);
2102 if (ret)
2103 goto ep_fail;
2104
2105 return 0;
2106ep_fail:
2107 pr_err("Can't claim all required eps\n");
2108
2109 return -ENOTSUPP;
2110}
2111
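/*
 * Interface alt switching arrives in interrupt context, so the BOT/UAS
 * changeover is deferred to a workqueue: tcm_set_alt() returns
 * USB_GADGET_DELAYED_STATUS and tcm_delayed_set_alt() completes the
 * control transfer once the old alt setting is torn down and the new one
 * is enabled.
 */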
2112struct guas_setup_wq {
2113 struct work_struct work;
2114 struct f_uas *fu;
2115 unsigned int alt;
2116};
2117
2118static void tcm_delayed_set_alt(struct work_struct *wq)
2119{
2120 struct guas_setup_wq *work = container_of(wq, struct guas_setup_wq,
2121 work);
2122 struct f_uas *fu = work->fu;
2123 int alt = work->alt;
2124
2125 kfree(work);
2126
2127 if (fu->flags & USBG_IS_BOT)
2128 bot_cleanup_old_alt(fu);
2129 if (fu->flags & USBG_IS_UAS)
2130 uasp_cleanup_old_alt(fu);
2131
2132 if (alt == USB_G_ALT_INT_BBB)
2133 bot_set_alt(fu);
2134 else if (alt == USB_G_ALT_INT_UAS)
2135 uasp_set_alt(fu);
2136 usb_composite_setup_continue(fu->function.config->cdev);
2137}
2138
2139static int tcm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
2140{
2141 struct f_uas *fu = to_f_uas(f);
2142
2143 if ((alt == USB_G_ALT_INT_BBB) || (alt == USB_G_ALT_INT_UAS)) {
2144 struct guas_setup_wq *work;
2145
2146 work = kmalloc(sizeof(*work), GFP_ATOMIC);
2147 if (!work)
2148 return -ENOMEM;
2149 INIT_WORK(&work->work, tcm_delayed_set_alt);
2150 work->fu = fu;
2151 work->alt = alt;
2152 schedule_work(&work->work);
2153 return USB_GADGET_DELAYED_STATUS;
2154 }
2155 return -EOPNOTSUPP;
2156}
2157
2158static void tcm_disable(struct usb_function *f)
2159{
2160 struct f_uas *fu = to_f_uas(f);
2161
2162 if (fu->flags & USBG_IS_UAS)
2163 uasp_cleanup_old_alt(fu);
2164 else if (fu->flags & USBG_IS_BOT)
2165 bot_cleanup_old_alt(fu);
2166 fu->flags = 0;
2167}
2168
2169static int tcm_setup(struct usb_function *f,
2170 const struct usb_ctrlrequest *ctrl)
2171{
2172 struct f_uas *fu = to_f_uas(f);
2173
2174 if (!(fu->flags & USBG_IS_BOT))
2175 return -EOPNOTSUPP;
2176
2177 return usbg_bot_setup(f, ctrl);
2178}
2179
2180static inline struct f_tcm_opts *to_f_tcm_opts(struct config_item *item)
2181{
2182 return container_of(to_config_group(item), struct f_tcm_opts,
2183 func_inst.group);
2184}
2185
2186static void tcm_attr_release(struct config_item *item)
2187{
2188 struct f_tcm_opts *opts = to_f_tcm_opts(item);
2189
2190 usb_put_function_instance(&opts->func_inst);
2191}
2192
2193static struct configfs_item_operations tcm_item_ops = {
2194 .release = tcm_attr_release,
2195};
2196
2197static struct config_item_type tcm_func_type = {
2198 .ct_item_ops = &tcm_item_ops,
2199 .ct_owner = THIS_MODULE,
2200};
2201
2202static void tcm_free_inst(struct usb_function_instance *f)
2203{
2204 struct f_tcm_opts *opts;
2205 unsigned i;
2206
2207 opts = container_of(f, struct f_tcm_opts, func_inst);
2208
2209 mutex_lock(&tpg_instances_lock);
2210 for (i = 0; i < TPG_INSTANCES; ++i)
2211 if (tpg_instances[i].func_inst == f)
2212 break;
2213 if (i < TPG_INSTANCES)
2214 tpg_instances[i].func_inst = NULL;
2215 mutex_unlock(&tpg_instances_lock);
2216
2217 kfree(opts);
2218}
2219
2220static int tcm_register_callback(struct usb_function_instance *f)
2221{
2222 struct f_tcm_opts *opts = container_of(f, struct f_tcm_opts, func_inst);
2223
2224 mutex_lock(&opts->dep_lock);
2225 opts->can_attach = true;
2226 mutex_unlock(&opts->dep_lock);
2227
2228 return 0;
2229}
2230
2231static void tcm_unregister_callback(struct usb_function_instance *f)
2232{
2233 struct f_tcm_opts *opts = container_of(f, struct f_tcm_opts, func_inst);
2234
2235 mutex_lock(&opts->dep_lock);
2236 unregister_gadget_item(opts->
2237 func_inst.group.cg_item.ci_parent->ci_parent);
2238 opts->can_attach = false;
2239 mutex_unlock(&opts->dep_lock);
2240}
2241
2242static int usbg_attach(struct usbg_tpg *tpg)
2243{
2244 struct usb_function_instance *f = tpg->fi;
2245 struct f_tcm_opts *opts = container_of(f, struct f_tcm_opts, func_inst);
2246
2247 if (opts->tcm_register_callback)
2248 return opts->tcm_register_callback(f);
2249
2250 return 0;
2251}
2252
2253static void usbg_detach(struct usbg_tpg *tpg)
2254{
2255 struct usb_function_instance *f = tpg->fi;
2256 struct f_tcm_opts *opts = container_of(f, struct f_tcm_opts, func_inst);
2257
2258 if (opts->tcm_unregister_callback)
2259 opts->tcm_unregister_callback(f);
2260}
2261
2262static int tcm_set_name(struct usb_function_instance *f, const char *name)
2263{
2264 struct f_tcm_opts *opts = container_of(f, struct f_tcm_opts, func_inst);
2265
2266 pr_debug("tcm: Activating %s\n", name);
2267
2268 mutex_lock(&opts->dep_lock);
2269 opts->ready = true;
2270 mutex_unlock(&opts->dep_lock);
2271
2272 return 0;
2273}
2274
2275static struct usb_function_instance *tcm_alloc_inst(void)
2276{
2277 struct f_tcm_opts *opts;
2278 int i;
2279
2281 opts = kzalloc(sizeof(*opts), GFP_KERNEL);
2282 if (!opts)
2283 return ERR_PTR(-ENOMEM);
2284
2285 mutex_lock(&tpg_instances_lock);
2286 for (i = 0; i < TPG_INSTANCES; ++i)
2287 if (!tpg_instances[i].func_inst)
2288 break;
2289
2290 if (i == TPG_INSTANCES) {
2291 mutex_unlock(&tpg_instances_lock);
2292 kfree(opts);
2293 return ERR_PTR(-EBUSY);
2294 }
2295 tpg_instances[i].func_inst = &opts->func_inst;
2296 mutex_unlock(&tpg_instances_lock);
2297
2298 mutex_init(&opts->dep_lock);
2299 opts->func_inst.set_inst_name = tcm_set_name;
2300 opts->func_inst.free_func_inst = tcm_free_inst;
2301 opts->tcm_register_callback = tcm_register_callback;
2302 opts->tcm_unregister_callback = tcm_unregister_callback;
2303
2304 config_group_init_type_name(&opts->func_inst.group, "",
2305 &tcm_func_type);
2306
2307 return &opts->func_inst;
2308}
2309
2310static void tcm_free(struct usb_function *f)
2311{
2312 struct f_uas *tcm = to_f_uas(f);
2313
2314 kfree(tcm);
2315}
2316
2317static void tcm_unbind(struct usb_configuration *c, struct usb_function *f)
2318{
2319 usb_free_all_descriptors(f);
2320}
2321
2322static struct usb_function *tcm_alloc(struct usb_function_instance *fi)
2323{
2324 struct f_uas *fu;
2325 unsigned i;
2326
2327 mutex_lock(&tpg_instances_lock);
2328 for (i = 0; i < TPG_INSTANCES; ++i)
2329 if (tpg_instances[i].func_inst == fi)
2330 break;
2331 if (i == TPG_INSTANCES) {
2332 mutex_unlock(&tpg_instances_lock);
2333 return ERR_PTR(-ENODEV);
2334 }
2335
2336 fu = kzalloc(sizeof(*fu), GFP_KERNEL);
2337 if (!fu) {
2338 mutex_unlock(&tpg_instances_lock);
2339 return ERR_PTR(-ENOMEM);
2340 }
2341
2342 fu->function.name = "Target Function";
2343 fu->function.bind = tcm_bind;
2344 fu->function.unbind = tcm_unbind;
2345 fu->function.set_alt = tcm_set_alt;
2346 fu->function.setup = tcm_setup;
2347 fu->function.disable = tcm_disable;
2348 fu->function.free_func = tcm_free;
2349 fu->tpg = tpg_instances[i].tpg;
2350 mutex_unlock(&tpg_instances_lock);
2351
2352 return &fu->function;
2353}
2354
2355DECLARE_USB_FUNCTION(tcm, tcm_alloc_inst, tcm_alloc);
2356
2357static int tcm_init(void)
2358{
2359 int ret;
2360
2361 ret = usb_function_register(&tcmusb_func);
2362 if (ret)
2363 return ret;
2364
2365 ret = target_register_template(&usbg_ops);
2366 if (ret)
2367 usb_function_unregister(&tcmusb_func);
2368
2369 return ret;
2370}
2371module_init(tcm_init);
2372
2373static void tcm_exit(void)
2374{
2375 target_unregister_template(&usbg_ops);
2376 usb_function_unregister(&tcmusb_func);
2377}
2378module_exit(tcm_exit);
2379
2380MODULE_LICENSE("GPL");
2381MODULE_AUTHOR("Sebastian Andrzej Siewior");
diff --git a/drivers/usb/gadget/legacy/tcm_usb_gadget.h b/drivers/usb/gadget/function/tcm.h
index 0b749e1aa2f1..b75c6f3e1980 100644
--- a/drivers/usb/gadget/legacy/tcm_usb_gadget.h
+++ b/drivers/usb/gadget/function/tcm.h
@@ -16,8 +16,7 @@
 #define UASP_SS_EP_COMP_NUM_STREAMS (1 << UASP_SS_EP_COMP_LOG_STREAMS)
 
 enum {
-	USB_G_STR_CONFIG = USB_GADGET_FIRST_AVAIL_IDX,
-	USB_G_STR_INT_UAS,
+	USB_G_STR_INT_UAS = 0,
 	USB_G_STR_INT_BBB,
 };
 
@@ -40,6 +39,8 @@ struct usbg_tpg {
 	u32 gadget_connect;
 	struct tcm_usbg_nexus *tpg_nexus;
 	atomic_t tpg_port_count;
+
+	struct usb_function_instance *fi;
 };
 
 struct usbg_tport {
@@ -128,6 +129,4 @@ struct f_uas {
 	struct usb_request *bot_req_out;
 };
 
-extern struct usbg_tpg *the_only_tpg_I_currently_have;
-
-#endif
+#endif /* __TARGET_USB_GADGET_H__ */
diff --git a/drivers/usb/gadget/function/u_tcm.h b/drivers/usb/gadget/function/u_tcm.h
new file mode 100644
index 000000000000..0bd751e0483f
--- /dev/null
+++ b/drivers/usb/gadget/function/u_tcm.h
@@ -0,0 +1,50 @@
1/*
2 * u_tcm.h
3 *
4 * Utility definitions for the tcm function
5 *
6 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
7 * http://www.samsung.com
8 *
9 * Author: Andrzej Pietrasiewicz <andrzej.p@xxxxxxxxxxx>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16#ifndef U_TCM_H
17#define U_TCM_H
18
19#include <linux/usb/composite.h>
20
21/**
22 * @dependent: optional dependent module. Meant for legacy gadget.
23 * If non-null its refcount will be increased when a tpg is created and
24 * decreased when the tpg is dropped.
25 * @dep_lock: lock for dependent module operations.
26 * @ready: true if the dependent module information is set.
27 * @can_attach: true if a function can be bound to the gadget
28 * @has_dep: true if there is a dependent module
29 *
30 */
31struct f_tcm_opts {
32 struct usb_function_instance func_inst;
33 struct module *dependent;
34 struct mutex dep_lock;
35 bool ready;
36 bool can_attach;
37 bool has_dep;
38
39 /*
40 * Callbacks to be removed when legacy tcm gadget disappears.
41 *
42 * If you use the new function registration interface
43 * programmatically, you MUST set these callbacks to
44 * something sensible (e.g. probe/remove the composite).
45 */
46 int (*tcm_register_callback)(struct usb_function_instance *);
47 void (*tcm_unregister_callback)(struct usb_function_instance *);
48};
49
50#endif /* U_TCM_H */
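
For anyone consuming the new function registration interface programmatically, the callbacks described above are wired up roughly as in the following sketch, modelled on what the legacy tcm_usb_gadget shim does (my_composite_driver, my_register_cb and my_unregister_cb are hypothetical names, not part of this patch):

#include "u_tcm.h"

static struct usb_composite_driver my_composite_driver;	/* assumed defined elsewhere */
static struct usb_function_instance *fi;

static int my_register_cb(struct usb_function_instance *f)
{
	/* fabric side is configured: bring up the composite gadget */
	return usb_composite_probe(&my_composite_driver);
}

static void my_unregister_cb(struct usb_function_instance *f)
{
	usb_composite_unregister(&my_composite_driver);
}

static int __init my_gadget_init(void)
{
	struct f_tcm_opts *opts;

	fi = usb_get_function_instance("tcm");
	if (IS_ERR(fi))
		return PTR_ERR(fi);
	opts = container_of(fi, struct f_tcm_opts, func_inst);

	mutex_lock(&opts->dep_lock);
	opts->tcm_register_callback = my_register_cb;
	opts->tcm_unregister_callback = my_unregister_cb;
	opts->dependent = THIS_MODULE;
	opts->has_dep = true;
	mutex_unlock(&opts->dep_lock);
	return 0;
}
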
diff --git a/drivers/usb/gadget/legacy/Kconfig b/drivers/usb/gadget/legacy/Kconfig
index 4d682ad7bf23..a23d1b90454c 100644
--- a/drivers/usb/gadget/legacy/Kconfig
+++ b/drivers/usb/gadget/legacy/Kconfig
@@ -250,6 +250,7 @@ config USB_GADGET_TARGET
 	tristate "USB Gadget Target Fabric Module"
 	depends on TARGET_CORE
 	select USB_LIBCOMPOSITE
+	select USB_F_TCM
 	help
 	  This fabric is an USB gadget. Two USB protocols are supported that is
 	  BBB or BOT (Bulk Only Transport) and UAS (USB Attached SCSI). BOT is
diff --git a/drivers/usb/gadget/legacy/tcm_usb_gadget.c b/drivers/usb/gadget/legacy/tcm_usb_gadget.c
index 7857fa411636..0b0bb98319cd 100644
--- a/drivers/usb/gadget/legacy/tcm_usb_gadget.c
+++ b/drivers/usb/gadget/legacy/tcm_usb_gadget.c
@@ -21,1953 +21,10 @@
 #include <target/target_core_fabric.h>
 #include <asm/unaligned.h>
 
-#include "tcm_usb_gadget.h"
+#include "u_tcm.h"
 
 USB_GADGET_COMPOSITE_OPTIONS();
 
28static inline struct f_uas *to_f_uas(struct usb_function *f)
29{
30 return container_of(f, struct f_uas, function);
31}
32
33static void usbg_cmd_release(struct kref *);
34
35static inline void usbg_cleanup_cmd(struct usbg_cmd *cmd)
36{
37 kref_put(&cmd->ref, usbg_cmd_release);
38}
39
40/* Start bot.c code */
41
42static int bot_enqueue_cmd_cbw(struct f_uas *fu)
43{
44 int ret;
45
46 if (fu->flags & USBG_BOT_CMD_PEND)
47 return 0;
48
49 ret = usb_ep_queue(fu->ep_out, fu->cmd.req, GFP_ATOMIC);
50 if (!ret)
51 fu->flags |= USBG_BOT_CMD_PEND;
52 return ret;
53}
54
55static void bot_status_complete(struct usb_ep *ep, struct usb_request *req)
56{
57 struct usbg_cmd *cmd = req->context;
58 struct f_uas *fu = cmd->fu;
59
60 usbg_cleanup_cmd(cmd);
61 if (req->status < 0) {
62 pr_err("ERR %s(%d)\n", __func__, __LINE__);
63 return;
64 }
65
66 /* CSW completed, wait for next CBW */
67 bot_enqueue_cmd_cbw(fu);
68}
69
70static void bot_enqueue_sense_code(struct f_uas *fu, struct usbg_cmd *cmd)
71{
72 struct bulk_cs_wrap *csw = &fu->bot_status.csw;
73 int ret;
74 u8 *sense;
75 unsigned int csw_stat;
76
77 csw_stat = cmd->csw_code;
78
79 /*
80 * We can't send SENSE as a response. So we take ASC & ASCQ from our
81 * sense buffer and queue it and hope the host sends a REQUEST_SENSE
82 * command from which it learns why we failed.
83 */
84 sense = cmd->sense_iu.sense;
85
86 csw->Tag = cmd->bot_tag;
87 csw->Status = csw_stat;
88 fu->bot_status.req->context = cmd;
89 ret = usb_ep_queue(fu->ep_in, fu->bot_status.req, GFP_ATOMIC);
90 if (ret)
91 pr_err("%s(%d) ERR: %d\n", __func__, __LINE__, ret);
92}
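
For reference, the CSW queued above is the fixed 13-byte BOT status wrapper from <linux/usb/storage.h> (field comments added here); it carries only a one-byte status, which is why the sense data has to wait for a later REQUEST_SENSE:

struct bulk_cs_wrap {
	__le32	Signature;	/* 'USBS' (US_BULK_CS_SIGN) */
	__u32	Tag;		/* echoed from the matching CBW */
	__le32	Residue;	/* bytes not transferred in the data stage */
	__u8	Status;		/* US_BULK_STAT_OK / _FAIL / _PHASE */
} __packed;
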
93
94static void bot_err_compl(struct usb_ep *ep, struct usb_request *req)
95{
96 struct usbg_cmd *cmd = req->context;
97 struct f_uas *fu = cmd->fu;
98
99 if (req->status < 0)
100 pr_err("ERR %s(%d)\n", __func__, __LINE__);
101
102 if (cmd->data_len) {
103 if (cmd->data_len > ep->maxpacket) {
104 req->length = ep->maxpacket;
105 cmd->data_len -= ep->maxpacket;
106 } else {
107 req->length = cmd->data_len;
108 cmd->data_len = 0;
109 }
110
111 usb_ep_queue(ep, req, GFP_ATOMIC);
112 return;
113 }
114 bot_enqueue_sense_code(fu, cmd);
115}
116
117static void bot_send_bad_status(struct usbg_cmd *cmd)
118{
119 struct f_uas *fu = cmd->fu;
120 struct bulk_cs_wrap *csw = &fu->bot_status.csw;
121 struct usb_request *req;
122 struct usb_ep *ep;
123
124 csw->Residue = cpu_to_le32(cmd->data_len);
125
126 if (cmd->data_len) {
127 if (cmd->is_read) {
128 ep = fu->ep_in;
129 req = fu->bot_req_in;
130 } else {
131 ep = fu->ep_out;
132 req = fu->bot_req_out;
133 }
134
135 if (cmd->data_len > ep->maxpacket) {
136 req->length = ep->maxpacket;
137 cmd->data_len -= ep->maxpacket;
138 } else {
139 req->length = cmd->data_len;
140 cmd->data_len = 0;
141 }
142 req->complete = bot_err_compl;
143 req->context = cmd;
144 req->buf = fu->cmd.buf;
145 usb_ep_queue(ep, req, GFP_KERNEL);
146 } else {
147 bot_enqueue_sense_code(fu, cmd);
148 }
149}
150
151static int bot_send_status(struct usbg_cmd *cmd, bool moved_data)
152{
153 struct f_uas *fu = cmd->fu;
154 struct bulk_cs_wrap *csw = &fu->bot_status.csw;
155 int ret;
156
157 if (cmd->se_cmd.scsi_status == SAM_STAT_GOOD) {
158 if (!moved_data && cmd->data_len) {
159 /*
160 * The host wants to move data, we don't. Fill / empty
161 * the pipe and then send the CSW with the residue set.
162 */
163 cmd->csw_code = US_BULK_STAT_OK;
164 bot_send_bad_status(cmd);
165 return 0;
166 }
167
168 csw->Tag = cmd->bot_tag;
169 csw->Residue = cpu_to_le32(0);
170 csw->Status = US_BULK_STAT_OK;
171 fu->bot_status.req->context = cmd;
172
173 ret = usb_ep_queue(fu->ep_in, fu->bot_status.req, GFP_KERNEL);
174 if (ret)
175 pr_err("%s(%d) ERR: %d\n", __func__, __LINE__, ret);
176 } else {
177 cmd->csw_code = US_BULK_STAT_FAIL;
178 bot_send_bad_status(cmd);
179 }
180 return 0;
181}
182
183/*
184 * Called after command (no data transfer) or after the write (to device)
185 * operation is completed
186 */
187static int bot_send_status_response(struct usbg_cmd *cmd)
188{
189 bool moved_data = false;
190
191 if (!cmd->is_read)
192 moved_data = true;
193 return bot_send_status(cmd, moved_data);
194}
195
196/* Read request completed, now we have to send the CSW */
197static void bot_read_compl(struct usb_ep *ep, struct usb_request *req)
198{
199 struct usbg_cmd *cmd = req->context;
200
201 if (req->status < 0)
202 pr_err("ERR %s(%d)\n", __func__, __LINE__);
203
204 bot_send_status(cmd, true);
205}
206
207static int bot_send_read_response(struct usbg_cmd *cmd)
208{
209 struct f_uas *fu = cmd->fu;
210 struct se_cmd *se_cmd = &cmd->se_cmd;
211 struct usb_gadget *gadget = fuas_to_gadget(fu);
212 int ret;
213
214 if (!cmd->data_len) {
215 cmd->csw_code = US_BULK_STAT_PHASE;
216 bot_send_bad_status(cmd);
217 return 0;
218 }
219
220 if (!gadget->sg_supported) {
221 cmd->data_buf = kmalloc(se_cmd->data_length, GFP_ATOMIC);
222 if (!cmd->data_buf)
223 return -ENOMEM;
224
225 sg_copy_to_buffer(se_cmd->t_data_sg,
226 se_cmd->t_data_nents,
227 cmd->data_buf,
228 se_cmd->data_length);
229
230 fu->bot_req_in->buf = cmd->data_buf;
231 } else {
232 fu->bot_req_in->buf = NULL;
233 fu->bot_req_in->num_sgs = se_cmd->t_data_nents;
234 fu->bot_req_in->sg = se_cmd->t_data_sg;
235 }
236
237 fu->bot_req_in->complete = bot_read_compl;
238 fu->bot_req_in->length = se_cmd->data_length;
239 fu->bot_req_in->context = cmd;
240 ret = usb_ep_queue(fu->ep_in, fu->bot_req_in, GFP_ATOMIC);
241 if (ret)
242 pr_err("%s(%d)\n", __func__, __LINE__);
243 return 0;
244}
245
246static void usbg_data_write_cmpl(struct usb_ep *, struct usb_request *);
247static int usbg_prepare_w_request(struct usbg_cmd *, struct usb_request *);
248
249static int bot_send_write_request(struct usbg_cmd *cmd)
250{
251 struct f_uas *fu = cmd->fu;
252 struct se_cmd *se_cmd = &cmd->se_cmd;
253 struct usb_gadget *gadget = fuas_to_gadget(fu);
254 int ret;
255
256 init_completion(&cmd->write_complete);
257 cmd->fu = fu;
258
259 if (!cmd->data_len) {
260 cmd->csw_code = US_BULK_STAT_PHASE;
261 return -EINVAL;
262 }
263
264 if (!gadget->sg_supported) {
265 cmd->data_buf = kmalloc(se_cmd->data_length, GFP_KERNEL);
266 if (!cmd->data_buf)
267 return -ENOMEM;
268
269 fu->bot_req_out->buf = cmd->data_buf;
270 } else {
271 fu->bot_req_out->buf = NULL;
272 fu->bot_req_out->num_sgs = se_cmd->t_data_nents;
273 fu->bot_req_out->sg = se_cmd->t_data_sg;
274 }
275
276 fu->bot_req_out->complete = usbg_data_write_cmpl;
277 fu->bot_req_out->length = se_cmd->data_length;
278 fu->bot_req_out->context = cmd;
279
280 ret = usbg_prepare_w_request(cmd, fu->bot_req_out);
281 if (ret)
282 goto cleanup;
283 ret = usb_ep_queue(fu->ep_out, fu->bot_req_out, GFP_KERNEL);
284 if (ret)
285 pr_err("%s(%d)\n", __func__, __LINE__);
286
287 wait_for_completion(&cmd->write_complete);
288 target_execute_cmd(se_cmd);
289cleanup:
290 return ret;
291}
292
293static int bot_submit_command(struct f_uas *, void *, unsigned int);
294
295static void bot_cmd_complete(struct usb_ep *ep, struct usb_request *req)
296{
297 struct f_uas *fu = req->context;
298 int ret;
299
300 fu->flags &= ~USBG_BOT_CMD_PEND;
301
302 if (req->status < 0)
303 return;
304
305 ret = bot_submit_command(fu, req->buf, req->actual);
306 if (ret)
307 pr_err("%s(%d): %d\n", __func__, __LINE__, ret);
308}
309
310static int bot_prepare_reqs(struct f_uas *fu)
311{
312 int ret;
313
314 fu->bot_req_in = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
315 if (!fu->bot_req_in)
316 goto err;
317
318 fu->bot_req_out = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
319 if (!fu->bot_req_out)
320 goto err_out;
321
322 fu->cmd.req = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
323 if (!fu->cmd.req)
324 goto err_cmd;
325
326 fu->bot_status.req = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
327 if (!fu->bot_status.req)
328 goto err_sts;
329
330 fu->bot_status.req->buf = &fu->bot_status.csw;
331 fu->bot_status.req->length = US_BULK_CS_WRAP_LEN;
332 fu->bot_status.req->complete = bot_status_complete;
333 fu->bot_status.csw.Signature = cpu_to_le32(US_BULK_CS_SIGN);
334
335 fu->cmd.buf = kmalloc(fu->ep_out->maxpacket, GFP_KERNEL);
336 if (!fu->cmd.buf)
337 goto err_buf;
338
339 fu->cmd.req->complete = bot_cmd_complete;
340 fu->cmd.req->buf = fu->cmd.buf;
341 fu->cmd.req->length = fu->ep_out->maxpacket;
342 fu->cmd.req->context = fu;
343
344 ret = bot_enqueue_cmd_cbw(fu);
345 if (ret)
346 goto err_queue;
347 return 0;
348err_queue:
349 kfree(fu->cmd.buf);
350 fu->cmd.buf = NULL;
351err_buf:
352 usb_ep_free_request(fu->ep_in, fu->bot_status.req);
353err_sts:
354 usb_ep_free_request(fu->ep_out, fu->cmd.req);
355 fu->cmd.req = NULL;
356err_cmd:
357 usb_ep_free_request(fu->ep_out, fu->bot_req_out);
358 fu->bot_req_out = NULL;
359err_out:
360 usb_ep_free_request(fu->ep_in, fu->bot_req_in);
361 fu->bot_req_in = NULL;
362err:
363 pr_err("BOT: endpoint setup failed\n");
364 return -ENOMEM;
365}
366
367static void bot_cleanup_old_alt(struct f_uas *fu)
368{
369 if (!(fu->flags & USBG_ENABLED))
370 return;
371
372 usb_ep_disable(fu->ep_in);
373 usb_ep_disable(fu->ep_out);
374
375 if (!fu->bot_req_in)
376 return;
377
378 usb_ep_free_request(fu->ep_in, fu->bot_req_in);
379 usb_ep_free_request(fu->ep_out, fu->bot_req_out);
380 usb_ep_free_request(fu->ep_out, fu->cmd.req);
381 usb_ep_free_request(fu->ep_out, fu->bot_status.req);
382
383 kfree(fu->cmd.buf);
384
385 fu->bot_req_in = NULL;
386 fu->bot_req_out = NULL;
387 fu->cmd.req = NULL;
388 fu->bot_status.req = NULL;
389 fu->cmd.buf = NULL;
390}
391
392static void bot_set_alt(struct f_uas *fu)
393{
394 struct usb_function *f = &fu->function;
395 struct usb_gadget *gadget = f->config->cdev->gadget;
396 int ret;
397
398 fu->flags = USBG_IS_BOT;
399
400 config_ep_by_speed(gadget, f, fu->ep_in);
401 ret = usb_ep_enable(fu->ep_in);
402 if (ret)
403 goto err_b_in;
404
405 config_ep_by_speed(gadget, f, fu->ep_out);
406 ret = usb_ep_enable(fu->ep_out);
407 if (ret)
408 goto err_b_out;
409
410 ret = bot_prepare_reqs(fu);
411 if (ret)
412 goto err_wq;
413 fu->flags |= USBG_ENABLED;
414 pr_info("Using the BOT protocol\n");
415 return;
416err_wq:
417 usb_ep_disable(fu->ep_out);
418err_b_out:
419 usb_ep_disable(fu->ep_in);
420err_b_in:
421 fu->flags = USBG_IS_BOT;
422}
423
424static int usbg_bot_setup(struct usb_function *f,
425 const struct usb_ctrlrequest *ctrl)
426{
427 struct f_uas *fu = to_f_uas(f);
428 struct usb_composite_dev *cdev = f->config->cdev;
429 u16 w_value = le16_to_cpu(ctrl->wValue);
430 u16 w_length = le16_to_cpu(ctrl->wLength);
431 int luns;
432 u8 *ret_lun;
433
434 switch (ctrl->bRequest) {
435 case US_BULK_GET_MAX_LUN:
436 if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_CLASS |
437 USB_RECIP_INTERFACE))
438 return -ENOTSUPP;
439
440 if (w_length < 1)
441 return -EINVAL;
442 if (w_value != 0)
443 return -EINVAL;
444 luns = atomic_read(&fu->tpg->tpg_port_count);
445 if (!luns) {
446 pr_err("No LUNs configured?\n");
447 return -EINVAL;
448 }
449 /*
450 * If 4 LUNs are present we return 3 i.e. LUN 0..3 can be
451 * accessed. The upper limit is 0xf
452 */
453 luns--;
454 if (luns > 0xf) {
455 pr_info_once("Limiting the number of luns to 16\n");
456 luns = 0xf;
457 }
458 ret_lun = cdev->req->buf;
459 *ret_lun = luns;
460 cdev->req->length = 1;
461 return usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
463
464 case US_BULK_RESET_REQUEST:
465 /* XXX maybe we should remove previous requests for IN + OUT */
466 bot_enqueue_cmd_cbw(fu);
467 return 0;
469 }
470 return -ENOTSUPP;
471}
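
A worked sketch of the GET_MAX_LUN arithmetic above (max_lun_from_count is a hypothetical helper, not part of the driver):

static u8 max_lun_from_count(int luns)
{
	/* the wire value is the highest LUN index, not the LUN count */
	luns--;			/* 4 LUNs configured -> report 3, i.e. LUN 0..3 */
	if (luns > 0xf)
		luns = 0xf;	/* BOT can address at most 16 LUNs */
	return luns;
}
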
472
473/* Start uas.c code */
474
475static void uasp_cleanup_one_stream(struct f_uas *fu, struct uas_stream *stream)
476{
477 /* We have either all three allocated or none */
478 if (!stream->req_in)
479 return;
480
481 usb_ep_free_request(fu->ep_in, stream->req_in);
482 usb_ep_free_request(fu->ep_out, stream->req_out);
483 usb_ep_free_request(fu->ep_status, stream->req_status);
484
485 stream->req_in = NULL;
486 stream->req_out = NULL;
487 stream->req_status = NULL;
488}
489
490static void uasp_free_cmdreq(struct f_uas *fu)
491{
492 usb_ep_free_request(fu->ep_cmd, fu->cmd.req);
493 kfree(fu->cmd.buf);
494 fu->cmd.req = NULL;
495 fu->cmd.buf = NULL;
496}
497
498static void uasp_cleanup_old_alt(struct f_uas *fu)
499{
500 int i;
501
502 if (!(fu->flags & USBG_ENABLED))
503 return;
504
505 usb_ep_disable(fu->ep_in);
506 usb_ep_disable(fu->ep_out);
507 usb_ep_disable(fu->ep_status);
508 usb_ep_disable(fu->ep_cmd);
509
510 for (i = 0; i < UASP_SS_EP_COMP_NUM_STREAMS; i++)
511 uasp_cleanup_one_stream(fu, &fu->stream[i]);
512 uasp_free_cmdreq(fu);
513}
514
515static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req);
516
517static int uasp_prepare_r_request(struct usbg_cmd *cmd)
518{
519 struct se_cmd *se_cmd = &cmd->se_cmd;
520 struct f_uas *fu = cmd->fu;
521 struct usb_gadget *gadget = fuas_to_gadget(fu);
522 struct uas_stream *stream = cmd->stream;
523
524 if (!gadget->sg_supported) {
525 cmd->data_buf = kmalloc(se_cmd->data_length, GFP_ATOMIC);
526 if (!cmd->data_buf)
527 return -ENOMEM;
528
529 sg_copy_to_buffer(se_cmd->t_data_sg,
530 se_cmd->t_data_nents,
531 cmd->data_buf,
532 se_cmd->data_length);
533
534 stream->req_in->buf = cmd->data_buf;
535 } else {
536 stream->req_in->buf = NULL;
537 stream->req_in->num_sgs = se_cmd->t_data_nents;
538 stream->req_in->sg = se_cmd->t_data_sg;
539 }
540
541 stream->req_in->complete = uasp_status_data_cmpl;
542 stream->req_in->length = se_cmd->data_length;
543 stream->req_in->context = cmd;
544
545 cmd->state = UASP_SEND_STATUS;
546 return 0;
547}
548
549static void uasp_prepare_status(struct usbg_cmd *cmd)
550{
551 struct se_cmd *se_cmd = &cmd->se_cmd;
552 struct sense_iu *iu = &cmd->sense_iu;
553 struct uas_stream *stream = cmd->stream;
554
555 cmd->state = UASP_QUEUE_COMMAND;
556 iu->iu_id = IU_ID_STATUS;
557 iu->tag = cpu_to_be16(cmd->tag);
558
559 /*
560 * iu->status_qual = cpu_to_be16(STATUS QUALIFIER SAM-4. Where R U?);
561 */
562 iu->len = cpu_to_be16(se_cmd->scsi_sense_length);
563 iu->status = se_cmd->scsi_status;
564 stream->req_status->context = cmd;
565 stream->req_status->length = se_cmd->scsi_sense_length + 16;
566 stream->req_status->buf = iu;
567 stream->req_status->complete = uasp_status_data_cmpl;
568}
569
570static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req)
571{
572 struct usbg_cmd *cmd = req->context;
573 struct uas_stream *stream = cmd->stream;
574 struct f_uas *fu = cmd->fu;
575 int ret;
576
577 if (req->status < 0)
578 goto cleanup;
579
580 switch (cmd->state) {
581 case UASP_SEND_DATA:
582 ret = uasp_prepare_r_request(cmd);
583 if (ret)
584 goto cleanup;
585 ret = usb_ep_queue(fu->ep_in, stream->req_in, GFP_ATOMIC);
586 if (ret)
587 pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
588 break;
589
590 case UASP_RECEIVE_DATA:
591 ret = usbg_prepare_w_request(cmd, stream->req_out);
592 if (ret)
593 goto cleanup;
594 ret = usb_ep_queue(fu->ep_out, stream->req_out, GFP_ATOMIC);
595 if (ret)
596 pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
597 break;
598
599 case UASP_SEND_STATUS:
600 uasp_prepare_status(cmd);
601 ret = usb_ep_queue(fu->ep_status, stream->req_status,
602 GFP_ATOMIC);
603 if (ret)
604 pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
605 break;
606
607 case UASP_QUEUE_COMMAND:
608 usbg_cleanup_cmd(cmd);
609 usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
610 break;
611
612 default:
613 BUG();
614 }
615 return;
616
617cleanup:
618 usbg_cleanup_cmd(cmd);
619}
620
621static int uasp_send_status_response(struct usbg_cmd *cmd)
622{
623 struct f_uas *fu = cmd->fu;
624 struct uas_stream *stream = cmd->stream;
625 struct sense_iu *iu = &cmd->sense_iu;
626
627 iu->tag = cpu_to_be16(cmd->tag);
628 stream->req_status->complete = uasp_status_data_cmpl;
629 stream->req_status->context = cmd;
630 cmd->fu = fu;
631 uasp_prepare_status(cmd);
632 return usb_ep_queue(fu->ep_status, stream->req_status, GFP_ATOMIC);
633}
634
635static int uasp_send_read_response(struct usbg_cmd *cmd)
636{
637 struct f_uas *fu = cmd->fu;
638 struct uas_stream *stream = cmd->stream;
639 struct sense_iu *iu = &cmd->sense_iu;
640 int ret;
641
642 cmd->fu = fu;
643
644 iu->tag = cpu_to_be16(cmd->tag);
645 if (fu->flags & USBG_USE_STREAMS) {
646
647 ret = uasp_prepare_r_request(cmd);
648 if (ret)
649 goto out;
650 ret = usb_ep_queue(fu->ep_in, stream->req_in, GFP_ATOMIC);
651 if (ret) {
652 pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
653 kfree(cmd->data_buf);
654 cmd->data_buf = NULL;
655 }
656
657 } else {
658
659 iu->iu_id = IU_ID_READ_READY;
660 iu->tag = cpu_to_be16(cmd->tag);
661
662 stream->req_status->complete = uasp_status_data_cmpl;
663 stream->req_status->context = cmd;
664
665 cmd->state = UASP_SEND_DATA;
666 stream->req_status->buf = iu;
667 stream->req_status->length = sizeof(struct iu);
668
669 ret = usb_ep_queue(fu->ep_status, stream->req_status,
670 GFP_ATOMIC);
671 if (ret)
672 pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
673 }
674out:
675 return ret;
676}
677
678static int uasp_send_write_request(struct usbg_cmd *cmd)
679{
680 struct f_uas *fu = cmd->fu;
681 struct se_cmd *se_cmd = &cmd->se_cmd;
682 struct uas_stream *stream = cmd->stream;
683 struct sense_iu *iu = &cmd->sense_iu;
684 int ret;
685
686 init_completion(&cmd->write_complete);
687 cmd->fu = fu;
688
689 iu->tag = cpu_to_be16(cmd->tag);
690
691 if (fu->flags & USBG_USE_STREAMS) {
692
693 ret = usbg_prepare_w_request(cmd, stream->req_out);
694 if (ret)
695 goto cleanup;
696 ret = usb_ep_queue(fu->ep_out, stream->req_out, GFP_ATOMIC);
697 if (ret)
698 pr_err("%s(%d)\n", __func__, __LINE__);
699
700 } else {
701
702 iu->iu_id = IU_ID_WRITE_READY;
703 iu->tag = cpu_to_be16(cmd->tag);
704
705 stream->req_status->complete = uasp_status_data_cmpl;
706 stream->req_status->context = cmd;
707
708 cmd->state = UASP_RECEIVE_DATA;
709 stream->req_status->buf = iu;
710 stream->req_status->length = sizeof(struct iu);
711
712 ret = usb_ep_queue(fu->ep_status, stream->req_status,
713 GFP_ATOMIC);
714 if (ret)
715 pr_err("%s(%d)\n", __func__, __LINE__);
716 }
717
718 wait_for_completion(&cmd->write_complete);
719 target_execute_cmd(se_cmd);
720cleanup:
721 return ret;
722}
723
724static int usbg_submit_command(struct f_uas *, void *, unsigned int);
725
726static void uasp_cmd_complete(struct usb_ep *ep, struct usb_request *req)
727{
728 struct f_uas *fu = req->context;
729 int ret;
730
731 if (req->status < 0)
732 return;
733
734 ret = usbg_submit_command(fu, req->buf, req->actual);
735 /*
736 * Once we tune for performance, enqueue the command req here again so
737 * we can receive a second command while we are processing this one. Pay
738 * attention to properly syncing the STATUS endpoint with DATA IN + OUT
739 * so you don't break HS.
740 */
741 if (!ret)
742 return;
743 usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
744}
745
746static int uasp_alloc_stream_res(struct f_uas *fu, struct uas_stream *stream)
747{
748 stream->req_in = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
749 if (!stream->req_in)
750 goto out;
751
752 stream->req_out = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
753 if (!stream->req_out)
754 goto err_out;
755
756 stream->req_status = usb_ep_alloc_request(fu->ep_status, GFP_KERNEL);
757 if (!stream->req_status)
758 goto err_sts;
759
760 return 0;
761err_sts:
762 usb_ep_free_request(fu->ep_status, stream->req_status);
763 stream->req_status = NULL;
764err_out:
765 usb_ep_free_request(fu->ep_out, stream->req_out);
766 stream->req_out = NULL;
767out:
768 return -ENOMEM;
769}
770
771static int uasp_alloc_cmd(struct f_uas *fu)
772{
773 fu->cmd.req = usb_ep_alloc_request(fu->ep_cmd, GFP_KERNEL);
774 if (!fu->cmd.req)
775 goto err;
776
777 fu->cmd.buf = kmalloc(fu->ep_cmd->maxpacket, GFP_KERNEL);
778 if (!fu->cmd.buf)
779 goto err_buf;
780
781 fu->cmd.req->complete = uasp_cmd_complete;
782 fu->cmd.req->buf = fu->cmd.buf;
783 fu->cmd.req->length = fu->ep_cmd->maxpacket;
784 fu->cmd.req->context = fu;
785 return 0;
786
787err_buf:
788 usb_ep_free_request(fu->ep_cmd, fu->cmd.req);
789err:
790 return -ENOMEM;
791}
792
793static void uasp_setup_stream_res(struct f_uas *fu, int max_streams)
794{
795 int i;
796
797 for (i = 0; i < max_streams; i++) {
798 struct uas_stream *s = &fu->stream[i];
799
800 s->req_in->stream_id = i + 1;
801 s->req_out->stream_id = i + 1;
802 s->req_status->stream_id = i + 1;
803 }
804}
805
806static int uasp_prepare_reqs(struct f_uas *fu)
807{
808 int ret;
809 int i;
810 int max_streams;
811
812 if (fu->flags & USBG_USE_STREAMS)
813 max_streams = UASP_SS_EP_COMP_NUM_STREAMS;
814 else
815 max_streams = 1;
816
817 for (i = 0; i < max_streams; i++) {
818 ret = uasp_alloc_stream_res(fu, &fu->stream[i]);
819 if (ret)
820 goto err_cleanup;
821 }
822
823 ret = uasp_alloc_cmd(fu);
824 if (ret)
825 goto err_free_stream;
826 uasp_setup_stream_res(fu, max_streams);
827
828 ret = usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
829 if (ret)
830 goto err_free_stream;
831
832 return 0;
833
834err_free_stream:
835 uasp_free_cmdreq(fu);
836
837err_cleanup:
838 if (i) {
839 do {
840 uasp_cleanup_one_stream(fu, &fu->stream[i - 1]);
841 i--;
842 } while (i);
843 }
844 pr_err("UASP: endpoint setup failed\n");
845 return ret;
846}
847
848static void uasp_set_alt(struct f_uas *fu)
849{
850 struct usb_function *f = &fu->function;
851 struct usb_gadget *gadget = f->config->cdev->gadget;
852 int ret;
853
854 fu->flags = USBG_IS_UAS;
855
856 if (gadget->speed == USB_SPEED_SUPER)
857 fu->flags |= USBG_USE_STREAMS;
858
859 config_ep_by_speed(gadget, f, fu->ep_in);
860 ret = usb_ep_enable(fu->ep_in);
861 if (ret)
862 goto err_b_in;
863
864 config_ep_by_speed(gadget, f, fu->ep_out);
865 ret = usb_ep_enable(fu->ep_out);
866 if (ret)
867 goto err_b_out;
868
869 config_ep_by_speed(gadget, f, fu->ep_cmd);
870 ret = usb_ep_enable(fu->ep_cmd);
871 if (ret)
872 goto err_cmd;
873 config_ep_by_speed(gadget, f, fu->ep_status);
874 ret = usb_ep_enable(fu->ep_status);
875 if (ret)
876 goto err_status;
877
878 ret = uasp_prepare_reqs(fu);
879 if (ret)
880 goto err_wq;
881 fu->flags |= USBG_ENABLED;
882
883 pr_info("Using the UAS protocol\n");
884 return;
885err_wq:
886 usb_ep_disable(fu->ep_status);
887err_status:
888 usb_ep_disable(fu->ep_cmd);
889err_cmd:
890 usb_ep_disable(fu->ep_out);
891err_b_out:
892 usb_ep_disable(fu->ep_in);
893err_b_in:
894 fu->flags = 0;
895}
896
897static int get_cmd_dir(const unsigned char *cdb)
898{
899 int ret;
900
901 switch (cdb[0]) {
902 case READ_6:
903 case READ_10:
904 case READ_12:
905 case READ_16:
906 case INQUIRY:
907 case MODE_SENSE:
908 case MODE_SENSE_10:
909 case SERVICE_ACTION_IN_16:
910 case MAINTENANCE_IN:
911 case PERSISTENT_RESERVE_IN:
912 case SECURITY_PROTOCOL_IN:
913 case ACCESS_CONTROL_IN:
914 case REPORT_LUNS:
915 case READ_BLOCK_LIMITS:
916 case READ_POSITION:
917 case READ_CAPACITY:
918 case READ_TOC:
919 case READ_FORMAT_CAPACITIES:
920 case REQUEST_SENSE:
921 ret = DMA_FROM_DEVICE;
922 break;
923
924 case WRITE_6:
925 case WRITE_10:
926 case WRITE_12:
927 case WRITE_16:
928 case MODE_SELECT:
929 case MODE_SELECT_10:
930 case WRITE_VERIFY:
931 case WRITE_VERIFY_12:
932 case PERSISTENT_RESERVE_OUT:
933 case MAINTENANCE_OUT:
934 case SECURITY_PROTOCOL_OUT:
935 case ACCESS_CONTROL_OUT:
936 ret = DMA_TO_DEVICE;
937 break;
938 case ALLOW_MEDIUM_REMOVAL:
939 case TEST_UNIT_READY:
940 case SYNCHRONIZE_CACHE:
941 case START_STOP:
942 case ERASE:
943 case REZERO_UNIT:
944 case SEEK_10:
945 case SPACE:
946 case VERIFY:
947 case WRITE_FILEMARKS:
948 ret = DMA_NONE;
949 break;
950 default:
951 pr_warn("target: Unknown data direction for SCSI Opcode "
952 "0x%02x\n", cdb[0]);
953 ret = -EINVAL;
954 }
955 return ret;
956}
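
Usage is straightforward; for example (hypothetical caller, not in the driver):

	unsigned char cdb[16] = { READ_10 };	/* opcode 0x28 */
	int dir = get_cmd_dir(cdb);

	/*
	 * dir == DMA_FROM_DEVICE here: the device will send data to the
	 * host. An opcode not covered by the switch yields -EINVAL.
	 */
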
957
958static void usbg_data_write_cmpl(struct usb_ep *ep, struct usb_request *req)
959{
960 struct usbg_cmd *cmd = req->context;
961 struct se_cmd *se_cmd = &cmd->se_cmd;
962
963 if (req->status < 0) {
964 pr_err("%s() state %d transfer failed\n", __func__, cmd->state);
965 goto cleanup;
966 }
967
968 if (req->num_sgs == 0) {
969 sg_copy_from_buffer(se_cmd->t_data_sg,
970 se_cmd->t_data_nents,
971 cmd->data_buf,
972 se_cmd->data_length);
973 }
974
975 complete(&cmd->write_complete);
976 return;
977
978cleanup:
979 usbg_cleanup_cmd(cmd);
980}
981
982static int usbg_prepare_w_request(struct usbg_cmd *cmd, struct usb_request *req)
983{
984 struct se_cmd *se_cmd = &cmd->se_cmd;
985 struct f_uas *fu = cmd->fu;
986 struct usb_gadget *gadget = fuas_to_gadget(fu);
987
988 if (!gadget->sg_supported) {
989 cmd->data_buf = kmalloc(se_cmd->data_length, GFP_ATOMIC);
990 if (!cmd->data_buf)
991 return -ENOMEM;
992
993 req->buf = cmd->data_buf;
994 } else {
995 req->buf = NULL;
996 req->num_sgs = se_cmd->t_data_nents;
997 req->sg = se_cmd->t_data_sg;
998 }
999
1000 req->complete = usbg_data_write_cmpl;
1001 req->length = se_cmd->data_length;
1002 req->context = cmd;
1003 return 0;
1004}
1005
1006static int usbg_send_status_response(struct se_cmd *se_cmd)
1007{
1008 struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
1009 se_cmd);
1010 struct f_uas *fu = cmd->fu;
1011
1012 if (fu->flags & USBG_IS_BOT)
1013 return bot_send_status_response(cmd);
1014 else
1015 return uasp_send_status_response(cmd);
1016}
1017
1018static int usbg_send_write_request(struct se_cmd *se_cmd)
1019{
1020 struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
1021 se_cmd);
1022 struct f_uas *fu = cmd->fu;
1023
1024 if (fu->flags & USBG_IS_BOT)
1025 return bot_send_write_request(cmd);
1026 else
1027 return uasp_send_write_request(cmd);
1028}
1029
1030static int usbg_send_read_response(struct se_cmd *se_cmd)
1031{
1032 struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
1033 se_cmd);
1034 struct f_uas *fu = cmd->fu;
1035
1036 if (fu->flags & USBG_IS_BOT)
1037 return bot_send_read_response(cmd);
1038 else
1039 return uasp_send_read_response(cmd);
1040}
1041
1042static void usbg_cmd_work(struct work_struct *work)
1043{
1044 struct usbg_cmd *cmd = container_of(work, struct usbg_cmd, work);
1045 struct se_cmd *se_cmd;
1046 struct tcm_usbg_nexus *tv_nexus;
1047 struct usbg_tpg *tpg;
1048 int dir;
1049
1050 se_cmd = &cmd->se_cmd;
1051 tpg = cmd->fu->tpg;
1052 tv_nexus = tpg->tpg_nexus;
1053 dir = get_cmd_dir(cmd->cmd_buf);
1054 if (dir < 0) {
1055 transport_init_se_cmd(se_cmd,
1056 tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
1057 tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
1058 cmd->prio_attr, cmd->sense_iu.sense);
1059 goto out;
1060 }
1061
1062 if (target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
1063 cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun,
1064 0, cmd->prio_attr, dir, TARGET_SCF_UNKNOWN_SIZE) < 0)
1065 goto out;
1066
1067 return;
1068
1069out:
1070 transport_send_check_condition_and_sense(se_cmd,
1071 TCM_UNSUPPORTED_SCSI_OPCODE, 1);
1072 usbg_cleanup_cmd(cmd);
1073}
1074
1075static int usbg_submit_command(struct f_uas *fu,
1076 void *cmdbuf, unsigned int len)
1077{
1078 struct command_iu *cmd_iu = cmdbuf;
1079 struct usbg_cmd *cmd;
1080 struct usbg_tpg *tpg;
1081 struct se_cmd *se_cmd;
1082 struct tcm_usbg_nexus *tv_nexus;
1083 u32 cmd_len;
1084 int ret;
1085
1086 if (cmd_iu->iu_id != IU_ID_COMMAND) {
1087 pr_err("Unsupported type %d\n", cmd_iu->iu_id);
1088 return -EINVAL;
1089 }
1090
1091 cmd = kzalloc(sizeof *cmd, GFP_ATOMIC);
1092 if (!cmd)
1093 return -ENOMEM;
1094
1095 cmd->fu = fu;
1096
1097 /* XXX until I figure out why I can't free it on complete */
1098 kref_init(&cmd->ref);
1099 kref_get(&cmd->ref);
1100
1101 tpg = fu->tpg;
1102 cmd_len = (cmd_iu->len & ~0x3) + 16;
1103 if (cmd_len > USBG_MAX_CMD)
1104 goto err;
1105
1106 memcpy(cmd->cmd_buf, cmd_iu->cdb, cmd_len);
1107
1108 cmd->tag = be16_to_cpup(&cmd_iu->tag);
1109 cmd->se_cmd.tag = cmd->tag;
1110 if (fu->flags & USBG_USE_STREAMS) {
1111 if (cmd->tag > UASP_SS_EP_COMP_NUM_STREAMS)
1112 goto err;
1113 if (!cmd->tag)
1114 cmd->stream = &fu->stream[0];
1115 else
1116 cmd->stream = &fu->stream[cmd->tag - 1];
1117 } else {
1118 cmd->stream = &fu->stream[0];
1119 }
1120
1121 tv_nexus = tpg->tpg_nexus;
1122 if (!tv_nexus) {
1123 pr_err("Missing nexus, ignoring command\n");
1124 goto err;
1125 }
1126
1127 switch (cmd_iu->prio_attr & 0x7) {
1128 case UAS_HEAD_TAG:
1129 cmd->prio_attr = TCM_HEAD_TAG;
1130 break;
1131 case UAS_ORDERED_TAG:
1132 cmd->prio_attr = TCM_ORDERED_TAG;
1133 break;
1134 case UAS_ACA:
1135 cmd->prio_attr = TCM_ACA_TAG;
1136 break;
1137 default:
1138 pr_debug_once("Unsupported prio_attr: %02x.\n",
1139 cmd_iu->prio_attr);
1140 case UAS_SIMPLE_TAG:
1141 cmd->prio_attr = TCM_SIMPLE_TAG;
1142 break;
1143 }
1144
1145 se_cmd = &cmd->se_cmd;
1146 cmd->unpacked_lun = scsilun_to_int(&cmd_iu->lun);
1147
1148 INIT_WORK(&cmd->work, usbg_cmd_work);
1149 ret = queue_work(tpg->workqueue, &cmd->work);
1150 if (ret < 0)
1151 goto err;
1152
1153 return 0;
1154err:
1155 kfree(cmd);
1156 return -EINVAL;
1157}
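
The CDB length computation above follows the COMMAND IU framing in <linux/usb/uas.h>: the IU always carries at least a 16-byte CDB area, and cmd_iu->len holds the additional CDB length, which must be a multiple of four (hence the & ~0x3 mask). A worked sketch:

	/*
	 * cmd_iu->len == 0  -> cmd_len = 16 (plain 16-byte CDB)
	 * cmd_iu->len == 16 -> cmd_len = 32 (16 extra CDB bytes)
	 * cmd_iu->len == 18 -> low bits masked off, so cmd_len = 32
	 */
	u32 cmd_len = (cmd_iu->len & ~0x3) + 16;
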
1158
1159static void bot_cmd_work(struct work_struct *work)
1160{
1161 struct usbg_cmd *cmd = container_of(work, struct usbg_cmd, work);
1162 struct se_cmd *se_cmd;
1163 struct tcm_usbg_nexus *tv_nexus;
1164 struct usbg_tpg *tpg;
1165 int dir;
1166
1167 se_cmd = &cmd->se_cmd;
1168 tpg = cmd->fu->tpg;
1169 tv_nexus = tpg->tpg_nexus;
1170 dir = get_cmd_dir(cmd->cmd_buf);
1171 if (dir < 0) {
1172 transport_init_se_cmd(se_cmd,
1173 tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
1174 tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
1175 cmd->prio_attr, cmd->sense_iu.sense);
1176 goto out;
1177 }
1178
1179 if (target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
1180 cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun,
1181 cmd->data_len, cmd->prio_attr, dir, 0) < 0)
1182 goto out;
1183
1184 return;
1185
1186out:
1187 transport_send_check_condition_and_sense(se_cmd,
1188 TCM_UNSUPPORTED_SCSI_OPCODE, 1);
1189 usbg_cleanup_cmd(cmd);
1190}
1191
1192static int bot_submit_command(struct f_uas *fu,
1193 void *cmdbuf, unsigned int len)
1194{
1195 struct bulk_cb_wrap *cbw = cmdbuf;
1196 struct usbg_cmd *cmd;
1197 struct usbg_tpg *tpg;
1198 struct se_cmd *se_cmd;
1199 struct tcm_usbg_nexus *tv_nexus;
1200 u32 cmd_len;
1201 int ret;
1202
1203 if (cbw->Signature != cpu_to_le32(US_BULK_CB_SIGN)) {
1204 pr_err("Wrong signature on CBW\n");
1205 return -EINVAL;
1206 }
1207 if (len != 31) {
1208 pr_err("Wrong length for CBW\n");
1209 return -EINVAL;
1210 }
1211
1212 cmd_len = cbw->Length;
1213 if (cmd_len < 1 || cmd_len > 16)
1214 return -EINVAL;
1215
1216 cmd = kzalloc(sizeof *cmd, GFP_ATOMIC);
1217 if (!cmd)
1218 return -ENOMEM;
1219
1220 cmd->fu = fu;
1221
1222 /* XXX until I figure out why I can't free it on complete */
1223 kref_init(&cmd->ref);
1224 kref_get(&cmd->ref);
1225
1226 tpg = fu->tpg;
1227
1228 memcpy(cmd->cmd_buf, cbw->CDB, cmd_len);
1229
1230 cmd->bot_tag = cbw->Tag;
1231
1232 tv_nexus = tpg->tpg_nexus;
1233 if (!tv_nexus) {
1234 pr_err("Missing nexus, ignoring command\n");
1235 goto err;
1236 }
1237
1238 cmd->prio_attr = TCM_SIMPLE_TAG;
1239 se_cmd = &cmd->se_cmd;
1240 cmd->unpacked_lun = cbw->Lun;
1241 cmd->is_read = cbw->Flags & US_BULK_FLAG_IN ? 1 : 0;
1242 cmd->data_len = le32_to_cpu(cbw->DataTransferLength);
1243 cmd->se_cmd.tag = le32_to_cpu(cmd->bot_tag);
1244
1245 INIT_WORK(&cmd->work, bot_cmd_work);
1246 ret = queue_work(tpg->workqueue, &cmd->work);
1247 if (ret < 0)
1248 goto err;
1249
1250 return 0;
1251err:
1252 kfree(cmd);
1253 return -EINVAL;
1254}
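
For reference, the 31-byte CBW validated above (hence the "len != 31" check) is laid out in <linux/usb/storage.h> as follows (field comments added here):

struct bulk_cb_wrap {
	__le32	Signature;		/* 'USBC' (US_BULK_CB_SIGN) */
	__u32	Tag;			/* echoed back in the CSW */
	__le32	DataTransferLength;	/* expected size of the data stage */
	__u8	Flags;			/* US_BULK_FLAG_IN set for reads */
	__u8	Lun;			/* target LUN */
	__u8	Length;			/* valid CDB bytes, 1..16 */
	__u8	CDB[16];
} __packed;
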
1255
1256/* Start fabric.c code */
1257
1258static int usbg_check_true(struct se_portal_group *se_tpg)
1259{
1260 return 1;
1261}
1262
1263static int usbg_check_false(struct se_portal_group *se_tpg)
1264{
1265 return 0;
1266}
1267
1268static char *usbg_get_fabric_name(void)
1269{
1270 return "usb_gadget";
1271}
1272
1273static char *usbg_get_fabric_wwn(struct se_portal_group *se_tpg)
1274{
1275 struct usbg_tpg *tpg = container_of(se_tpg,
1276 struct usbg_tpg, se_tpg);
1277 struct usbg_tport *tport = tpg->tport;
1278
1279 return &tport->tport_name[0];
1280}
1281
1282static u16 usbg_get_tag(struct se_portal_group *se_tpg)
1283{
1284 struct usbg_tpg *tpg = container_of(se_tpg,
1285 struct usbg_tpg, se_tpg);
1286 return tpg->tport_tpgt;
1287}
1288
1289static u32 usbg_tpg_get_inst_index(struct se_portal_group *se_tpg)
1290{
1291 return 1;
1292}
1293
1294static void usbg_cmd_release(struct kref *ref)
1295{
1296 struct usbg_cmd *cmd = container_of(ref, struct usbg_cmd,
1297 ref);
1298
1299 transport_generic_free_cmd(&cmd->se_cmd, 0);
1300}
1301
1302static void usbg_release_cmd(struct se_cmd *se_cmd)
1303{
1304 struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
1305 se_cmd);
1306 kfree(cmd->data_buf);
1307 kfree(cmd);
1308 return;
1309}
1310
1311static int usbg_shutdown_session(struct se_session *se_sess)
1312{
1313 return 0;
1314}
1315
1316static void usbg_close_session(struct se_session *se_sess)
1317{
1318 return;
1319}
1320
1321static u32 usbg_sess_get_index(struct se_session *se_sess)
1322{
1323 return 0;
1324}
1325
1326/*
1327 * XXX Error recovery: return != 0 if we expect writes. Dunno when that could be
1328 */
1329static int usbg_write_pending_status(struct se_cmd *se_cmd)
1330{
1331 return 0;
1332}
1333
1334static void usbg_set_default_node_attrs(struct se_node_acl *nacl)
1335{
1336 return;
1337}
1338
1339static int usbg_get_cmd_state(struct se_cmd *se_cmd)
1340{
1341 return 0;
1342}
1343
1344static void usbg_queue_tm_rsp(struct se_cmd *se_cmd)
1345{
1346}
1347
1348static void usbg_aborted_task(struct se_cmd *se_cmd)
1349{
1350 return;
1351}
1352
1353static const char *usbg_check_wwn(const char *name)
1354{
1355 const char *n;
1356 unsigned int len;
1357
1358 n = strstr(name, "naa.");
1359 if (!n)
1360 return NULL;
1361 n += 4;
1362 len = strlen(n);
1363 if (len == 0 || len > USBG_NAMELEN - 1)
1364 return NULL;
1365 return n;
1366}
1367
1368static int usbg_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
1369{
1370 if (!usbg_check_wwn(name))
1371 return -EINVAL;
1372 return 0;
1373}
1374
1375struct usbg_tpg *the_only_tpg_I_currently_have;
1376
1377static struct se_portal_group *usbg_make_tpg(
1378 struct se_wwn *wwn,
1379 struct config_group *group,
1380 const char *name)
1381{
1382 struct usbg_tport *tport = container_of(wwn, struct usbg_tport,
1383 tport_wwn);
1384 struct usbg_tpg *tpg;
1385 unsigned long tpgt;
1386 int ret;
1387
1388 if (strstr(name, "tpgt_") != name)
1389 return ERR_PTR(-EINVAL);
1390 if (kstrtoul(name + 5, 0, &tpgt) || tpgt > UINT_MAX)
1391 return ERR_PTR(-EINVAL);
1392 if (the_only_tpg_I_currently_have) {
1393 pr_err("Until the gadget framework can handle multiple\n");
1394 pr_err("gadgets, you can't do this here.\n");
1395 return ERR_PTR(-EBUSY);
1396 }
1397
1398 tpg = kzalloc(sizeof(struct usbg_tpg), GFP_KERNEL);
1399 if (!tpg)
1400 return ERR_PTR(-ENOMEM);
1401 mutex_init(&tpg->tpg_mutex);
1402 atomic_set(&tpg->tpg_port_count, 0);
1403 tpg->workqueue = alloc_workqueue("tcm_usb_gadget", 0, 1);
1404 if (!tpg->workqueue) {
1405 kfree(tpg);
1406 return NULL;
1407 }
1408
1409 tpg->tport = tport;
1410 tpg->tport_tpgt = tpgt;
1411
1412 /*
1413 * SPC doesn't assign a protocol identifier for USB-SCSI, so we
1414 * pretend to be SAS..
1415 */
1416 ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SAS);
1417 if (ret < 0) {
1418 destroy_workqueue(tpg->workqueue);
1419 kfree(tpg);
1420 return NULL;
1421 }
1422 the_only_tpg_I_currently_have = tpg;
1423 return &tpg->se_tpg;
1424}
1425
1426static void usbg_drop_tpg(struct se_portal_group *se_tpg)
1427{
1428 struct usbg_tpg *tpg = container_of(se_tpg,
1429 struct usbg_tpg, se_tpg);
1430
1431 core_tpg_deregister(se_tpg);
1432 destroy_workqueue(tpg->workqueue);
1433 kfree(tpg);
1434 the_only_tpg_I_currently_have = NULL;
1435}
1436
1437static struct se_wwn *usbg_make_tport(
1438 struct target_fabric_configfs *tf,
1439 struct config_group *group,
1440 const char *name)
1441{
1442 struct usbg_tport *tport;
1443 const char *wnn_name;
1444 u64 wwpn = 0;
1445
1446 wnn_name = usbg_check_wwn(name);
1447 if (!wnn_name)
1448 return ERR_PTR(-EINVAL);
1449
1450 tport = kzalloc(sizeof(struct usbg_tport), GFP_KERNEL);
1451 if (!(tport))
1452 return ERR_PTR(-ENOMEM);
1453 tport->tport_wwpn = wwpn;
1454 snprintf(tport->tport_name, sizeof(tport->tport_name), "%s", wnn_name);
1455 return &tport->tport_wwn;
1456}
1457
1458static void usbg_drop_tport(struct se_wwn *wwn)
1459{
1460 struct usbg_tport *tport = container_of(wwn,
1461 struct usbg_tport, tport_wwn);
1462 kfree(tport);
1463}
1464
1465/*
1466 * If somebody feels like dropping the version property, go ahead.
1467 */
1468static ssize_t usbg_wwn_version_show(struct config_item *item, char *page)
1469{
1470 return sprintf(page, "usb-gadget fabric module\n");
1471}
1472
1473CONFIGFS_ATTR_RO(usbg_wwn_, version);
1474
1475static struct configfs_attribute *usbg_wwn_attrs[] = {
1476 &usbg_wwn_attr_version,
1477 NULL,
1478};
1479
1480static ssize_t tcm_usbg_tpg_enable_show(struct config_item *item, char *page)
1481{
1482 struct se_portal_group *se_tpg = to_tpg(item);
1483 struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
1484
1485 return snprintf(page, PAGE_SIZE, "%u\n", tpg->gadget_connect);
1486}
1487
1488static int usbg_attach(struct usbg_tpg *);
1489static void usbg_detach(struct usbg_tpg *);
1490
1491static ssize_t tcm_usbg_tpg_enable_store(struct config_item *item,
1492 const char *page, size_t count)
1493{
1494 struct se_portal_group *se_tpg = to_tpg(item);
1495 struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
1496 unsigned long op;
1497 ssize_t ret;
1498
1499 ret = kstrtoul(page, 0, &op);
1500 if (ret < 0)
1501 return -EINVAL;
1502 if (op > 1)
1503 return -EINVAL;
1504
1505 if (op && tpg->gadget_connect)
1506 goto out;
1507 if (!op && !tpg->gadget_connect)
1508 goto out;
1509
1510 if (op) {
1511 ret = usbg_attach(tpg);
1512 if (ret)
1513 goto out;
1514 } else {
1515 usbg_detach(tpg);
1516 }
1517 tpg->gadget_connect = op;
1518out:
1519 return count;
1520}
1521
1522static ssize_t tcm_usbg_tpg_nexus_show(struct config_item *item, char *page)
1523{
1524 struct se_portal_group *se_tpg = to_tpg(item);
1525 struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
1526 struct tcm_usbg_nexus *tv_nexus;
1527 ssize_t ret;
1528
1529 mutex_lock(&tpg->tpg_mutex);
1530 tv_nexus = tpg->tpg_nexus;
1531 if (!tv_nexus) {
1532 ret = -ENODEV;
1533 goto out;
1534 }
1535 ret = snprintf(page, PAGE_SIZE, "%s\n",
1536 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1537out:
1538 mutex_unlock(&tpg->tpg_mutex);
1539 return ret;
1540}
1541
1542static int tcm_usbg_make_nexus(struct usbg_tpg *tpg, char *name)
1543{
1544 struct se_portal_group *se_tpg;
1545 struct tcm_usbg_nexus *tv_nexus;
1546 int ret;
1547
1548 mutex_lock(&tpg->tpg_mutex);
1549 if (tpg->tpg_nexus) {
1550 ret = -EEXIST;
1551 pr_debug("tpg->tpg_nexus already exists\n");
1552 goto err_unlock;
1553 }
1554 se_tpg = &tpg->se_tpg;
1555
1556 ret = -ENOMEM;
1557 tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
1558 if (!tv_nexus)
1559 goto err_unlock;
1560 tv_nexus->tvn_se_sess = transport_init_session(TARGET_PROT_NORMAL);
1561 if (IS_ERR(tv_nexus->tvn_se_sess))
1562 goto err_free;
1563
1564 /*
1565 * Since we are running in 'demo mode' this call will generate a
1566 * struct se_node_acl for the usb-gadget struct se_portal_group with
1567 * the SCSI Initiator port name of the passed configfs group 'name'.
1568 */
1569 tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
1570 se_tpg, name);
1571 if (!tv_nexus->tvn_se_sess->se_node_acl) {
1572 pr_debug("core_tpg_check_initiator_node_acl() failed"
1573 " for %s\n", name);
1574 goto err_session;
1575 }
1576 /*
1577 * Now register the usb-gadget virtual I_T Nexus as active.
1578 */
1579 transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1580 tv_nexus->tvn_se_sess, tv_nexus);
1581 tpg->tpg_nexus = tv_nexus;
1582 mutex_unlock(&tpg->tpg_mutex);
1583 return 0;
1584
1585err_session:
1586 transport_free_session(tv_nexus->tvn_se_sess);
1587err_free:
1588 kfree(tv_nexus);
1589err_unlock:
1590 mutex_unlock(&tpg->tpg_mutex);
1591 return ret;
1592}
1593
1594static int tcm_usbg_drop_nexus(struct usbg_tpg *tpg)
1595{
1596 struct se_session *se_sess;
1597 struct tcm_usbg_nexus *tv_nexus;
1598 int ret = -ENODEV;
1599
1600 mutex_lock(&tpg->tpg_mutex);
1601 tv_nexus = tpg->tpg_nexus;
1602 if (!tv_nexus)
1603 goto out;
1604
1605 se_sess = tv_nexus->tvn_se_sess;
1606 if (!se_sess)
1607 goto out;
1608
1609 if (atomic_read(&tpg->tpg_port_count)) {
1610 ret = -EPERM;
1611 pr_err("Unable to remove Host I_T Nexus with"
1612 " active TPG port count: %d\n",
1613 atomic_read(&tpg->tpg_port_count));
1614 goto out;
1615 }
1616
1617 pr_debug("Removing I_T Nexus to Initiator Port: %s\n",
1618 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1619 /*
1620 * Release the SCSI I_T Nexus to the emulated usb-gadget Target Port
1621 */
1622 transport_deregister_session(tv_nexus->tvn_se_sess);
1623 tpg->tpg_nexus = NULL;
1624
1625 kfree(tv_nexus);
1626 ret = 0;
1627out:
1628 mutex_unlock(&tpg->tpg_mutex);
1629 return ret;
1630}
1631
1632static ssize_t tcm_usbg_tpg_nexus_store(struct config_item *item,
1633 const char *page, size_t count)
1634{
1635 struct se_portal_group *se_tpg = to_tpg(item);
1636 struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
1637 unsigned char i_port[USBG_NAMELEN], *ptr;
1638 int ret;
1639
1640 if (!strncmp(page, "NULL", 4)) {
1641 ret = tcm_usbg_drop_nexus(tpg);
1642 return (!ret) ? count : ret;
1643 }
1644 if (strlen(page) >= USBG_NAMELEN) {
1645 pr_err("Emulated NAA SAS Address: %s, exceeds"
1646 " max: %d\n", page, USBG_NAMELEN);
1647 return -EINVAL;
1648 }
1649 snprintf(i_port, USBG_NAMELEN, "%s", page);
1650
1651 ptr = strstr(i_port, "naa.");
1652 if (!ptr) {
1653 pr_err("Missing 'naa.' prefix\n");
1654 return -EINVAL;
1655 }
1656
1657 if (i_port[strlen(i_port) - 1] == '\n')
1658 i_port[strlen(i_port) - 1] = '\0';
1659
1660 ret = tcm_usbg_make_nexus(tpg, &i_port[4]);
1661 if (ret < 0)
1662 return ret;
1663 return count;
1664}
1665
1666CONFIGFS_ATTR(tcm_usbg_tpg_, enable);
1667CONFIGFS_ATTR(tcm_usbg_tpg_, nexus);
1668
1669static struct configfs_attribute *usbg_base_attrs[] = {
1670 &tcm_usbg_tpg_attr_enable,
1671 &tcm_usbg_tpg_attr_nexus,
1672 NULL,
1673};
1674
1675static int usbg_port_link(struct se_portal_group *se_tpg, struct se_lun *lun)
1676{
1677 struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
1678
1679 atomic_inc(&tpg->tpg_port_count);
1680 smp_mb__after_atomic();
1681 return 0;
1682}
1683
1684static void usbg_port_unlink(struct se_portal_group *se_tpg,
1685 struct se_lun *se_lun)
1686{
1687 struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
1688
1689 atomic_dec(&tpg->tpg_port_count);
1690 smp_mb__after_atomic();
1691}
1692
1693static int usbg_check_stop_free(struct se_cmd *se_cmd)
1694{
1695 struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
1696 se_cmd);
1697
1698 kref_put(&cmd->ref, usbg_cmd_release);
1699 return 1;
1700}
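
Taken together with the kref_init()/kref_get() pair in the submit paths, the command lifetime works out roughly like this (a sketch of the existing flow, not new code):

	kref_init(&cmd->ref);			/* ref #1: USB/gadget side */
	kref_get(&cmd->ref);			/* ref #2: target core side */

	usbg_cleanup_cmd(cmd);			/* USB side done: puts ref #1 */
	kref_put(&cmd->ref, usbg_cmd_release);	/* check_stop_free: puts ref #2 */

	/*
	 * usbg_cmd_release() runs only once both references are gone and
	 * frees the se_cmd via transport_generic_free_cmd().
	 */
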
1701
1702static const struct target_core_fabric_ops usbg_ops = {
1703 .module = THIS_MODULE,
1704 .name = "usb_gadget",
1705 .get_fabric_name = usbg_get_fabric_name,
1706 .tpg_get_wwn = usbg_get_fabric_wwn,
1707 .tpg_get_tag = usbg_get_tag,
1708 .tpg_check_demo_mode = usbg_check_true,
1709 .tpg_check_demo_mode_cache = usbg_check_false,
1710 .tpg_check_demo_mode_write_protect = usbg_check_false,
1711 .tpg_check_prod_mode_write_protect = usbg_check_false,
1712 .tpg_get_inst_index = usbg_tpg_get_inst_index,
1713 .release_cmd = usbg_release_cmd,
1714 .shutdown_session = usbg_shutdown_session,
1715 .close_session = usbg_close_session,
1716 .sess_get_index = usbg_sess_get_index,
1717 .sess_get_initiator_sid = NULL,
1718 .write_pending = usbg_send_write_request,
1719 .write_pending_status = usbg_write_pending_status,
1720 .set_default_node_attributes = usbg_set_default_node_attrs,
1721 .get_cmd_state = usbg_get_cmd_state,
1722 .queue_data_in = usbg_send_read_response,
1723 .queue_status = usbg_send_status_response,
1724 .queue_tm_rsp = usbg_queue_tm_rsp,
1725 .aborted_task = usbg_aborted_task,
1726 .check_stop_free = usbg_check_stop_free,
1727
1728 .fabric_make_wwn = usbg_make_tport,
1729 .fabric_drop_wwn = usbg_drop_tport,
1730 .fabric_make_tpg = usbg_make_tpg,
1731 .fabric_drop_tpg = usbg_drop_tpg,
1732 .fabric_post_link = usbg_port_link,
1733 .fabric_pre_unlink = usbg_port_unlink,
1734 .fabric_init_nodeacl = usbg_init_nodeacl,
1735
1736 .tfc_wwn_attrs = usbg_wwn_attrs,
1737 .tfc_tpg_base_attrs = usbg_base_attrs,
1738};
1739
1740/* Start gadget.c code */
1741
1742static struct usb_interface_descriptor bot_intf_desc = {
1743 .bLength = sizeof(bot_intf_desc),
1744 .bDescriptorType = USB_DT_INTERFACE,
1745 .bNumEndpoints = 2,
1746 .bAlternateSetting = USB_G_ALT_INT_BBB,
1747 .bInterfaceClass = USB_CLASS_MASS_STORAGE,
1748 .bInterfaceSubClass = USB_SC_SCSI,
1749 .bInterfaceProtocol = USB_PR_BULK,
1750};
1751
1752static struct usb_interface_descriptor uasp_intf_desc = {
1753 .bLength = sizeof(uasp_intf_desc),
1754 .bDescriptorType = USB_DT_INTERFACE,
1755 .bNumEndpoints = 4,
1756 .bAlternateSetting = USB_G_ALT_INT_UAS,
1757 .bInterfaceClass = USB_CLASS_MASS_STORAGE,
1758 .bInterfaceSubClass = USB_SC_SCSI,
1759 .bInterfaceProtocol = USB_PR_UAS,
1760};
1761
1762static struct usb_endpoint_descriptor uasp_bi_desc = {
1763 .bLength = USB_DT_ENDPOINT_SIZE,
1764 .bDescriptorType = USB_DT_ENDPOINT,
1765 .bEndpointAddress = USB_DIR_IN,
1766 .bmAttributes = USB_ENDPOINT_XFER_BULK,
1767 .wMaxPacketSize = cpu_to_le16(512),
1768};
1769
1770static struct usb_endpoint_descriptor uasp_fs_bi_desc = {
1771 .bLength = USB_DT_ENDPOINT_SIZE,
1772 .bDescriptorType = USB_DT_ENDPOINT,
1773 .bEndpointAddress = USB_DIR_IN,
1774 .bmAttributes = USB_ENDPOINT_XFER_BULK,
1775};
1776
1777static struct usb_pipe_usage_descriptor uasp_bi_pipe_desc = {
1778 .bLength = sizeof(uasp_bi_pipe_desc),
1779 .bDescriptorType = USB_DT_PIPE_USAGE,
1780 .bPipeID = DATA_IN_PIPE_ID,
1781};
1782
1783static struct usb_endpoint_descriptor uasp_ss_bi_desc = {
1784 .bLength = USB_DT_ENDPOINT_SIZE,
1785 .bDescriptorType = USB_DT_ENDPOINT,
1786 .bEndpointAddress = USB_DIR_IN,
1787 .bmAttributes = USB_ENDPOINT_XFER_BULK,
1788 .wMaxPacketSize = cpu_to_le16(1024),
1789};
1790
1791static struct usb_ss_ep_comp_descriptor uasp_bi_ep_comp_desc = {
1792 .bLength = sizeof(uasp_bi_ep_comp_desc),
1793 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
1794 .bMaxBurst = 0,
1795 .bmAttributes = UASP_SS_EP_COMP_LOG_STREAMS,
1796 .wBytesPerInterval = 0,
1797};
1798
1799static struct usb_ss_ep_comp_descriptor bot_bi_ep_comp_desc = {
1800 .bLength = sizeof(bot_bi_ep_comp_desc),
1801 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
1802 .bMaxBurst = 0,
1803};
1804
1805static struct usb_endpoint_descriptor uasp_bo_desc = {
1806 .bLength = USB_DT_ENDPOINT_SIZE,
1807 .bDescriptorType = USB_DT_ENDPOINT,
1808 .bEndpointAddress = USB_DIR_OUT,
1809 .bmAttributes = USB_ENDPOINT_XFER_BULK,
1810 .wMaxPacketSize = cpu_to_le16(512),
1811};
1812
1813static struct usb_endpoint_descriptor uasp_fs_bo_desc = {
1814 .bLength = USB_DT_ENDPOINT_SIZE,
1815 .bDescriptorType = USB_DT_ENDPOINT,
1816 .bEndpointAddress = USB_DIR_OUT,
1817 .bmAttributes = USB_ENDPOINT_XFER_BULK,
1818};
1819
1820static struct usb_pipe_usage_descriptor uasp_bo_pipe_desc = {
1821 .bLength = sizeof(uasp_bo_pipe_desc),
1822 .bDescriptorType = USB_DT_PIPE_USAGE,
1823 .bPipeID = DATA_OUT_PIPE_ID,
1824};
1825
1826static struct usb_endpoint_descriptor uasp_ss_bo_desc = {
1827 .bLength = USB_DT_ENDPOINT_SIZE,
1828 .bDescriptorType = USB_DT_ENDPOINT,
1829 .bEndpointAddress = USB_DIR_OUT,
1830 .bmAttributes = USB_ENDPOINT_XFER_BULK,
1831 .wMaxPacketSize = cpu_to_le16(0x400),
1832};
1833
1834static struct usb_ss_ep_comp_descriptor uasp_bo_ep_comp_desc = {
1835 .bLength = sizeof(uasp_bo_ep_comp_desc),
1836 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
1837 .bmAttributes = UASP_SS_EP_COMP_LOG_STREAMS,
1838};
1839
1840static struct usb_ss_ep_comp_descriptor bot_bo_ep_comp_desc = {
1841 .bLength = sizeof(bot_bo_ep_comp_desc),
1842 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
1843};
1844
1845static struct usb_endpoint_descriptor uasp_status_desc = {
1846 .bLength = USB_DT_ENDPOINT_SIZE,
1847 .bDescriptorType = USB_DT_ENDPOINT,
1848 .bEndpointAddress = USB_DIR_IN,
1849 .bmAttributes = USB_ENDPOINT_XFER_BULK,
1850 .wMaxPacketSize = cpu_to_le16(512),
1851};
1852
1853static struct usb_endpoint_descriptor uasp_fs_status_desc = {
1854 .bLength = USB_DT_ENDPOINT_SIZE,
1855 .bDescriptorType = USB_DT_ENDPOINT,
1856 .bEndpointAddress = USB_DIR_IN,
1857 .bmAttributes = USB_ENDPOINT_XFER_BULK,
1858};
1859
1860static struct usb_pipe_usage_descriptor uasp_status_pipe_desc = {
1861 .bLength = sizeof(uasp_status_pipe_desc),
1862 .bDescriptorType = USB_DT_PIPE_USAGE,
1863 .bPipeID = STATUS_PIPE_ID,
1864};
1865
1866static struct usb_endpoint_descriptor uasp_ss_status_desc = {
1867 .bLength = USB_DT_ENDPOINT_SIZE,
1868 .bDescriptorType = USB_DT_ENDPOINT,
1869 .bEndpointAddress = USB_DIR_IN,
1870 .bmAttributes = USB_ENDPOINT_XFER_BULK,
1871 .wMaxPacketSize = cpu_to_le16(1024),
1872};
1873
1874static struct usb_ss_ep_comp_descriptor uasp_status_in_ep_comp_desc = {
1875 .bLength = sizeof(uasp_status_in_ep_comp_desc),
1876 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
1877 .bmAttributes = UASP_SS_EP_COMP_LOG_STREAMS,
1878};
1879
1880static struct usb_endpoint_descriptor uasp_cmd_desc = {
1881 .bLength = USB_DT_ENDPOINT_SIZE,
1882 .bDescriptorType = USB_DT_ENDPOINT,
1883 .bEndpointAddress = USB_DIR_OUT,
1884 .bmAttributes = USB_ENDPOINT_XFER_BULK,
1885 .wMaxPacketSize = cpu_to_le16(512),
1886};
1887
1888static struct usb_endpoint_descriptor uasp_fs_cmd_desc = {
1889 .bLength = USB_DT_ENDPOINT_SIZE,
1890 .bDescriptorType = USB_DT_ENDPOINT,
1891 .bEndpointAddress = USB_DIR_OUT,
1892 .bmAttributes = USB_ENDPOINT_XFER_BULK,
1893};
1894
1895static struct usb_pipe_usage_descriptor uasp_cmd_pipe_desc = {
1896 .bLength = sizeof(uasp_cmd_pipe_desc),
1897 .bDescriptorType = USB_DT_PIPE_USAGE,
1898 .bPipeID = CMD_PIPE_ID,
1899};
1900
1901static struct usb_endpoint_descriptor uasp_ss_cmd_desc = {
1902 .bLength = USB_DT_ENDPOINT_SIZE,
1903 .bDescriptorType = USB_DT_ENDPOINT,
1904 .bEndpointAddress = USB_DIR_OUT,
1905 .bmAttributes = USB_ENDPOINT_XFER_BULK,
1906 .wMaxPacketSize = cpu_to_le16(1024),
1907};
1908
1909static struct usb_ss_ep_comp_descriptor uasp_cmd_comp_desc = {
1910 .bLength = sizeof(uasp_cmd_comp_desc),
1911 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
1912};
1913
1914static struct usb_descriptor_header *uasp_fs_function_desc[] = {
1915 (struct usb_descriptor_header *) &bot_intf_desc,
1916 (struct usb_descriptor_header *) &uasp_fs_bi_desc,
1917 (struct usb_descriptor_header *) &uasp_fs_bo_desc,
1918
1919 (struct usb_descriptor_header *) &uasp_intf_desc,
1920 (struct usb_descriptor_header *) &uasp_fs_bi_desc,
1921 (struct usb_descriptor_header *) &uasp_bi_pipe_desc,
1922 (struct usb_descriptor_header *) &uasp_fs_bo_desc,
1923 (struct usb_descriptor_header *) &uasp_bo_pipe_desc,
1924 (struct usb_descriptor_header *) &uasp_fs_status_desc,
1925 (struct usb_descriptor_header *) &uasp_status_pipe_desc,
1926 (struct usb_descriptor_header *) &uasp_fs_cmd_desc,
1927 (struct usb_descriptor_header *) &uasp_cmd_pipe_desc,
1928 NULL,
1929};
1930
1931static struct usb_descriptor_header *uasp_hs_function_desc[] = {
1932 (struct usb_descriptor_header *) &bot_intf_desc,
1933 (struct usb_descriptor_header *) &uasp_bi_desc,
1934 (struct usb_descriptor_header *) &uasp_bo_desc,
1935
1936 (struct usb_descriptor_header *) &uasp_intf_desc,
1937 (struct usb_descriptor_header *) &uasp_bi_desc,
1938 (struct usb_descriptor_header *) &uasp_bi_pipe_desc,
1939 (struct usb_descriptor_header *) &uasp_bo_desc,
1940 (struct usb_descriptor_header *) &uasp_bo_pipe_desc,
1941 (struct usb_descriptor_header *) &uasp_status_desc,
1942 (struct usb_descriptor_header *) &uasp_status_pipe_desc,
1943 (struct usb_descriptor_header *) &uasp_cmd_desc,
1944 (struct usb_descriptor_header *) &uasp_cmd_pipe_desc,
1945 NULL,
1946};
1947
1948static struct usb_descriptor_header *uasp_ss_function_desc[] = {
1949 (struct usb_descriptor_header *) &bot_intf_desc,
1950 (struct usb_descriptor_header *) &uasp_ss_bi_desc,
1951 (struct usb_descriptor_header *) &bot_bi_ep_comp_desc,
1952 (struct usb_descriptor_header *) &uasp_ss_bo_desc,
1953 (struct usb_descriptor_header *) &bot_bo_ep_comp_desc,
1954
1955 (struct usb_descriptor_header *) &uasp_intf_desc,
1956 (struct usb_descriptor_header *) &uasp_ss_bi_desc,
1957 (struct usb_descriptor_header *) &uasp_bi_ep_comp_desc,
1958 (struct usb_descriptor_header *) &uasp_bi_pipe_desc,
1959 (struct usb_descriptor_header *) &uasp_ss_bo_desc,
1960 (struct usb_descriptor_header *) &uasp_bo_ep_comp_desc,
1961 (struct usb_descriptor_header *) &uasp_bo_pipe_desc,
1962 (struct usb_descriptor_header *) &uasp_ss_status_desc,
1963 (struct usb_descriptor_header *) &uasp_status_in_ep_comp_desc,
1964 (struct usb_descriptor_header *) &uasp_status_pipe_desc,
1965 (struct usb_descriptor_header *) &uasp_ss_cmd_desc,
1966 (struct usb_descriptor_header *) &uasp_cmd_comp_desc,
1967 (struct usb_descriptor_header *) &uasp_cmd_pipe_desc,
1968 NULL,
1969};
1970
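The three uasp_*_function_desc arrays above are per-speed views of the same interface layout; the composite core serves whichever set matches the speed negotiated at enumeration. As a minimal sketch of how a function hands all three to the core, mirroring the usb_assign_descriptors() call made by the (removed) usbg_bind() further down in this patch; the bind function name here is hypothetical:

/* Sketch: register full-, high- and super-speed descriptor sets.
 * The composite core later picks the array matching the link speed. */
static int example_tcm_bind(struct usb_configuration *c,
			    struct usb_function *f)
{
	/* all three arrays must describe the same endpoints, same order */
	return usb_assign_descriptors(f, uasp_fs_function_desc,
				      uasp_hs_function_desc,
				      uasp_ss_function_desc);
}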
1971#define UAS_VENDOR_ID 0x0525 /* NetChip */ 28#define UAS_VENDOR_ID 0x0525 /* NetChip */
1972#define UAS_PRODUCT_ID 0xa4a5 /* Linux-USB File-backed Storage Gadget */ 29#define UAS_PRODUCT_ID 0xa4a5 /* Linux-USB File-backed Storage Gadget */
1973 30
@@ -1981,13 +38,13 @@ static struct usb_device_descriptor usbg_device_desc = {
1981 .bNumConfigurations = 1, 38 .bNumConfigurations = 1,
1982}; 39};
1983 40
41#define USB_G_STR_CONFIG USB_GADGET_FIRST_AVAIL_IDX
42
1984static struct usb_string usbg_us_strings[] = { 43static struct usb_string usbg_us_strings[] = {
1985	[USB_GADGET_MANUFACTURER_IDX].s = "Target Manufacturer", 44	[USB_GADGET_MANUFACTURER_IDX].s = "Target Manufacturer",
1986 [USB_GADGET_PRODUCT_IDX].s = "Target Product", 45 [USB_GADGET_PRODUCT_IDX].s = "Target Product",
1987 [USB_GADGET_SERIAL_IDX].s = "000000000001", 46 [USB_GADGET_SERIAL_IDX].s = "000000000001",
1988 [USB_G_STR_CONFIG].s = "default config", 47 [USB_G_STR_CONFIG].s = "default config",
1989 [USB_G_STR_INT_UAS].s = "USB Attached SCSI",
1990 [USB_G_STR_INT_BBB].s = "Bulk Only Transport",
1991 { }, 48 { },
1992}; 49};
1993 50
@@ -2001,184 +58,42 @@ static struct usb_gadget_strings *usbg_strings[] = {
2001 NULL, 58 NULL,
2002}; 59};
2003 60
2004static int guas_unbind(struct usb_composite_dev *cdev) 61static struct usb_function_instance *fi_tcm;
2005{ 62static struct usb_function *f_tcm;
2006 return 0;
2007}
2008
2009static struct usb_configuration usbg_config_driver = {
2010 .label = "Linux Target",
2011 .bConfigurationValue = 1,
2012 .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
2013};
2014 63
2015static int usbg_bind(struct usb_configuration *c, struct usb_function *f) 64static int guas_unbind(struct usb_composite_dev *cdev)
2016{ 65{
2017 struct f_uas *fu = to_f_uas(f); 66 if (!IS_ERR_OR_NULL(f_tcm))
2018 struct usb_gadget *gadget = c->cdev->gadget; 67 usb_put_function(f_tcm);
2019 struct usb_ep *ep;
2020 int iface;
2021 int ret;
2022
2023 iface = usb_interface_id(c, f);
2024 if (iface < 0)
2025 return iface;
2026
2027 bot_intf_desc.bInterfaceNumber = iface;
2028 uasp_intf_desc.bInterfaceNumber = iface;
2029 fu->iface = iface;
2030 ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_bi_desc,
2031 &uasp_bi_ep_comp_desc);
2032 if (!ep)
2033 goto ep_fail;
2034 fu->ep_in = ep;
2035
2036 ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_bo_desc,
2037 &uasp_bo_ep_comp_desc);
2038 if (!ep)
2039 goto ep_fail;
2040 fu->ep_out = ep;
2041
2042 ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_status_desc,
2043 &uasp_status_in_ep_comp_desc);
2044 if (!ep)
2045 goto ep_fail;
2046 fu->ep_status = ep;
2047
2048 ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_cmd_desc,
2049 &uasp_cmd_comp_desc);
2050 if (!ep)
2051 goto ep_fail;
2052 fu->ep_cmd = ep;
2053
2054 /* Assume endpoint addresses are the same for both speeds */
2055 uasp_bi_desc.bEndpointAddress = uasp_ss_bi_desc.bEndpointAddress;
2056 uasp_bo_desc.bEndpointAddress = uasp_ss_bo_desc.bEndpointAddress;
2057 uasp_status_desc.bEndpointAddress =
2058 uasp_ss_status_desc.bEndpointAddress;
2059 uasp_cmd_desc.bEndpointAddress = uasp_ss_cmd_desc.bEndpointAddress;
2060
2061 uasp_fs_bi_desc.bEndpointAddress = uasp_ss_bi_desc.bEndpointAddress;
2062 uasp_fs_bo_desc.bEndpointAddress = uasp_ss_bo_desc.bEndpointAddress;
2063 uasp_fs_status_desc.bEndpointAddress =
2064 uasp_ss_status_desc.bEndpointAddress;
2065 uasp_fs_cmd_desc.bEndpointAddress = uasp_ss_cmd_desc.bEndpointAddress;
2066
2067 ret = usb_assign_descriptors(f, uasp_fs_function_desc,
2068 uasp_hs_function_desc, uasp_ss_function_desc);
2069 if (ret)
2070 goto ep_fail;
2071 68
2072 return 0; 69 return 0;
2073ep_fail:
2074 pr_err("Can't claim all required eps\n");
2075 return -ENOTSUPP;
2076} 70}
2077 71
2078static void usbg_unbind(struct usb_configuration *c, struct usb_function *f) 72static int tcm_do_config(struct usb_configuration *c)
2079{ 73{
2080 struct f_uas *fu = to_f_uas(f); 74 int status;
2081 75
2082 usb_free_all_descriptors(f); 76 f_tcm = usb_get_function(fi_tcm);
2083 kfree(fu); 77 if (IS_ERR(f_tcm))
2084} 78 return PTR_ERR(f_tcm);
2085
2086struct guas_setup_wq {
2087 struct work_struct work;
2088 struct f_uas *fu;
2089 unsigned int alt;
2090};
2091
2092static void usbg_delayed_set_alt(struct work_struct *wq)
2093{
2094 struct guas_setup_wq *work = container_of(wq, struct guas_setup_wq,
2095 work);
2096 struct f_uas *fu = work->fu;
2097 int alt = work->alt;
2098
2099 kfree(work);
2100
2101 if (fu->flags & USBG_IS_BOT)
2102 bot_cleanup_old_alt(fu);
2103 if (fu->flags & USBG_IS_UAS)
2104 uasp_cleanup_old_alt(fu);
2105
2106 if (alt == USB_G_ALT_INT_BBB)
2107 bot_set_alt(fu);
2108 else if (alt == USB_G_ALT_INT_UAS)
2109 uasp_set_alt(fu);
2110 usb_composite_setup_continue(fu->function.config->cdev);
2111}
2112
2113static int usbg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
2114{
2115 struct f_uas *fu = to_f_uas(f);
2116
2117 if ((alt == USB_G_ALT_INT_BBB) || (alt == USB_G_ALT_INT_UAS)) {
2118 struct guas_setup_wq *work;
2119 79
2120 work = kmalloc(sizeof(*work), GFP_ATOMIC); 80 status = usb_add_function(c, f_tcm);
2121 if (!work) 81 if (status < 0) {
2122 return -ENOMEM; 82 usb_put_function(f_tcm);
2123 INIT_WORK(&work->work, usbg_delayed_set_alt); 83 return status;
2124 work->fu = fu;
2125 work->alt = alt;
2126 schedule_work(&work->work);
2127 return USB_GADGET_DELAYED_STATUS;
2128 } 84 }
2129 return -EOPNOTSUPP;
2130}
2131
2132static void usbg_disable(struct usb_function *f)
2133{
2134 struct f_uas *fu = to_f_uas(f);
2135
2136 if (fu->flags & USBG_IS_UAS)
2137 uasp_cleanup_old_alt(fu);
2138 else if (fu->flags & USBG_IS_BOT)
2139 bot_cleanup_old_alt(fu);
2140 fu->flags = 0;
2141}
2142
2143static int usbg_setup(struct usb_function *f,
2144 const struct usb_ctrlrequest *ctrl)
2145{
2146 struct f_uas *fu = to_f_uas(f);
2147
2148 if (!(fu->flags & USBG_IS_BOT))
2149 return -EOPNOTSUPP;
2150 85
2151 return usbg_bot_setup(f, ctrl); 86 return 0;
2152} 87}
2153 88
2154static int usbg_cfg_bind(struct usb_configuration *c) 89static struct usb_configuration usbg_config_driver = {
2155{ 90 .label = "Linux Target",
2156 struct f_uas *fu; 91 .bConfigurationValue = 1,
2157 int ret; 92 .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
2158 93};
2159 fu = kzalloc(sizeof(*fu), GFP_KERNEL);
2160 if (!fu)
2161 return -ENOMEM;
2162 fu->function.name = "Target Function";
2163 fu->function.bind = usbg_bind;
2164 fu->function.unbind = usbg_unbind;
2165 fu->function.set_alt = usbg_set_alt;
2166 fu->function.setup = usbg_setup;
2167 fu->function.disable = usbg_disable;
2168 fu->tpg = the_only_tpg_I_currently_have;
2169
2170 bot_intf_desc.iInterface = usbg_us_strings[USB_G_STR_INT_BBB].id;
2171 uasp_intf_desc.iInterface = usbg_us_strings[USB_G_STR_INT_UAS].id;
2172
2173 ret = usb_add_function(c, &fu->function);
2174 if (ret)
2175 goto err;
2176 94
2177 return 0; 95static int usbg_attach(struct usb_function_instance *f);
2178err: 96static void usbg_detach(struct usb_function_instance *f);
2179 kfree(fu);
2180 return ret;
2181}
2182 97
2183static int usb_target_bind(struct usb_composite_dev *cdev) 98static int usb_target_bind(struct usb_composite_dev *cdev)
2184{ 99{
@@ -2196,8 +111,7 @@ static int usb_target_bind(struct usb_composite_dev *cdev)
2196 usbg_config_driver.iConfiguration = 111 usbg_config_driver.iConfiguration =
2197 usbg_us_strings[USB_G_STR_CONFIG].id; 112 usbg_us_strings[USB_G_STR_CONFIG].id;
2198 113
2199 ret = usb_add_config(cdev, &usbg_config_driver, 114 ret = usb_add_config(cdev, &usbg_config_driver, tcm_do_config);
2200 usbg_cfg_bind);
2201 if (ret) 115 if (ret)
2202 return ret; 116 return ret;
2203 usb_composite_overwrite_options(cdev, &coverwrite); 117 usb_composite_overwrite_options(cdev, &coverwrite);
@@ -2213,25 +127,44 @@ static struct usb_composite_driver usbg_driver = {
2213 .unbind = guas_unbind, 127 .unbind = guas_unbind,
2214}; 128};
2215 129
2216static int usbg_attach(struct usbg_tpg *tpg) 130static int usbg_attach(struct usb_function_instance *f)
2217{ 131{
2218 return usb_composite_probe(&usbg_driver); 132 return usb_composite_probe(&usbg_driver);
2219} 133}
2220 134
2221static void usbg_detach(struct usbg_tpg *tpg) 135static void usbg_detach(struct usb_function_instance *f)
2222{ 136{
2223 usb_composite_unregister(&usbg_driver); 137 usb_composite_unregister(&usbg_driver);
2224} 138}
2225 139
2226static int __init usb_target_gadget_init(void) 140static int __init usb_target_gadget_init(void)
2227{ 141{
2228 return target_register_template(&usbg_ops); 142 struct f_tcm_opts *tcm_opts;
143
144 fi_tcm = usb_get_function_instance("tcm");
145 if (IS_ERR(fi_tcm))
146 return PTR_ERR(fi_tcm);
147
148 tcm_opts = container_of(fi_tcm, struct f_tcm_opts, func_inst);
149 mutex_lock(&tcm_opts->dep_lock);
150 tcm_opts->tcm_register_callback = usbg_attach;
151 tcm_opts->tcm_unregister_callback = usbg_detach;
152 tcm_opts->dependent = THIS_MODULE;
153 tcm_opts->can_attach = true;
154 tcm_opts->has_dep = true;
155 mutex_unlock(&tcm_opts->dep_lock);
156
157 fi_tcm->set_inst_name(fi_tcm, "tcm-legacy");
158
159 return 0;
2229} 160}
2230module_init(usb_target_gadget_init); 161module_init(usb_target_gadget_init);
2231 162
2232static void __exit usb_target_gadget_exit(void) 163static void __exit usb_target_gadget_exit(void)
2233{ 164{
2234 target_unregister_template(&usbg_ops); 165 if (!IS_ERR_OR_NULL(fi_tcm))
166 usb_put_function_instance(fi_tcm);
167
2235} 168}
2236module_exit(usb_target_gadget_exit); 169module_exit(usb_target_gadget_exit);
2237 170
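Taken together, the rewrite above is the standard new-style function registration flow: the legacy gadget looks up a function instance by name at module init, materialises a usb_function from it per configuration, and drops both references on teardown. A condensed, hedged sketch of that flow (error paths trimmed to the essentials; the "tcm" instance name is the one used above):

/* Sketch of the function-registration pattern adopted above. */
static struct usb_function_instance *fi;
static struct usb_function *f;

static int example_do_config(struct usb_configuration *c)
{
	int ret;

	f = usb_get_function(fi);		/* one usb_function per config */
	if (IS_ERR(f))
		return PTR_ERR(f);
	ret = usb_add_function(c, f);
	if (ret)
		usb_put_function(f);		/* caller cleans up on failure */
	return ret;
}

static int __init example_init(void)
{
	fi = usb_get_function_instance("tcm");	/* may modprobe usb_f_tcm */
	return PTR_ERR_OR_ZERO(fi);
}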
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 7ae97e83f121..cab612b2ae76 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -1070,11 +1070,55 @@ out:
1070 return ret; 1070 return ret;
1071} 1071}
1072 1072
1073static int configfs_do_depend_item(struct dentry *subsys_dentry,
1074 struct config_item *target)
1075{
1076 struct configfs_dirent *p;
1077 int ret;
1078
1079 spin_lock(&configfs_dirent_lock);
1080 /* Scan the tree, return 0 if found */
1081 ret = configfs_depend_prep(subsys_dentry, target);
1082 if (ret)
1083 goto out_unlock_dirent_lock;
1084
1085 /*
1086 * We are sure that the item is not about to be removed by rmdir(), and
1087 * not in the middle of attachment by mkdir().
1088 */
1089 p = target->ci_dentry->d_fsdata;
1090 p->s_dependent_count += 1;
1091
1092out_unlock_dirent_lock:
1093 spin_unlock(&configfs_dirent_lock);
1094
1095 return ret;
1096}
1097
1098static inline struct configfs_dirent *
1099configfs_find_subsys_dentry(struct configfs_dirent *root_sd,
1100 struct config_item *subsys_item)
1101{
1102 struct configfs_dirent *p;
1103 struct configfs_dirent *ret = NULL;
1104
1105 list_for_each_entry(p, &root_sd->s_children, s_sibling) {
1106 if (p->s_type & CONFIGFS_DIR &&
1107 p->s_element == subsys_item) {
1108 ret = p;
1109 break;
1110 }
1111 }
1112
1113 return ret;
1114}
1115
1116
1073int configfs_depend_item(struct configfs_subsystem *subsys, 1117int configfs_depend_item(struct configfs_subsystem *subsys,
1074 struct config_item *target) 1118 struct config_item *target)
1075{ 1119{
1076 int ret; 1120 int ret;
1077 struct configfs_dirent *p, *root_sd, *subsys_sd = NULL; 1121 struct configfs_dirent *subsys_sd;
1078 struct config_item *s_item = &subsys->su_group.cg_item; 1122 struct config_item *s_item = &subsys->su_group.cg_item;
1079 struct dentry *root; 1123 struct dentry *root;
1080 1124
@@ -1093,39 +1137,15 @@ int configfs_depend_item(struct configfs_subsystem *subsys,
1093 */ 1137 */
1094 mutex_lock(&d_inode(root)->i_mutex); 1138 mutex_lock(&d_inode(root)->i_mutex);
1095 1139
1096 root_sd = root->d_fsdata; 1140 subsys_sd = configfs_find_subsys_dentry(root->d_fsdata, s_item);
1097
1098 list_for_each_entry(p, &root_sd->s_children, s_sibling) {
1099 if (p->s_type & CONFIGFS_DIR) {
1100 if (p->s_element == s_item) {
1101 subsys_sd = p;
1102 break;
1103 }
1104 }
1105 }
1106
1107 if (!subsys_sd) { 1141 if (!subsys_sd) {
1108 ret = -ENOENT; 1142 ret = -ENOENT;
1109 goto out_unlock_fs; 1143 goto out_unlock_fs;
1110 } 1144 }
1111 1145
1112 /* Ok, now we can trust subsys/s_item */ 1146 /* Ok, now we can trust subsys/s_item */
1147 ret = configfs_do_depend_item(subsys_sd->s_dentry, target);
1113 1148
1114 spin_lock(&configfs_dirent_lock);
1115 /* Scan the tree, return 0 if found */
1116 ret = configfs_depend_prep(subsys_sd->s_dentry, target);
1117 if (ret)
1118 goto out_unlock_dirent_lock;
1119
1120 /*
1121 * We are sure that the item is not about to be removed by rmdir(), and
1122 * not in the middle of attachment by mkdir().
1123 */
1124 p = target->ci_dentry->d_fsdata;
1125 p->s_dependent_count += 1;
1126
1127out_unlock_dirent_lock:
1128 spin_unlock(&configfs_dirent_lock);
1129out_unlock_fs: 1149out_unlock_fs:
1130 mutex_unlock(&d_inode(root)->i_mutex); 1150 mutex_unlock(&d_inode(root)->i_mutex);
1131 1151
@@ -1144,8 +1164,7 @@ EXPORT_SYMBOL(configfs_depend_item);
1144 * configfs_depend_item() because we know that the client driver is 1164 * configfs_depend_item() because we know that the client driver is
1145 * pinned, thus the subsystem is pinned, and therefore configfs is pinned. 1165 * pinned, thus the subsystem is pinned, and therefore configfs is pinned.
1146 */ 1166 */
1147void configfs_undepend_item(struct configfs_subsystem *subsys, 1167void configfs_undepend_item(struct config_item *target)
1148 struct config_item *target)
1149{ 1168{
1150 struct configfs_dirent *sd; 1169 struct configfs_dirent *sd;
1151 1170
@@ -1168,6 +1187,79 @@ void configfs_undepend_item(struct configfs_subsystem *subsys,
1168} 1187}
1169EXPORT_SYMBOL(configfs_undepend_item); 1188EXPORT_SYMBOL(configfs_undepend_item);
1170 1189
1190/*
1191 * caller_subsys is the caller's subsystem, not the target's. It is used to
1192 * decide whether we need to lock the root and validate the subsystem. When
1193 * we are in the same subsystem as our target there is no need for locking,
1194 * as we know the subsystem is valid and cannot be unregistered while this
1195 * function runs: we are called from a callback of one of its children and
1196 * VFS holds a lock on some inode. Otherwise we have to lock our root to
1197 * ensure that the target's subsystem is not unregistered during this function.
1198 */
1199int configfs_depend_item_unlocked(struct configfs_subsystem *caller_subsys,
1200 struct config_item *target)
1201{
1202 struct configfs_subsystem *target_subsys;
1203 struct config_group *root, *parent;
1204 struct configfs_dirent *subsys_sd;
1205 int ret = -ENOENT;
1206
1207 /* Disallow this function for configfs root */
1208 if (configfs_is_root(target))
1209 return -EINVAL;
1210
1211 parent = target->ci_group;
1212 /*
1213 * This may happen when someone is trying to depend on the root
1214 * directory of some subsystem
1215 */
1216 if (configfs_is_root(&parent->cg_item)) {
1217 target_subsys = to_configfs_subsystem(to_config_group(target));
1218 root = parent;
1219 } else {
1220 target_subsys = parent->cg_subsys;
1221		/* Find a configfs root as we may need it for locking */
1222 for (root = parent; !configfs_is_root(&root->cg_item);
1223 root = root->cg_item.ci_group)
1224 ;
1225 }
1226
1227 if (target_subsys != caller_subsys) {
1228 /*
1229 * We are in another configfs subsystem, so we have to do
1230 * additional locking to prevent the other subsystem from being
1231 * unregistered
1232 */
1233 mutex_lock(&d_inode(root->cg_item.ci_dentry)->i_mutex);
1234
1235 /*
1236 * As we are trying to depend on an item from another subsystem
1237 * we have to check whether that subsystem is still registered
1238 */
1239 subsys_sd = configfs_find_subsys_dentry(
1240 root->cg_item.ci_dentry->d_fsdata,
1241 &target_subsys->su_group.cg_item);
1242 if (!subsys_sd)
1243 goto out_root_unlock;
1244 } else {
1245 subsys_sd = target_subsys->su_group.cg_item.ci_dentry->d_fsdata;
1246 }
1247
1248 /* Now we can execute core of depend item */
1249 ret = configfs_do_depend_item(subsys_sd->s_dentry, target);
1250
1251 if (target_subsys != caller_subsys)
1252out_root_unlock:
1253 /*
1254 * We were called from a subsystem other than our target's, so we
1255 * took the root lock above; now it's time to release it
1256 */
1257 mutex_unlock(&d_inode(root->cg_item.ci_dentry)->i_mutex);
1258
1259 return ret;
1260}
1261EXPORT_SYMBOL(configfs_depend_item_unlocked);
1262
1171static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) 1263static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
1172{ 1264{
1173 int ret = 0; 1265 int ret = 0;
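The point of the s_dependent_count bump performed in configfs_do_depend_item() is to make removal fail while the pin is held: configfs' rmdir path refuses to delete a depended-upon item. A reduced, illustrative sketch of that check (not the literal rmdir code, but the contract it enforces, using the lock and field seen above):

/* Sketch: the rmdir() side of the depend/undepend contract. */
static int example_rmdir_check(struct configfs_dirent *sd)
{
	int ret = 0;

	spin_lock(&configfs_dirent_lock);
	if (sd->s_dependent_count)	/* pinned via configfs_depend_item() */
		ret = -EBUSY;		/* removal must wait for undepend */
	spin_unlock(&configfs_dirent_lock);
	return ret;
}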
diff --git a/fs/ocfs2/cluster/nodemanager.c b/fs/ocfs2/cluster/nodemanager.c
index 72afdca3cea7..ebe543894db0 100644
--- a/fs/ocfs2/cluster/nodemanager.c
+++ b/fs/ocfs2/cluster/nodemanager.c
@@ -757,7 +757,7 @@ int o2nm_depend_item(struct config_item *item)
757 757
758void o2nm_undepend_item(struct config_item *item) 758void o2nm_undepend_item(struct config_item *item)
759{ 759{
760 configfs_undepend_item(&o2nm_cluster_group.cs_subsys, item); 760 configfs_undepend_item(item);
761} 761}
762 762
763int o2nm_depend_this_node(void) 763int o2nm_depend_this_node(void)
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index f7300d023dbe..f8165c129ccb 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -259,7 +259,24 @@ void configfs_unregister_default_group(struct config_group *group);
259 259
260/* These functions can sleep and can alloc with GFP_KERNEL */ 260/* These functions can sleep and can alloc with GFP_KERNEL */
261/* WARNING: These cannot be called underneath configfs callbacks!! */ 261/* WARNING: These cannot be called underneath configfs callbacks!! */
262int configfs_depend_item(struct configfs_subsystem *subsys, struct config_item *target); 262int configfs_depend_item(struct configfs_subsystem *subsys,
263void configfs_undepend_item(struct configfs_subsystem *subsys, struct config_item *target); 263 struct config_item *target);
264void configfs_undepend_item(struct config_item *target);
265
266/*
267 * These functions can sleep and can alloc with GFP_KERNEL
268 * NOTE: These should be called only underneath configfs callbacks.
269 * NOTE: First parameter is a caller's subsystem, not target's.
270 * WARNING: These cannot be called on newly created item
271 * (in make_group()/make_item() callback)
272 */
273int configfs_depend_item_unlocked(struct configfs_subsystem *caller_subsys,
274 struct config_item *target);
275
276
277static inline void configfs_undepend_item_unlocked(struct config_item *target)
278{
279 configfs_undepend_item(target);
280}
264 281
265#endif /* _CONFIGFS_H_ */ 282#endif /* _CONFIGFS_H_ */
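For the new unlocked variants the intended call site is a configfs callback, which is exactly where the locked configfs_depend_item() would deadlock. A hedged usage sketch (the surrounding function and its names are hypothetical; only the two configfs calls come from this patch):

/* Sketch: pin a target item from inside one's own configfs callback. */
static int example_enable_store(struct configfs_subsystem *my_subsys,
				struct config_item *target)
{
	int ret;

	ret = configfs_depend_item_unlocked(my_subsys, target);
	if (ret)
		return ret;	/* -ENOENT: target's subsystem went away */

	/* ... target is pinned; rmdir() on it now returns -EBUSY ... */

	configfs_undepend_item_unlocked(target);
	return 0;
}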
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index aabf0aca0171..5d82816cc4e3 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -63,6 +63,8 @@
63#define DA_UNMAP_GRANULARITY_DEFAULT 0 63#define DA_UNMAP_GRANULARITY_DEFAULT 0
64/* Default unmap_granularity_alignment */ 64/* Default unmap_granularity_alignment */
65#define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0 65#define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0
66/* Default unmap_zeroes_data */
67#define DA_UNMAP_ZEROES_DATA_DEFAULT 0
66/* Default max_write_same_len, disabled by default */ 68/* Default max_write_same_len, disabled by default */
67#define DA_MAX_WRITE_SAME_LEN 0 69#define DA_MAX_WRITE_SAME_LEN 0
68/* Use a model alias based on the configfs backend device name */ 70/* Use a model alias based on the configfs backend device name */
@@ -526,6 +528,7 @@ struct se_cmd {
526 unsigned int t_prot_nents; 528 unsigned int t_prot_nents;
527 sense_reason_t pi_err; 529 sense_reason_t pi_err;
528 sector_t bad_sector; 530 sector_t bad_sector;
531 int cpuid;
529}; 532};
530 533
531struct se_ua { 534struct se_ua {
@@ -674,6 +677,7 @@ struct se_dev_attrib {
674 int force_pr_aptpl; 677 int force_pr_aptpl;
675 int is_nonrot; 678 int is_nonrot;
676 int emulate_rest_reord; 679 int emulate_rest_reord;
680 int unmap_zeroes_data;
677 u32 hw_block_size; 681 u32 hw_block_size;
678 u32 block_size; 682 u32 block_size;
679 u32 hw_max_sectors; 683 u32 hw_max_sectors;
@@ -864,8 +868,6 @@ struct se_portal_group {
864 * Negative values can be used by fabric drivers for internal use TPGs. 868 * Negative values can be used by fabric drivers for internal use TPGs.
865 */ 869 */
866 int proto_id; 870 int proto_id;
867 /* Number of ACLed Initiator Nodes for this TPG */
868 u32 num_node_acls;
869 /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ 871 /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
870 atomic_t tpg_pr_ref_count; 872 atomic_t tpg_pr_ref_count;
871 /* Spinlock for adding/removing ACLed Nodes */ 873 /* Spinlock for adding/removing ACLed Nodes */
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 7fb2557a760e..56653408f53b 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -117,7 +117,7 @@ void __transport_register_session(struct se_portal_group *,
117 struct se_node_acl *, struct se_session *, void *); 117 struct se_node_acl *, struct se_session *, void *);
118void transport_register_session(struct se_portal_group *, 118void transport_register_session(struct se_portal_group *,
119 struct se_node_acl *, struct se_session *, void *); 119 struct se_node_acl *, struct se_session *, void *);
120void target_get_session(struct se_session *); 120int target_get_session(struct se_session *);
121void target_put_session(struct se_session *); 121void target_put_session(struct se_session *);
122ssize_t target_show_dynamic_sessions(struct se_portal_group *, char *); 122ssize_t target_show_dynamic_sessions(struct se_portal_group *, char *);
123void transport_free_session(struct se_session *); 123void transport_free_session(struct se_session *);
@@ -140,7 +140,7 @@ int target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *,
140int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, 140int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
141 unsigned char *sense, u64 unpacked_lun, 141 unsigned char *sense, u64 unpacked_lun,
142 void *fabric_tmr_ptr, unsigned char tm_type, 142 void *fabric_tmr_ptr, unsigned char tm_type,
143 gfp_t, unsigned int, int); 143 gfp_t, u64, int);
144int transport_handle_cdb_direct(struct se_cmd *); 144int transport_handle_cdb_direct(struct se_cmd *);
145sense_reason_t transport_generic_new_cmd(struct se_cmd *); 145sense_reason_t transport_generic_new_cmd(struct se_cmd *);
146 146
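In the hunk above, target_get_session() changes from void to int: with the move to proper sess_kref usage the get can now fail when the session is already being torn down, so fabric callers must check it. A hedged sketch of the new calling convention, assuming the usual kref-style contract where a zero return means the reference could not be taken (the wrapper function itself is illustrative):

/* Sketch: taking a session reference is now fallible. */
static int example_use_session(struct se_session *se_sess)
{
	if (!target_get_session(se_sess))	/* kref already hit zero */
		return -ENODEV;

	/* ... se_sess is safe to dereference here ... */

	target_put_session(se_sess);
	return 0;
}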
@@ -169,10 +169,11 @@ void core_allocate_nexus_loss_ua(struct se_node_acl *acl);
169 169
170struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg, 170struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
171 unsigned char *); 171 unsigned char *);
172bool target_tpg_has_node_acl(struct se_portal_group *tpg,
173 const char *);
172struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *, 174struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *,
173 unsigned char *); 175 unsigned char *);
174int core_tpg_set_initiator_node_queue_depth(struct se_portal_group *, 176int core_tpg_set_initiator_node_queue_depth(struct se_node_acl *, u32);
175 unsigned char *, u32, int);
176int core_tpg_set_initiator_node_tag(struct se_portal_group *, 177int core_tpg_set_initiator_node_tag(struct se_portal_group *,
177 struct se_node_acl *, const char *); 178 struct se_node_acl *, const char *);
178int core_tpg_register(struct se_wwn *, struct se_portal_group *, int); 179int core_tpg_register(struct se_wwn *, struct se_portal_group *, int);