diff options
author:    Linus Torvalds <torvalds@linux-foundation.org>  2015-04-24 13:22:09 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org>  2015-04-24 13:22:09 -0400
commit:    c6668726d2c2c581e6c417448c472c994d026f5f (patch)
tree:      ccbc5a73b9dfe09a065cb5d8627aa2297e730ec0 /drivers/target
parent:    06b45f2aa703837163496f5db6a53575665cc6b4 (diff)
parent:    68d4cef3bab3fb9bb0dbac690ba35a96cb5a16d9 (diff)
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
Pull SCSI target updates from Nicholas Bellinger:
"Lots of activity in target land the last months.
The highlights include:
- Convert fabric drivers tree-wide to target_register_template() (hch
+ bart)
- iser-target hardening fixes + v1.0 improvements (sagi)
- Convert iscsi_thread_set usage to kthread.h + kill
iscsi_target_tq.c (sagi + nab)
- Add support for T10-PI WRITE_STRIP + READ_INSERT operation (mkp +
sagi + nab)
- DIF fixes for CONFIG_DEBUG_SG=y + UNMAP file emulation (akinobu +
sagi + mkp)
- Extended TCMU ABI v2 for future BIDI + DIF support (andy + ilias)
- Fix COMPARE_AND_WRITE handling for NO_ALLLOC drivers (hch + nab)
Thanks to everyone who contributed this round with new features,
bug-reports, fixes, cleanups and improvements.
Looking forward, it's currently shaping up to be a busy v4.2 as well"
* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (69 commits)
target: Put TCMU under a new config option
target: Version 2 of TCMU ABI
target: fix tcm_mod_builder.py
target/file: Fix UNMAP with DIF protection support
target/file: Fix SG table for prot_buf initialization
target/file: Fix BUG() when CONFIG_DEBUG_SG=y and DIF protection enabled
target: Make core_tmr_abort_task() skip TMFs
target/sbc: Update sbc_dif_generate pr_debug output
target/sbc: Make internal DIF emulation honor ->prot_checks
target/sbc: Return INVALID_CDB_FIELD if DIF + sess_prot_type disabled
target: Ensure sess_prot_type is saved across session restart
target/rd: Don't pass incomplete scatterlist entries to sbc_dif_verify_*
target: Remove the unused flag SCF_ACK_KREF
target: Fix two sparse warnings
target: Fix COMPARE_AND_WRITE with SG_TO_MEM_NOALLOC handling
target: simplify the target template registration API
target: simplify target_xcopy_init_pt_lun
target: remove the unused SCF_CMD_XCOPY_PASSTHROUGH flag
target/rd: reduce code duplication in rd_execute_rw()
tcm_loop: fixup tpgt string to integer conversion
...
Diffstat (limited to 'drivers/target')
33 files changed, 990 insertions, 1524 deletions
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index 81d44c477a5b..257361280510 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -31,12 +31,13 @@ config TCM_PSCSI | |||
31 | Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered | 31 | Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered |
32 | passthrough access to Linux/SCSI device | 32 | passthrough access to Linux/SCSI device |
33 | 33 | ||
34 | config TCM_USER | 34 | config TCM_USER2 |
35 | tristate "TCM/USER Subsystem Plugin for Linux" | 35 | tristate "TCM/USER Subsystem Plugin for Linux" |
36 | depends on UIO && NET | 36 | depends on UIO && NET |
37 | help | 37 | help |
38 | Say Y here to enable the TCM/USER subsystem plugin for a userspace | 38 | Say Y here to enable the TCM/USER subsystem plugin for a userspace |
39 | process to handle requests | 39 | process to handle requests. This is version 2 of the ABI; version 1 |
40 | is obsolete. | ||
40 | 41 | ||
41 | source "drivers/target/loopback/Kconfig" | 42 | source "drivers/target/loopback/Kconfig" |
42 | source "drivers/target/tcm_fc/Kconfig" | 43 | source "drivers/target/tcm_fc/Kconfig" |
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index bbb4a7d638ef..e619c0266a79 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -22,7 +22,7 @@ obj-$(CONFIG_TARGET_CORE) += target_core_mod.o | |||
22 | obj-$(CONFIG_TCM_IBLOCK) += target_core_iblock.o | 22 | obj-$(CONFIG_TCM_IBLOCK) += target_core_iblock.o |
23 | obj-$(CONFIG_TCM_FILEIO) += target_core_file.o | 23 | obj-$(CONFIG_TCM_FILEIO) += target_core_file.o |
24 | obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o | 24 | obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o |
25 | obj-$(CONFIG_TCM_USER) += target_core_user.o | 25 | obj-$(CONFIG_TCM_USER2) += target_core_user.o |
26 | 26 | ||
27 | # Fabric modules | 27 | # Fabric modules |
28 | obj-$(CONFIG_LOOPBACK_TARGET) += loopback/ | 28 | obj-$(CONFIG_LOOPBACK_TARGET) += loopback/ |
diff --git a/drivers/target/iscsi/Makefile b/drivers/target/iscsi/Makefile
index 13a92403fe3e..0f43be9c3453 100644
--- a/drivers/target/iscsi/Makefile
+++ b/drivers/target/iscsi/Makefile
@@ -1,6 +1,5 @@ | |||
1 | iscsi_target_mod-y += iscsi_target_parameters.o \ | 1 | iscsi_target_mod-y += iscsi_target_parameters.o \ |
2 | iscsi_target_seq_pdu_list.o \ | 2 | iscsi_target_seq_pdu_list.o \ |
3 | iscsi_target_tq.o \ | ||
4 | iscsi_target_auth.o \ | 3 | iscsi_target_auth.o \ |
5 | iscsi_target_datain_values.o \ | 4 | iscsi_target_datain_values.o \ |
6 | iscsi_target_device.o \ | 5 | iscsi_target_device.o \ |
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 77d64251af40..34871a628b11 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -33,8 +33,6 @@ | |||
33 | #include <target/iscsi/iscsi_target_core.h> | 33 | #include <target/iscsi/iscsi_target_core.h> |
34 | #include "iscsi_target_parameters.h" | 34 | #include "iscsi_target_parameters.h" |
35 | #include "iscsi_target_seq_pdu_list.h" | 35 | #include "iscsi_target_seq_pdu_list.h" |
36 | #include "iscsi_target_tq.h" | ||
37 | #include "iscsi_target_configfs.h" | ||
38 | #include "iscsi_target_datain_values.h" | 36 | #include "iscsi_target_datain_values.h" |
39 | #include "iscsi_target_erl0.h" | 37 | #include "iscsi_target_erl0.h" |
40 | #include "iscsi_target_erl1.h" | 38 | #include "iscsi_target_erl1.h" |
@@ -537,7 +535,7 @@ static struct iscsit_transport iscsi_target_transport = { | |||
537 | 535 | ||
538 | static int __init iscsi_target_init_module(void) | 536 | static int __init iscsi_target_init_module(void) |
539 | { | 537 | { |
540 | int ret = 0; | 538 | int ret = 0, size; |
541 | 539 | ||
542 | pr_debug("iSCSI-Target "ISCSIT_VERSION"\n"); | 540 | pr_debug("iSCSI-Target "ISCSIT_VERSION"\n"); |
543 | 541 | ||
@@ -546,24 +544,21 @@ static int __init iscsi_target_init_module(void) | |||
546 | pr_err("Unable to allocate memory for iscsit_global\n"); | 544 | pr_err("Unable to allocate memory for iscsit_global\n"); |
547 | return -1; | 545 | return -1; |
548 | } | 546 | } |
547 | spin_lock_init(&iscsit_global->ts_bitmap_lock); | ||
549 | mutex_init(&auth_id_lock); | 548 | mutex_init(&auth_id_lock); |
550 | spin_lock_init(&sess_idr_lock); | 549 | spin_lock_init(&sess_idr_lock); |
551 | idr_init(&tiqn_idr); | 550 | idr_init(&tiqn_idr); |
552 | idr_init(&sess_idr); | 551 | idr_init(&sess_idr); |
553 | 552 | ||
554 | ret = iscsi_target_register_configfs(); | 553 | ret = target_register_template(&iscsi_ops); |
555 | if (ret < 0) | 554 | if (ret) |
556 | goto out; | 555 | goto out; |
557 | 556 | ||
558 | ret = iscsi_thread_set_init(); | 557 | size = BITS_TO_LONGS(ISCSIT_BITMAP_BITS) * sizeof(long); |
559 | if (ret < 0) | 558 | iscsit_global->ts_bitmap = vzalloc(size); |
559 | if (!iscsit_global->ts_bitmap) { | ||
560 | pr_err("Unable to allocate iscsit_global->ts_bitmap\n"); | ||
560 | goto configfs_out; | 561 | goto configfs_out; |
561 | |||
562 | if (iscsi_allocate_thread_sets(TARGET_THREAD_SET_COUNT) != | ||
563 | TARGET_THREAD_SET_COUNT) { | ||
564 | pr_err("iscsi_allocate_thread_sets() returned" | ||
565 | " unexpected value!\n"); | ||
566 | goto ts_out1; | ||
567 | } | 562 | } |
568 | 563 | ||
569 | lio_qr_cache = kmem_cache_create("lio_qr_cache", | 564 | lio_qr_cache = kmem_cache_create("lio_qr_cache", |
@@ -572,7 +567,7 @@ static int __init iscsi_target_init_module(void) | |||
572 | if (!lio_qr_cache) { | 567 | if (!lio_qr_cache) { |
573 | pr_err("nable to kmem_cache_create() for" | 568 | pr_err("nable to kmem_cache_create() for" |
574 | " lio_qr_cache\n"); | 569 | " lio_qr_cache\n"); |
575 | goto ts_out2; | 570 | goto bitmap_out; |
576 | } | 571 | } |
577 | 572 | ||
578 | lio_dr_cache = kmem_cache_create("lio_dr_cache", | 573 | lio_dr_cache = kmem_cache_create("lio_dr_cache", |
@@ -617,12 +612,13 @@ dr_out: | |||
617 | kmem_cache_destroy(lio_dr_cache); | 612 | kmem_cache_destroy(lio_dr_cache); |
618 | qr_out: | 613 | qr_out: |
619 | kmem_cache_destroy(lio_qr_cache); | 614 | kmem_cache_destroy(lio_qr_cache); |
620 | ts_out2: | 615 | bitmap_out: |
621 | iscsi_deallocate_thread_sets(); | 616 | vfree(iscsit_global->ts_bitmap); |
622 | ts_out1: | ||
623 | iscsi_thread_set_free(); | ||
624 | configfs_out: | 617 | configfs_out: |
625 | iscsi_target_deregister_configfs(); | 618 | /* XXX: this probably wants it to be it's own unwind step.. */ |
619 | if (iscsit_global->discovery_tpg) | ||
620 | iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1); | ||
621 | target_unregister_template(&iscsi_ops); | ||
626 | out: | 622 | out: |
627 | kfree(iscsit_global); | 623 | kfree(iscsit_global); |
628 | return -ENOMEM; | 624 | return -ENOMEM; |
@@ -630,8 +626,6 @@ out: | |||
630 | 626 | ||
631 | static void __exit iscsi_target_cleanup_module(void) | 627 | static void __exit iscsi_target_cleanup_module(void) |
632 | { | 628 | { |
633 | iscsi_deallocate_thread_sets(); | ||
634 | iscsi_thread_set_free(); | ||
635 | iscsit_release_discovery_tpg(); | 629 | iscsit_release_discovery_tpg(); |
636 | iscsit_unregister_transport(&iscsi_target_transport); | 630 | iscsit_unregister_transport(&iscsi_target_transport); |
637 | kmem_cache_destroy(lio_qr_cache); | 631 | kmem_cache_destroy(lio_qr_cache); |
@@ -639,8 +633,15 @@ static void __exit iscsi_target_cleanup_module(void) | |||
639 | kmem_cache_destroy(lio_ooo_cache); | 633 | kmem_cache_destroy(lio_ooo_cache); |
640 | kmem_cache_destroy(lio_r2t_cache); | 634 | kmem_cache_destroy(lio_r2t_cache); |
641 | 635 | ||
642 | iscsi_target_deregister_configfs(); | 636 | /* |
637 | * Shutdown discovery sessions and disable discovery TPG | ||
638 | */ | ||
639 | if (iscsit_global->discovery_tpg) | ||
640 | iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1); | ||
643 | 641 | ||
642 | target_unregister_template(&iscsi_ops); | ||
643 | |||
644 | vfree(iscsit_global->ts_bitmap); | ||
644 | kfree(iscsit_global); | 645 | kfree(iscsit_global); |
645 | } | 646 | } |
646 | 647 | ||
@@ -990,7 +991,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
990 | /* | 991 | /* |
991 | * Initialize struct se_cmd descriptor from target_core_mod infrastructure | 992 | * Initialize struct se_cmd descriptor from target_core_mod infrastructure |
992 | */ | 993 | */ |
993 | transport_init_se_cmd(&cmd->se_cmd, &lio_target_fabric_configfs->tf_ops, | 994 | transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops, |
994 | conn->sess->se_sess, be32_to_cpu(hdr->data_length), | 995 | conn->sess->se_sess, be32_to_cpu(hdr->data_length), |
995 | cmd->data_direction, sam_task_attr, | 996 | cmd->data_direction, sam_task_attr, |
996 | cmd->sense_buffer + 2); | 997 | cmd->sense_buffer + 2); |
@@ -1805,8 +1806,7 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
1805 | u8 tcm_function; | 1806 | u8 tcm_function; |
1806 | int ret; | 1807 | int ret; |
1807 | 1808 | ||
1808 | transport_init_se_cmd(&cmd->se_cmd, | 1809 | transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops, |
1809 | &lio_target_fabric_configfs->tf_ops, | ||
1810 | conn->sess->se_sess, 0, DMA_NONE, | 1810 | conn->sess->se_sess, 0, DMA_NONE, |
1811 | TCM_SIMPLE_TAG, cmd->sense_buffer + 2); | 1811 | TCM_SIMPLE_TAG, cmd->sense_buffer + 2); |
1812 | 1812 | ||
@@ -2155,7 +2155,6 @@ reject: | |||
2155 | cmd->text_in_ptr = NULL; | 2155 | cmd->text_in_ptr = NULL; |
2156 | return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf); | 2156 | return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf); |
2157 | } | 2157 | } |
2158 | EXPORT_SYMBOL(iscsit_handle_text_cmd); | ||
2159 | 2158 | ||
2160 | int iscsit_logout_closesession(struct iscsi_cmd *cmd, struct iscsi_conn *conn) | 2159 | int iscsit_logout_closesession(struct iscsi_cmd *cmd, struct iscsi_conn *conn) |
2161 | { | 2160 | { |
@@ -3715,17 +3714,16 @@ static int iscsit_send_reject( | |||
3715 | 3714 | ||
3716 | void iscsit_thread_get_cpumask(struct iscsi_conn *conn) | 3715 | void iscsit_thread_get_cpumask(struct iscsi_conn *conn) |
3717 | { | 3716 | { |
3718 | struct iscsi_thread_set *ts = conn->thread_set; | ||
3719 | int ord, cpu; | 3717 | int ord, cpu; |
3720 | /* | 3718 | /* |
3721 | * thread_id is assigned from iscsit_global->ts_bitmap from | 3719 | * bitmap_id is assigned from iscsit_global->ts_bitmap from |
3722 | * within iscsi_thread_set.c:iscsi_allocate_thread_sets() | 3720 | * within iscsit_start_kthreads() |
3723 | * | 3721 | * |
3724 | * Here we use thread_id to determine which CPU that this | 3722 | * Here we use bitmap_id to determine which CPU that this |
3725 | * iSCSI connection's iscsi_thread_set will be scheduled to | 3723 | * iSCSI connection's RX/TX threads will be scheduled to |
3726 | * execute upon. | 3724 | * execute upon. |
3727 | */ | 3725 | */ |
3728 | ord = ts->thread_id % cpumask_weight(cpu_online_mask); | 3726 | ord = conn->bitmap_id % cpumask_weight(cpu_online_mask); |
3729 | for_each_online_cpu(cpu) { | 3727 | for_each_online_cpu(cpu) { |
3730 | if (ord-- == 0) { | 3728 | if (ord-- == 0) { |
3731 | cpumask_set_cpu(cpu, conn->conn_cpumask); | 3729 | cpumask_set_cpu(cpu, conn->conn_cpumask); |
@@ -3914,7 +3912,7 @@ check_rsp_state: | |||
3914 | switch (state) { | 3912 | switch (state) { |
3915 | case ISTATE_SEND_LOGOUTRSP: | 3913 | case ISTATE_SEND_LOGOUTRSP: |
3916 | if (!iscsit_logout_post_handler(cmd, conn)) | 3914 | if (!iscsit_logout_post_handler(cmd, conn)) |
3917 | goto restart; | 3915 | return -ECONNRESET; |
3918 | /* fall through */ | 3916 | /* fall through */ |
3919 | case ISTATE_SEND_STATUS: | 3917 | case ISTATE_SEND_STATUS: |
3920 | case ISTATE_SEND_ASYNCMSG: | 3918 | case ISTATE_SEND_ASYNCMSG: |
@@ -3942,8 +3940,6 @@ check_rsp_state: | |||
3942 | 3940 | ||
3943 | err: | 3941 | err: |
3944 | return -1; | 3942 | return -1; |
3945 | restart: | ||
3946 | return -EAGAIN; | ||
3947 | } | 3943 | } |
3948 | 3944 | ||
3949 | static int iscsit_handle_response_queue(struct iscsi_conn *conn) | 3945 | static int iscsit_handle_response_queue(struct iscsi_conn *conn) |
@@ -3970,21 +3966,13 @@ static int iscsit_handle_response_queue(struct iscsi_conn *conn) | |||
3970 | int iscsi_target_tx_thread(void *arg) | 3966 | int iscsi_target_tx_thread(void *arg) |
3971 | { | 3967 | { |
3972 | int ret = 0; | 3968 | int ret = 0; |
3973 | struct iscsi_conn *conn; | 3969 | struct iscsi_conn *conn = arg; |
3974 | struct iscsi_thread_set *ts = arg; | ||
3975 | /* | 3970 | /* |
3976 | * Allow ourselves to be interrupted by SIGINT so that a | 3971 | * Allow ourselves to be interrupted by SIGINT so that a |
3977 | * connection recovery / failure event can be triggered externally. | 3972 | * connection recovery / failure event can be triggered externally. |
3978 | */ | 3973 | */ |
3979 | allow_signal(SIGINT); | 3974 | allow_signal(SIGINT); |
3980 | 3975 | ||
3981 | restart: | ||
3982 | conn = iscsi_tx_thread_pre_handler(ts); | ||
3983 | if (!conn) | ||
3984 | goto out; | ||
3985 | |||
3986 | ret = 0; | ||
3987 | |||
3988 | while (!kthread_should_stop()) { | 3976 | while (!kthread_should_stop()) { |
3989 | /* | 3977 | /* |
3990 | * Ensure that both TX and RX per connection kthreads | 3978 | * Ensure that both TX and RX per connection kthreads |
@@ -3993,11 +3981,9 @@ restart: | |||
3993 | iscsit_thread_check_cpumask(conn, current, 1); | 3981 | iscsit_thread_check_cpumask(conn, current, 1); |
3994 | 3982 | ||
3995 | wait_event_interruptible(conn->queues_wq, | 3983 | wait_event_interruptible(conn->queues_wq, |
3996 | !iscsit_conn_all_queues_empty(conn) || | 3984 | !iscsit_conn_all_queues_empty(conn)); |
3997 | ts->status == ISCSI_THREAD_SET_RESET); | ||
3998 | 3985 | ||
3999 | if ((ts->status == ISCSI_THREAD_SET_RESET) || | 3986 | if (signal_pending(current)) |
4000 | signal_pending(current)) | ||
4001 | goto transport_err; | 3987 | goto transport_err; |
4002 | 3988 | ||
4003 | get_immediate: | 3989 | get_immediate: |
@@ -4008,15 +3994,14 @@ get_immediate: | |||
4008 | ret = iscsit_handle_response_queue(conn); | 3994 | ret = iscsit_handle_response_queue(conn); |
4009 | if (ret == 1) | 3995 | if (ret == 1) |
4010 | goto get_immediate; | 3996 | goto get_immediate; |
4011 | else if (ret == -EAGAIN) | 3997 | else if (ret == -ECONNRESET) |
4012 | goto restart; | 3998 | goto out; |
4013 | else if (ret < 0) | 3999 | else if (ret < 0) |
4014 | goto transport_err; | 4000 | goto transport_err; |
4015 | } | 4001 | } |
4016 | 4002 | ||
4017 | transport_err: | 4003 | transport_err: |
4018 | iscsit_take_action_for_connection_exit(conn); | 4004 | iscsit_take_action_for_connection_exit(conn); |
4019 | goto restart; | ||
4020 | out: | 4005 | out: |
4021 | return 0; | 4006 | return 0; |
4022 | } | 4007 | } |
@@ -4111,8 +4096,7 @@ int iscsi_target_rx_thread(void *arg) | |||
4111 | int ret; | 4096 | int ret; |
4112 | u8 buffer[ISCSI_HDR_LEN], opcode; | 4097 | u8 buffer[ISCSI_HDR_LEN], opcode; |
4113 | u32 checksum = 0, digest = 0; | 4098 | u32 checksum = 0, digest = 0; |
4114 | struct iscsi_conn *conn = NULL; | 4099 | struct iscsi_conn *conn = arg; |
4115 | struct iscsi_thread_set *ts = arg; | ||
4116 | struct kvec iov; | 4100 | struct kvec iov; |
4117 | /* | 4101 | /* |
4118 | * Allow ourselves to be interrupted by SIGINT so that a | 4102 | * Allow ourselves to be interrupted by SIGINT so that a |
@@ -4120,11 +4104,6 @@ int iscsi_target_rx_thread(void *arg) | |||
4120 | */ | 4104 | */ |
4121 | allow_signal(SIGINT); | 4105 | allow_signal(SIGINT); |
4122 | 4106 | ||
4123 | restart: | ||
4124 | conn = iscsi_rx_thread_pre_handler(ts); | ||
4125 | if (!conn) | ||
4126 | goto out; | ||
4127 | |||
4128 | if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) { | 4107 | if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) { |
4129 | struct completion comp; | 4108 | struct completion comp; |
4130 | int rc; | 4109 | int rc; |
@@ -4134,7 +4113,7 @@ restart: | |||
4134 | if (rc < 0) | 4113 | if (rc < 0) |
4135 | goto transport_err; | 4114 | goto transport_err; |
4136 | 4115 | ||
4137 | goto out; | 4116 | goto transport_err; |
4138 | } | 4117 | } |
4139 | 4118 | ||
4140 | while (!kthread_should_stop()) { | 4119 | while (!kthread_should_stop()) { |
@@ -4210,8 +4189,6 @@ transport_err: | |||
4210 | if (!signal_pending(current)) | 4189 | if (!signal_pending(current)) |
4211 | atomic_set(&conn->transport_failed, 1); | 4190 | atomic_set(&conn->transport_failed, 1); |
4212 | iscsit_take_action_for_connection_exit(conn); | 4191 | iscsit_take_action_for_connection_exit(conn); |
4213 | goto restart; | ||
4214 | out: | ||
4215 | return 0; | 4192 | return 0; |
4216 | } | 4193 | } |
4217 | 4194 | ||
@@ -4273,7 +4250,24 @@ int iscsit_close_connection( | |||
4273 | if (conn->conn_transport->transport_type == ISCSI_TCP) | 4250 | if (conn->conn_transport->transport_type == ISCSI_TCP) |
4274 | complete(&conn->conn_logout_comp); | 4251 | complete(&conn->conn_logout_comp); |
4275 | 4252 | ||
4276 | iscsi_release_thread_set(conn); | 4253 | if (!strcmp(current->comm, ISCSI_RX_THREAD_NAME)) { |
4254 | if (conn->tx_thread && | ||
4255 | cmpxchg(&conn->tx_thread_active, true, false)) { | ||
4256 | send_sig(SIGINT, conn->tx_thread, 1); | ||
4257 | kthread_stop(conn->tx_thread); | ||
4258 | } | ||
4259 | } else if (!strcmp(current->comm, ISCSI_TX_THREAD_NAME)) { | ||
4260 | if (conn->rx_thread && | ||
4261 | cmpxchg(&conn->rx_thread_active, true, false)) { | ||
4262 | send_sig(SIGINT, conn->rx_thread, 1); | ||
4263 | kthread_stop(conn->rx_thread); | ||
4264 | } | ||
4265 | } | ||
4266 | |||
4267 | spin_lock(&iscsit_global->ts_bitmap_lock); | ||
4268 | bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id, | ||
4269 | get_order(1)); | ||
4270 | spin_unlock(&iscsit_global->ts_bitmap_lock); | ||
4277 | 4271 | ||
4278 | iscsit_stop_timers_for_cmds(conn); | 4272 | iscsit_stop_timers_for_cmds(conn); |
4279 | iscsit_stop_nopin_response_timer(conn); | 4273 | iscsit_stop_nopin_response_timer(conn); |
@@ -4383,8 +4377,6 @@ int iscsit_close_connection( | |||
4383 | 4377 | ||
4384 | iscsit_put_transport(conn->conn_transport); | 4378 | iscsit_put_transport(conn->conn_transport); |
4385 | 4379 | ||
4386 | conn->thread_set = NULL; | ||
4387 | |||
4388 | pr_debug("Moving to TARG_CONN_STATE_FREE.\n"); | 4380 | pr_debug("Moving to TARG_CONN_STATE_FREE.\n"); |
4389 | conn->conn_state = TARG_CONN_STATE_FREE; | 4381 | conn->conn_state = TARG_CONN_STATE_FREE; |
4390 | kfree(conn); | 4382 | kfree(conn); |
@@ -4551,15 +4543,13 @@ static void iscsit_logout_post_handler_closesession( | |||
4551 | struct iscsi_conn *conn) | 4543 | struct iscsi_conn *conn) |
4552 | { | 4544 | { |
4553 | struct iscsi_session *sess = conn->sess; | 4545 | struct iscsi_session *sess = conn->sess; |
4554 | 4546 | int sleep = cmpxchg(&conn->tx_thread_active, true, false); | |
4555 | iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD); | ||
4556 | iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD); | ||
4557 | 4547 | ||
4558 | atomic_set(&conn->conn_logout_remove, 0); | 4548 | atomic_set(&conn->conn_logout_remove, 0); |
4559 | complete(&conn->conn_logout_comp); | 4549 | complete(&conn->conn_logout_comp); |
4560 | 4550 | ||
4561 | iscsit_dec_conn_usage_count(conn); | 4551 | iscsit_dec_conn_usage_count(conn); |
4562 | iscsit_stop_session(sess, 1, 1); | 4552 | iscsit_stop_session(sess, sleep, sleep); |
4563 | iscsit_dec_session_usage_count(sess); | 4553 | iscsit_dec_session_usage_count(sess); |
4564 | target_put_session(sess->se_sess); | 4554 | target_put_session(sess->se_sess); |
4565 | } | 4555 | } |
@@ -4567,13 +4557,12 @@ static void iscsit_logout_post_handler_closesession( | |||
4567 | static void iscsit_logout_post_handler_samecid( | 4557 | static void iscsit_logout_post_handler_samecid( |
4568 | struct iscsi_conn *conn) | 4558 | struct iscsi_conn *conn) |
4569 | { | 4559 | { |
4570 | iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD); | 4560 | int sleep = cmpxchg(&conn->tx_thread_active, true, false); |
4571 | iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD); | ||
4572 | 4561 | ||
4573 | atomic_set(&conn->conn_logout_remove, 0); | 4562 | atomic_set(&conn->conn_logout_remove, 0); |
4574 | complete(&conn->conn_logout_comp); | 4563 | complete(&conn->conn_logout_comp); |
4575 | 4564 | ||
4576 | iscsit_cause_connection_reinstatement(conn, 1); | 4565 | iscsit_cause_connection_reinstatement(conn, sleep); |
4577 | iscsit_dec_conn_usage_count(conn); | 4566 | iscsit_dec_conn_usage_count(conn); |
4578 | } | 4567 | } |
4579 | 4568 | ||
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
index e936d56fb523..7d0f9c00d9c2 100644
--- a/drivers/target/iscsi/iscsi_target.h
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -35,7 +35,7 @@ extern void iscsit_stop_session(struct iscsi_session *, int, int); | |||
35 | extern int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *, int); | 35 | extern int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *, int); |
36 | 36 | ||
37 | extern struct iscsit_global *iscsit_global; | 37 | extern struct iscsit_global *iscsit_global; |
38 | extern struct target_fabric_configfs *lio_target_fabric_configfs; | 38 | extern const struct target_core_fabric_ops iscsi_ops; |
39 | 39 | ||
40 | extern struct kmem_cache *lio_dr_cache; | 40 | extern struct kmem_cache *lio_dr_cache; |
41 | extern struct kmem_cache *lio_ooo_cache; | 41 | extern struct kmem_cache *lio_ooo_cache; |
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 48384b675e62..469fce44ebad 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -37,9 +37,6 @@ | |||
37 | #include "iscsi_target_util.h" | 37 | #include "iscsi_target_util.h" |
38 | #include "iscsi_target.h" | 38 | #include "iscsi_target.h" |
39 | #include <target/iscsi/iscsi_target_stat.h> | 39 | #include <target/iscsi/iscsi_target_stat.h> |
40 | #include "iscsi_target_configfs.h" | ||
41 | |||
42 | struct target_fabric_configfs *lio_target_fabric_configfs; | ||
43 | 40 | ||
44 | struct lio_target_configfs_attribute { | 41 | struct lio_target_configfs_attribute { |
45 | struct configfs_attribute attr; | 42 | struct configfs_attribute attr; |
@@ -1052,6 +1049,11 @@ TPG_ATTR(default_erl, S_IRUGO | S_IWUSR); | |||
1052 | */ | 1049 | */ |
1053 | DEF_TPG_ATTRIB(t10_pi); | 1050 | DEF_TPG_ATTRIB(t10_pi); |
1054 | TPG_ATTR(t10_pi, S_IRUGO | S_IWUSR); | 1051 | TPG_ATTR(t10_pi, S_IRUGO | S_IWUSR); |
1052 | /* | ||
1053 | * Define iscsi_tpg_attrib_s_fabric_prot_type | ||
1054 | */ | ||
1055 | DEF_TPG_ATTRIB(fabric_prot_type); | ||
1056 | TPG_ATTR(fabric_prot_type, S_IRUGO | S_IWUSR); | ||
1055 | 1057 | ||
1056 | static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = { | 1058 | static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = { |
1057 | &iscsi_tpg_attrib_authentication.attr, | 1059 | &iscsi_tpg_attrib_authentication.attr, |
@@ -1065,6 +1067,7 @@ static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = { | |||
1065 | &iscsi_tpg_attrib_demo_mode_discovery.attr, | 1067 | &iscsi_tpg_attrib_demo_mode_discovery.attr, |
1066 | &iscsi_tpg_attrib_default_erl.attr, | 1068 | &iscsi_tpg_attrib_default_erl.attr, |
1067 | &iscsi_tpg_attrib_t10_pi.attr, | 1069 | &iscsi_tpg_attrib_t10_pi.attr, |
1070 | &iscsi_tpg_attrib_fabric_prot_type.attr, | ||
1068 | NULL, | 1071 | NULL, |
1069 | }; | 1072 | }; |
1070 | 1073 | ||
@@ -1410,8 +1413,18 @@ out: | |||
1410 | 1413 | ||
1411 | TF_TPG_BASE_ATTR(lio_target, enable, S_IRUGO | S_IWUSR); | 1414 | TF_TPG_BASE_ATTR(lio_target, enable, S_IRUGO | S_IWUSR); |
1412 | 1415 | ||
1416 | static ssize_t lio_target_tpg_show_dynamic_sessions( | ||
1417 | struct se_portal_group *se_tpg, | ||
1418 | char *page) | ||
1419 | { | ||
1420 | return target_show_dynamic_sessions(se_tpg, page); | ||
1421 | } | ||
1422 | |||
1423 | TF_TPG_BASE_ATTR_RO(lio_target, dynamic_sessions); | ||
1424 | |||
1413 | static struct configfs_attribute *lio_target_tpg_attrs[] = { | 1425 | static struct configfs_attribute *lio_target_tpg_attrs[] = { |
1414 | &lio_target_tpg_enable.attr, | 1426 | &lio_target_tpg_enable.attr, |
1427 | &lio_target_tpg_dynamic_sessions.attr, | ||
1415 | NULL, | 1428 | NULL, |
1416 | }; | 1429 | }; |
1417 | 1430 | ||
@@ -1450,10 +1463,8 @@ static struct se_portal_group *lio_target_tiqn_addtpg( | |||
1450 | if (!tpg) | 1463 | if (!tpg) |
1451 | return NULL; | 1464 | return NULL; |
1452 | 1465 | ||
1453 | ret = core_tpg_register( | 1466 | ret = core_tpg_register(&iscsi_ops, wwn, &tpg->tpg_se_tpg, |
1454 | &lio_target_fabric_configfs->tf_ops, | 1467 | tpg, TRANSPORT_TPG_TYPE_NORMAL); |
1455 | wwn, &tpg->tpg_se_tpg, tpg, | ||
1456 | TRANSPORT_TPG_TYPE_NORMAL); | ||
1457 | if (ret < 0) | 1468 | if (ret < 0) |
1458 | return NULL; | 1469 | return NULL; |
1459 | 1470 | ||
@@ -1872,6 +1883,20 @@ static int lio_tpg_check_prod_mode_write_protect( | |||
1872 | return tpg->tpg_attrib.prod_mode_write_protect; | 1883 | return tpg->tpg_attrib.prod_mode_write_protect; |
1873 | } | 1884 | } |
1874 | 1885 | ||
1886 | static int lio_tpg_check_prot_fabric_only( | ||
1887 | struct se_portal_group *se_tpg) | ||
1888 | { | ||
1889 | struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; | ||
1890 | /* | ||
1891 | * Only report fabric_prot_type if t10_pi has also been enabled | ||
1892 | * for incoming ib_isert sessions. | ||
1893 | */ | ||
1894 | if (!tpg->tpg_attrib.t10_pi) | ||
1895 | return 0; | ||
1896 | |||
1897 | return tpg->tpg_attrib.fabric_prot_type; | ||
1898 | } | ||
1899 | |||
1875 | static void lio_tpg_release_fabric_acl( | 1900 | static void lio_tpg_release_fabric_acl( |
1876 | struct se_portal_group *se_tpg, | 1901 | struct se_portal_group *se_tpg, |
1877 | struct se_node_acl *se_acl) | 1902 | struct se_node_acl *se_acl) |
@@ -1953,115 +1978,60 @@ static void lio_release_cmd(struct se_cmd *se_cmd) | |||
1953 | iscsit_release_cmd(cmd); | 1978 | iscsit_release_cmd(cmd); |
1954 | } | 1979 | } |
1955 | 1980 | ||
1956 | /* End functions for target_core_fabric_ops */ | 1981 | const struct target_core_fabric_ops iscsi_ops = { |
1957 | 1982 | .module = THIS_MODULE, | |
1958 | int iscsi_target_register_configfs(void) | 1983 | .name = "iscsi", |
1959 | { | 1984 | .get_fabric_name = iscsi_get_fabric_name, |
1960 | struct target_fabric_configfs *fabric; | 1985 | .get_fabric_proto_ident = iscsi_get_fabric_proto_ident, |
1961 | int ret; | 1986 | .tpg_get_wwn = lio_tpg_get_endpoint_wwn, |
1962 | 1987 | .tpg_get_tag = lio_tpg_get_tag, | |
1963 | lio_target_fabric_configfs = NULL; | 1988 | .tpg_get_default_depth = lio_tpg_get_default_depth, |
1964 | fabric = target_fabric_configfs_init(THIS_MODULE, "iscsi"); | 1989 | .tpg_get_pr_transport_id = iscsi_get_pr_transport_id, |
1965 | if (IS_ERR(fabric)) { | 1990 | .tpg_get_pr_transport_id_len = iscsi_get_pr_transport_id_len, |
1966 | pr_err("target_fabric_configfs_init() for" | 1991 | .tpg_parse_pr_out_transport_id = iscsi_parse_pr_out_transport_id, |
1967 | " LIO-Target failed!\n"); | 1992 | .tpg_check_demo_mode = lio_tpg_check_demo_mode, |
1968 | return PTR_ERR(fabric); | 1993 | .tpg_check_demo_mode_cache = lio_tpg_check_demo_mode_cache, |
1969 | } | 1994 | .tpg_check_demo_mode_write_protect = |
1970 | /* | 1995 | lio_tpg_check_demo_mode_write_protect, |
1971 | * Setup the fabric API of function pointers used by target_core_mod.. | 1996 | .tpg_check_prod_mode_write_protect = |
1972 | */ | 1997 | lio_tpg_check_prod_mode_write_protect, |
1973 | fabric->tf_ops.get_fabric_name = &iscsi_get_fabric_name; | 1998 | .tpg_check_prot_fabric_only = &lio_tpg_check_prot_fabric_only, |
1974 | fabric->tf_ops.get_fabric_proto_ident = &iscsi_get_fabric_proto_ident; | 1999 | .tpg_alloc_fabric_acl = lio_tpg_alloc_fabric_acl, |
1975 | fabric->tf_ops.tpg_get_wwn = &lio_tpg_get_endpoint_wwn; | 2000 | .tpg_release_fabric_acl = lio_tpg_release_fabric_acl, |
1976 | fabric->tf_ops.tpg_get_tag = &lio_tpg_get_tag; | 2001 | .tpg_get_inst_index = lio_tpg_get_inst_index, |
1977 | fabric->tf_ops.tpg_get_default_depth = &lio_tpg_get_default_depth; | 2002 | .check_stop_free = lio_check_stop_free, |
1978 | fabric->tf_ops.tpg_get_pr_transport_id = &iscsi_get_pr_transport_id; | 2003 | .release_cmd = lio_release_cmd, |
1979 | fabric->tf_ops.tpg_get_pr_transport_id_len = | 2004 | .shutdown_session = lio_tpg_shutdown_session, |
1980 | &iscsi_get_pr_transport_id_len; | 2005 | .close_session = lio_tpg_close_session, |
1981 | fabric->tf_ops.tpg_parse_pr_out_transport_id = | 2006 | .sess_get_index = lio_sess_get_index, |
1982 | &iscsi_parse_pr_out_transport_id; | 2007 | .sess_get_initiator_sid = lio_sess_get_initiator_sid, |
1983 | fabric->tf_ops.tpg_check_demo_mode = &lio_tpg_check_demo_mode; | 2008 | .write_pending = lio_write_pending, |
1984 | fabric->tf_ops.tpg_check_demo_mode_cache = | 2009 | .write_pending_status = lio_write_pending_status, |
1985 | &lio_tpg_check_demo_mode_cache; | 2010 | .set_default_node_attributes = lio_set_default_node_attributes, |
1986 | fabric->tf_ops.tpg_check_demo_mode_write_protect = | 2011 | .get_task_tag = iscsi_get_task_tag, |
1987 | &lio_tpg_check_demo_mode_write_protect; | 2012 | .get_cmd_state = iscsi_get_cmd_state, |
1988 | fabric->tf_ops.tpg_check_prod_mode_write_protect = | 2013 | .queue_data_in = lio_queue_data_in, |
1989 | &lio_tpg_check_prod_mode_write_protect; | 2014 | .queue_status = lio_queue_status, |
1990 | fabric->tf_ops.tpg_alloc_fabric_acl = &lio_tpg_alloc_fabric_acl; | 2015 | .queue_tm_rsp = lio_queue_tm_rsp, |
1991 | fabric->tf_ops.tpg_release_fabric_acl = &lio_tpg_release_fabric_acl; | 2016 | .aborted_task = lio_aborted_task, |
1992 | fabric->tf_ops.tpg_get_inst_index = &lio_tpg_get_inst_index; | 2017 | .fabric_make_wwn = lio_target_call_coreaddtiqn, |
1993 | fabric->tf_ops.check_stop_free = &lio_check_stop_free, | 2018 | .fabric_drop_wwn = lio_target_call_coredeltiqn, |
1994 | fabric->tf_ops.release_cmd = &lio_release_cmd; | 2019 | .fabric_make_tpg = lio_target_tiqn_addtpg, |
1995 | fabric->tf_ops.shutdown_session = &lio_tpg_shutdown_session; | 2020 | .fabric_drop_tpg = lio_target_tiqn_deltpg, |
1996 | fabric->tf_ops.close_session = &lio_tpg_close_session; | 2021 | .fabric_make_np = lio_target_call_addnptotpg, |
1997 | fabric->tf_ops.sess_get_index = &lio_sess_get_index; | 2022 | .fabric_drop_np = lio_target_call_delnpfromtpg, |
1998 | fabric->tf_ops.sess_get_initiator_sid = &lio_sess_get_initiator_sid; | 2023 | .fabric_make_nodeacl = lio_target_make_nodeacl, |
1999 | fabric->tf_ops.write_pending = &lio_write_pending; | 2024 | .fabric_drop_nodeacl = lio_target_drop_nodeacl, |
2000 | fabric->tf_ops.write_pending_status = &lio_write_pending_status; | 2025 | |
2001 | fabric->tf_ops.set_default_node_attributes = | 2026 | .tfc_discovery_attrs = lio_target_discovery_auth_attrs, |
2002 | &lio_set_default_node_attributes; | 2027 | .tfc_wwn_attrs = lio_target_wwn_attrs, |
2003 | fabric->tf_ops.get_task_tag = &iscsi_get_task_tag; | 2028 | .tfc_tpg_base_attrs = lio_target_tpg_attrs, |
2004 | fabric->tf_ops.get_cmd_state = &iscsi_get_cmd_state; | 2029 | .tfc_tpg_attrib_attrs = lio_target_tpg_attrib_attrs, |
2005 | fabric->tf_ops.queue_data_in = &lio_queue_data_in; | 2030 | .tfc_tpg_auth_attrs = lio_target_tpg_auth_attrs, |
2006 | fabric->tf_ops.queue_status = &lio_queue_status; | 2031 | .tfc_tpg_param_attrs = lio_target_tpg_param_attrs, |
2007 | fabric->tf_ops.queue_tm_rsp = &lio_queue_tm_rsp; | 2032 | .tfc_tpg_np_base_attrs = lio_target_portal_attrs, |
2008 | fabric->tf_ops.aborted_task = &lio_aborted_task; | 2033 | .tfc_tpg_nacl_base_attrs = lio_target_initiator_attrs, |
2009 | /* | 2034 | .tfc_tpg_nacl_attrib_attrs = lio_target_nacl_attrib_attrs, |
2010 | * Setup function pointers for generic logic in target_core_fabric_configfs.c | 2035 | .tfc_tpg_nacl_auth_attrs = lio_target_nacl_auth_attrs, |
2011 | */ | 2036 | .tfc_tpg_nacl_param_attrs = lio_target_nacl_param_attrs, |
2012 | fabric->tf_ops.fabric_make_wwn = &lio_target_call_coreaddtiqn; | 2037 | }; |
2013 | fabric->tf_ops.fabric_drop_wwn = &lio_target_call_coredeltiqn; | ||
2014 | fabric->tf_ops.fabric_make_tpg = &lio_target_tiqn_addtpg; | ||
2015 | fabric->tf_ops.fabric_drop_tpg = &lio_target_tiqn_deltpg; | ||
2016 | fabric->tf_ops.fabric_post_link = NULL; | ||
2017 | fabric->tf_ops.fabric_pre_unlink = NULL; | ||
2018 | fabric->tf_ops.fabric_make_np = &lio_target_call_addnptotpg; | ||
2019 | fabric->tf_ops.fabric_drop_np = &lio_target_call_delnpfromtpg; | ||
2020 | fabric->tf_ops.fabric_make_nodeacl = &lio_target_make_nodeacl; | ||
2021 | fabric->tf_ops.fabric_drop_nodeacl = &lio_target_drop_nodeacl; | ||
2022 | /* | ||
2023 | * Setup default attribute lists for various fabric->tf_cit_tmpl | ||
2024 | * sturct config_item_type's | ||
2025 | */ | ||
2026 | fabric->tf_cit_tmpl.tfc_discovery_cit.ct_attrs = lio_target_discovery_auth_attrs; | ||
2027 | fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = lio_target_wwn_attrs; | ||
2028 | fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = lio_target_tpg_attrs; | ||
2029 | fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = lio_target_tpg_attrib_attrs; | ||
2030 | fabric->tf_cit_tmpl.tfc_tpg_auth_cit.ct_attrs = lio_target_tpg_auth_attrs; | ||
2031 | fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = lio_target_tpg_param_attrs; | ||
2032 | fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = lio_target_portal_attrs; | ||
2033 | fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = lio_target_initiator_attrs; | ||
2034 | fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = lio_target_nacl_attrib_attrs; | ||
2035 | fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = lio_target_nacl_auth_attrs; | ||
2036 | fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = lio_target_nacl_param_attrs; | ||
2037 | |||
2038 | ret = target_fabric_configfs_register(fabric); | ||
2039 | if (ret < 0) { | ||
2040 | pr_err("target_fabric_configfs_register() for" | ||
2041 | " LIO-Target failed!\n"); | ||
2042 | target_fabric_configfs_free(fabric); | ||
2043 | return ret; | ||
2044 | } | ||
2045 | |||
2046 | lio_target_fabric_configfs = fabric; | ||
2047 | pr_debug("LIO_TARGET[0] - Set fabric ->" | ||
2048 | " lio_target_fabric_configfs\n"); | ||
2049 | return 0; | ||
2050 | } | ||
2051 | |||
2052 | |||
2053 | void iscsi_target_deregister_configfs(void) | ||
2054 | { | ||
2055 | if (!lio_target_fabric_configfs) | ||
2056 | return; | ||
2057 | /* | ||
2058 | * Shutdown discovery sessions and disable discovery TPG | ||
2059 | */ | ||
2060 | if (iscsit_global->discovery_tpg) | ||
2061 | iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1); | ||
2062 | |||
2063 | target_fabric_configfs_deregister(lio_target_fabric_configfs); | ||
2064 | lio_target_fabric_configfs = NULL; | ||
2065 | pr_debug("LIO_TARGET[0] - Cleared" | ||
2066 | " lio_target_fabric_configfs\n"); | ||
2067 | } | ||
diff --git a/drivers/target/iscsi/iscsi_target_configfs.h b/drivers/target/iscsi/iscsi_target_configfs.h deleted file mode 100644 index 8cd5a63c4edc..000000000000 --- a/drivers/target/iscsi/iscsi_target_configfs.h +++ /dev/null | |||
@@ -1,7 +0,0 @@ | |||
1 | #ifndef ISCSI_TARGET_CONFIGFS_H | ||
2 | #define ISCSI_TARGET_CONFIGFS_H | ||
3 | |||
4 | extern int iscsi_target_register_configfs(void); | ||
5 | extern void iscsi_target_deregister_configfs(void); | ||
6 | |||
7 | #endif /* ISCSI_TARGET_CONFIGFS_H */ | ||
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c index bdd8731a4daa..959a14c9dd5d 100644 --- a/drivers/target/iscsi/iscsi_target_erl0.c +++ b/drivers/target/iscsi/iscsi_target_erl0.c | |||
@@ -23,7 +23,6 @@ | |||
23 | 23 | ||
24 | #include <target/iscsi/iscsi_target_core.h> | 24 | #include <target/iscsi/iscsi_target_core.h> |
25 | #include "iscsi_target_seq_pdu_list.h" | 25 | #include "iscsi_target_seq_pdu_list.h" |
26 | #include "iscsi_target_tq.h" | ||
27 | #include "iscsi_target_erl0.h" | 26 | #include "iscsi_target_erl0.h" |
28 | #include "iscsi_target_erl1.h" | 27 | #include "iscsi_target_erl1.h" |
29 | #include "iscsi_target_erl2.h" | 28 | #include "iscsi_target_erl2.h" |
@@ -860,7 +859,10 @@ void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *conn) | |||
860 | } | 859 | } |
861 | spin_unlock_bh(&conn->state_lock); | 860 | spin_unlock_bh(&conn->state_lock); |
862 | 861 | ||
863 | iscsi_thread_set_force_reinstatement(conn); | 862 | if (conn->tx_thread && conn->tx_thread_active) |
863 | send_sig(SIGINT, conn->tx_thread, 1); | ||
864 | if (conn->rx_thread && conn->rx_thread_active) | ||
865 | send_sig(SIGINT, conn->rx_thread, 1); | ||
864 | 866 | ||
865 | sleep: | 867 | sleep: |
866 | wait_for_completion(&conn->conn_wait_rcfr_comp); | 868 | wait_for_completion(&conn->conn_wait_rcfr_comp); |
@@ -885,10 +887,10 @@ void iscsit_cause_connection_reinstatement(struct iscsi_conn *conn, int sleep) | |||
885 | return; | 887 | return; |
886 | } | 888 | } |
887 | 889 | ||
888 | if (iscsi_thread_set_force_reinstatement(conn) < 0) { | 890 | if (conn->tx_thread && conn->tx_thread_active) |
889 | spin_unlock_bh(&conn->state_lock); | 891 | send_sig(SIGINT, conn->tx_thread, 1); |
890 | return; | 892 | if (conn->rx_thread && conn->rx_thread_active) |
891 | } | 893 | send_sig(SIGINT, conn->rx_thread, 1); |
892 | 894 | ||
893 | atomic_set(&conn->connection_reinstatement, 1); | 895 | atomic_set(&conn->connection_reinstatement, 1); |
894 | if (!sleep) { | 896 | if (!sleep) { |
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 153fb66ac1b8..8ce94ff744e6 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c | |||
@@ -26,7 +26,6 @@ | |||
26 | 26 | ||
27 | #include <target/iscsi/iscsi_target_core.h> | 27 | #include <target/iscsi/iscsi_target_core.h> |
28 | #include <target/iscsi/iscsi_target_stat.h> | 28 | #include <target/iscsi/iscsi_target_stat.h> |
29 | #include "iscsi_target_tq.h" | ||
30 | #include "iscsi_target_device.h" | 29 | #include "iscsi_target_device.h" |
31 | #include "iscsi_target_nego.h" | 30 | #include "iscsi_target_nego.h" |
32 | #include "iscsi_target_erl0.h" | 31 | #include "iscsi_target_erl0.h" |
@@ -699,6 +698,51 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn) | |||
699 | iscsit_start_nopin_timer(conn); | 698 | iscsit_start_nopin_timer(conn); |
700 | } | 699 | } |
701 | 700 | ||
701 | static int iscsit_start_kthreads(struct iscsi_conn *conn) | ||
702 | { | ||
703 | int ret = 0; | ||
704 | |||
705 | spin_lock(&iscsit_global->ts_bitmap_lock); | ||
706 | conn->bitmap_id = bitmap_find_free_region(iscsit_global->ts_bitmap, | ||
707 | ISCSIT_BITMAP_BITS, get_order(1)); | ||
708 | spin_unlock(&iscsit_global->ts_bitmap_lock); | ||
709 | |||
710 | if (conn->bitmap_id < 0) { | ||
711 | pr_err("bitmap_find_free_region() failed for" | ||
712 | " iscsit_start_kthreads()\n"); | ||
713 | return -ENOMEM; | ||
714 | } | ||
715 | |||
716 | conn->tx_thread = kthread_run(iscsi_target_tx_thread, conn, | ||
717 | "%s", ISCSI_TX_THREAD_NAME); | ||
718 | if (IS_ERR(conn->tx_thread)) { | ||
719 | pr_err("Unable to start iscsi_target_tx_thread\n"); | ||
720 | ret = PTR_ERR(conn->tx_thread); | ||
721 | goto out_bitmap; | ||
722 | } | ||
723 | conn->tx_thread_active = true; | ||
724 | |||
725 | conn->rx_thread = kthread_run(iscsi_target_rx_thread, conn, | ||
726 | "%s", ISCSI_RX_THREAD_NAME); | ||
727 | if (IS_ERR(conn->rx_thread)) { | ||
728 | pr_err("Unable to start iscsi_target_rx_thread\n"); | ||
729 | ret = PTR_ERR(conn->rx_thread); | ||
730 | goto out_tx; | ||
731 | } | ||
732 | conn->rx_thread_active = true; | ||
733 | |||
734 | return 0; | ||
735 | out_tx: | ||
736 | kthread_stop(conn->tx_thread); | ||
737 | conn->tx_thread_active = false; | ||
738 | out_bitmap: | ||
739 | spin_lock(&iscsit_global->ts_bitmap_lock); | ||
740 | bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id, | ||
741 | get_order(1)); | ||
742 | spin_unlock(&iscsit_global->ts_bitmap_lock); | ||
743 | return ret; | ||
744 | } | ||
745 | |||
702 | int iscsi_post_login_handler( | 746 | int iscsi_post_login_handler( |
703 | struct iscsi_np *np, | 747 | struct iscsi_np *np, |
704 | struct iscsi_conn *conn, | 748 | struct iscsi_conn *conn, |
@@ -709,7 +753,7 @@ int iscsi_post_login_handler( | |||
709 | struct se_session *se_sess = sess->se_sess; | 753 | struct se_session *se_sess = sess->se_sess; |
710 | struct iscsi_portal_group *tpg = sess->tpg; | 754 | struct iscsi_portal_group *tpg = sess->tpg; |
711 | struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; | 755 | struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; |
712 | struct iscsi_thread_set *ts; | 756 | int rc; |
713 | 757 | ||
714 | iscsit_inc_conn_usage_count(conn); | 758 | iscsit_inc_conn_usage_count(conn); |
715 | 759 | ||
@@ -724,7 +768,6 @@ int iscsi_post_login_handler( | |||
724 | /* | 768 | /* |
725 | * SCSI Initiator -> SCSI Target Port Mapping | 769 | * SCSI Initiator -> SCSI Target Port Mapping |
726 | */ | 770 | */ |
727 | ts = iscsi_get_thread_set(); | ||
728 | if (!zero_tsih) { | 771 | if (!zero_tsih) { |
729 | iscsi_set_session_parameters(sess->sess_ops, | 772 | iscsi_set_session_parameters(sess->sess_ops, |
730 | conn->param_list, 0); | 773 | conn->param_list, 0); |
@@ -751,9 +794,11 @@ int iscsi_post_login_handler( | |||
751 | sess->sess_ops->InitiatorName); | 794 | sess->sess_ops->InitiatorName); |
752 | spin_unlock_bh(&sess->conn_lock); | 795 | spin_unlock_bh(&sess->conn_lock); |
753 | 796 | ||
754 | iscsi_post_login_start_timers(conn); | 797 | rc = iscsit_start_kthreads(conn); |
798 | if (rc) | ||
799 | return rc; | ||
755 | 800 | ||
756 | iscsi_activate_thread_set(conn, ts); | 801 | iscsi_post_login_start_timers(conn); |
757 | /* | 802 | /* |
758 | * Determine CPU mask to ensure connection's RX and TX kthreads | 803 | * Determine CPU mask to ensure connection's RX and TX kthreads |
759 | * are scheduled on the same CPU. | 804 | * are scheduled on the same CPU. |
@@ -810,8 +855,11 @@ int iscsi_post_login_handler( | |||
810 | " iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt); | 855 | " iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt); |
811 | spin_unlock_bh(&se_tpg->session_lock); | 856 | spin_unlock_bh(&se_tpg->session_lock); |
812 | 857 | ||
858 | rc = iscsit_start_kthreads(conn); | ||
859 | if (rc) | ||
860 | return rc; | ||
861 | |||
813 | iscsi_post_login_start_timers(conn); | 862 | iscsi_post_login_start_timers(conn); |
814 | iscsi_activate_thread_set(conn, ts); | ||
815 | /* | 863 | /* |
816 | * Determine CPU mask to ensure connection's RX and TX kthreads | 864 | * Determine CPU mask to ensure connection's RX and TX kthreads |
817 | * are scheduled on the same CPU. | 865 | * are scheduled on the same CPU. |
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c index bdd127c0e3ae..e8a240818353 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.c +++ b/drivers/target/iscsi/iscsi_target_tpg.c | |||
@@ -68,10 +68,8 @@ int iscsit_load_discovery_tpg(void) | |||
68 | return -1; | 68 | return -1; |
69 | } | 69 | } |
70 | 70 | ||
71 | ret = core_tpg_register( | 71 | ret = core_tpg_register(&iscsi_ops, NULL, &tpg->tpg_se_tpg, |
72 | &lio_target_fabric_configfs->tf_ops, | 72 | tpg, TRANSPORT_TPG_TYPE_DISCOVERY); |
73 | NULL, &tpg->tpg_se_tpg, tpg, | ||
74 | TRANSPORT_TPG_TYPE_DISCOVERY); | ||
75 | if (ret < 0) { | 73 | if (ret < 0) { |
76 | kfree(tpg); | 74 | kfree(tpg); |
77 | return -1; | 75 | return -1; |
@@ -228,6 +226,7 @@ static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg) | |||
228 | a->demo_mode_discovery = TA_DEMO_MODE_DISCOVERY; | 226 | a->demo_mode_discovery = TA_DEMO_MODE_DISCOVERY; |
229 | a->default_erl = TA_DEFAULT_ERL; | 227 | a->default_erl = TA_DEFAULT_ERL; |
230 | a->t10_pi = TA_DEFAULT_T10_PI; | 228 | a->t10_pi = TA_DEFAULT_T10_PI; |
229 | a->fabric_prot_type = TA_DEFAULT_FABRIC_PROT_TYPE; | ||
231 | } | 230 | } |
232 | 231 | ||
233 | int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg) | 232 | int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg) |
@@ -878,3 +877,21 @@ int iscsit_ta_t10_pi( | |||
878 | 877 | ||
879 | return 0; | 878 | return 0; |
880 | } | 879 | } |
880 | |||
881 | int iscsit_ta_fabric_prot_type( | ||
882 | struct iscsi_portal_group *tpg, | ||
883 | u32 prot_type) | ||
884 | { | ||
885 | struct iscsi_tpg_attrib *a = &tpg->tpg_attrib; | ||
886 | |||
887 | if ((prot_type != 0) && (prot_type != 1) && (prot_type != 3)) { | ||
888 | pr_err("Illegal value for fabric_prot_type: %u\n", prot_type); | ||
889 | return -EINVAL; | ||
890 | } | ||
891 | |||
892 | a->fabric_prot_type = prot_type; | ||
893 | pr_debug("iSCSI_TPG[%hu] - T10 Fabric Protection Type: %u\n", | ||
894 | tpg->tpgt, prot_type); | ||
895 | |||
896 | return 0; | ||
897 | } | ||
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h index e7265337bc43..95ff5bdecd71 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.h +++ b/drivers/target/iscsi/iscsi_target_tpg.h | |||
@@ -39,5 +39,6 @@ extern int iscsit_ta_prod_mode_write_protect(struct iscsi_portal_group *, u32); | |||
39 | extern int iscsit_ta_demo_mode_discovery(struct iscsi_portal_group *, u32); | 39 | extern int iscsit_ta_demo_mode_discovery(struct iscsi_portal_group *, u32); |
40 | extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32); | 40 | extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32); |
41 | extern int iscsit_ta_t10_pi(struct iscsi_portal_group *, u32); | 41 | extern int iscsit_ta_t10_pi(struct iscsi_portal_group *, u32); |
42 | extern int iscsit_ta_fabric_prot_type(struct iscsi_portal_group *, u32); | ||
42 | 43 | ||
43 | #endif /* ISCSI_TARGET_TPG_H */ | 44 | #endif /* ISCSI_TARGET_TPG_H */ |
diff --git a/drivers/target/iscsi/iscsi_target_tq.c b/drivers/target/iscsi/iscsi_target_tq.c deleted file mode 100644 index 26aa50996473..000000000000 --- a/drivers/target/iscsi/iscsi_target_tq.c +++ /dev/null | |||
@@ -1,495 +0,0 @@ | |||
1 | /******************************************************************************* | ||
2 | * This file contains the iSCSI Login Thread and Thread Queue functions. | ||
3 | * | ||
4 | * (c) Copyright 2007-2013 Datera, Inc. | ||
5 | * | ||
6 | * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | ******************************************************************************/ | ||
18 | |||
19 | #include <linux/kthread.h> | ||
20 | #include <linux/list.h> | ||
21 | #include <linux/bitmap.h> | ||
22 | |||
23 | #include <target/iscsi/iscsi_target_core.h> | ||
24 | #include "iscsi_target_tq.h" | ||
25 | #include "iscsi_target.h" | ||
26 | |||
27 | static LIST_HEAD(inactive_ts_list); | ||
28 | static DEFINE_SPINLOCK(inactive_ts_lock); | ||
29 | static DEFINE_SPINLOCK(ts_bitmap_lock); | ||
30 | |||
31 | static void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *ts) | ||
32 | { | ||
33 | if (!list_empty(&ts->ts_list)) { | ||
34 | WARN_ON(1); | ||
35 | return; | ||
36 | } | ||
37 | spin_lock(&inactive_ts_lock); | ||
38 | list_add_tail(&ts->ts_list, &inactive_ts_list); | ||
39 | iscsit_global->inactive_ts++; | ||
40 | spin_unlock(&inactive_ts_lock); | ||
41 | } | ||
42 | |||
43 | static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void) | ||
44 | { | ||
45 | struct iscsi_thread_set *ts; | ||
46 | |||
47 | spin_lock(&inactive_ts_lock); | ||
48 | if (list_empty(&inactive_ts_list)) { | ||
49 | spin_unlock(&inactive_ts_lock); | ||
50 | return NULL; | ||
51 | } | ||
52 | |||
53 | ts = list_first_entry(&inactive_ts_list, struct iscsi_thread_set, ts_list); | ||
54 | |||
55 | list_del_init(&ts->ts_list); | ||
56 | iscsit_global->inactive_ts--; | ||
57 | spin_unlock(&inactive_ts_lock); | ||
58 | |||
59 | return ts; | ||
60 | } | ||
61 | |||
62 | int iscsi_allocate_thread_sets(u32 thread_pair_count) | ||
63 | { | ||
64 | int allocated_thread_pair_count = 0, i, thread_id; | ||
65 | struct iscsi_thread_set *ts = NULL; | ||
66 | |||
67 | for (i = 0; i < thread_pair_count; i++) { | ||
68 | ts = kzalloc(sizeof(struct iscsi_thread_set), GFP_KERNEL); | ||
69 | if (!ts) { | ||
70 | pr_err("Unable to allocate memory for" | ||
71 | " thread set.\n"); | ||
72 | return allocated_thread_pair_count; | ||
73 | } | ||
74 | /* | ||
75 | * Locate the next available regision in the thread_set_bitmap | ||
76 | */ | ||
77 | spin_lock(&ts_bitmap_lock); | ||
78 | thread_id = bitmap_find_free_region(iscsit_global->ts_bitmap, | ||
79 | iscsit_global->ts_bitmap_count, get_order(1)); | ||
80 | spin_unlock(&ts_bitmap_lock); | ||
81 | if (thread_id < 0) { | ||
82 | pr_err("bitmap_find_free_region() failed for" | ||
83 | " thread_set_bitmap\n"); | ||
84 | kfree(ts); | ||
85 | return allocated_thread_pair_count; | ||
86 | } | ||
87 | |||
88 | ts->thread_id = thread_id; | ||
89 | ts->status = ISCSI_THREAD_SET_FREE; | ||
90 | INIT_LIST_HEAD(&ts->ts_list); | ||
91 | spin_lock_init(&ts->ts_state_lock); | ||
92 | init_completion(&ts->rx_restart_comp); | ||
93 | init_completion(&ts->tx_restart_comp); | ||
94 | init_completion(&ts->rx_start_comp); | ||
95 | init_completion(&ts->tx_start_comp); | ||
96 | sema_init(&ts->ts_activate_sem, 0); | ||
97 | |||
98 | ts->create_threads = 1; | ||
99 | ts->tx_thread = kthread_run(iscsi_target_tx_thread, ts, "%s", | ||
100 | ISCSI_TX_THREAD_NAME); | ||
101 | if (IS_ERR(ts->tx_thread)) { | ||
102 | dump_stack(); | ||
103 | pr_err("Unable to start iscsi_target_tx_thread\n"); | ||
104 | break; | ||
105 | } | ||
106 | |||
107 | ts->rx_thread = kthread_run(iscsi_target_rx_thread, ts, "%s", | ||
108 | ISCSI_RX_THREAD_NAME); | ||
109 | if (IS_ERR(ts->rx_thread)) { | ||
110 | kthread_stop(ts->tx_thread); | ||
111 | pr_err("Unable to start iscsi_target_rx_thread\n"); | ||
112 | break; | ||
113 | } | ||
114 | ts->create_threads = 0; | ||
115 | |||
116 | iscsi_add_ts_to_inactive_list(ts); | ||
117 | allocated_thread_pair_count++; | ||
118 | } | ||
119 | |||
120 | pr_debug("Spawned %d thread set(s) (%d total threads).\n", | ||
121 | allocated_thread_pair_count, allocated_thread_pair_count * 2); | ||
122 | return allocated_thread_pair_count; | ||
123 | } | ||
124 | |||
125 | static void iscsi_deallocate_thread_one(struct iscsi_thread_set *ts) | ||
126 | { | ||
127 | spin_lock_bh(&ts->ts_state_lock); | ||
128 | ts->status = ISCSI_THREAD_SET_DIE; | ||
129 | |||
130 | if (ts->rx_thread) { | ||
131 | complete(&ts->rx_start_comp); | ||
132 | spin_unlock_bh(&ts->ts_state_lock); | ||
133 | kthread_stop(ts->rx_thread); | ||
134 | spin_lock_bh(&ts->ts_state_lock); | ||
135 | } | ||
136 | if (ts->tx_thread) { | ||
137 | complete(&ts->tx_start_comp); | ||
138 | spin_unlock_bh(&ts->ts_state_lock); | ||
139 | kthread_stop(ts->tx_thread); | ||
140 | spin_lock_bh(&ts->ts_state_lock); | ||
141 | } | ||
142 | spin_unlock_bh(&ts->ts_state_lock); | ||
143 | /* | ||
144 | * Release this thread_id in the thread_set_bitmap | ||
145 | */ | ||
146 | spin_lock(&ts_bitmap_lock); | ||
147 | bitmap_release_region(iscsit_global->ts_bitmap, | ||
148 | ts->thread_id, get_order(1)); | ||
149 | spin_unlock(&ts_bitmap_lock); | ||
150 | |||
151 | kfree(ts); | ||
152 | } | ||
153 | |||
154 | void iscsi_deallocate_thread_sets(void) | ||
155 | { | ||
156 | struct iscsi_thread_set *ts = NULL; | ||
157 | u32 released_count = 0; | ||
158 | |||
159 | while ((ts = iscsi_get_ts_from_inactive_list())) { | ||
160 | |||
161 | iscsi_deallocate_thread_one(ts); | ||
162 | released_count++; | ||
163 | } | ||
164 | |||
165 | if (released_count) | ||
166 | pr_debug("Stopped %d thread set(s) (%d total threads)." | ||
167 | "\n", released_count, released_count * 2); | ||
168 | } | ||
169 | |||
170 | static void iscsi_deallocate_extra_thread_sets(void) | ||
171 | { | ||
172 | u32 orig_count, released_count = 0; | ||
173 | struct iscsi_thread_set *ts = NULL; | ||
174 | |||
175 | orig_count = TARGET_THREAD_SET_COUNT; | ||
176 | |||
177 | while ((iscsit_global->inactive_ts + 1) > orig_count) { | ||
178 | ts = iscsi_get_ts_from_inactive_list(); | ||
179 | if (!ts) | ||
180 | break; | ||
181 | |||
182 | iscsi_deallocate_thread_one(ts); | ||
183 | released_count++; | ||
184 | } | ||
185 | |||
186 | if (released_count) | ||
187 | pr_debug("Stopped %d thread set(s) (%d total threads)." | ||
188 | "\n", released_count, released_count * 2); | ||
189 | } | ||
190 | |||
191 | void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set *ts) | ||
192 | { | ||
193 | spin_lock_bh(&ts->ts_state_lock); | ||
194 | conn->thread_set = ts; | ||
195 | ts->conn = conn; | ||
196 | ts->status = ISCSI_THREAD_SET_ACTIVE; | ||
197 | spin_unlock_bh(&ts->ts_state_lock); | ||
198 | |||
199 | complete(&ts->rx_start_comp); | ||
200 | complete(&ts->tx_start_comp); | ||
201 | |||
202 | down(&ts->ts_activate_sem); | ||
203 | } | ||
204 | |||
205 | struct iscsi_thread_set *iscsi_get_thread_set(void) | ||
206 | { | ||
207 | struct iscsi_thread_set *ts; | ||
208 | |||
209 | get_set: | ||
210 | ts = iscsi_get_ts_from_inactive_list(); | ||
211 | if (!ts) { | ||
212 | iscsi_allocate_thread_sets(1); | ||
213 | goto get_set; | ||
214 | } | ||
215 | |||
216 | ts->delay_inactive = 1; | ||
217 | ts->signal_sent = 0; | ||
218 | ts->thread_count = 2; | ||
219 | init_completion(&ts->rx_restart_comp); | ||
220 | init_completion(&ts->tx_restart_comp); | ||
221 | sema_init(&ts->ts_activate_sem, 0); | ||
222 | |||
223 | return ts; | ||
224 | } | ||
225 | |||
226 | void iscsi_set_thread_clear(struct iscsi_conn *conn, u8 thread_clear) | ||
227 | { | ||
228 | struct iscsi_thread_set *ts = NULL; | ||
229 | |||
230 | if (!conn->thread_set) { | ||
231 | pr_err("struct iscsi_conn->thread_set is NULL\n"); | ||
232 | return; | ||
233 | } | ||
234 | ts = conn->thread_set; | ||
235 | |||
236 | spin_lock_bh(&ts->ts_state_lock); | ||
237 | ts->thread_clear &= ~thread_clear; | ||
238 | |||
239 | if ((thread_clear & ISCSI_CLEAR_RX_THREAD) && | ||
240 | (ts->blocked_threads & ISCSI_BLOCK_RX_THREAD)) | ||
241 | complete(&ts->rx_restart_comp); | ||
242 | else if ((thread_clear & ISCSI_CLEAR_TX_THREAD) && | ||
243 | (ts->blocked_threads & ISCSI_BLOCK_TX_THREAD)) | ||
244 | complete(&ts->tx_restart_comp); | ||
245 | spin_unlock_bh(&ts->ts_state_lock); | ||
246 | } | ||
247 | |||
248 | void iscsi_set_thread_set_signal(struct iscsi_conn *conn, u8 signal_sent) | ||
249 | { | ||
250 | struct iscsi_thread_set *ts = NULL; | ||
251 | |||
252 | if (!conn->thread_set) { | ||
253 | pr_err("struct iscsi_conn->thread_set is NULL\n"); | ||
254 | return; | ||
255 | } | ||
256 | ts = conn->thread_set; | ||
257 | |||
258 | spin_lock_bh(&ts->ts_state_lock); | ||
259 | ts->signal_sent |= signal_sent; | ||
260 | spin_unlock_bh(&ts->ts_state_lock); | ||
261 | } | ||
262 | |||
263 | int iscsi_release_thread_set(struct iscsi_conn *conn) | ||
264 | { | ||
265 | int thread_called = 0; | ||
266 | struct iscsi_thread_set *ts = NULL; | ||
267 | |||
268 | if (!conn || !conn->thread_set) { | ||
269 | pr_err("connection or thread set pointer is NULL\n"); | ||
270 | BUG(); | ||
271 | } | ||
272 | ts = conn->thread_set; | ||
273 | |||
274 | spin_lock_bh(&ts->ts_state_lock); | ||
275 | ts->status = ISCSI_THREAD_SET_RESET; | ||
276 | |||
277 | if (!strncmp(current->comm, ISCSI_RX_THREAD_NAME, | ||
278 | strlen(ISCSI_RX_THREAD_NAME))) | ||
279 | thread_called = ISCSI_RX_THREAD; | ||
280 | else if (!strncmp(current->comm, ISCSI_TX_THREAD_NAME, | ||
281 | strlen(ISCSI_TX_THREAD_NAME))) | ||
282 | thread_called = ISCSI_TX_THREAD; | ||
283 | |||
284 | if (ts->rx_thread && (thread_called == ISCSI_TX_THREAD) && | ||
285 | (ts->thread_clear & ISCSI_CLEAR_RX_THREAD)) { | ||
286 | |||
287 | if (!(ts->signal_sent & ISCSI_SIGNAL_RX_THREAD)) { | ||
288 | send_sig(SIGINT, ts->rx_thread, 1); | ||
289 | ts->signal_sent |= ISCSI_SIGNAL_RX_THREAD; | ||
290 | } | ||
291 | ts->blocked_threads |= ISCSI_BLOCK_RX_THREAD; | ||
292 | spin_unlock_bh(&ts->ts_state_lock); | ||
293 | wait_for_completion(&ts->rx_restart_comp); | ||
294 | spin_lock_bh(&ts->ts_state_lock); | ||
295 | ts->blocked_threads &= ~ISCSI_BLOCK_RX_THREAD; | ||
296 | } | ||
297 | if (ts->tx_thread && (thread_called == ISCSI_RX_THREAD) && | ||
298 | (ts->thread_clear & ISCSI_CLEAR_TX_THREAD)) { | ||
299 | |||
300 | if (!(ts->signal_sent & ISCSI_SIGNAL_TX_THREAD)) { | ||
301 | send_sig(SIGINT, ts->tx_thread, 1); | ||
302 | ts->signal_sent |= ISCSI_SIGNAL_TX_THREAD; | ||
303 | } | ||
304 | ts->blocked_threads |= ISCSI_BLOCK_TX_THREAD; | ||
305 | spin_unlock_bh(&ts->ts_state_lock); | ||
306 | wait_for_completion(&ts->tx_restart_comp); | ||
307 | spin_lock_bh(&ts->ts_state_lock); | ||
308 | ts->blocked_threads &= ~ISCSI_BLOCK_TX_THREAD; | ||
309 | } | ||
310 | |||
311 | ts->conn = NULL; | ||
312 | ts->status = ISCSI_THREAD_SET_FREE; | ||
313 | spin_unlock_bh(&ts->ts_state_lock); | ||
314 | |||
315 | return 0; | ||
316 | } | ||
317 | |||
318 | int iscsi_thread_set_force_reinstatement(struct iscsi_conn *conn) | ||
319 | { | ||
320 | struct iscsi_thread_set *ts; | ||
321 | |||
322 | if (!conn->thread_set) | ||
323 | return -1; | ||
324 | ts = conn->thread_set; | ||
325 | |||
326 | spin_lock_bh(&ts->ts_state_lock); | ||
327 | if (ts->status != ISCSI_THREAD_SET_ACTIVE) { | ||
328 | spin_unlock_bh(&ts->ts_state_lock); | ||
329 | return -1; | ||
330 | } | ||
331 | |||
332 | if (ts->tx_thread && (!(ts->signal_sent & ISCSI_SIGNAL_TX_THREAD))) { | ||
333 | send_sig(SIGINT, ts->tx_thread, 1); | ||
334 | ts->signal_sent |= ISCSI_SIGNAL_TX_THREAD; | ||
335 | } | ||
336 | if (ts->rx_thread && (!(ts->signal_sent & ISCSI_SIGNAL_RX_THREAD))) { | ||
337 | send_sig(SIGINT, ts->rx_thread, 1); | ||
338 | ts->signal_sent |= ISCSI_SIGNAL_RX_THREAD; | ||
339 | } | ||
340 | spin_unlock_bh(&ts->ts_state_lock); | ||
341 | |||
342 | return 0; | ||
343 | } | ||
344 | |||
345 | static void iscsi_check_to_add_additional_sets(void) | ||
346 | { | ||
347 | int thread_sets_add; | ||
348 | |||
349 | spin_lock(&inactive_ts_lock); | ||
350 | thread_sets_add = iscsit_global->inactive_ts; | ||
351 | spin_unlock(&inactive_ts_lock); | ||
352 | if (thread_sets_add == 1) | ||
353 | iscsi_allocate_thread_sets(1); | ||
354 | } | ||
355 | |||
356 | static int iscsi_signal_thread_pre_handler(struct iscsi_thread_set *ts) | ||
357 | { | ||
358 | spin_lock_bh(&ts->ts_state_lock); | ||
359 | if (ts->status == ISCSI_THREAD_SET_DIE || kthread_should_stop() || | ||
360 | signal_pending(current)) { | ||
361 | spin_unlock_bh(&ts->ts_state_lock); | ||
362 | return -1; | ||
363 | } | ||
364 | spin_unlock_bh(&ts->ts_state_lock); | ||
365 | |||
366 | return 0; | ||
367 | } | ||
368 | |||
369 | struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *ts) | ||
370 | { | ||
371 | int ret; | ||
372 | |||
373 | spin_lock_bh(&ts->ts_state_lock); | ||
374 | if (ts->create_threads) { | ||
375 | spin_unlock_bh(&ts->ts_state_lock); | ||
376 | goto sleep; | ||
377 | } | ||
378 | |||
379 | if (ts->status != ISCSI_THREAD_SET_DIE) | ||
380 | flush_signals(current); | ||
381 | |||
382 | if (ts->delay_inactive && (--ts->thread_count == 0)) { | ||
383 | spin_unlock_bh(&ts->ts_state_lock); | ||
384 | |||
385 | if (!iscsit_global->in_shutdown) | ||
386 | iscsi_deallocate_extra_thread_sets(); | ||
387 | |||
388 | iscsi_add_ts_to_inactive_list(ts); | ||
389 | spin_lock_bh(&ts->ts_state_lock); | ||
390 | } | ||
391 | |||
392 | if ((ts->status == ISCSI_THREAD_SET_RESET) && | ||
393 | (ts->thread_clear & ISCSI_CLEAR_RX_THREAD)) | ||
394 | complete(&ts->rx_restart_comp); | ||
395 | |||
396 | ts->thread_clear &= ~ISCSI_CLEAR_RX_THREAD; | ||
397 | spin_unlock_bh(&ts->ts_state_lock); | ||
398 | sleep: | ||
399 | ret = wait_for_completion_interruptible(&ts->rx_start_comp); | ||
400 | if (ret != 0) | ||
401 | return NULL; | ||
402 | |||
403 | if (iscsi_signal_thread_pre_handler(ts) < 0) | ||
404 | return NULL; | ||
405 | |||
406 | iscsi_check_to_add_additional_sets(); | ||
407 | |||
408 | spin_lock_bh(&ts->ts_state_lock); | ||
409 | if (!ts->conn) { | ||
410 | pr_err("struct iscsi_thread_set->conn is NULL for" | ||
411 | " RX thread_id: %s/%d\n", current->comm, current->pid); | ||
412 | spin_unlock_bh(&ts->ts_state_lock); | ||
413 | return NULL; | ||
414 | } | ||
415 | ts->thread_clear |= ISCSI_CLEAR_RX_THREAD; | ||
416 | spin_unlock_bh(&ts->ts_state_lock); | ||
417 | |||
418 | up(&ts->ts_activate_sem); | ||
419 | |||
420 | return ts->conn; | ||
421 | } | ||
422 | |||
423 | struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *ts) | ||
424 | { | ||
425 | int ret; | ||
426 | |||
427 | spin_lock_bh(&ts->ts_state_lock); | ||
428 | if (ts->create_threads) { | ||
429 | spin_unlock_bh(&ts->ts_state_lock); | ||
430 | goto sleep; | ||
431 | } | ||
432 | |||
433 | if (ts->status != ISCSI_THREAD_SET_DIE) | ||
434 | flush_signals(current); | ||
435 | |||
436 | if (ts->delay_inactive && (--ts->thread_count == 0)) { | ||
437 | spin_unlock_bh(&ts->ts_state_lock); | ||
438 | |||
439 | if (!iscsit_global->in_shutdown) | ||
440 | iscsi_deallocate_extra_thread_sets(); | ||
441 | |||
442 | iscsi_add_ts_to_inactive_list(ts); | ||
443 | spin_lock_bh(&ts->ts_state_lock); | ||
444 | } | ||
445 | if ((ts->status == ISCSI_THREAD_SET_RESET) && | ||
446 | (ts->thread_clear & ISCSI_CLEAR_TX_THREAD)) | ||
447 | complete(&ts->tx_restart_comp); | ||
448 | |||
449 | ts->thread_clear &= ~ISCSI_CLEAR_TX_THREAD; | ||
450 | spin_unlock_bh(&ts->ts_state_lock); | ||
451 | sleep: | ||
452 | ret = wait_for_completion_interruptible(&ts->tx_start_comp); | ||
453 | if (ret != 0) | ||
454 | return NULL; | ||
455 | |||
456 | if (iscsi_signal_thread_pre_handler(ts) < 0) | ||
457 | return NULL; | ||
458 | |||
459 | iscsi_check_to_add_additional_sets(); | ||
460 | |||
461 | spin_lock_bh(&ts->ts_state_lock); | ||
462 | if (!ts->conn) { | ||
463 | pr_err("struct iscsi_thread_set->conn is NULL for" | ||
464 | " TX thread_id: %s/%d\n", current->comm, current->pid); | ||
465 | spin_unlock_bh(&ts->ts_state_lock); | ||
466 | return NULL; | ||
467 | } | ||
468 | ts->thread_clear |= ISCSI_CLEAR_TX_THREAD; | ||
469 | spin_unlock_bh(&ts->ts_state_lock); | ||
470 | |||
471 | up(&ts->ts_activate_sem); | ||
472 | |||
473 | return ts->conn; | ||
474 | } | ||
475 | |||
476 | int iscsi_thread_set_init(void) | ||
477 | { | ||
478 | int size; | ||
479 | |||
480 | iscsit_global->ts_bitmap_count = ISCSI_TS_BITMAP_BITS; | ||
481 | |||
482 | size = BITS_TO_LONGS(iscsit_global->ts_bitmap_count) * sizeof(long); | ||
483 | iscsit_global->ts_bitmap = kzalloc(size, GFP_KERNEL); | ||
484 | if (!iscsit_global->ts_bitmap) { | ||
485 | pr_err("Unable to allocate iscsit_global->ts_bitmap\n"); | ||
486 | return -ENOMEM; | ||
487 | } | ||
488 | |||
489 | return 0; | ||
490 | } | ||
491 | |||
492 | void iscsi_thread_set_free(void) | ||
493 | { | ||
494 | kfree(iscsit_global->ts_bitmap); | ||
495 | } | ||
diff --git a/drivers/target/iscsi/iscsi_target_tq.h b/drivers/target/iscsi/iscsi_target_tq.h deleted file mode 100644 index cc1eede5ab3a..000000000000 --- a/drivers/target/iscsi/iscsi_target_tq.h +++ /dev/null | |||
@@ -1,84 +0,0 @@ | |||
1 | #ifndef ISCSI_THREAD_QUEUE_H | ||
2 | #define ISCSI_THREAD_QUEUE_H | ||
3 | |||
4 | /* | ||
5 | * Defines for thread sets. | ||
6 | */ | ||
7 | extern int iscsi_thread_set_force_reinstatement(struct iscsi_conn *); | ||
8 | extern int iscsi_allocate_thread_sets(u32); | ||
9 | extern void iscsi_deallocate_thread_sets(void); | ||
10 | extern void iscsi_activate_thread_set(struct iscsi_conn *, struct iscsi_thread_set *); | ||
11 | extern struct iscsi_thread_set *iscsi_get_thread_set(void); | ||
12 | extern void iscsi_set_thread_clear(struct iscsi_conn *, u8); | ||
13 | extern void iscsi_set_thread_set_signal(struct iscsi_conn *, u8); | ||
14 | extern int iscsi_release_thread_set(struct iscsi_conn *); | ||
15 | extern struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *); | ||
16 | extern struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *); | ||
17 | extern int iscsi_thread_set_init(void); | ||
18 | extern void iscsi_thread_set_free(void); | ||
19 | |||
20 | extern int iscsi_target_tx_thread(void *); | ||
21 | extern int iscsi_target_rx_thread(void *); | ||
22 | |||
23 | #define TARGET_THREAD_SET_COUNT 4 | ||
24 | |||
25 | #define ISCSI_RX_THREAD 1 | ||
26 | #define ISCSI_TX_THREAD 2 | ||
27 | #define ISCSI_RX_THREAD_NAME "iscsi_trx" | ||
28 | #define ISCSI_TX_THREAD_NAME "iscsi_ttx" | ||
29 | #define ISCSI_BLOCK_RX_THREAD 0x1 | ||
30 | #define ISCSI_BLOCK_TX_THREAD 0x2 | ||
31 | #define ISCSI_CLEAR_RX_THREAD 0x1 | ||
32 | #define ISCSI_CLEAR_TX_THREAD 0x2 | ||
33 | #define ISCSI_SIGNAL_RX_THREAD 0x1 | ||
34 | #define ISCSI_SIGNAL_TX_THREAD 0x2 | ||
35 | |||
36 | /* struct iscsi_thread_set->status */ | ||
37 | #define ISCSI_THREAD_SET_FREE 1 | ||
38 | #define ISCSI_THREAD_SET_ACTIVE 2 | ||
39 | #define ISCSI_THREAD_SET_DIE 3 | ||
40 | #define ISCSI_THREAD_SET_RESET 4 | ||
41 | #define ISCSI_THREAD_SET_DEALLOCATE_THREADS 5 | ||
42 | |||
43 | /* By default allow a maximum of 32K iSCSI connections */ | ||
44 | #define ISCSI_TS_BITMAP_BITS 32768 | ||
45 | |||
46 | struct iscsi_thread_set { | ||
47 | /* flags used for blocking and restarting sets */ | ||
48 | int blocked_threads; | ||
49 | /* flag for creating threads */ | ||
50 | int create_threads; | ||
51 | /* flag for delaying readding to inactive list */ | ||
52 | int delay_inactive; | ||
53 | /* status for thread set */ | ||
54 | int status; | ||
55 | /* which threads have had signals sent */ | ||
56 | int signal_sent; | ||
57 | /* flag for which threads exited first */ | ||
58 | int thread_clear; | ||
59 | /* Active threads in the thread set */ | ||
60 | int thread_count; | ||
61 | /* Unique thread ID */ | ||
62 | u32 thread_id; | ||
63 | /* pointer to connection if set is active */ | ||
64 | struct iscsi_conn *conn; | ||
65 | /* used for controlling ts state accesses */ | ||
66 | spinlock_t ts_state_lock; | ||
67 | /* used for restarting thread queue */ | ||
68 | struct completion rx_restart_comp; | ||
69 | /* used for restarting thread queue */ | ||
70 | struct completion tx_restart_comp; | ||
71 | /* used for normal unused blocking */ | ||
72 | struct completion rx_start_comp; | ||
73 | /* used for normal unused blocking */ | ||
74 | struct completion tx_start_comp; | ||
75 | /* OS descriptor for rx thread */ | ||
76 | struct task_struct *rx_thread; | ||
77 | /* OS descriptor for tx thread */ | ||
78 | struct task_struct *tx_thread; | ||
79 | /* struct iscsi_thread_set in list list head*/ | ||
80 | struct list_head ts_list; | ||
81 | struct semaphore ts_activate_sem; | ||
82 | }; | ||
83 | |||
84 | #endif /*** ISCSI_THREAD_QUEUE_H ***/ | ||
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index 390df8ed72b2..b18edda3e8af 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c | |||
@@ -33,7 +33,6 @@ | |||
33 | #include "iscsi_target_erl1.h" | 33 | #include "iscsi_target_erl1.h" |
34 | #include "iscsi_target_erl2.h" | 34 | #include "iscsi_target_erl2.h" |
35 | #include "iscsi_target_tpg.h" | 35 | #include "iscsi_target_tpg.h" |
36 | #include "iscsi_target_tq.h" | ||
37 | #include "iscsi_target_util.h" | 36 | #include "iscsi_target_util.h" |
38 | #include "iscsi_target.h" | 37 | #include "iscsi_target.h" |
39 | 38 | ||
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index c36bd7c29136..51f0c895c6a5 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c | |||
@@ -41,8 +41,7 @@ | |||
41 | 41 | ||
42 | #define to_tcm_loop_hba(hba) container_of(hba, struct tcm_loop_hba, dev) | 42 | #define to_tcm_loop_hba(hba) container_of(hba, struct tcm_loop_hba, dev) |
43 | 43 | ||
44 | /* Local pointer to allocated TCM configfs fabric module */ | 44 | static const struct target_core_fabric_ops loop_ops; |
45 | static struct target_fabric_configfs *tcm_loop_fabric_configfs; | ||
46 | 45 | ||
47 | static struct workqueue_struct *tcm_loop_workqueue; | 46 | static struct workqueue_struct *tcm_loop_workqueue; |
48 | static struct kmem_cache *tcm_loop_cmd_cache; | 47 | static struct kmem_cache *tcm_loop_cmd_cache; |
@@ -108,7 +107,7 @@ static struct device_driver tcm_loop_driverfs = { | |||
108 | /* | 107 | /* |
109 | * Used with root_device_register() in tcm_loop_alloc_core_bus() below | 108 | * Used with root_device_register() in tcm_loop_alloc_core_bus() below |
110 | */ | 109 | */ |
111 | struct device *tcm_loop_primary; | 110 | static struct device *tcm_loop_primary; |
112 | 111 | ||
113 | static void tcm_loop_submission_work(struct work_struct *work) | 112 | static void tcm_loop_submission_work(struct work_struct *work) |
114 | { | 113 | { |
@@ -697,6 +696,13 @@ static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg | |||
697 | return 0; | 696 | return 0; |
698 | } | 697 | } |
699 | 698 | ||
699 | static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg) | ||
700 | { | ||
701 | struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, | ||
702 | tl_se_tpg); | ||
703 | return tl_tpg->tl_fabric_prot_type; | ||
704 | } | ||
705 | |||
700 | static struct se_node_acl *tcm_loop_tpg_alloc_fabric_acl( | 706 | static struct se_node_acl *tcm_loop_tpg_alloc_fabric_acl( |
701 | struct se_portal_group *se_tpg) | 707 | struct se_portal_group *se_tpg) |
702 | { | 708 | { |
@@ -912,6 +918,46 @@ static void tcm_loop_port_unlink( | |||
912 | 918 | ||
913 | /* End items for tcm_loop_port_cit */ | 919 | /* End items for tcm_loop_port_cit */ |
914 | 920 | ||
921 | static ssize_t tcm_loop_tpg_attrib_show_fabric_prot_type( | ||
922 | struct se_portal_group *se_tpg, | ||
923 | char *page) | ||
924 | { | ||
925 | struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, | ||
926 | tl_se_tpg); | ||
927 | |||
928 | return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type); | ||
929 | } | ||
930 | |||
931 | static ssize_t tcm_loop_tpg_attrib_store_fabric_prot_type( | ||
932 | struct se_portal_group *se_tpg, | ||
933 | const char *page, | ||
934 | size_t count) | ||
935 | { | ||
936 | struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, | ||
937 | tl_se_tpg); | ||
938 | unsigned long val; | ||
939 | int ret = kstrtoul(page, 0, &val); | ||
940 | |||
941 | if (ret) { | ||
942 | pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret); | ||
943 | return ret; | ||
944 | } | ||
945 | if (val != 0 && val != 1 && val != 3) { | ||
946 | pr_err("Invalid qla2xxx fabric_prot_type: %lu\n", val); | ||
947 | return -EINVAL; | ||
948 | } | ||
949 | tl_tpg->tl_fabric_prot_type = val; | ||
950 | |||
951 | return count; | ||
952 | } | ||
953 | |||
954 | TF_TPG_ATTRIB_ATTR(tcm_loop, fabric_prot_type, S_IRUGO | S_IWUSR); | ||
955 | |||
956 | static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = { | ||
957 | &tcm_loop_tpg_attrib_fabric_prot_type.attr, | ||
958 | NULL, | ||
959 | }; | ||
960 | |||
915 | /* Start items for tcm_loop_nexus_cit */ | 961 | /* Start items for tcm_loop_nexus_cit */ |
916 | 962 | ||
917 | static int tcm_loop_make_nexus( | 963 | static int tcm_loop_make_nexus( |
@@ -937,7 +983,8 @@ static int tcm_loop_make_nexus( | |||
937 | /* | 983 | /* |
938 | * Initialize the struct se_session pointer | 984 | * Initialize the struct se_session pointer |
939 | */ | 985 | */ |
940 | tl_nexus->se_sess = transport_init_session(TARGET_PROT_ALL); | 986 | tl_nexus->se_sess = transport_init_session( |
987 | TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS); | ||
941 | if (IS_ERR(tl_nexus->se_sess)) { | 988 | if (IS_ERR(tl_nexus->se_sess)) { |
942 | ret = PTR_ERR(tl_nexus->se_sess); | 989 | ret = PTR_ERR(tl_nexus->se_sess); |
943 | goto out; | 990 | goto out; |
@@ -1165,21 +1212,19 @@ static struct se_portal_group *tcm_loop_make_naa_tpg( | |||
1165 | struct tcm_loop_hba *tl_hba = container_of(wwn, | 1212 | struct tcm_loop_hba *tl_hba = container_of(wwn, |
1166 | struct tcm_loop_hba, tl_hba_wwn); | 1213 | struct tcm_loop_hba, tl_hba_wwn); |
1167 | struct tcm_loop_tpg *tl_tpg; | 1214 | struct tcm_loop_tpg *tl_tpg; |
1168 | char *tpgt_str, *end_ptr; | ||
1169 | int ret; | 1215 | int ret; |
1170 | unsigned short int tpgt; | 1216 | unsigned long tpgt; |
1171 | 1217 | ||
1172 | tpgt_str = strstr(name, "tpgt_"); | 1218 | if (strstr(name, "tpgt_") != name) { |
1173 | if (!tpgt_str) { | ||
1174 | pr_err("Unable to locate \"tpgt_#\" directory" | 1219 | pr_err("Unable to locate \"tpgt_#\" directory" |
1175 | " group\n"); | 1220 | " group\n"); |
1176 | return ERR_PTR(-EINVAL); | 1221 | return ERR_PTR(-EINVAL); |
1177 | } | 1222 | } |
1178 | tpgt_str += 5; /* Skip ahead of "tpgt_" */ | 1223 | if (kstrtoul(name+5, 10, &tpgt)) |
1179 | tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0); | 1224 | return ERR_PTR(-EINVAL); |
1180 | 1225 | ||
1181 | if (tpgt >= TL_TPGS_PER_HBA) { | 1226 | if (tpgt >= TL_TPGS_PER_HBA) { |
1182 | pr_err("Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:" | 1227 | pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA:" |
1183 | " %u\n", tpgt, TL_TPGS_PER_HBA); | 1228 | " %u\n", tpgt, TL_TPGS_PER_HBA); |
1184 | return ERR_PTR(-EINVAL); | 1229 | return ERR_PTR(-EINVAL); |
1185 | } | 1230 | } |
@@ -1189,14 +1234,13 @@ static struct se_portal_group *tcm_loop_make_naa_tpg( | |||
1189 | /* | 1234 | /* |
1190 | * Register the tl_tpg as a emulated SAS TCM Target Endpoint | 1235 | * Register the tl_tpg as a emulated SAS TCM Target Endpoint |
1191 | */ | 1236 | */ |
1192 | ret = core_tpg_register(&tcm_loop_fabric_configfs->tf_ops, | 1237 | ret = core_tpg_register(&loop_ops, wwn, &tl_tpg->tl_se_tpg, tl_tpg, |
1193 | wwn, &tl_tpg->tl_se_tpg, tl_tpg, | ||
1194 | TRANSPORT_TPG_TYPE_NORMAL); | 1238 | TRANSPORT_TPG_TYPE_NORMAL); |
1195 | if (ret < 0) | 1239 | if (ret < 0) |
1196 | return ERR_PTR(-ENOMEM); | 1240 | return ERR_PTR(-ENOMEM); |
1197 | 1241 | ||
1198 | pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s" | 1242 | pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s" |
1199 | " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba), | 1243 | " Target Port %s,t,0x%04lx\n", tcm_loop_dump_proto_id(tl_hba), |
1200 | config_item_name(&wwn->wwn_group.cg_item), tpgt); | 1244 | config_item_name(&wwn->wwn_group.cg_item), tpgt); |
1201 | 1245 | ||
1202 | return &tl_tpg->tl_se_tpg; | 1246 | return &tl_tpg->tl_se_tpg; |
@@ -1338,127 +1382,51 @@ static struct configfs_attribute *tcm_loop_wwn_attrs[] = { | |||
1338 | 1382 | ||
1339 | /* End items for tcm_loop_cit */ | 1383 | /* End items for tcm_loop_cit */ |
1340 | 1384 | ||
1341 | static int tcm_loop_register_configfs(void) | 1385 | static const struct target_core_fabric_ops loop_ops = { |
1342 | { | 1386 | .module = THIS_MODULE, |
1343 | struct target_fabric_configfs *fabric; | 1387 | .name = "loopback", |
1344 | int ret; | 1388 | .get_fabric_name = tcm_loop_get_fabric_name, |
1345 | /* | 1389 | .get_fabric_proto_ident = tcm_loop_get_fabric_proto_ident, |
1346 | * Set the TCM Loop HBA counter to zero | 1390 | .tpg_get_wwn = tcm_loop_get_endpoint_wwn, |
1347 | */ | 1391 | .tpg_get_tag = tcm_loop_get_tag, |
1348 | tcm_loop_hba_no_cnt = 0; | 1392 | .tpg_get_default_depth = tcm_loop_get_default_depth, |
1349 | /* | 1393 | .tpg_get_pr_transport_id = tcm_loop_get_pr_transport_id, |
1350 | * Register the top level struct config_item_type with TCM core | 1394 | .tpg_get_pr_transport_id_len = tcm_loop_get_pr_transport_id_len, |
1351 | */ | 1395 | .tpg_parse_pr_out_transport_id = tcm_loop_parse_pr_out_transport_id, |
1352 | fabric = target_fabric_configfs_init(THIS_MODULE, "loopback"); | 1396 | .tpg_check_demo_mode = tcm_loop_check_demo_mode, |
1353 | if (IS_ERR(fabric)) { | 1397 | .tpg_check_demo_mode_cache = tcm_loop_check_demo_mode_cache, |
1354 | pr_err("tcm_loop_register_configfs() failed!\n"); | 1398 | .tpg_check_demo_mode_write_protect = |
1355 | return PTR_ERR(fabric); | 1399 | tcm_loop_check_demo_mode_write_protect, |
1356 | } | 1400 | .tpg_check_prod_mode_write_protect = |
1357 | /* | 1401 | tcm_loop_check_prod_mode_write_protect, |
1358 | * Setup the fabric API of function pointers used by target_core_mod | 1402 | .tpg_check_prot_fabric_only = tcm_loop_check_prot_fabric_only, |
1359 | */ | 1403 | .tpg_alloc_fabric_acl = tcm_loop_tpg_alloc_fabric_acl, |
1360 | fabric->tf_ops.get_fabric_name = &tcm_loop_get_fabric_name; | 1404 | .tpg_release_fabric_acl = tcm_loop_tpg_release_fabric_acl, |
1361 | fabric->tf_ops.get_fabric_proto_ident = &tcm_loop_get_fabric_proto_ident; | 1405 | .tpg_get_inst_index = tcm_loop_get_inst_index, |
1362 | fabric->tf_ops.tpg_get_wwn = &tcm_loop_get_endpoint_wwn; | 1406 | .check_stop_free = tcm_loop_check_stop_free, |
1363 | fabric->tf_ops.tpg_get_tag = &tcm_loop_get_tag; | 1407 | .release_cmd = tcm_loop_release_cmd, |
1364 | fabric->tf_ops.tpg_get_default_depth = &tcm_loop_get_default_depth; | 1408 | .shutdown_session = tcm_loop_shutdown_session, |
1365 | fabric->tf_ops.tpg_get_pr_transport_id = &tcm_loop_get_pr_transport_id; | 1409 | .close_session = tcm_loop_close_session, |
1366 | fabric->tf_ops.tpg_get_pr_transport_id_len = | 1410 | .sess_get_index = tcm_loop_sess_get_index, |
1367 | &tcm_loop_get_pr_transport_id_len; | 1411 | .write_pending = tcm_loop_write_pending, |
1368 | fabric->tf_ops.tpg_parse_pr_out_transport_id = | 1412 | .write_pending_status = tcm_loop_write_pending_status, |
1369 | &tcm_loop_parse_pr_out_transport_id; | 1413 | .set_default_node_attributes = tcm_loop_set_default_node_attributes, |
1370 | fabric->tf_ops.tpg_check_demo_mode = &tcm_loop_check_demo_mode; | 1414 | .get_task_tag = tcm_loop_get_task_tag, |
1371 | fabric->tf_ops.tpg_check_demo_mode_cache = | 1415 | .get_cmd_state = tcm_loop_get_cmd_state, |
1372 | &tcm_loop_check_demo_mode_cache; | 1416 | .queue_data_in = tcm_loop_queue_data_in, |
1373 | fabric->tf_ops.tpg_check_demo_mode_write_protect = | 1417 | .queue_status = tcm_loop_queue_status, |
1374 | &tcm_loop_check_demo_mode_write_protect; | 1418 | .queue_tm_rsp = tcm_loop_queue_tm_rsp, |
1375 | fabric->tf_ops.tpg_check_prod_mode_write_protect = | 1419 | .aborted_task = tcm_loop_aborted_task, |
1376 | &tcm_loop_check_prod_mode_write_protect; | 1420 | .fabric_make_wwn = tcm_loop_make_scsi_hba, |
1377 | /* | 1421 | .fabric_drop_wwn = tcm_loop_drop_scsi_hba, |
1378 | * The TCM loopback fabric module runs in demo-mode to a local | 1422 | .fabric_make_tpg = tcm_loop_make_naa_tpg, |
1379 | * virtual SCSI device, so fabric dependent initator ACLs are | 1423 | .fabric_drop_tpg = tcm_loop_drop_naa_tpg, |
1380 | * not required. | 1424 | .fabric_post_link = tcm_loop_port_link, |
1381 | */ | 1425 | .fabric_pre_unlink = tcm_loop_port_unlink, |
1382 | fabric->tf_ops.tpg_alloc_fabric_acl = &tcm_loop_tpg_alloc_fabric_acl; | 1426 | .tfc_wwn_attrs = tcm_loop_wwn_attrs, |
1383 | fabric->tf_ops.tpg_release_fabric_acl = | 1427 | .tfc_tpg_base_attrs = tcm_loop_tpg_attrs, |
1384 | &tcm_loop_tpg_release_fabric_acl; | 1428 | .tfc_tpg_attrib_attrs = tcm_loop_tpg_attrib_attrs, |
1385 | fabric->tf_ops.tpg_get_inst_index = &tcm_loop_get_inst_index; | 1429 | }; |
1386 | /* | ||
1387 | * Used for setting up remaining TCM resources in process context | ||
1388 | */ | ||
1389 | fabric->tf_ops.check_stop_free = &tcm_loop_check_stop_free; | ||
1390 | fabric->tf_ops.release_cmd = &tcm_loop_release_cmd; | ||
1391 | fabric->tf_ops.shutdown_session = &tcm_loop_shutdown_session; | ||
1392 | fabric->tf_ops.close_session = &tcm_loop_close_session; | ||
1393 | fabric->tf_ops.sess_get_index = &tcm_loop_sess_get_index; | ||
1394 | fabric->tf_ops.sess_get_initiator_sid = NULL; | ||
1395 | fabric->tf_ops.write_pending = &tcm_loop_write_pending; | ||
1396 | fabric->tf_ops.write_pending_status = &tcm_loop_write_pending_status; | ||
1397 | /* | ||
1398 | * Not used for TCM loopback | ||
1399 | */ | ||
1400 | fabric->tf_ops.set_default_node_attributes = | ||
1401 | &tcm_loop_set_default_node_attributes; | ||
1402 | fabric->tf_ops.get_task_tag = &tcm_loop_get_task_tag; | ||
1403 | fabric->tf_ops.get_cmd_state = &tcm_loop_get_cmd_state; | ||
1404 | fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in; | ||
1405 | fabric->tf_ops.queue_status = &tcm_loop_queue_status; | ||
1406 | fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp; | ||
1407 | fabric->tf_ops.aborted_task = &tcm_loop_aborted_task; | ||
1408 | |||
1409 | /* | ||
1410 | * Setup function pointers for generic logic in target_core_fabric_configfs.c | ||
1411 | */ | ||
1412 | fabric->tf_ops.fabric_make_wwn = &tcm_loop_make_scsi_hba; | ||
1413 | fabric->tf_ops.fabric_drop_wwn = &tcm_loop_drop_scsi_hba; | ||
1414 | fabric->tf_ops.fabric_make_tpg = &tcm_loop_make_naa_tpg; | ||
1415 | fabric->tf_ops.fabric_drop_tpg = &tcm_loop_drop_naa_tpg; | ||
1416 | /* | ||
1417 | * fabric_post_link() and fabric_pre_unlink() are used for | ||
1418 | * registration and release of TCM Loop Virtual SCSI LUNs. | ||
1419 | */ | ||
1420 | fabric->tf_ops.fabric_post_link = &tcm_loop_port_link; | ||
1421 | fabric->tf_ops.fabric_pre_unlink = &tcm_loop_port_unlink; | ||
1422 | fabric->tf_ops.fabric_make_np = NULL; | ||
1423 | fabric->tf_ops.fabric_drop_np = NULL; | ||
1424 | /* | ||
1425 | * Setup default attribute lists for various fabric->tf_cit_tmpl | ||
1426 | */ | ||
1427 | fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs; | ||
1428 | fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs; | ||
1429 | fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; | ||
1430 | fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; | ||
1431 | fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; | ||
1432 | /* | ||
1433 | * Once fabric->tf_ops has been setup, now register the fabric for | ||
1434 | * use within TCM | ||
1435 | */ | ||
1436 | ret = target_fabric_configfs_register(fabric); | ||
1437 | if (ret < 0) { | ||
1438 | pr_err("target_fabric_configfs_register() for" | ||
1439 | " TCM_Loop failed!\n"); | ||
1440 | target_fabric_configfs_free(fabric); | ||
1441 | return -1; | ||
1442 | } | ||
1443 | /* | ||
1444 | * Setup our local pointer to *fabric. | ||
1445 | */ | ||
1446 | tcm_loop_fabric_configfs = fabric; | ||
1447 | pr_debug("TCM_LOOP[0] - Set fabric ->" | ||
1448 | " tcm_loop_fabric_configfs\n"); | ||
1449 | return 0; | ||
1450 | } | ||
1451 | |||
1452 | static void tcm_loop_deregister_configfs(void) | ||
1453 | { | ||
1454 | if (!tcm_loop_fabric_configfs) | ||
1455 | return; | ||
1456 | |||
1457 | target_fabric_configfs_deregister(tcm_loop_fabric_configfs); | ||
1458 | tcm_loop_fabric_configfs = NULL; | ||
1459 | pr_debug("TCM_LOOP[0] - Cleared" | ||
1460 | " tcm_loop_fabric_configfs\n"); | ||
1461 | } | ||
1462 | 1430 | ||
1463 | static int __init tcm_loop_fabric_init(void) | 1431 | static int __init tcm_loop_fabric_init(void) |
1464 | { | 1432 | { |
@@ -1482,7 +1450,7 @@ static int __init tcm_loop_fabric_init(void) | |||
1482 | if (ret) | 1450 | if (ret) |
1483 | goto out_destroy_cache; | 1451 | goto out_destroy_cache; |
1484 | 1452 | ||
1485 | ret = tcm_loop_register_configfs(); | 1453 | ret = target_register_template(&loop_ops); |
1486 | if (ret) | 1454 | if (ret) |
1487 | goto out_release_core_bus; | 1455 | goto out_release_core_bus; |
1488 | 1456 | ||
@@ -1500,7 +1468,7 @@ out: | |||
1500 | 1468 | ||
1501 | static void __exit tcm_loop_fabric_exit(void) | 1469 | static void __exit tcm_loop_fabric_exit(void) |
1502 | { | 1470 | { |
1503 | tcm_loop_deregister_configfs(); | 1471 | target_unregister_template(&loop_ops); |
1504 | tcm_loop_release_core_bus(); | 1472 | tcm_loop_release_core_bus(); |
1505 | kmem_cache_destroy(tcm_loop_cmd_cache); | 1473 | kmem_cache_destroy(tcm_loop_cmd_cache); |
1506 | destroy_workqueue(tcm_loop_workqueue); | 1474 | destroy_workqueue(tcm_loop_workqueue); |
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h index 6ae49f272ba6..1e72ff77cac9 100644 --- a/drivers/target/loopback/tcm_loop.h +++ b/drivers/target/loopback/tcm_loop.h | |||
@@ -43,6 +43,7 @@ struct tcm_loop_nacl { | |||
43 | struct tcm_loop_tpg { | 43 | struct tcm_loop_tpg { |
44 | unsigned short tl_tpgt; | 44 | unsigned short tl_tpgt; |
45 | unsigned short tl_transport_status; | 45 | unsigned short tl_transport_status; |
46 | enum target_prot_type tl_fabric_prot_type; | ||
46 | atomic_t tl_tpg_port_count; | 47 | atomic_t tl_tpg_port_count; |
47 | struct se_portal_group tl_se_tpg; | 48 | struct se_portal_group tl_se_tpg; |
48 | struct tcm_loop_hba *tl_hba; | 49 | struct tcm_loop_hba *tl_hba; |
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c index 9512af6a8114..18b0f9703ff2 100644 --- a/drivers/target/sbp/sbp_target.c +++ b/drivers/target/sbp/sbp_target.c | |||
@@ -42,8 +42,7 @@ | |||
42 | 42 | ||
43 | #include "sbp_target.h" | 43 | #include "sbp_target.h" |
44 | 44 | ||
45 | /* Local pointer to allocated TCM configfs fabric module */ | 45 | static const struct target_core_fabric_ops sbp_ops; |
46 | static struct target_fabric_configfs *sbp_fabric_configfs; | ||
47 | 46 | ||
48 | /* FireWire address region for management and command block address handlers */ | 47 | /* FireWire address region for management and command block address handlers */ |
49 | static const struct fw_address_region sbp_register_region = { | 48 | static const struct fw_address_region sbp_register_region = { |
@@ -2215,8 +2214,7 @@ static struct se_portal_group *sbp_make_tpg( | |||
2215 | goto out_free_tpg; | 2214 | goto out_free_tpg; |
2216 | } | 2215 | } |
2217 | 2216 | ||
2218 | ret = core_tpg_register(&sbp_fabric_configfs->tf_ops, wwn, | 2217 | ret = core_tpg_register(&sbp_ops, wwn, &tpg->se_tpg, tpg, |
2219 | &tpg->se_tpg, (void *)tpg, | ||
2220 | TRANSPORT_TPG_TYPE_NORMAL); | 2218 | TRANSPORT_TPG_TYPE_NORMAL); |
2221 | if (ret < 0) | 2219 | if (ret < 0) |
2222 | goto out_unreg_mgt_agt; | 2220 | goto out_unreg_mgt_agt; |
@@ -2503,7 +2501,9 @@ static struct configfs_attribute *sbp_tpg_attrib_attrs[] = { | |||
2503 | NULL, | 2501 | NULL, |
2504 | }; | 2502 | }; |
2505 | 2503 | ||
2506 | static struct target_core_fabric_ops sbp_ops = { | 2504 | static const struct target_core_fabric_ops sbp_ops = { |
2505 | .module = THIS_MODULE, | ||
2506 | .name = "sbp", | ||
2507 | .get_fabric_name = sbp_get_fabric_name, | 2507 | .get_fabric_name = sbp_get_fabric_name, |
2508 | .get_fabric_proto_ident = sbp_get_fabric_proto_ident, | 2508 | .get_fabric_proto_ident = sbp_get_fabric_proto_ident, |
2509 | .tpg_get_wwn = sbp_get_fabric_wwn, | 2509 | .tpg_get_wwn = sbp_get_fabric_wwn, |
@@ -2544,68 +2544,20 @@ static struct target_core_fabric_ops sbp_ops = { | |||
2544 | .fabric_drop_np = NULL, | 2544 | .fabric_drop_np = NULL, |
2545 | .fabric_make_nodeacl = sbp_make_nodeacl, | 2545 | .fabric_make_nodeacl = sbp_make_nodeacl, |
2546 | .fabric_drop_nodeacl = sbp_drop_nodeacl, | 2546 | .fabric_drop_nodeacl = sbp_drop_nodeacl, |
2547 | }; | ||
2548 | |||
2549 | static int sbp_register_configfs(void) | ||
2550 | { | ||
2551 | struct target_fabric_configfs *fabric; | ||
2552 | int ret; | ||
2553 | |||
2554 | fabric = target_fabric_configfs_init(THIS_MODULE, "sbp"); | ||
2555 | if (IS_ERR(fabric)) { | ||
2556 | pr_err("target_fabric_configfs_init() failed\n"); | ||
2557 | return PTR_ERR(fabric); | ||
2558 | } | ||
2559 | |||
2560 | fabric->tf_ops = sbp_ops; | ||
2561 | |||
2562 | /* | ||
2563 | * Setup default attribute lists for various fabric->tf_cit_tmpl | ||
2564 | */ | ||
2565 | fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = sbp_wwn_attrs; | ||
2566 | fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = sbp_tpg_base_attrs; | ||
2567 | fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = sbp_tpg_attrib_attrs; | ||
2568 | fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; | ||
2569 | fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; | ||
2570 | fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL; | ||
2571 | fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; | ||
2572 | fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; | ||
2573 | fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; | ||
2574 | |||
2575 | ret = target_fabric_configfs_register(fabric); | ||
2576 | if (ret < 0) { | ||
2577 | pr_err("target_fabric_configfs_register() failed for SBP\n"); | ||
2578 | return ret; | ||
2579 | } | ||
2580 | 2547 | ||
2581 | sbp_fabric_configfs = fabric; | 2548 | .tfc_wwn_attrs = sbp_wwn_attrs, |
2582 | 2549 | .tfc_tpg_base_attrs = sbp_tpg_base_attrs, | |
2583 | return 0; | 2550 | .tfc_tpg_attrib_attrs = sbp_tpg_attrib_attrs, |
2584 | }; | ||
2585 | |||
2586 | static void sbp_deregister_configfs(void) | ||
2587 | { | ||
2588 | if (!sbp_fabric_configfs) | ||
2589 | return; | ||
2590 | |||
2591 | target_fabric_configfs_deregister(sbp_fabric_configfs); | ||
2592 | sbp_fabric_configfs = NULL; | ||
2593 | }; | 2551 | }; |
2594 | 2552 | ||
2595 | static int __init sbp_init(void) | 2553 | static int __init sbp_init(void) |
2596 | { | 2554 | { |
2597 | int ret; | 2555 | return target_register_template(&sbp_ops); |
2598 | |||
2599 | ret = sbp_register_configfs(); | ||
2600 | if (ret < 0) | ||
2601 | return ret; | ||
2602 | |||
2603 | return 0; | ||
2604 | }; | 2556 | }; |
2605 | 2557 | ||
2606 | static void __exit sbp_exit(void) | 2558 | static void __exit sbp_exit(void) |
2607 | { | 2559 | { |
2608 | sbp_deregister_configfs(); | 2560 | target_unregister_template(&sbp_ops); |
2609 | }; | 2561 | }; |
2610 | 2562 | ||
2611 | MODULE_DESCRIPTION("FireWire SBP fabric driver"); | 2563 | MODULE_DESCRIPTION("FireWire SBP fabric driver"); |
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 75d89adfccc0..ddaf76a4ac2a 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c | |||
@@ -142,8 +142,8 @@ static struct config_group *target_core_register_fabric( | |||
142 | 142 | ||
143 | tf = target_core_get_fabric(name); | 143 | tf = target_core_get_fabric(name); |
144 | if (!tf) { | 144 | if (!tf) { |
145 | pr_err("target_core_register_fabric() trying autoload for %s\n", | 145 | pr_debug("target_core_register_fabric() trying autoload for %s\n", |
146 | name); | 146 | name); |
147 | 147 | ||
148 | /* | 148 | /* |
149 | * Below are some hardcoded request_module() calls to automatically | 149 | * Below are some hardcoded request_module() calls to automatically |
@@ -165,8 +165,8 @@ static struct config_group *target_core_register_fabric( | |||
165 | */ | 165 | */ |
166 | ret = request_module("iscsi_target_mod"); | 166 | ret = request_module("iscsi_target_mod"); |
167 | if (ret < 0) { | 167 | if (ret < 0) { |
168 | pr_err("request_module() failed for" | 168 | pr_debug("request_module() failed for" |
169 | " iscsi_target_mod.ko: %d\n", ret); | 169 | " iscsi_target_mod.ko: %d\n", ret); |
170 | return ERR_PTR(-EINVAL); | 170 | return ERR_PTR(-EINVAL); |
171 | } | 171 | } |
172 | } else if (!strncmp(name, "loopback", 8)) { | 172 | } else if (!strncmp(name, "loopback", 8)) { |
@@ -178,8 +178,8 @@ static struct config_group *target_core_register_fabric( | |||
178 | */ | 178 | */ |
179 | ret = request_module("tcm_loop"); | 179 | ret = request_module("tcm_loop"); |
180 | if (ret < 0) { | 180 | if (ret < 0) { |
181 | pr_err("request_module() failed for" | 181 | pr_debug("request_module() failed for" |
182 | " tcm_loop.ko: %d\n", ret); | 182 | " tcm_loop.ko: %d\n", ret); |
183 | return ERR_PTR(-EINVAL); | 183 | return ERR_PTR(-EINVAL); |
184 | } | 184 | } |
185 | } | 185 | } |
@@ -188,8 +188,8 @@ static struct config_group *target_core_register_fabric( | |||
188 | } | 188 | } |
189 | 189 | ||
190 | if (!tf) { | 190 | if (!tf) { |
191 | pr_err("target_core_get_fabric() failed for %s\n", | 191 | pr_debug("target_core_get_fabric() failed for %s\n", |
192 | name); | 192 | name); |
193 | return ERR_PTR(-EINVAL); | 193 | return ERR_PTR(-EINVAL); |
194 | } | 194 | } |
195 | pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:" | 195 | pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:" |
@@ -300,81 +300,17 @@ struct configfs_subsystem *target_core_subsystem[] = { | |||
300 | // Start functions called by external Target Fabrics Modules | 300 | // Start functions called by external Target Fabrics Modules |
301 | //############################################################################*/ | 301 | //############################################################################*/ |
302 | 302 | ||
303 | /* | 303 | static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo) |
304 | * First function called by fabric modules to: | ||
305 | * | ||
306 | * 1) Allocate a struct target_fabric_configfs and save the *fabric_cit pointer. | ||
307 | * 2) Add struct target_fabric_configfs to g_tf_list | ||
308 | * 3) Return struct target_fabric_configfs to fabric module to be passed | ||
309 | * into target_fabric_configfs_register(). | ||
310 | */ | ||
311 | struct target_fabric_configfs *target_fabric_configfs_init( | ||
312 | struct module *fabric_mod, | ||
313 | const char *name) | ||
314 | { | 304 | { |
315 | struct target_fabric_configfs *tf; | 305 | if (!tfo->name) { |
316 | 306 | pr_err("Missing tfo->name\n"); | |
317 | if (!(name)) { | 307 | return -EINVAL; |
318 | pr_err("Unable to locate passed fabric name\n"); | ||
319 | return ERR_PTR(-EINVAL); | ||
320 | } | 308 | } |
321 | if (strlen(name) >= TARGET_FABRIC_NAME_SIZE) { | 309 | if (strlen(tfo->name) >= TARGET_FABRIC_NAME_SIZE) { |
322 | pr_err("Passed name: %s exceeds TARGET_FABRIC" | 310 | pr_err("Passed name: %s exceeds TARGET_FABRIC" |
323 | "_NAME_SIZE\n", name); | 311 | "_NAME_SIZE\n", tfo->name); |
324 | return ERR_PTR(-EINVAL); | 312 | return -EINVAL; |
325 | } | 313 | } |
326 | |||
327 | tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL); | ||
328 | if (!tf) | ||
329 | return ERR_PTR(-ENOMEM); | ||
330 | |||
331 | INIT_LIST_HEAD(&tf->tf_list); | ||
332 | atomic_set(&tf->tf_access_cnt, 0); | ||
333 | /* | ||
334 | * Setup the default generic struct config_item_type's (cits) in | ||
335 | * struct target_fabric_configfs->tf_cit_tmpl | ||
336 | */ | ||
337 | tf->tf_module = fabric_mod; | ||
338 | target_fabric_setup_cits(tf); | ||
339 | |||
340 | tf->tf_subsys = target_core_subsystem[0]; | ||
341 | snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", name); | ||
342 | |||
343 | mutex_lock(&g_tf_lock); | ||
344 | list_add_tail(&tf->tf_list, &g_tf_list); | ||
345 | mutex_unlock(&g_tf_lock); | ||
346 | |||
347 | pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>" | ||
348 | ">>>>>>>>>>>>>>\n"); | ||
349 | pr_debug("Initialized struct target_fabric_configfs: %p for" | ||
350 | " %s\n", tf, tf->tf_name); | ||
351 | return tf; | ||
352 | } | ||
353 | EXPORT_SYMBOL(target_fabric_configfs_init); | ||
354 | |||
355 | /* | ||
356 | * Called by fabric plugins after FAILED target_fabric_configfs_register() call. | ||
357 | */ | ||
358 | void target_fabric_configfs_free( | ||
359 | struct target_fabric_configfs *tf) | ||
360 | { | ||
361 | mutex_lock(&g_tf_lock); | ||
362 | list_del(&tf->tf_list); | ||
363 | mutex_unlock(&g_tf_lock); | ||
364 | |||
365 | kfree(tf); | ||
366 | } | ||
367 | EXPORT_SYMBOL(target_fabric_configfs_free); | ||
368 | |||
369 | /* | ||
370 | * Perform a sanity check of the passed tf->tf_ops before completing | ||
371 | * TCM fabric module registration. | ||
372 | */ | ||
373 | static int target_fabric_tf_ops_check( | ||
374 | struct target_fabric_configfs *tf) | ||
375 | { | ||
376 | struct target_core_fabric_ops *tfo = &tf->tf_ops; | ||
377 | |||
378 | if (!tfo->get_fabric_name) { | 314 | if (!tfo->get_fabric_name) { |
379 | pr_err("Missing tfo->get_fabric_name()\n"); | 315 | pr_err("Missing tfo->get_fabric_name()\n"); |
380 | return -EINVAL; | 316 | return -EINVAL; |
@@ -508,77 +444,59 @@ static int target_fabric_tf_ops_check( | |||
508 | return 0; | 444 | return 0; |
509 | } | 445 | } |
510 | 446 | ||
511 | /* | 447 | int target_register_template(const struct target_core_fabric_ops *fo) |
512 | * Called 2nd from fabric module with returned parameter of | ||
513 | * struct target_fabric_configfs * from target_fabric_configfs_init(). | ||
514 | * | ||
515 | * Upon a successful registration, the new fabric's struct config_item is | ||
516 | * return. Also, a pointer to this struct is set in the passed | ||
517 | * struct target_fabric_configfs. | ||
518 | */ | ||
519 | int target_fabric_configfs_register( | ||
520 | struct target_fabric_configfs *tf) | ||
521 | { | 448 | { |
449 | struct target_fabric_configfs *tf; | ||
522 | int ret; | 450 | int ret; |
523 | 451 | ||
452 | ret = target_fabric_tf_ops_check(fo); | ||
453 | if (ret) | ||
454 | return ret; | ||
455 | |||
456 | tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL); | ||
524 | if (!tf) { | 457 | if (!tf) { |
525 | pr_err("Unable to locate target_fabric_configfs" | 458 | pr_err("%s: could not allocate memory!\n", __func__); |
526 | " pointer\n"); | 459 | return -ENOMEM; |
527 | return -EINVAL; | ||
528 | } | ||
529 | if (!tf->tf_subsys) { | ||
530 | pr_err("Unable to target struct config_subsystem" | ||
531 | " pointer\n"); | ||
532 | return -EINVAL; | ||
533 | } | 460 | } |
534 | ret = target_fabric_tf_ops_check(tf); | ||
535 | if (ret < 0) | ||
536 | return ret; | ||
537 | 461 | ||
538 | pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>" | 462 | INIT_LIST_HEAD(&tf->tf_list); |
539 | ">>>>>>>>>>\n"); | 463 | atomic_set(&tf->tf_access_cnt, 0); |
464 | |||
465 | /* | ||
466 | * Setup the default generic struct config_item_type's (cits) in | ||
467 | * struct target_fabric_configfs->tf_cit_tmpl | ||
468 | */ | ||
469 | tf->tf_module = fo->module; | ||
470 | tf->tf_subsys = target_core_subsystem[0]; | ||
471 | snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", fo->name); | ||
472 | |||
473 | tf->tf_ops = *fo; | ||
474 | target_fabric_setup_cits(tf); | ||
475 | |||
476 | mutex_lock(&g_tf_lock); | ||
477 | list_add_tail(&tf->tf_list, &g_tf_list); | ||
478 | mutex_unlock(&g_tf_lock); | ||
479 | |||
540 | return 0; | 480 | return 0; |
541 | } | 481 | } |
542 | EXPORT_SYMBOL(target_fabric_configfs_register); | 482 | EXPORT_SYMBOL(target_register_template); |
543 | 483 | ||
544 | void target_fabric_configfs_deregister( | 484 | void target_unregister_template(const struct target_core_fabric_ops *fo) |
545 | struct target_fabric_configfs *tf) | ||
546 | { | 485 | { |
547 | struct configfs_subsystem *su; | 486 | struct target_fabric_configfs *t; |
548 | 487 | ||
549 | if (!tf) { | ||
550 | pr_err("Unable to locate passed target_fabric_" | ||
551 | "configfs\n"); | ||
552 | return; | ||
553 | } | ||
554 | su = tf->tf_subsys; | ||
555 | if (!su) { | ||
556 | pr_err("Unable to locate passed tf->tf_subsys" | ||
557 | " pointer\n"); | ||
558 | return; | ||
559 | } | ||
560 | pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>" | ||
561 | ">>>>>>>>>>>>\n"); | ||
562 | mutex_lock(&g_tf_lock); | 488 | mutex_lock(&g_tf_lock); |
563 | if (atomic_read(&tf->tf_access_cnt)) { | 489 | list_for_each_entry(t, &g_tf_list, tf_list) { |
564 | mutex_unlock(&g_tf_lock); | 490 | if (!strcmp(t->tf_name, fo->name)) { |
565 | pr_err("Non zero tf->tf_access_cnt for fabric %s\n", | 491 | BUG_ON(atomic_read(&t->tf_access_cnt)); |
566 | tf->tf_name); | 492 | list_del(&t->tf_list); |
567 | BUG(); | 493 | kfree(t); |
494 | break; | ||
495 | } | ||
568 | } | 496 | } |
569 | list_del(&tf->tf_list); | ||
570 | mutex_unlock(&g_tf_lock); | 497 | mutex_unlock(&g_tf_lock); |
571 | |||
572 | pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing tf:" | ||
573 | " %s\n", tf->tf_name); | ||
574 | tf->tf_module = NULL; | ||
575 | tf->tf_subsys = NULL; | ||
576 | kfree(tf); | ||
577 | |||
578 | pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>" | ||
579 | ">>>>>\n"); | ||
580 | } | 498 | } |
581 | EXPORT_SYMBOL(target_fabric_configfs_deregister); | 499 | EXPORT_SYMBOL(target_unregister_template); |
582 | 500 | ||
583 | /*############################################################################## | 501 | /*############################################################################## |
584 | // Stop functions called by external Target Fabrics Modules | 502 | // Stop functions called by external Target Fabrics Modules |
@@ -945,7 +863,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port( | |||
945 | struct se_lun *lun; | 863 | struct se_lun *lun; |
946 | struct se_portal_group *se_tpg; | 864 | struct se_portal_group *se_tpg; |
947 | struct t10_pr_registration *pr_reg; | 865 | struct t10_pr_registration *pr_reg; |
948 | struct target_core_fabric_ops *tfo; | 866 | const struct target_core_fabric_ops *tfo; |
949 | ssize_t len = 0; | 867 | ssize_t len = 0; |
950 | 868 | ||
951 | spin_lock(&dev->dev_reservation_lock); | 869 | spin_lock(&dev->dev_reservation_lock); |
@@ -979,7 +897,7 @@ SE_DEV_PR_ATTR_RO(res_pr_holder_tg_port); | |||
979 | static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts( | 897 | static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts( |
980 | struct se_device *dev, char *page) | 898 | struct se_device *dev, char *page) |
981 | { | 899 | { |
982 | struct target_core_fabric_ops *tfo; | 900 | const struct target_core_fabric_ops *tfo; |
983 | struct t10_pr_registration *pr_reg; | 901 | struct t10_pr_registration *pr_reg; |
984 | unsigned char buf[384]; | 902 | unsigned char buf[384]; |
985 | char i_buf[PR_REG_ISID_ID_LEN]; | 903 | char i_buf[PR_REG_ISID_ID_LEN]; |
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index 0c3f90130b7d..1f7886bb16bf 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c | |||
@@ -56,6 +56,20 @@ static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) | |||
56 | pr_debug("Setup generic %s\n", __stringify(_name)); \ | 56 | pr_debug("Setup generic %s\n", __stringify(_name)); \ |
57 | } | 57 | } |
58 | 58 | ||
59 | #define TF_CIT_SETUP_DRV(_name, _item_ops, _group_ops) \ | ||
60 | static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \ | ||
61 | { \ | ||
62 | struct target_fabric_configfs_template *tfc = &tf->tf_cit_tmpl; \ | ||
63 | struct config_item_type *cit = &tfc->tfc_##_name##_cit; \ | ||
64 | struct configfs_attribute **attrs = tf->tf_ops.tfc_##_name##_attrs; \ | ||
65 | \ | ||
66 | cit->ct_item_ops = _item_ops; \ | ||
67 | cit->ct_group_ops = _group_ops; \ | ||
68 | cit->ct_attrs = attrs; \ | ||
69 | cit->ct_owner = tf->tf_module; \ | ||
70 | pr_debug("Setup generic %s\n", __stringify(_name)); \ | ||
71 | } | ||
72 | |||
59 | /* Start of tfc_tpg_mappedlun_cit */ | 73 | /* Start of tfc_tpg_mappedlun_cit */ |
60 | 74 | ||
61 | static int target_fabric_mappedlun_link( | 75 | static int target_fabric_mappedlun_link( |
@@ -278,7 +292,7 @@ static struct configfs_item_operations target_fabric_nacl_attrib_item_ops = { | |||
278 | .store_attribute = target_fabric_nacl_attrib_attr_store, | 292 | .store_attribute = target_fabric_nacl_attrib_attr_store, |
279 | }; | 293 | }; |
280 | 294 | ||
281 | TF_CIT_SETUP(tpg_nacl_attrib, &target_fabric_nacl_attrib_item_ops, NULL, NULL); | 295 | TF_CIT_SETUP_DRV(tpg_nacl_attrib, &target_fabric_nacl_attrib_item_ops, NULL); |
282 | 296 | ||
283 | /* End of tfc_tpg_nacl_attrib_cit */ | 297 | /* End of tfc_tpg_nacl_attrib_cit */ |
284 | 298 | ||
@@ -291,7 +305,7 @@ static struct configfs_item_operations target_fabric_nacl_auth_item_ops = { | |||
291 | .store_attribute = target_fabric_nacl_auth_attr_store, | 305 | .store_attribute = target_fabric_nacl_auth_attr_store, |
292 | }; | 306 | }; |
293 | 307 | ||
294 | TF_CIT_SETUP(tpg_nacl_auth, &target_fabric_nacl_auth_item_ops, NULL, NULL); | 308 | TF_CIT_SETUP_DRV(tpg_nacl_auth, &target_fabric_nacl_auth_item_ops, NULL); |
295 | 309 | ||
296 | /* End of tfc_tpg_nacl_auth_cit */ | 310 | /* End of tfc_tpg_nacl_auth_cit */ |
297 | 311 | ||
@@ -304,7 +318,7 @@ static struct configfs_item_operations target_fabric_nacl_param_item_ops = { | |||
304 | .store_attribute = target_fabric_nacl_param_attr_store, | 318 | .store_attribute = target_fabric_nacl_param_attr_store, |
305 | }; | 319 | }; |
306 | 320 | ||
307 | TF_CIT_SETUP(tpg_nacl_param, &target_fabric_nacl_param_item_ops, NULL, NULL); | 321 | TF_CIT_SETUP_DRV(tpg_nacl_param, &target_fabric_nacl_param_item_ops, NULL); |
308 | 322 | ||
309 | /* End of tfc_tpg_nacl_param_cit */ | 323 | /* End of tfc_tpg_nacl_param_cit */ |
310 | 324 | ||
@@ -461,8 +475,8 @@ static struct configfs_group_operations target_fabric_nacl_base_group_ops = { | |||
461 | .drop_item = target_fabric_drop_mappedlun, | 475 | .drop_item = target_fabric_drop_mappedlun, |
462 | }; | 476 | }; |
463 | 477 | ||
464 | TF_CIT_SETUP(tpg_nacl_base, &target_fabric_nacl_base_item_ops, | 478 | TF_CIT_SETUP_DRV(tpg_nacl_base, &target_fabric_nacl_base_item_ops, |
465 | &target_fabric_nacl_base_group_ops, NULL); | 479 | &target_fabric_nacl_base_group_ops); |
466 | 480 | ||
467 | /* End of tfc_tpg_nacl_base_cit */ | 481 | /* End of tfc_tpg_nacl_base_cit */ |
468 | 482 | ||
@@ -570,7 +584,7 @@ static struct configfs_item_operations target_fabric_np_base_item_ops = { | |||
570 | .store_attribute = target_fabric_np_base_attr_store, | 584 | .store_attribute = target_fabric_np_base_attr_store, |
571 | }; | 585 | }; |
572 | 586 | ||
573 | TF_CIT_SETUP(tpg_np_base, &target_fabric_np_base_item_ops, NULL, NULL); | 587 | TF_CIT_SETUP_DRV(tpg_np_base, &target_fabric_np_base_item_ops, NULL); |
574 | 588 | ||
575 | /* End of tfc_tpg_np_base_cit */ | 589 | /* End of tfc_tpg_np_base_cit */ |
576 | 590 | ||
@@ -966,7 +980,7 @@ static struct configfs_item_operations target_fabric_tpg_attrib_item_ops = { | |||
966 | .store_attribute = target_fabric_tpg_attrib_attr_store, | 980 | .store_attribute = target_fabric_tpg_attrib_attr_store, |
967 | }; | 981 | }; |
968 | 982 | ||
969 | TF_CIT_SETUP(tpg_attrib, &target_fabric_tpg_attrib_item_ops, NULL, NULL); | 983 | TF_CIT_SETUP_DRV(tpg_attrib, &target_fabric_tpg_attrib_item_ops, NULL); |
970 | 984 | ||
971 | /* End of tfc_tpg_attrib_cit */ | 985 | /* End of tfc_tpg_attrib_cit */ |
972 | 986 | ||
@@ -979,7 +993,7 @@ static struct configfs_item_operations target_fabric_tpg_auth_item_ops = { | |||
979 | .store_attribute = target_fabric_tpg_auth_attr_store, | 993 | .store_attribute = target_fabric_tpg_auth_attr_store, |
980 | }; | 994 | }; |
981 | 995 | ||
982 | TF_CIT_SETUP(tpg_auth, &target_fabric_tpg_auth_item_ops, NULL, NULL); | 996 | TF_CIT_SETUP_DRV(tpg_auth, &target_fabric_tpg_auth_item_ops, NULL); |
983 | 997 | ||
984 | /* End of tfc_tpg_attrib_cit */ | 998 | /* End of tfc_tpg_attrib_cit */ |
985 | 999 | ||
@@ -992,7 +1006,7 @@ static struct configfs_item_operations target_fabric_tpg_param_item_ops = { | |||
992 | .store_attribute = target_fabric_tpg_param_attr_store, | 1006 | .store_attribute = target_fabric_tpg_param_attr_store, |
993 | }; | 1007 | }; |
994 | 1008 | ||
995 | TF_CIT_SETUP(tpg_param, &target_fabric_tpg_param_item_ops, NULL, NULL); | 1009 | TF_CIT_SETUP_DRV(tpg_param, &target_fabric_tpg_param_item_ops, NULL); |
996 | 1010 | ||
997 | /* End of tfc_tpg_param_cit */ | 1011 | /* End of tfc_tpg_param_cit */ |
998 | 1012 | ||
@@ -1018,7 +1032,7 @@ static struct configfs_item_operations target_fabric_tpg_base_item_ops = { | |||
1018 | .store_attribute = target_fabric_tpg_attr_store, | 1032 | .store_attribute = target_fabric_tpg_attr_store, |
1019 | }; | 1033 | }; |
1020 | 1034 | ||
1021 | TF_CIT_SETUP(tpg_base, &target_fabric_tpg_base_item_ops, NULL, NULL); | 1035 | TF_CIT_SETUP_DRV(tpg_base, &target_fabric_tpg_base_item_ops, NULL); |
1022 | 1036 | ||
1023 | /* End of tfc_tpg_base_cit */ | 1037 | /* End of tfc_tpg_base_cit */ |
1024 | 1038 | ||
@@ -1192,7 +1206,7 @@ static struct configfs_item_operations target_fabric_wwn_item_ops = { | |||
1192 | .store_attribute = target_fabric_wwn_attr_store, | 1206 | .store_attribute = target_fabric_wwn_attr_store, |
1193 | }; | 1207 | }; |
1194 | 1208 | ||
1195 | TF_CIT_SETUP(wwn, &target_fabric_wwn_item_ops, &target_fabric_wwn_group_ops, NULL); | 1209 | TF_CIT_SETUP_DRV(wwn, &target_fabric_wwn_item_ops, &target_fabric_wwn_group_ops); |
1196 | 1210 | ||
1197 | /* End of tfc_wwn_cit */ | 1211 | /* End of tfc_wwn_cit */ |
1198 | 1212 | ||
@@ -1206,7 +1220,7 @@ static struct configfs_item_operations target_fabric_discovery_item_ops = { | |||
1206 | .store_attribute = target_fabric_discovery_attr_store, | 1220 | .store_attribute = target_fabric_discovery_attr_store, |
1207 | }; | 1221 | }; |
1208 | 1222 | ||
1209 | TF_CIT_SETUP(discovery, &target_fabric_discovery_item_ops, NULL, NULL); | 1223 | TF_CIT_SETUP_DRV(discovery, &target_fabric_discovery_item_ops, NULL); |
1210 | 1224 | ||
1211 | /* End of tfc_discovery_cit */ | 1225 | /* End of tfc_discovery_cit */ |
1212 | 1226 | ||
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 44620fb6bd45..f7e6e51aed36 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c | |||
@@ -264,40 +264,32 @@ static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot, | |||
264 | struct se_device *se_dev = cmd->se_dev; | 264 | struct se_device *se_dev = cmd->se_dev; |
265 | struct fd_dev *dev = FD_DEV(se_dev); | 265 | struct fd_dev *dev = FD_DEV(se_dev); |
266 | struct file *prot_fd = dev->fd_prot_file; | 266 | struct file *prot_fd = dev->fd_prot_file; |
267 | struct scatterlist *sg; | ||
268 | loff_t pos = (cmd->t_task_lba * se_dev->prot_length); | 267 | loff_t pos = (cmd->t_task_lba * se_dev->prot_length); |
269 | unsigned char *buf; | 268 | unsigned char *buf; |
270 | u32 prot_size, len, size; | 269 | u32 prot_size; |
271 | int rc, ret = 1, i; | 270 | int rc, ret = 1; |
272 | 271 | ||
273 | prot_size = (cmd->data_length / se_dev->dev_attrib.block_size) * | 272 | prot_size = (cmd->data_length / se_dev->dev_attrib.block_size) * |
274 | se_dev->prot_length; | 273 | se_dev->prot_length; |
275 | 274 | ||
276 | if (!is_write) { | 275 | if (!is_write) { |
277 | fd_prot->prot_buf = vzalloc(prot_size); | 276 | fd_prot->prot_buf = kzalloc(prot_size, GFP_KERNEL); |
278 | if (!fd_prot->prot_buf) { | 277 | if (!fd_prot->prot_buf) { |
279 | pr_err("Unable to allocate fd_prot->prot_buf\n"); | 278 | pr_err("Unable to allocate fd_prot->prot_buf\n"); |
280 | return -ENOMEM; | 279 | return -ENOMEM; |
281 | } | 280 | } |
282 | buf = fd_prot->prot_buf; | 281 | buf = fd_prot->prot_buf; |
283 | 282 | ||
284 | fd_prot->prot_sg_nents = cmd->t_prot_nents; | 283 | fd_prot->prot_sg_nents = 1; |
285 | fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist) * | 284 | fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist), |
286 | fd_prot->prot_sg_nents, GFP_KERNEL); | 285 | GFP_KERNEL); |
287 | if (!fd_prot->prot_sg) { | 286 | if (!fd_prot->prot_sg) { |
288 | pr_err("Unable to allocate fd_prot->prot_sg\n"); | 287 | pr_err("Unable to allocate fd_prot->prot_sg\n"); |
289 | vfree(fd_prot->prot_buf); | 288 | kfree(fd_prot->prot_buf); |
290 | return -ENOMEM; | 289 | return -ENOMEM; |
291 | } | 290 | } |
292 | size = prot_size; | 291 | sg_init_table(fd_prot->prot_sg, fd_prot->prot_sg_nents); |
293 | 292 | sg_set_buf(fd_prot->prot_sg, buf, prot_size); | |
294 | for_each_sg(fd_prot->prot_sg, sg, fd_prot->prot_sg_nents, i) { | ||
295 | |||
296 | len = min_t(u32, PAGE_SIZE, size); | ||
297 | sg_set_buf(sg, buf, len); | ||
298 | size -= len; | ||
299 | buf += len; | ||
300 | } | ||
301 | } | 293 | } |
302 | 294 | ||
303 | if (is_write) { | 295 | if (is_write) { |
@@ -318,7 +310,7 @@ static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot, | |||
318 | 310 | ||
319 | if (is_write || ret < 0) { | 311 | if (is_write || ret < 0) { |
320 | kfree(fd_prot->prot_sg); | 312 | kfree(fd_prot->prot_sg); |
321 | vfree(fd_prot->prot_buf); | 313 | kfree(fd_prot->prot_buf); |
322 | } | 314 | } |
323 | 315 | ||
324 | return ret; | 316 | return ret; |
@@ -331,36 +323,33 @@ static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl, | |||
331 | struct fd_dev *dev = FD_DEV(se_dev); | 323 | struct fd_dev *dev = FD_DEV(se_dev); |
332 | struct file *fd = dev->fd_file; | 324 | struct file *fd = dev->fd_file; |
333 | struct scatterlist *sg; | 325 | struct scatterlist *sg; |
334 | struct iovec *iov; | 326 | struct iov_iter iter; |
335 | mm_segment_t old_fs; | 327 | struct bio_vec *bvec; |
328 | ssize_t len = 0; | ||
336 | loff_t pos = (cmd->t_task_lba * se_dev->dev_attrib.block_size); | 329 | loff_t pos = (cmd->t_task_lba * se_dev->dev_attrib.block_size); |
337 | int ret = 0, i; | 330 | int ret = 0, i; |
338 | 331 | ||
339 | iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL); | 332 | bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL); |
340 | if (!iov) { | 333 | if (!bvec) { |
341 | pr_err("Unable to allocate fd_do_readv iov[]\n"); | 334 | pr_err("Unable to allocate fd_do_readv iov[]\n"); |
342 | return -ENOMEM; | 335 | return -ENOMEM; |
343 | } | 336 | } |
344 | 337 | ||
345 | for_each_sg(sgl, sg, sgl_nents, i) { | 338 | for_each_sg(sgl, sg, sgl_nents, i) { |
346 | iov[i].iov_len = sg->length; | 339 | bvec[i].bv_page = sg_page(sg); |
347 | iov[i].iov_base = kmap(sg_page(sg)) + sg->offset; | 340 | bvec[i].bv_len = sg->length; |
348 | } | 341 | bvec[i].bv_offset = sg->offset; |
349 | 342 | ||
350 | old_fs = get_fs(); | 343 | len += sg->length; |
351 | set_fs(get_ds()); | 344 | } |
352 | 345 | ||
346 | iov_iter_bvec(&iter, ITER_BVEC, bvec, sgl_nents, len); | ||
353 | if (is_write) | 347 | if (is_write) |
354 | ret = vfs_writev(fd, &iov[0], sgl_nents, &pos); | 348 | ret = vfs_iter_write(fd, &iter, &pos); |
355 | else | 349 | else |
356 | ret = vfs_readv(fd, &iov[0], sgl_nents, &pos); | 350 | ret = vfs_iter_read(fd, &iter, &pos); |
357 | |||
358 | set_fs(old_fs); | ||
359 | |||
360 | for_each_sg(sgl, sg, sgl_nents, i) | ||
361 | kunmap(sg_page(sg)); | ||
362 | 351 | ||
363 | kfree(iov); | 352 | kfree(bvec); |
364 | 353 | ||
365 | if (is_write) { | 354 | if (is_write) { |
366 | if (ret < 0 || ret != cmd->data_length) { | 355 | if (ret < 0 || ret != cmd->data_length) { |
@@ -436,59 +425,17 @@ fd_execute_sync_cache(struct se_cmd *cmd) | |||
436 | return 0; | 425 | return 0; |
437 | } | 426 | } |
438 | 427 | ||
439 | static unsigned char * | ||
440 | fd_setup_write_same_buf(struct se_cmd *cmd, struct scatterlist *sg, | ||
441 | unsigned int len) | ||
442 | { | ||
443 | struct se_device *se_dev = cmd->se_dev; | ||
444 | unsigned int block_size = se_dev->dev_attrib.block_size; | ||
445 | unsigned int i = 0, end; | ||
446 | unsigned char *buf, *p, *kmap_buf; | ||
447 | |||
448 | buf = kzalloc(min_t(unsigned int, len, PAGE_SIZE), GFP_KERNEL); | ||
449 | if (!buf) { | ||
450 | pr_err("Unable to allocate fd_execute_write_same buf\n"); | ||
451 | return NULL; | ||
452 | } | ||
453 | |||
454 | kmap_buf = kmap(sg_page(sg)) + sg->offset; | ||
455 | if (!kmap_buf) { | ||
456 | pr_err("kmap() failed in fd_setup_write_same\n"); | ||
457 | kfree(buf); | ||
458 | return NULL; | ||
459 | } | ||
460 | /* | ||
461 | * Fill local *buf to contain multiple WRITE_SAME blocks up to | ||
462 | * min(len, PAGE_SIZE) | ||
463 | */ | ||
464 | p = buf; | ||
465 | end = min_t(unsigned int, len, PAGE_SIZE); | ||
466 | |||
467 | while (i < end) { | ||
468 | memcpy(p, kmap_buf, block_size); | ||
469 | |||
470 | i += block_size; | ||
471 | p += block_size; | ||
472 | } | ||
473 | kunmap(sg_page(sg)); | ||
474 | |||
475 | return buf; | ||
476 | } | ||
477 | |||
478 | static sense_reason_t | 428 | static sense_reason_t |
479 | fd_execute_write_same(struct se_cmd *cmd) | 429 | fd_execute_write_same(struct se_cmd *cmd) |
480 | { | 430 | { |
481 | struct se_device *se_dev = cmd->se_dev; | 431 | struct se_device *se_dev = cmd->se_dev; |
482 | struct fd_dev *fd_dev = FD_DEV(se_dev); | 432 | struct fd_dev *fd_dev = FD_DEV(se_dev); |
483 | struct file *f = fd_dev->fd_file; | ||
484 | struct scatterlist *sg; | ||
485 | struct iovec *iov; | ||
486 | mm_segment_t old_fs; | ||
487 | sector_t nolb = sbc_get_write_same_sectors(cmd); | ||
488 | loff_t pos = cmd->t_task_lba * se_dev->dev_attrib.block_size; | 433 | loff_t pos = cmd->t_task_lba * se_dev->dev_attrib.block_size; |
489 | unsigned int len, len_tmp, iov_num; | 434 | sector_t nolb = sbc_get_write_same_sectors(cmd); |
490 | int i, rc; | 435 | struct iov_iter iter; |
491 | unsigned char *buf; | 436 | struct bio_vec *bvec; |
437 | unsigned int len = 0, i; | ||
438 | ssize_t ret; | ||
492 | 439 | ||
493 | if (!nolb) { | 440 | if (!nolb) { |
494 | target_complete_cmd(cmd, SAM_STAT_GOOD); | 441 | target_complete_cmd(cmd, SAM_STAT_GOOD); |
@@ -499,56 +446,92 @@ fd_execute_write_same(struct se_cmd *cmd) | |||
499 | " backends not supported\n"); | 446 | " backends not supported\n"); |
500 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 447 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
501 | } | 448 | } |
502 | sg = &cmd->t_data_sg[0]; | ||
503 | 449 | ||
504 | if (cmd->t_data_nents > 1 || | 450 | if (cmd->t_data_nents > 1 || |
505 | sg->length != cmd->se_dev->dev_attrib.block_size) { | 451 | cmd->t_data_sg[0].length != cmd->se_dev->dev_attrib.block_size) { |
506 | pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u" | 452 | pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u" |
507 | " block_size: %u\n", cmd->t_data_nents, sg->length, | 453 | " block_size: %u\n", |
454 | cmd->t_data_nents, | ||
455 | cmd->t_data_sg[0].length, | ||
508 | cmd->se_dev->dev_attrib.block_size); | 456 | cmd->se_dev->dev_attrib.block_size); |
509 | return TCM_INVALID_CDB_FIELD; | 457 | return TCM_INVALID_CDB_FIELD; |
510 | } | 458 | } |
511 | 459 | ||
512 | len = len_tmp = nolb * se_dev->dev_attrib.block_size; | 460 | bvec = kcalloc(nolb, sizeof(struct bio_vec), GFP_KERNEL); |
513 | iov_num = DIV_ROUND_UP(len, PAGE_SIZE); | 461 | if (!bvec) |
514 | |||
515 | buf = fd_setup_write_same_buf(cmd, sg, len); | ||
516 | if (!buf) | ||
517 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 462 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
518 | 463 | ||
519 | iov = vzalloc(sizeof(struct iovec) * iov_num); | 464 | for (i = 0; i < nolb; i++) { |
520 | if (!iov) { | 465 | bvec[i].bv_page = sg_page(&cmd->t_data_sg[0]); |
521 | pr_err("Unable to allocate fd_execute_write_same iovecs\n"); | 466 | bvec[i].bv_len = cmd->t_data_sg[0].length; |
522 | kfree(buf); | 467 | bvec[i].bv_offset = cmd->t_data_sg[0].offset; |
468 | |||
469 | len += se_dev->dev_attrib.block_size; | ||
470 | } | ||
471 | |||
472 | iov_iter_bvec(&iter, ITER_BVEC, bvec, nolb, len); | ||
473 | ret = vfs_iter_write(fd_dev->fd_file, &iter, &pos); | ||
474 | |||
475 | kfree(bvec); | ||
476 | if (ret < 0 || ret != len) { | ||
477 | pr_err("vfs_iter_write() returned %zd for write same\n", ret); | ||
523 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 478 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
524 | } | 479 | } |
525 | /* | 480 | |
526 | * Map the single fabric received scatterlist block now populated | 481 | target_complete_cmd(cmd, SAM_STAT_GOOD); |
527 | * in *buf into each iovec for I/O submission. | 482 | return 0; |
528 | */ | 483 | } |
529 | for (i = 0; i < iov_num; i++) { | 484 | |
530 | iov[i].iov_base = buf; | 485 | static int |
531 | iov[i].iov_len = min_t(unsigned int, len_tmp, PAGE_SIZE); | 486 | fd_do_prot_fill(struct se_device *se_dev, sector_t lba, sector_t nolb, |
532 | len_tmp -= iov[i].iov_len; | 487 | void *buf, size_t bufsize) |
488 | { | ||
489 | struct fd_dev *fd_dev = FD_DEV(se_dev); | ||
490 | struct file *prot_fd = fd_dev->fd_prot_file; | ||
491 | sector_t prot_length, prot; | ||
492 | loff_t pos = lba * se_dev->prot_length; | ||
493 | |||
494 | if (!prot_fd) { | ||
495 | pr_err("Unable to locate fd_dev->fd_prot_file\n"); | ||
496 | return -ENODEV; | ||
533 | } | 497 | } |
534 | 498 | ||
535 | old_fs = get_fs(); | 499 | prot_length = nolb * se_dev->prot_length; |
536 | set_fs(get_ds()); | ||
537 | rc = vfs_writev(f, &iov[0], iov_num, &pos); | ||
538 | set_fs(old_fs); | ||
539 | 500 | ||
540 | vfree(iov); | 501 | for (prot = 0; prot < prot_length;) { |
541 | kfree(buf); | 502 | sector_t len = min_t(sector_t, bufsize, prot_length - prot); |
503 | ssize_t ret = kernel_write(prot_fd, buf, len, pos + prot); | ||
542 | 504 | ||
543 | if (rc < 0 || rc != len) { | 505 | if (ret != len) { |
544 | pr_err("vfs_writev() returned %d for write same\n", rc); | 506 | pr_err("vfs_write to prot file failed: %zd\n", ret); |
545 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 507 | return ret < 0 ? ret : -ENODEV; |
508 | } | ||
509 | prot += ret; | ||
546 | } | 510 | } |
547 | 511 | ||
548 | target_complete_cmd(cmd, SAM_STAT_GOOD); | ||
549 | return 0; | 512 | return 0; |
550 | } | 513 | } |
551 | 514 | ||
515 | static int | ||
516 | fd_do_prot_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb) | ||
517 | { | ||
518 | void *buf; | ||
519 | int rc; | ||
520 | |||
521 | buf = (void *)__get_free_page(GFP_KERNEL); | ||
522 | if (!buf) { | ||
523 | pr_err("Unable to allocate FILEIO prot buf\n"); | ||
524 | return -ENOMEM; | ||
525 | } | ||
526 | memset(buf, 0xff, PAGE_SIZE); | ||
527 | |||
528 | rc = fd_do_prot_fill(cmd->se_dev, lba, nolb, buf, PAGE_SIZE); | ||
529 | |||
530 | free_page((unsigned long)buf); | ||
531 | |||
532 | return rc; | ||
533 | } | ||
534 | |||
552 | static sense_reason_t | 535 | static sense_reason_t |
553 | fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb) | 536 | fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb) |
554 | { | 537 | { |
@@ -556,6 +539,12 @@ fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb) | |||
556 | struct inode *inode = file->f_mapping->host; | 539 | struct inode *inode = file->f_mapping->host; |
557 | int ret; | 540 | int ret; |
558 | 541 | ||
542 | if (cmd->se_dev->dev_attrib.pi_prot_type) { | ||
543 | ret = fd_do_prot_unmap(cmd, lba, nolb); | ||
544 | if (ret) | ||
545 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | ||
546 | } | ||
547 | |||
559 | if (S_ISBLK(inode->i_mode)) { | 548 | if (S_ISBLK(inode->i_mode)) { |
560 | /* The backend is block device, use discard */ | 549 | /* The backend is block device, use discard */ |
561 | struct block_device *bdev = inode->i_bdev; | 550 | struct block_device *bdev = inode->i_bdev; |
@@ -595,7 +584,7 @@ fd_execute_write_same_unmap(struct se_cmd *cmd) | |||
595 | struct file *file = fd_dev->fd_file; | 584 | struct file *file = fd_dev->fd_file; |
596 | sector_t lba = cmd->t_task_lba; | 585 | sector_t lba = cmd->t_task_lba; |
597 | sector_t nolb = sbc_get_write_same_sectors(cmd); | 586 | sector_t nolb = sbc_get_write_same_sectors(cmd); |
598 | int ret; | 587 | sense_reason_t ret; |
599 | 588 | ||
600 | if (!nolb) { | 589 | if (!nolb) { |
601 | target_complete_cmd(cmd, SAM_STAT_GOOD); | 590 | target_complete_cmd(cmd, SAM_STAT_GOOD); |
@@ -643,7 +632,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, | |||
643 | if (data_direction == DMA_FROM_DEVICE) { | 632 | if (data_direction == DMA_FROM_DEVICE) { |
644 | memset(&fd_prot, 0, sizeof(struct fd_prot)); | 633 | memset(&fd_prot, 0, sizeof(struct fd_prot)); |
645 | 634 | ||
646 | if (cmd->prot_type) { | 635 | if (cmd->prot_type && dev->dev_attrib.pi_prot_type) { |
647 | ret = fd_do_prot_rw(cmd, &fd_prot, false); | 636 | ret = fd_do_prot_rw(cmd, &fd_prot, false); |
648 | if (ret < 0) | 637 | if (ret < 0) |
649 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 638 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
@@ -651,23 +640,23 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, | |||
651 | 640 | ||
652 | ret = fd_do_rw(cmd, sgl, sgl_nents, 0); | 641 | ret = fd_do_rw(cmd, sgl, sgl_nents, 0); |
653 | 642 | ||
654 | if (ret > 0 && cmd->prot_type) { | 643 | if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) { |
655 | u32 sectors = cmd->data_length / dev->dev_attrib.block_size; | 644 | u32 sectors = cmd->data_length / dev->dev_attrib.block_size; |
656 | 645 | ||
657 | rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, | 646 | rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, |
658 | 0, fd_prot.prot_sg, 0); | 647 | 0, fd_prot.prot_sg, 0); |
659 | if (rc) { | 648 | if (rc) { |
660 | kfree(fd_prot.prot_sg); | 649 | kfree(fd_prot.prot_sg); |
661 | vfree(fd_prot.prot_buf); | 650 | kfree(fd_prot.prot_buf); |
662 | return rc; | 651 | return rc; |
663 | } | 652 | } |
664 | kfree(fd_prot.prot_sg); | 653 | kfree(fd_prot.prot_sg); |
665 | vfree(fd_prot.prot_buf); | 654 | kfree(fd_prot.prot_buf); |
666 | } | 655 | } |
667 | } else { | 656 | } else { |
668 | memset(&fd_prot, 0, sizeof(struct fd_prot)); | 657 | memset(&fd_prot, 0, sizeof(struct fd_prot)); |
669 | 658 | ||
670 | if (cmd->prot_type) { | 659 | if (cmd->prot_type && dev->dev_attrib.pi_prot_type) { |
671 | u32 sectors = cmd->data_length / dev->dev_attrib.block_size; | 660 | u32 sectors = cmd->data_length / dev->dev_attrib.block_size; |
672 | 661 | ||
673 | ret = fd_do_prot_rw(cmd, &fd_prot, false); | 662 | ret = fd_do_prot_rw(cmd, &fd_prot, false); |
@@ -678,7 +667,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, | |||
678 | 0, fd_prot.prot_sg, 0); | 667 | 0, fd_prot.prot_sg, 0); |
679 | if (rc) { | 668 | if (rc) { |
680 | kfree(fd_prot.prot_sg); | 669 | kfree(fd_prot.prot_sg); |
681 | vfree(fd_prot.prot_buf); | 670 | kfree(fd_prot.prot_buf); |
682 | return rc; | 671 | return rc; |
683 | } | 672 | } |
684 | } | 673 | } |
@@ -705,7 +694,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, | |||
705 | vfs_fsync_range(fd_dev->fd_file, start, end, 1); | 694 | vfs_fsync_range(fd_dev->fd_file, start, end, 1); |
706 | } | 695 | } |
707 | 696 | ||
708 | if (ret > 0 && cmd->prot_type) { | 697 | if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) { |
709 | ret = fd_do_prot_rw(cmd, &fd_prot, true); | 698 | ret = fd_do_prot_rw(cmd, &fd_prot, true); |
710 | if (ret < 0) | 699 | if (ret < 0) |
711 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 700 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
@@ -714,7 +703,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, | |||
714 | 703 | ||
715 | if (ret < 0) { | 704 | if (ret < 0) { |
716 | kfree(fd_prot.prot_sg); | 705 | kfree(fd_prot.prot_sg); |
717 | vfree(fd_prot.prot_buf); | 706 | kfree(fd_prot.prot_buf); |
718 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 707 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
719 | } | 708 | } |
720 | 709 | ||
@@ -878,48 +867,28 @@ static int fd_init_prot(struct se_device *dev) | |||
878 | 867 | ||
879 | static int fd_format_prot(struct se_device *dev) | 868 | static int fd_format_prot(struct se_device *dev) |
880 | { | 869 | { |
881 | struct fd_dev *fd_dev = FD_DEV(dev); | ||
882 | struct file *prot_fd = fd_dev->fd_prot_file; | ||
883 | sector_t prot_length, prot; | ||
884 | unsigned char *buf; | 870 | unsigned char *buf; |
885 | loff_t pos = 0; | ||
886 | int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size; | 871 | int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size; |
887 | int rc, ret = 0, size, len; | 872 | int ret; |
888 | 873 | ||
889 | if (!dev->dev_attrib.pi_prot_type) { | 874 | if (!dev->dev_attrib.pi_prot_type) { |
890 | pr_err("Unable to format_prot while pi_prot_type == 0\n"); | 875 | pr_err("Unable to format_prot while pi_prot_type == 0\n"); |
891 | return -ENODEV; | 876 | return -ENODEV; |
892 | } | 877 | } |
893 | if (!prot_fd) { | ||
894 | pr_err("Unable to locate fd_dev->fd_prot_file\n"); | ||
895 | return -ENODEV; | ||
896 | } | ||
897 | 878 | ||
898 | buf = vzalloc(unit_size); | 879 | buf = vzalloc(unit_size); |
899 | if (!buf) { | 880 | if (!buf) { |
900 | pr_err("Unable to allocate FILEIO prot buf\n"); | 881 | pr_err("Unable to allocate FILEIO prot buf\n"); |
901 | return -ENOMEM; | 882 | return -ENOMEM; |
902 | } | 883 | } |
903 | prot_length = (dev->transport->get_blocks(dev) + 1) * dev->prot_length; | ||
904 | size = prot_length; | ||
905 | 884 | ||
906 | pr_debug("Using FILEIO prot_length: %llu\n", | 885 | pr_debug("Using FILEIO prot_length: %llu\n", |
907 | (unsigned long long)prot_length); | 886 | (unsigned long long)(dev->transport->get_blocks(dev) + 1) * |
887 | dev->prot_length); | ||
908 | 888 | ||
909 | memset(buf, 0xff, unit_size); | 889 | memset(buf, 0xff, unit_size); |
910 | for (prot = 0; prot < prot_length; prot += unit_size) { | 890 | ret = fd_do_prot_fill(dev, 0, dev->transport->get_blocks(dev) + 1, |
911 | len = min(unit_size, size); | 891 | buf, unit_size); |
912 | rc = kernel_write(prot_fd, buf, len, pos); | ||
913 | if (rc != len) { | ||
914 | pr_err("vfs_write to prot file failed: %d\n", rc); | ||
915 | ret = -ENODEV; | ||
916 | goto out; | ||
917 | } | ||
918 | pos += len; | ||
919 | size -= len; | ||
920 | } | ||
921 | |||
922 | out: | ||
923 | vfree(buf); | 892 | vfree(buf); |
924 | return ret; | 893 | return ret; |
925 | } | 894 | } |
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index d4a4b0fb444a..1b7947c2510f 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c | |||
@@ -444,7 +444,7 @@ iblock_execute_write_same_unmap(struct se_cmd *cmd) | |||
444 | struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd; | 444 | struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd; |
445 | sector_t lba = cmd->t_task_lba; | 445 | sector_t lba = cmd->t_task_lba; |
446 | sector_t nolb = sbc_get_write_same_sectors(cmd); | 446 | sector_t nolb = sbc_get_write_same_sectors(cmd); |
447 | int ret; | 447 | sense_reason_t ret; |
448 | 448 | ||
449 | ret = iblock_do_unmap(cmd, bdev, lba, nolb); | 449 | ret = iblock_do_unmap(cmd, bdev, lba, nolb); |
450 | if (ret) | 450 | if (ret) |
@@ -774,7 +774,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, | |||
774 | sg_num--; | 774 | sg_num--; |
775 | } | 775 | } |
776 | 776 | ||
777 | if (cmd->prot_type) { | 777 | if (cmd->prot_type && dev->dev_attrib.pi_prot_type) { |
778 | int rc = iblock_alloc_bip(cmd, bio_start); | 778 | int rc = iblock_alloc_bip(cmd, bio_start); |
779 | if (rc) | 779 | if (rc) |
780 | goto fail_put_bios; | 780 | goto fail_put_bios; |
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index 60381db90026..874a9bc988d8 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h | |||
@@ -4,7 +4,13 @@ | |||
4 | /* target_core_alua.c */ | 4 | /* target_core_alua.c */ |
5 | extern struct t10_alua_lu_gp *default_lu_gp; | 5 | extern struct t10_alua_lu_gp *default_lu_gp; |
6 | 6 | ||
7 | /* target_core_configfs.c */ | ||
8 | extern struct configfs_subsystem *target_core_subsystem[]; | ||
9 | |||
7 | /* target_core_device.c */ | 10 | /* target_core_device.c */ |
11 | extern struct mutex g_device_mutex; | ||
12 | extern struct list_head g_device_list; | ||
13 | |||
8 | struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16); | 14 | struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16); |
9 | int core_free_device_list_for_node(struct se_node_acl *, | 15 | int core_free_device_list_for_node(struct se_node_acl *, |
10 | struct se_portal_group *); | 16 | struct se_portal_group *); |
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 2de6fb8cee8d..c1aa9655e96e 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c | |||
@@ -78,6 +78,22 @@ enum preempt_type { | |||
78 | static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *, | 78 | static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *, |
79 | struct t10_pr_registration *, int, int); | 79 | struct t10_pr_registration *, int, int); |
80 | 80 | ||
81 | static int is_reservation_holder( | ||
82 | struct t10_pr_registration *pr_res_holder, | ||
83 | struct t10_pr_registration *pr_reg) | ||
84 | { | ||
85 | int pr_res_type; | ||
86 | |||
87 | if (pr_res_holder) { | ||
88 | pr_res_type = pr_res_holder->pr_res_type; | ||
89 | |||
90 | return pr_res_holder == pr_reg || | ||
91 | pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG || | ||
92 | pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG; | ||
93 | } | ||
94 | return 0; | ||
95 | } | ||
96 | |||
81 | static sense_reason_t | 97 | static sense_reason_t |
82 | target_scsi2_reservation_check(struct se_cmd *cmd) | 98 | target_scsi2_reservation_check(struct se_cmd *cmd) |
83 | { | 99 | { |
@@ -664,7 +680,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( | |||
664 | struct se_dev_entry *deve_tmp; | 680 | struct se_dev_entry *deve_tmp; |
665 | struct se_node_acl *nacl_tmp; | 681 | struct se_node_acl *nacl_tmp; |
666 | struct se_port *port, *port_tmp; | 682 | struct se_port *port, *port_tmp; |
667 | struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; | 683 | const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; |
668 | struct t10_pr_registration *pr_reg, *pr_reg_atp, *pr_reg_tmp, *pr_reg_tmp_safe; | 684 | struct t10_pr_registration *pr_reg, *pr_reg_atp, *pr_reg_tmp, *pr_reg_tmp_safe; |
669 | int ret; | 685 | int ret; |
670 | /* | 686 | /* |
@@ -963,7 +979,7 @@ int core_scsi3_check_aptpl_registration( | |||
963 | } | 979 | } |
964 | 980 | ||
965 | static void __core_scsi3_dump_registration( | 981 | static void __core_scsi3_dump_registration( |
966 | struct target_core_fabric_ops *tfo, | 982 | const struct target_core_fabric_ops *tfo, |
967 | struct se_device *dev, | 983 | struct se_device *dev, |
968 | struct se_node_acl *nacl, | 984 | struct se_node_acl *nacl, |
969 | struct t10_pr_registration *pr_reg, | 985 | struct t10_pr_registration *pr_reg, |
@@ -1004,7 +1020,7 @@ static void __core_scsi3_add_registration( | |||
1004 | enum register_type register_type, | 1020 | enum register_type register_type, |
1005 | int register_move) | 1021 | int register_move) |
1006 | { | 1022 | { |
1007 | struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; | 1023 | const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; |
1008 | struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe; | 1024 | struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe; |
1009 | struct t10_reservation *pr_tmpl = &dev->t10_pr; | 1025 | struct t10_reservation *pr_tmpl = &dev->t10_pr; |
1010 | 1026 | ||
@@ -1220,8 +1236,10 @@ static void __core_scsi3_free_registration( | |||
1220 | struct t10_pr_registration *pr_reg, | 1236 | struct t10_pr_registration *pr_reg, |
1221 | struct list_head *preempt_and_abort_list, | 1237 | struct list_head *preempt_and_abort_list, |
1222 | int dec_holders) | 1238 | int dec_holders) |
1239 | __releases(&pr_tmpl->registration_lock) | ||
1240 | __acquires(&pr_tmpl->registration_lock) | ||
1223 | { | 1241 | { |
1224 | struct target_core_fabric_ops *tfo = | 1242 | const struct target_core_fabric_ops *tfo = |
1225 | pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo; | 1243 | pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo; |
1226 | struct t10_reservation *pr_tmpl = &dev->t10_pr; | 1244 | struct t10_reservation *pr_tmpl = &dev->t10_pr; |
1227 | char i_buf[PR_REG_ISID_ID_LEN]; | 1245 | char i_buf[PR_REG_ISID_ID_LEN]; |
@@ -1445,7 +1463,7 @@ core_scsi3_decode_spec_i_port( | |||
1445 | struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe; | 1463 | struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe; |
1446 | LIST_HEAD(tid_dest_list); | 1464 | LIST_HEAD(tid_dest_list); |
1447 | struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp; | 1465 | struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp; |
1448 | struct target_core_fabric_ops *tmp_tf_ops; | 1466 | const struct target_core_fabric_ops *tmp_tf_ops; |
1449 | unsigned char *buf; | 1467 | unsigned char *buf; |
1450 | unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident; | 1468 | unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident; |
1451 | char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN]; | 1469 | char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN]; |
@@ -2287,7 +2305,6 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key) | |||
2287 | spin_lock(&dev->dev_reservation_lock); | 2305 | spin_lock(&dev->dev_reservation_lock); |
2288 | pr_res_holder = dev->dev_pr_res_holder; | 2306 | pr_res_holder = dev->dev_pr_res_holder; |
2289 | if (pr_res_holder) { | 2307 | if (pr_res_holder) { |
2290 | int pr_res_type = pr_res_holder->pr_res_type; | ||
2291 | /* | 2308 | /* |
2292 | * From spc4r17 Section 5.7.9: Reserving: | 2309 | * From spc4r17 Section 5.7.9: Reserving: |
2293 | * | 2310 | * |
@@ -2298,9 +2315,7 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key) | |||
2298 | * the logical unit, then the command shall be completed with | 2315 | * the logical unit, then the command shall be completed with |
2299 | * RESERVATION CONFLICT status. | 2316 | * RESERVATION CONFLICT status. |
2300 | */ | 2317 | */ |
2301 | if ((pr_res_holder != pr_reg) && | 2318 | if (!is_reservation_holder(pr_res_holder, pr_reg)) { |
2302 | (pr_res_type != PR_TYPE_WRITE_EXCLUSIVE_ALLREG) && | ||
2303 | (pr_res_type != PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) { | ||
2304 | struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; | 2319 | struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; |
2305 | pr_err("SPC-3 PR: Attempted RESERVE from" | 2320 | pr_err("SPC-3 PR: Attempted RESERVE from" |
2306 | " [%s]: %s while reservation already held by" | 2321 | " [%s]: %s while reservation already held by" |
@@ -2409,7 +2424,7 @@ static void __core_scsi3_complete_pro_release( | |||
2409 | int explicit, | 2424 | int explicit, |
2410 | int unreg) | 2425 | int unreg) |
2411 | { | 2426 | { |
2412 | struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo; | 2427 | const struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo; |
2413 | char i_buf[PR_REG_ISID_ID_LEN]; | 2428 | char i_buf[PR_REG_ISID_ID_LEN]; |
2414 | int pr_res_type = 0, pr_res_scope = 0; | 2429 | int pr_res_type = 0, pr_res_scope = 0; |
2415 | 2430 | ||
@@ -2477,7 +2492,6 @@ core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope, | |||
2477 | struct se_lun *se_lun = cmd->se_lun; | 2492 | struct se_lun *se_lun = cmd->se_lun; |
2478 | struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder; | 2493 | struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder; |
2479 | struct t10_reservation *pr_tmpl = &dev->t10_pr; | 2494 | struct t10_reservation *pr_tmpl = &dev->t10_pr; |
2480 | int all_reg = 0; | ||
2481 | sense_reason_t ret = 0; | 2495 | sense_reason_t ret = 0; |
2482 | 2496 | ||
2483 | if (!se_sess || !se_lun) { | 2497 | if (!se_sess || !se_lun) { |
@@ -2514,13 +2528,9 @@ core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope, | |||
2514 | spin_unlock(&dev->dev_reservation_lock); | 2528 | spin_unlock(&dev->dev_reservation_lock); |
2515 | goto out_put_pr_reg; | 2529 | goto out_put_pr_reg; |
2516 | } | 2530 | } |
2517 | if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) || | ||
2518 | (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) | ||
2519 | all_reg = 1; | ||
2520 | 2531 | ||
2521 | if ((all_reg == 0) && (pr_res_holder != pr_reg)) { | 2532 | if (!is_reservation_holder(pr_res_holder, pr_reg)) { |
2522 | /* | 2533 | /* |
2523 | * Non 'All Registrants' PR Type cases.. | ||
2524 | * Release request from a registered I_T nexus that is not a | 2534 | * Release request from a registered I_T nexus that is not a |
2525 | * persistent reservation holder. return GOOD status. | 2535 | * persistent reservation holder. return GOOD status. |
2526 | */ | 2536 | */ |
@@ -2726,7 +2736,7 @@ static void __core_scsi3_complete_pro_preempt( | |||
2726 | enum preempt_type preempt_type) | 2736 | enum preempt_type preempt_type) |
2727 | { | 2737 | { |
2728 | struct se_node_acl *nacl = pr_reg->pr_reg_nacl; | 2738 | struct se_node_acl *nacl = pr_reg->pr_reg_nacl; |
2729 | struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; | 2739 | const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; |
2730 | char i_buf[PR_REG_ISID_ID_LEN]; | 2740 | char i_buf[PR_REG_ISID_ID_LEN]; |
2731 | 2741 | ||
2732 | memset(i_buf, 0, PR_REG_ISID_ID_LEN); | 2742 | memset(i_buf, 0, PR_REG_ISID_ID_LEN); |
@@ -3111,7 +3121,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key, | |||
3111 | struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL; | 3121 | struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL; |
3112 | struct se_port *se_port; | 3122 | struct se_port *se_port; |
3113 | struct se_portal_group *se_tpg, *dest_se_tpg = NULL; | 3123 | struct se_portal_group *se_tpg, *dest_se_tpg = NULL; |
3114 | struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops; | 3124 | const struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops; |
3115 | struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg; | 3125 | struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg; |
3116 | struct t10_reservation *pr_tmpl = &dev->t10_pr; | 3126 | struct t10_reservation *pr_tmpl = &dev->t10_pr; |
3117 | unsigned char *buf; | 3127 | unsigned char *buf; |
@@ -3375,7 +3385,7 @@ after_iport_check: | |||
3375 | * From spc4r17 section 5.7.8 Table 50 -- | 3385 | * From spc4r17 section 5.7.8 Table 50 -- |
3376 | * Register behaviors for a REGISTER AND MOVE service action | 3386 | * Register behaviors for a REGISTER AND MOVE service action |
3377 | */ | 3387 | */ |
3378 | if (pr_res_holder != pr_reg) { | 3388 | if (!is_reservation_holder(pr_res_holder, pr_reg)) { |
3379 | pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T" | 3389 | pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T" |
3380 | " Nexus is not reservation holder\n"); | 3390 | " Nexus is not reservation holder\n"); |
3381 | spin_unlock(&dev->dev_reservation_lock); | 3391 | spin_unlock(&dev->dev_reservation_lock); |
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index 98e83ac5661b..a263bf5fab8d 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c | |||
@@ -139,10 +139,22 @@ static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table * | |||
139 | unsigned char *p; | 139 | unsigned char *p; |
140 | 140 | ||
141 | while (total_sg_needed) { | 141 | while (total_sg_needed) { |
142 | unsigned int chain_entry = 0; | ||
143 | |||
142 | sg_per_table = (total_sg_needed > max_sg_per_table) ? | 144 | sg_per_table = (total_sg_needed > max_sg_per_table) ? |
143 | max_sg_per_table : total_sg_needed; | 145 | max_sg_per_table : total_sg_needed; |
144 | 146 | ||
145 | sg = kzalloc(sg_per_table * sizeof(struct scatterlist), | 147 | #ifdef CONFIG_ARCH_HAS_SG_CHAIN |
148 | |||
149 | /* | ||
150 | * Reserve extra element for chain entry | ||
151 | */ | ||
152 | if (sg_per_table < total_sg_needed) | ||
153 | chain_entry = 1; | ||
154 | |||
155 | #endif /* CONFIG_ARCH_HAS_SG_CHAIN */ | ||
156 | |||
157 | sg = kcalloc(sg_per_table + chain_entry, sizeof(*sg), | ||
146 | GFP_KERNEL); | 158 | GFP_KERNEL); |
147 | if (!sg) { | 159 | if (!sg) { |
148 | pr_err("Unable to allocate scatterlist array" | 160 | pr_err("Unable to allocate scatterlist array" |
@@ -150,7 +162,16 @@ static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table * | |||
150 | return -ENOMEM; | 162 | return -ENOMEM; |
151 | } | 163 | } |
152 | 164 | ||
153 | sg_init_table(sg, sg_per_table); | 165 | sg_init_table(sg, sg_per_table + chain_entry); |
166 | |||
167 | #ifdef CONFIG_ARCH_HAS_SG_CHAIN | ||
168 | |||
169 | if (i > 0) { | ||
170 | sg_chain(sg_table[i - 1].sg_table, | ||
171 | max_sg_per_table + 1, sg); | ||
172 | } | ||
173 | |||
174 | #endif /* CONFIG_ARCH_HAS_SG_CHAIN */ | ||
154 | 175 | ||
155 | sg_table[i].sg_table = sg; | 176 | sg_table[i].sg_table = sg; |
156 | sg_table[i].rd_sg_count = sg_per_table; | 177 | sg_table[i].rd_sg_count = sg_per_table; |
@@ -382,6 +403,76 @@ static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page | |||
382 | return NULL; | 403 | return NULL; |
383 | } | 404 | } |
384 | 405 | ||
406 | typedef sense_reason_t (*dif_verify)(struct se_cmd *, sector_t, unsigned int, | ||
407 | unsigned int, struct scatterlist *, int); | ||
408 | |||
409 | static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, dif_verify dif_verify) | ||
410 | { | ||
411 | struct se_device *se_dev = cmd->se_dev; | ||
412 | struct rd_dev *dev = RD_DEV(se_dev); | ||
413 | struct rd_dev_sg_table *prot_table; | ||
414 | bool need_to_release = false; | ||
415 | struct scatterlist *prot_sg; | ||
416 | u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size; | ||
417 | u32 prot_offset, prot_page; | ||
418 | u32 prot_npages __maybe_unused; | ||
419 | u64 tmp; | ||
420 | sense_reason_t rc = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | ||
421 | |||
422 | tmp = cmd->t_task_lba * se_dev->prot_length; | ||
423 | prot_offset = do_div(tmp, PAGE_SIZE); | ||
424 | prot_page = tmp; | ||
425 | |||
426 | prot_table = rd_get_prot_table(dev, prot_page); | ||
427 | if (!prot_table) | ||
428 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | ||
429 | |||
430 | prot_sg = &prot_table->sg_table[prot_page - | ||
431 | prot_table->page_start_offset]; | ||
432 | |||
433 | #ifndef CONFIG_ARCH_HAS_SG_CHAIN | ||
434 | |||
435 | prot_npages = DIV_ROUND_UP(prot_offset + sectors * se_dev->prot_length, | ||
436 | PAGE_SIZE); | ||
437 | |||
438 | /* | ||
439 | * Allocate temporaly contiguous scatterlist entries if prot pages | ||
440 | * straddles multiple scatterlist tables. | ||
441 | */ | ||
442 | if (prot_table->page_end_offset < prot_page + prot_npages - 1) { | ||
443 | int i; | ||
444 | |||
445 | prot_sg = kcalloc(prot_npages, sizeof(*prot_sg), GFP_KERNEL); | ||
446 | if (!prot_sg) | ||
447 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | ||
448 | |||
449 | need_to_release = true; | ||
450 | sg_init_table(prot_sg, prot_npages); | ||
451 | |||
452 | for (i = 0; i < prot_npages; i++) { | ||
453 | if (prot_page + i > prot_table->page_end_offset) { | ||
454 | prot_table = rd_get_prot_table(dev, | ||
455 | prot_page + i); | ||
456 | if (!prot_table) { | ||
457 | kfree(prot_sg); | ||
458 | return rc; | ||
459 | } | ||
460 | sg_unmark_end(&prot_sg[i - 1]); | ||
461 | } | ||
462 | prot_sg[i] = prot_table->sg_table[prot_page + i - | ||
463 | prot_table->page_start_offset]; | ||
464 | } | ||
465 | } | ||
466 | |||
467 | #endif /* !CONFIG_ARCH_HAS_SG_CHAIN */ | ||
468 | |||
469 | rc = dif_verify(cmd, cmd->t_task_lba, sectors, 0, prot_sg, prot_offset); | ||
470 | if (need_to_release) | ||
471 | kfree(prot_sg); | ||
472 | |||
473 | return rc; | ||
474 | } | ||
475 | |||
385 | static sense_reason_t | 476 | static sense_reason_t |
386 | rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, | 477 | rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, |
387 | enum dma_data_direction data_direction) | 478 | enum dma_data_direction data_direction) |
@@ -419,24 +510,9 @@ rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, | |||
419 | data_direction == DMA_FROM_DEVICE ? "Read" : "Write", | 510 | data_direction == DMA_FROM_DEVICE ? "Read" : "Write", |
420 | cmd->t_task_lba, rd_size, rd_page, rd_offset); | 511 | cmd->t_task_lba, rd_size, rd_page, rd_offset); |
421 | 512 | ||
422 | if (cmd->prot_type && data_direction == DMA_TO_DEVICE) { | 513 | if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type && |
423 | struct rd_dev_sg_table *prot_table; | 514 | data_direction == DMA_TO_DEVICE) { |
424 | struct scatterlist *prot_sg; | 515 | rc = rd_do_prot_rw(cmd, sbc_dif_verify_write); |
425 | u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size; | ||
426 | u32 prot_offset, prot_page; | ||
427 | |||
428 | tmp = cmd->t_task_lba * se_dev->prot_length; | ||
429 | prot_offset = do_div(tmp, PAGE_SIZE); | ||
430 | prot_page = tmp; | ||
431 | |||
432 | prot_table = rd_get_prot_table(dev, prot_page); | ||
433 | if (!prot_table) | ||
434 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | ||
435 | |||
436 | prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset]; | ||
437 | |||
438 | rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors, 0, | ||
439 | prot_sg, prot_offset); | ||
440 | if (rc) | 516 | if (rc) |
441 | return rc; | 517 | return rc; |
442 | } | 518 | } |
@@ -502,24 +578,9 @@ rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, | |||
502 | } | 578 | } |
503 | sg_miter_stop(&m); | 579 | sg_miter_stop(&m); |
504 | 580 | ||
505 | if (cmd->prot_type && data_direction == DMA_FROM_DEVICE) { | 581 | if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type && |
506 | struct rd_dev_sg_table *prot_table; | 582 | data_direction == DMA_FROM_DEVICE) { |
507 | struct scatterlist *prot_sg; | 583 | rc = rd_do_prot_rw(cmd, sbc_dif_verify_read); |
508 | u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size; | ||
509 | u32 prot_offset, prot_page; | ||
510 | |||
511 | tmp = cmd->t_task_lba * se_dev->prot_length; | ||
512 | prot_offset = do_div(tmp, PAGE_SIZE); | ||
513 | prot_page = tmp; | ||
514 | |||
515 | prot_table = rd_get_prot_table(dev, prot_page); | ||
516 | if (!prot_table) | ||
517 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | ||
518 | |||
519 | prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset]; | ||
520 | |||
521 | rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0, | ||
522 | prot_sg, prot_offset); | ||
523 | if (rc) | 584 | if (rc) |
524 | return rc; | 585 | return rc; |
525 | } | 586 | } |
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 3e7297411110..8855781ac653 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c | |||
@@ -93,6 +93,8 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd) | |||
93 | { | 93 | { |
94 | struct se_device *dev = cmd->se_dev; | 94 | struct se_device *dev = cmd->se_dev; |
95 | struct se_session *sess = cmd->se_sess; | 95 | struct se_session *sess = cmd->se_sess; |
96 | int pi_prot_type = dev->dev_attrib.pi_prot_type; | ||
97 | |||
96 | unsigned char *rbuf; | 98 | unsigned char *rbuf; |
97 | unsigned char buf[32]; | 99 | unsigned char buf[32]; |
98 | unsigned long long blocks = dev->transport->get_blocks(dev); | 100 | unsigned long long blocks = dev->transport->get_blocks(dev); |
@@ -114,8 +116,15 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd) | |||
114 | * Set P_TYPE and PROT_EN bits for DIF support | 116 | * Set P_TYPE and PROT_EN bits for DIF support |
115 | */ | 117 | */ |
116 | if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { | 118 | if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { |
117 | if (dev->dev_attrib.pi_prot_type) | 119 | /* |
118 | buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1; | 120 | * Only override a device's pi_prot_type if no T10-PI is |
121 | * available, and sess_prot_type has been explicitly enabled. | ||
122 | */ | ||
123 | if (!pi_prot_type) | ||
124 | pi_prot_type = sess->sess_prot_type; | ||
125 | |||
126 | if (pi_prot_type) | ||
127 | buf[12] = (pi_prot_type - 1) << 1 | 0x1; | ||
119 | } | 128 | } |
120 | 129 | ||
121 | if (dev->transport->get_lbppbe) | 130 | if (dev->transport->get_lbppbe) |
@@ -312,7 +321,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o | |||
312 | return 0; | 321 | return 0; |
313 | } | 322 | } |
314 | 323 | ||
315 | static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd) | 324 | static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success) |
316 | { | 325 | { |
317 | unsigned char *buf, *addr; | 326 | unsigned char *buf, *addr; |
318 | struct scatterlist *sg; | 327 | struct scatterlist *sg; |
@@ -376,7 +385,7 @@ sbc_execute_rw(struct se_cmd *cmd) | |||
376 | cmd->data_direction); | 385 | cmd->data_direction); |
377 | } | 386 | } |
378 | 387 | ||
379 | static sense_reason_t compare_and_write_post(struct se_cmd *cmd) | 388 | static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success) |
380 | { | 389 | { |
381 | struct se_device *dev = cmd->se_dev; | 390 | struct se_device *dev = cmd->se_dev; |
382 | 391 | ||
@@ -399,7 +408,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd) | |||
399 | return TCM_NO_SENSE; | 408 | return TCM_NO_SENSE; |
400 | } | 409 | } |
401 | 410 | ||
402 | static sense_reason_t compare_and_write_callback(struct se_cmd *cmd) | 411 | static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success) |
403 | { | 412 | { |
404 | struct se_device *dev = cmd->se_dev; | 413 | struct se_device *dev = cmd->se_dev; |
405 | struct scatterlist *write_sg = NULL, *sg; | 414 | struct scatterlist *write_sg = NULL, *sg; |
@@ -414,11 +423,16 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd) | |||
414 | 423 | ||
415 | /* | 424 | /* |
416 | * Handle early failure in transport_generic_request_failure(), | 425 | * Handle early failure in transport_generic_request_failure(), |
417 | * which will not have taken ->caw_mutex yet.. | 426 | * which will not have taken ->caw_sem yet.. |
418 | */ | 427 | */ |
419 | if (!cmd->t_data_sg || !cmd->t_bidi_data_sg) | 428 | if (!success && (!cmd->t_data_sg || !cmd->t_bidi_data_sg)) |
420 | return TCM_NO_SENSE; | 429 | return TCM_NO_SENSE; |
421 | /* | 430 | /* |
431 | * Handle special case for zero-length COMPARE_AND_WRITE | ||
432 | */ | ||
433 | if (!cmd->data_length) | ||
434 | goto out; | ||
435 | /* | ||
422 | * Immediately exit + release dev->caw_sem if command has already | 436 | * Immediately exit + release dev->caw_sem if command has already |
423 | * been failed with a non-zero SCSI status. | 437 | * been failed with a non-zero SCSI status. |
424 | */ | 438 | */ |
@@ -581,12 +595,13 @@ sbc_compare_and_write(struct se_cmd *cmd) | |||
581 | } | 595 | } |
582 | 596 | ||
583 | static int | 597 | static int |
584 | sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type, | 598 | sbc_set_prot_op_checks(u8 protect, bool fabric_prot, enum target_prot_type prot_type, |
585 | bool is_write, struct se_cmd *cmd) | 599 | bool is_write, struct se_cmd *cmd) |
586 | { | 600 | { |
587 | if (is_write) { | 601 | if (is_write) { |
588 | cmd->prot_op = protect ? TARGET_PROT_DOUT_PASS : | 602 | cmd->prot_op = fabric_prot ? TARGET_PROT_DOUT_STRIP : |
589 | TARGET_PROT_DOUT_INSERT; | 603 | protect ? TARGET_PROT_DOUT_PASS : |
604 | TARGET_PROT_DOUT_INSERT; | ||
590 | switch (protect) { | 605 | switch (protect) { |
591 | case 0x0: | 606 | case 0x0: |
592 | case 0x3: | 607 | case 0x3: |
@@ -610,8 +625,9 @@ sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type, | |||
610 | return -EINVAL; | 625 | return -EINVAL; |
611 | } | 626 | } |
612 | } else { | 627 | } else { |
613 | cmd->prot_op = protect ? TARGET_PROT_DIN_PASS : | 628 | cmd->prot_op = fabric_prot ? TARGET_PROT_DIN_INSERT : |
614 | TARGET_PROT_DIN_STRIP; | 629 | protect ? TARGET_PROT_DIN_PASS : |
630 | TARGET_PROT_DIN_STRIP; | ||
615 | switch (protect) { | 631 | switch (protect) { |
616 | case 0x0: | 632 | case 0x0: |
617 | case 0x1: | 633 | case 0x1: |
@@ -644,11 +660,15 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb, | |||
644 | u32 sectors, bool is_write) | 660 | u32 sectors, bool is_write) |
645 | { | 661 | { |
646 | u8 protect = cdb[1] >> 5; | 662 | u8 protect = cdb[1] >> 5; |
663 | int sp_ops = cmd->se_sess->sup_prot_ops; | ||
664 | int pi_prot_type = dev->dev_attrib.pi_prot_type; | ||
665 | bool fabric_prot = false; | ||
647 | 666 | ||
648 | if (!cmd->t_prot_sg || !cmd->t_prot_nents) { | 667 | if (!cmd->t_prot_sg || !cmd->t_prot_nents) { |
649 | if (protect && !dev->dev_attrib.pi_prot_type) { | 668 | if (unlikely(protect && |
650 | pr_err("CDB contains protect bit, but device does not" | 669 | !dev->dev_attrib.pi_prot_type && !cmd->se_sess->sess_prot_type)) { |
651 | " advertise PROTECT=1 feature bit\n"); | 670 | pr_err("CDB contains protect bit, but device + fabric does" |
671 | " not advertise PROTECT=1 feature bit\n"); | ||
652 | return TCM_INVALID_CDB_FIELD; | 672 | return TCM_INVALID_CDB_FIELD; |
653 | } | 673 | } |
654 | if (cmd->prot_pto) | 674 | if (cmd->prot_pto) |
@@ -669,15 +689,32 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb, | |||
669 | cmd->reftag_seed = cmd->t_task_lba; | 689 | cmd->reftag_seed = cmd->t_task_lba; |
670 | break; | 690 | break; |
671 | case TARGET_DIF_TYPE0_PROT: | 691 | case TARGET_DIF_TYPE0_PROT: |
692 | /* | ||
693 | * See if the fabric supports T10-PI, and the session has been | ||
694 | * configured to allow export PROTECT=1 feature bit with backend | ||
695 | * devices that don't support T10-PI. | ||
696 | */ | ||
697 | fabric_prot = is_write ? | ||
698 | !!(sp_ops & (TARGET_PROT_DOUT_PASS | TARGET_PROT_DOUT_STRIP)) : | ||
699 | !!(sp_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DIN_INSERT)); | ||
700 | |||
701 | if (fabric_prot && cmd->se_sess->sess_prot_type) { | ||
702 | pi_prot_type = cmd->se_sess->sess_prot_type; | ||
703 | break; | ||
704 | } | ||
705 | if (!protect) | ||
706 | return TCM_NO_SENSE; | ||
707 | /* Fallthrough */ | ||
672 | default: | 708 | default: |
673 | return TCM_NO_SENSE; | 709 | pr_err("Unable to determine pi_prot_type for CDB: 0x%02x " |
710 | "PROTECT: 0x%02x\n", cdb[0], protect); | ||
711 | return TCM_INVALID_CDB_FIELD; | ||
674 | } | 712 | } |
675 | 713 | ||
676 | if (sbc_set_prot_op_checks(protect, dev->dev_attrib.pi_prot_type, | 714 | if (sbc_set_prot_op_checks(protect, fabric_prot, pi_prot_type, is_write, cmd)) |
677 | is_write, cmd)) | ||
678 | return TCM_INVALID_CDB_FIELD; | 715 | return TCM_INVALID_CDB_FIELD; |
679 | 716 | ||
680 | cmd->prot_type = dev->dev_attrib.pi_prot_type; | 717 | cmd->prot_type = pi_prot_type; |
681 | cmd->prot_length = dev->prot_length * sectors; | 718 | cmd->prot_length = dev->prot_length * sectors; |
682 | 719 | ||
683 | /** | 720 | /** |
@@ -1166,14 +1203,16 @@ sbc_dif_generate(struct se_cmd *cmd) | |||
1166 | sdt = paddr + offset; | 1203 | sdt = paddr + offset; |
1167 | sdt->guard_tag = cpu_to_be16(crc_t10dif(daddr + j, | 1204 | sdt->guard_tag = cpu_to_be16(crc_t10dif(daddr + j, |
1168 | dev->dev_attrib.block_size)); | 1205 | dev->dev_attrib.block_size)); |
1169 | if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT) | 1206 | if (cmd->prot_type == TARGET_DIF_TYPE1_PROT) |
1170 | sdt->ref_tag = cpu_to_be32(sector & 0xffffffff); | 1207 | sdt->ref_tag = cpu_to_be32(sector & 0xffffffff); |
1171 | sdt->app_tag = 0; | 1208 | sdt->app_tag = 0; |
1172 | 1209 | ||
1173 | pr_debug("DIF WRITE INSERT sector: %llu guard_tag: 0x%04x" | 1210 | pr_debug("DIF %s INSERT sector: %llu guard_tag: 0x%04x" |
1174 | " app_tag: 0x%04x ref_tag: %u\n", | 1211 | " app_tag: 0x%04x ref_tag: %u\n", |
1175 | (unsigned long long)sector, sdt->guard_tag, | 1212 | (cmd->data_direction == DMA_TO_DEVICE) ? |
1176 | sdt->app_tag, be32_to_cpu(sdt->ref_tag)); | 1213 | "WRITE" : "READ", (unsigned long long)sector, |
1214 | sdt->guard_tag, sdt->app_tag, | ||
1215 | be32_to_cpu(sdt->ref_tag)); | ||
1177 | 1216 | ||
1178 | sector++; | 1217 | sector++; |
1179 | offset += sizeof(struct se_dif_v1_tuple); | 1218 | offset += sizeof(struct se_dif_v1_tuple); |
@@ -1185,12 +1224,16 @@ sbc_dif_generate(struct se_cmd *cmd) | |||
1185 | } | 1224 | } |
1186 | 1225 | ||
1187 | static sense_reason_t | 1226 | static sense_reason_t |
1188 | sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt, | 1227 | sbc_dif_v1_verify(struct se_cmd *cmd, struct se_dif_v1_tuple *sdt, |
1189 | const void *p, sector_t sector, unsigned int ei_lba) | 1228 | const void *p, sector_t sector, unsigned int ei_lba) |
1190 | { | 1229 | { |
1230 | struct se_device *dev = cmd->se_dev; | ||
1191 | int block_size = dev->dev_attrib.block_size; | 1231 | int block_size = dev->dev_attrib.block_size; |
1192 | __be16 csum; | 1232 | __be16 csum; |
1193 | 1233 | ||
1234 | if (!(cmd->prot_checks & TARGET_DIF_CHECK_GUARD)) | ||
1235 | goto check_ref; | ||
1236 | |||
1194 | csum = cpu_to_be16(crc_t10dif(p, block_size)); | 1237 | csum = cpu_to_be16(crc_t10dif(p, block_size)); |
1195 | 1238 | ||
1196 | if (sdt->guard_tag != csum) { | 1239 | if (sdt->guard_tag != csum) { |
@@ -1200,7 +1243,11 @@ sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt, | |||
1200 | return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; | 1243 | return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; |
1201 | } | 1244 | } |
1202 | 1245 | ||
1203 | if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT && | 1246 | check_ref: |
1247 | if (!(cmd->prot_checks & TARGET_DIF_CHECK_REFTAG)) | ||
1248 | return 0; | ||
1249 | |||
1250 | if (cmd->prot_type == TARGET_DIF_TYPE1_PROT && | ||
1204 | be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) { | 1251 | be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) { |
1205 | pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x" | 1252 | pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x" |
1206 | " sector MSB: 0x%08x\n", (unsigned long long)sector, | 1253 | " sector MSB: 0x%08x\n", (unsigned long long)sector, |
@@ -1208,7 +1255,7 @@ sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt, | |||
1208 | return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; | 1255 | return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; |
1209 | } | 1256 | } |
1210 | 1257 | ||
1211 | if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE2_PROT && | 1258 | if (cmd->prot_type == TARGET_DIF_TYPE2_PROT && |
1212 | be32_to_cpu(sdt->ref_tag) != ei_lba) { | 1259 | be32_to_cpu(sdt->ref_tag) != ei_lba) { |
1213 | pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x" | 1260 | pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x" |
1214 | " ei_lba: 0x%08x\n", (unsigned long long)sector, | 1261 | " ei_lba: 0x%08x\n", (unsigned long long)sector, |
@@ -1229,6 +1276,9 @@ sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read, | |||
1229 | unsigned int i, len, left; | 1276 | unsigned int i, len, left; |
1230 | unsigned int offset = sg_off; | 1277 | unsigned int offset = sg_off; |
1231 | 1278 | ||
1279 | if (!sg) | ||
1280 | return; | ||
1281 | |||
1232 | left = sectors * dev->prot_length; | 1282 | left = sectors * dev->prot_length; |
1233 | 1283 | ||
1234 | for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) { | 1284 | for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) { |
@@ -1292,7 +1342,7 @@ sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors, | |||
1292 | (unsigned long long)sector, sdt->guard_tag, | 1342 | (unsigned long long)sector, sdt->guard_tag, |
1293 | sdt->app_tag, be32_to_cpu(sdt->ref_tag)); | 1343 | sdt->app_tag, be32_to_cpu(sdt->ref_tag)); |
1294 | 1344 | ||
1295 | rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector, | 1345 | rc = sbc_dif_v1_verify(cmd, sdt, daddr + j, sector, |
1296 | ei_lba); | 1346 | ei_lba); |
1297 | if (rc) { | 1347 | if (rc) { |
1298 | kunmap_atomic(paddr); | 1348 | kunmap_atomic(paddr); |
@@ -1309,6 +1359,9 @@ sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors, | |||
1309 | kunmap_atomic(paddr); | 1359 | kunmap_atomic(paddr); |
1310 | kunmap_atomic(daddr); | 1360 | kunmap_atomic(daddr); |
1311 | } | 1361 | } |
1362 | if (!sg) | ||
1363 | return 0; | ||
1364 | |||
1312 | sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off); | 1365 | sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off); |
1313 | 1366 | ||
1314 | return 0; | 1367 | return 0; |
@@ -1353,7 +1406,7 @@ __sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors, | |||
1353 | continue; | 1406 | continue; |
1354 | } | 1407 | } |
1355 | 1408 | ||
1356 | rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector, | 1409 | rc = sbc_dif_v1_verify(cmd, sdt, daddr + j, sector, |
1357 | ei_lba); | 1410 | ei_lba); |
1358 | if (rc) { | 1411 | if (rc) { |
1359 | kunmap_atomic(paddr); | 1412 | kunmap_atomic(paddr); |
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 6c8bd6bc175c..7912aa124385 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c | |||
@@ -103,10 +103,12 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf) | |||
103 | buf[5] |= 0x8; | 103 | buf[5] |= 0x8; |
104 | /* | 104 | /* |
105 | * Set Protection (PROTECT) bit when DIF has been enabled on the | 105 | * Set Protection (PROTECT) bit when DIF has been enabled on the |
106 | * device, and the transport supports VERIFY + PASS. | 106 | * device, and the fabric supports VERIFY + PASS. Also report |
107 | * PROTECT=1 if sess_prot_type has been configured to allow T10-PI | ||
108 | * to unprotected devices. | ||
107 | */ | 109 | */ |
108 | if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { | 110 | if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { |
109 | if (dev->dev_attrib.pi_prot_type) | 111 | if (dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type) |
110 | buf[5] |= 0x1; | 112 | buf[5] |= 0x1; |
111 | } | 113 | } |
112 | 114 | ||
@@ -467,9 +469,11 @@ spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) | |||
467 | * only for TYPE3 protection. | 469 | * only for TYPE3 protection. |
468 | */ | 470 | */ |
469 | if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { | 471 | if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { |
470 | if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT) | 472 | if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT || |
473 | cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE1_PROT) | ||
471 | buf[4] = 0x5; | 474 | buf[4] = 0x5; |
472 | else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT) | 475 | else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT || |
476 | cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT) | ||
473 | buf[4] = 0x4; | 477 | buf[4] = 0x4; |
474 | } | 478 | } |
475 | 479 | ||
@@ -861,7 +865,7 @@ static int spc_modesense_control(struct se_cmd *cmd, u8 pc, u8 *p) | |||
861 | * TAG field. | 865 | * TAG field. |
862 | */ | 866 | */ |
863 | if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { | 867 | if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { |
864 | if (dev->dev_attrib.pi_prot_type) | 868 | if (dev->dev_attrib.pi_prot_type || sess->sess_prot_type) |
865 | p[5] |= 0x80; | 869 | p[5] |= 0x80; |
866 | } | 870 | } |
867 | 871 | ||
@@ -1099,7 +1103,7 @@ static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd) | |||
1099 | unsigned char *buf; | 1103 | unsigned char *buf; |
1100 | unsigned char tbuf[SE_MODE_PAGE_BUF]; | 1104 | unsigned char tbuf[SE_MODE_PAGE_BUF]; |
1101 | int length; | 1105 | int length; |
1102 | int ret = 0; | 1106 | sense_reason_t ret = 0; |
1103 | int i; | 1107 | int i; |
1104 | 1108 | ||
1105 | if (!cmd->data_length) { | 1109 | if (!cmd->data_length) { |
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index fa5e157db47b..315ec3458eeb 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c | |||
@@ -125,8 +125,8 @@ void core_tmr_abort_task( | |||
125 | if (dev != se_cmd->se_dev) | 125 | if (dev != se_cmd->se_dev) |
126 | continue; | 126 | continue; |
127 | 127 | ||
128 | /* skip se_cmd associated with tmr */ | 128 | /* skip task management functions, including tmr->task_cmd */ |
129 | if (tmr->task_cmd == se_cmd) | 129 | if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) |
130 | continue; | 130 | continue; |
131 | 131 | ||
132 | ref_tag = se_cmd->se_tfo->get_task_tag(se_cmd); | 132 | ref_tag = se_cmd->se_tfo->get_task_tag(se_cmd); |
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 0696de9553d3..47f064415bf6 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c | |||
@@ -672,7 +672,7 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg) | |||
672 | } | 672 | } |
673 | 673 | ||
674 | int core_tpg_register( | 674 | int core_tpg_register( |
675 | struct target_core_fabric_ops *tfo, | 675 | const struct target_core_fabric_ops *tfo, |
676 | struct se_wwn *se_wwn, | 676 | struct se_wwn *se_wwn, |
677 | struct se_portal_group *se_tpg, | 677 | struct se_portal_group *se_tpg, |
678 | void *tpg_fabric_ptr, | 678 | void *tpg_fabric_ptr, |
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index ac3cbabdbdf0..3fe5cb240b6f 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
@@ -322,6 +322,7 @@ void __transport_register_session( | |||
322 | struct se_session *se_sess, | 322 | struct se_session *se_sess, |
323 | void *fabric_sess_ptr) | 323 | void *fabric_sess_ptr) |
324 | { | 324 | { |
325 | const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo; | ||
325 | unsigned char buf[PR_REG_ISID_LEN]; | 326 | unsigned char buf[PR_REG_ISID_LEN]; |
326 | 327 | ||
327 | se_sess->se_tpg = se_tpg; | 328 | se_sess->se_tpg = se_tpg; |
@@ -334,6 +335,21 @@ void __transport_register_session( | |||
334 | */ | 335 | */ |
335 | if (se_nacl) { | 336 | if (se_nacl) { |
336 | /* | 337 | /* |
338 | * | ||
339 | * Determine if fabric allows for T10-PI feature bits exposed to | ||
340 | * initiators for device backends with !dev->dev_attrib.pi_prot_type. | ||
341 | * | ||
342 | * If so, then always save prot_type on a per se_node_acl node | ||
343 | * basis and re-instate the previous sess_prot_type to avoid | ||
344 | * disabling PI from below any previously initiator side | ||
345 | * registered LUNs. | ||
346 | */ | ||
347 | if (se_nacl->saved_prot_type) | ||
348 | se_sess->sess_prot_type = se_nacl->saved_prot_type; | ||
349 | else if (tfo->tpg_check_prot_fabric_only) | ||
350 | se_sess->sess_prot_type = se_nacl->saved_prot_type = | ||
351 | tfo->tpg_check_prot_fabric_only(se_tpg); | ||
352 | /* | ||
337 | * If the fabric module supports an ISID based TransportID, | 353 | * If the fabric module supports an ISID based TransportID, |
338 | * save this value in binary from the fabric I_T Nexus now. | 354 | * save this value in binary from the fabric I_T Nexus now. |
339 | */ | 355 | */ |
@@ -404,6 +420,30 @@ void target_put_session(struct se_session *se_sess) | |||
404 | } | 420 | } |
405 | EXPORT_SYMBOL(target_put_session); | 421 | EXPORT_SYMBOL(target_put_session); |
406 | 422 | ||
423 | ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page) | ||
424 | { | ||
425 | struct se_session *se_sess; | ||
426 | ssize_t len = 0; | ||
427 | |||
428 | spin_lock_bh(&se_tpg->session_lock); | ||
429 | list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) { | ||
430 | if (!se_sess->se_node_acl) | ||
431 | continue; | ||
432 | if (!se_sess->se_node_acl->dynamic_node_acl) | ||
433 | continue; | ||
434 | if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE) | ||
435 | break; | ||
436 | |||
437 | len += snprintf(page + len, PAGE_SIZE - len, "%s\n", | ||
438 | se_sess->se_node_acl->initiatorname); | ||
439 | len += 1; /* Include NULL terminator */ | ||
440 | } | ||
441 | spin_unlock_bh(&se_tpg->session_lock); | ||
442 | |||
443 | return len; | ||
444 | } | ||
445 | EXPORT_SYMBOL(target_show_dynamic_sessions); | ||
446 | |||
407 | static void target_complete_nacl(struct kref *kref) | 447 | static void target_complete_nacl(struct kref *kref) |
408 | { | 448 | { |
409 | struct se_node_acl *nacl = container_of(kref, | 449 | struct se_node_acl *nacl = container_of(kref, |
@@ -462,7 +502,7 @@ EXPORT_SYMBOL(transport_free_session); | |||
462 | void transport_deregister_session(struct se_session *se_sess) | 502 | void transport_deregister_session(struct se_session *se_sess) |
463 | { | 503 | { |
464 | struct se_portal_group *se_tpg = se_sess->se_tpg; | 504 | struct se_portal_group *se_tpg = se_sess->se_tpg; |
465 | struct target_core_fabric_ops *se_tfo; | 505 | const struct target_core_fabric_ops *se_tfo; |
466 | struct se_node_acl *se_nacl; | 506 | struct se_node_acl *se_nacl; |
467 | unsigned long flags; | 507 | unsigned long flags; |
468 | bool comp_nacl = true; | 508 | bool comp_nacl = true; |
@@ -1118,7 +1158,7 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size) | |||
1118 | */ | 1158 | */ |
1119 | void transport_init_se_cmd( | 1159 | void transport_init_se_cmd( |
1120 | struct se_cmd *cmd, | 1160 | struct se_cmd *cmd, |
1121 | struct target_core_fabric_ops *tfo, | 1161 | const struct target_core_fabric_ops *tfo, |
1122 | struct se_session *se_sess, | 1162 | struct se_session *se_sess, |
1123 | u32 data_length, | 1163 | u32 data_length, |
1124 | int data_direction, | 1164 | int data_direction, |
@@ -1570,6 +1610,8 @@ EXPORT_SYMBOL(target_submit_tmr); | |||
1570 | * has completed. | 1610 | * has completed. |
1571 | */ | 1611 | */ |
1572 | bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags) | 1612 | bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags) |
1613 | __releases(&cmd->t_state_lock) | ||
1614 | __acquires(&cmd->t_state_lock) | ||
1573 | { | 1615 | { |
1574 | bool was_active = false; | 1616 | bool was_active = false; |
1575 | 1617 | ||
@@ -1615,11 +1657,11 @@ void transport_generic_request_failure(struct se_cmd *cmd, | |||
1615 | transport_complete_task_attr(cmd); | 1657 | transport_complete_task_attr(cmd); |
1616 | /* | 1658 | /* |
1617 | * Handle special case for COMPARE_AND_WRITE failure, where the | 1659 | * Handle special case for COMPARE_AND_WRITE failure, where the |
1618 | * callback is expected to drop the per device ->caw_mutex. | 1660 | * callback is expected to drop the per device ->caw_sem. |
1619 | */ | 1661 | */ |
1620 | if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && | 1662 | if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && |
1621 | cmd->transport_complete_callback) | 1663 | cmd->transport_complete_callback) |
1622 | cmd->transport_complete_callback(cmd); | 1664 | cmd->transport_complete_callback(cmd, false); |
1623 | 1665 | ||
1624 | switch (sense_reason) { | 1666 | switch (sense_reason) { |
1625 | case TCM_NON_EXISTENT_LUN: | 1667 | case TCM_NON_EXISTENT_LUN: |
@@ -1706,6 +1748,41 @@ void __target_execute_cmd(struct se_cmd *cmd) | |||
1706 | } | 1748 | } |
1707 | } | 1749 | } |
1708 | 1750 | ||
1751 | static int target_write_prot_action(struct se_cmd *cmd) | ||
1752 | { | ||
1753 | u32 sectors; | ||
1754 | /* | ||
1755 | * Perform WRITE_INSERT of PI using software emulation when backend | ||
1756 | * device has PI enabled, if the transport has not already generated | ||
1757 | * PI using hardware WRITE_INSERT offload. | ||
1758 | */ | ||
1759 | switch (cmd->prot_op) { | ||
1760 | case TARGET_PROT_DOUT_INSERT: | ||
1761 | if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT)) | ||
1762 | sbc_dif_generate(cmd); | ||
1763 | break; | ||
1764 | case TARGET_PROT_DOUT_STRIP: | ||
1765 | if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP) | ||
1766 | break; | ||
1767 | |||
1768 | sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size); | ||
1769 | cmd->pi_err = sbc_dif_verify_write(cmd, cmd->t_task_lba, | ||
1770 | sectors, 0, NULL, 0); | ||
1771 | if (unlikely(cmd->pi_err)) { | ||
1772 | spin_lock_irq(&cmd->t_state_lock); | ||
1773 | cmd->transport_state &= ~CMD_T_BUSY|CMD_T_SENT; | ||
1774 | spin_unlock_irq(&cmd->t_state_lock); | ||
1775 | transport_generic_request_failure(cmd, cmd->pi_err); | ||
1776 | return -1; | ||
1777 | } | ||
1778 | break; | ||
1779 | default: | ||
1780 | break; | ||
1781 | } | ||
1782 | |||
1783 | return 0; | ||
1784 | } | ||
1785 | |||
1709 | static bool target_handle_task_attr(struct se_cmd *cmd) | 1786 | static bool target_handle_task_attr(struct se_cmd *cmd) |
1710 | { | 1787 | { |
1711 | struct se_device *dev = cmd->se_dev; | 1788 | struct se_device *dev = cmd->se_dev; |
@@ -1785,15 +1862,9 @@ void target_execute_cmd(struct se_cmd *cmd) | |||
1785 | cmd->t_state = TRANSPORT_PROCESSING; | 1862 | cmd->t_state = TRANSPORT_PROCESSING; |
1786 | cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT; | 1863 | cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT; |
1787 | spin_unlock_irq(&cmd->t_state_lock); | 1864 | spin_unlock_irq(&cmd->t_state_lock); |
1788 | /* | 1865 | |
1789 | * Perform WRITE_INSERT of PI using software emulation when backend | 1866 | if (target_write_prot_action(cmd)) |
1790 | * device has PI enabled, if the transport has not already generated | 1867 | return; |
1791 | * PI using hardware WRITE_INSERT offload. | ||
1792 | */ | ||
1793 | if (cmd->prot_op == TARGET_PROT_DOUT_INSERT) { | ||
1794 | if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT)) | ||
1795 | sbc_dif_generate(cmd); | ||
1796 | } | ||
1797 | 1868 | ||
1798 | if (target_handle_task_attr(cmd)) { | 1869 | if (target_handle_task_attr(cmd)) { |
1799 | spin_lock_irq(&cmd->t_state_lock); | 1870 | spin_lock_irq(&cmd->t_state_lock); |
@@ -1919,16 +1990,28 @@ static void transport_handle_queue_full( | |||
1919 | schedule_work(&cmd->se_dev->qf_work_queue); | 1990 | schedule_work(&cmd->se_dev->qf_work_queue); |
1920 | } | 1991 | } |
1921 | 1992 | ||
1922 | static bool target_check_read_strip(struct se_cmd *cmd) | 1993 | static bool target_read_prot_action(struct se_cmd *cmd) |
1923 | { | 1994 | { |
1924 | sense_reason_t rc; | 1995 | sense_reason_t rc; |
1925 | 1996 | ||
1926 | if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) { | 1997 | switch (cmd->prot_op) { |
1927 | rc = sbc_dif_read_strip(cmd); | 1998 | case TARGET_PROT_DIN_STRIP: |
1928 | if (rc) { | 1999 | if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) { |
1929 | cmd->pi_err = rc; | 2000 | rc = sbc_dif_read_strip(cmd); |
1930 | return true; | 2001 | if (rc) { |
2002 | cmd->pi_err = rc; | ||
2003 | return true; | ||
2004 | } | ||
1931 | } | 2005 | } |
2006 | break; | ||
2007 | case TARGET_PROT_DIN_INSERT: | ||
2008 | if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT) | ||
2009 | break; | ||
2010 | |||
2011 | sbc_dif_generate(cmd); | ||
2012 | break; | ||
2013 | default: | ||
2014 | break; | ||
1932 | } | 2015 | } |
1933 | 2016 | ||
1934 | return false; | 2017 | return false; |
@@ -1975,8 +2058,12 @@ static void target_complete_ok_work(struct work_struct *work) | |||
1975 | if (cmd->transport_complete_callback) { | 2058 | if (cmd->transport_complete_callback) { |
1976 | sense_reason_t rc; | 2059 | sense_reason_t rc; |
1977 | 2060 | ||
1978 | rc = cmd->transport_complete_callback(cmd); | 2061 | rc = cmd->transport_complete_callback(cmd, true); |
1979 | if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) { | 2062 | if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) { |
2063 | if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && | ||
2064 | !cmd->data_length) | ||
2065 | goto queue_rsp; | ||
2066 | |||
1980 | return; | 2067 | return; |
1981 | } else if (rc) { | 2068 | } else if (rc) { |
1982 | ret = transport_send_check_condition_and_sense(cmd, | 2069 | ret = transport_send_check_condition_and_sense(cmd, |
@@ -1990,6 +2077,7 @@ static void target_complete_ok_work(struct work_struct *work) | |||
1990 | } | 2077 | } |
1991 | } | 2078 | } |
1992 | 2079 | ||
2080 | queue_rsp: | ||
1993 | switch (cmd->data_direction) { | 2081 | switch (cmd->data_direction) { |
1994 | case DMA_FROM_DEVICE: | 2082 | case DMA_FROM_DEVICE: |
1995 | spin_lock(&cmd->se_lun->lun_sep_lock); | 2083 | spin_lock(&cmd->se_lun->lun_sep_lock); |
@@ -2003,8 +2091,7 @@ static void target_complete_ok_work(struct work_struct *work) | |||
2003 | * backend had PI enabled, if the transport will not be | 2091 | * backend had PI enabled, if the transport will not be |
2004 | * performing hardware READ_STRIP offload. | 2092 | * performing hardware READ_STRIP offload. |
2005 | */ | 2093 | */ |
2006 | if (cmd->prot_op == TARGET_PROT_DIN_STRIP && | 2094 | if (target_read_prot_action(cmd)) { |
2007 | target_check_read_strip(cmd)) { | ||
2008 | ret = transport_send_check_condition_and_sense(cmd, | 2095 | ret = transport_send_check_condition_and_sense(cmd, |
2009 | cmd->pi_err, 0); | 2096 | cmd->pi_err, 0); |
2010 | if (ret == -EAGAIN || ret == -ENOMEM) | 2097 | if (ret == -EAGAIN || ret == -ENOMEM) |
@@ -2094,6 +2181,16 @@ static inline void transport_reset_sgl_orig(struct se_cmd *cmd) | |||
2094 | static inline void transport_free_pages(struct se_cmd *cmd) | 2181 | static inline void transport_free_pages(struct se_cmd *cmd) |
2095 | { | 2182 | { |
2096 | if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) { | 2183 | if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) { |
2184 | /* | ||
2185 | * Release special case READ buffer payload required for | ||
2186 | * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE | ||
2187 | */ | ||
2188 | if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) { | ||
2189 | transport_free_sgl(cmd->t_bidi_data_sg, | ||
2190 | cmd->t_bidi_data_nents); | ||
2191 | cmd->t_bidi_data_sg = NULL; | ||
2192 | cmd->t_bidi_data_nents = 0; | ||
2193 | } | ||
2097 | transport_reset_sgl_orig(cmd); | 2194 | transport_reset_sgl_orig(cmd); |
2098 | return; | 2195 | return; |
2099 | } | 2196 | } |
@@ -2246,6 +2343,7 @@ sense_reason_t | |||
2246 | transport_generic_new_cmd(struct se_cmd *cmd) | 2343 | transport_generic_new_cmd(struct se_cmd *cmd) |
2247 | { | 2344 | { |
2248 | int ret = 0; | 2345 | int ret = 0; |
2346 | bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB); | ||
2249 | 2347 | ||
2250 | /* | 2348 | /* |
2251 | * Determine is the TCM fabric module has already allocated physical | 2349 | * Determine is the TCM fabric module has already allocated physical |
@@ -2254,7 +2352,6 @@ transport_generic_new_cmd(struct se_cmd *cmd) | |||
2254 | */ | 2352 | */ |
2255 | if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && | 2353 | if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && |
2256 | cmd->data_length) { | 2354 | cmd->data_length) { |
2257 | bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB); | ||
2258 | 2355 | ||
2259 | if ((cmd->se_cmd_flags & SCF_BIDI) || | 2356 | if ((cmd->se_cmd_flags & SCF_BIDI) || |
2260 | (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) { | 2357 | (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) { |
@@ -2285,6 +2382,20 @@ transport_generic_new_cmd(struct se_cmd *cmd) | |||
2285 | cmd->data_length, zero_flag); | 2382 | cmd->data_length, zero_flag); |
2286 | if (ret < 0) | 2383 | if (ret < 0) |
2287 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 2384 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
2385 | } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && | ||
2386 | cmd->data_length) { | ||
2387 | /* | ||
2388 | * Special case for COMPARE_AND_WRITE with fabrics | ||
2389 | * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC. | ||
2390 | */ | ||
2391 | u32 caw_length = cmd->t_task_nolb * | ||
2392 | cmd->se_dev->dev_attrib.block_size; | ||
2393 | |||
2394 | ret = target_alloc_sgl(&cmd->t_bidi_data_sg, | ||
2395 | &cmd->t_bidi_data_nents, | ||
2396 | caw_length, zero_flag); | ||
2397 | if (ret < 0) | ||
2398 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | ||
2288 | } | 2399 | } |
2289 | /* | 2400 | /* |
2290 | * If this command is not a write we can execute it right here, | 2401 | * If this command is not a write we can execute it right here, |
@@ -2376,10 +2487,8 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd, | |||
2376 | * fabric acknowledgement that requires two target_put_sess_cmd() | 2487 | * fabric acknowledgement that requires two target_put_sess_cmd() |
2377 | * invocations before se_cmd descriptor release. | 2488 | * invocations before se_cmd descriptor release. |
2378 | */ | 2489 | */ |
2379 | if (ack_kref) { | 2490 | if (ack_kref) |
2380 | kref_get(&se_cmd->cmd_kref); | 2491 | kref_get(&se_cmd->cmd_kref); |
2381 | se_cmd->se_cmd_flags |= SCF_ACK_KREF; | ||
2382 | } | ||
2383 | 2492 | ||
2384 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); | 2493 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); |
2385 | if (se_sess->sess_tearing_down) { | 2494 | if (se_sess->sess_tearing_down) { |
@@ -2398,6 +2507,7 @@ out: | |||
2398 | EXPORT_SYMBOL(target_get_sess_cmd); | 2507 | EXPORT_SYMBOL(target_get_sess_cmd); |
2399 | 2508 | ||
2400 | static void target_release_cmd_kref(struct kref *kref) | 2509 | static void target_release_cmd_kref(struct kref *kref) |
2510 | __releases(&se_cmd->se_sess->sess_cmd_lock) | ||
2401 | { | 2511 | { |
2402 | struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); | 2512 | struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); |
2403 | struct se_session *se_sess = se_cmd->se_sess; | 2513 | struct se_session *se_sess = se_cmd->se_sess; |
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 1a1bcf71ec9d..dbc872a6c981 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c | |||
@@ -344,8 +344,11 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) | |||
344 | 344 | ||
345 | entry = (void *) mb + CMDR_OFF + cmd_head; | 345 | entry = (void *) mb + CMDR_OFF + cmd_head; |
346 | tcmu_flush_dcache_range(entry, sizeof(*entry)); | 346 | tcmu_flush_dcache_range(entry, sizeof(*entry)); |
347 | tcmu_hdr_set_op(&entry->hdr, TCMU_OP_PAD); | 347 | tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD); |
348 | tcmu_hdr_set_len(&entry->hdr, pad_size); | 348 | tcmu_hdr_set_len(&entry->hdr.len_op, pad_size); |
349 | entry->hdr.cmd_id = 0; /* not used for PAD */ | ||
350 | entry->hdr.kflags = 0; | ||
351 | entry->hdr.uflags = 0; | ||
349 | 352 | ||
350 | UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size); | 353 | UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size); |
351 | 354 | ||
@@ -355,9 +358,11 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) | |||
355 | 358 | ||
356 | entry = (void *) mb + CMDR_OFF + cmd_head; | 359 | entry = (void *) mb + CMDR_OFF + cmd_head; |
357 | tcmu_flush_dcache_range(entry, sizeof(*entry)); | 360 | tcmu_flush_dcache_range(entry, sizeof(*entry)); |
358 | tcmu_hdr_set_op(&entry->hdr, TCMU_OP_CMD); | 361 | tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD); |
359 | tcmu_hdr_set_len(&entry->hdr, command_size); | 362 | tcmu_hdr_set_len(&entry->hdr.len_op, command_size); |
360 | entry->cmd_id = tcmu_cmd->cmd_id; | 363 | entry->hdr.cmd_id = tcmu_cmd->cmd_id; |
364 | entry->hdr.kflags = 0; | ||
365 | entry->hdr.uflags = 0; | ||
361 | 366 | ||
362 | /* | 367 | /* |
363 | * Fix up iovecs, and handle if allocation in data ring wrapped. | 368 | * Fix up iovecs, and handle if allocation in data ring wrapped. |
@@ -376,7 +381,8 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) | |||
376 | 381 | ||
377 | /* Even iov_base is relative to mb_addr */ | 382 | /* Even iov_base is relative to mb_addr */ |
378 | iov->iov_len = copy_bytes; | 383 | iov->iov_len = copy_bytes; |
379 | iov->iov_base = (void *) udev->data_off + udev->data_head; | 384 | iov->iov_base = (void __user *) udev->data_off + |
385 | udev->data_head; | ||
380 | iov_cnt++; | 386 | iov_cnt++; |
381 | iov++; | 387 | iov++; |
382 | 388 | ||
@@ -388,7 +394,8 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) | |||
388 | copy_bytes = sg->length - copy_bytes; | 394 | copy_bytes = sg->length - copy_bytes; |
389 | 395 | ||
390 | iov->iov_len = copy_bytes; | 396 | iov->iov_len = copy_bytes; |
391 | iov->iov_base = (void *) udev->data_off + udev->data_head; | 397 | iov->iov_base = (void __user *) udev->data_off + |
398 | udev->data_head; | ||
392 | 399 | ||
393 | if (se_cmd->data_direction == DMA_TO_DEVICE) { | 400 | if (se_cmd->data_direction == DMA_TO_DEVICE) { |
394 | to = (void *) mb + udev->data_off + udev->data_head; | 401 | to = (void *) mb + udev->data_off + udev->data_head; |
@@ -405,6 +412,8 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) | |||
405 | kunmap_atomic(from); | 412 | kunmap_atomic(from); |
406 | } | 413 | } |
407 | entry->req.iov_cnt = iov_cnt; | 414 | entry->req.iov_cnt = iov_cnt; |
415 | entry->req.iov_bidi_cnt = 0; | ||
416 | entry->req.iov_dif_cnt = 0; | ||
408 | 417 | ||
409 | /* All offsets relative to mb_addr, not start of entry! */ | 418 | /* All offsets relative to mb_addr, not start of entry! */ |
410 | cdb_off = CMDR_OFF + cmd_head + base_command_size; | 419 | cdb_off = CMDR_OFF + cmd_head + base_command_size; |
@@ -462,6 +471,17 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry * | |||
462 | return; | 471 | return; |
463 | } | 472 | } |
464 | 473 | ||
474 | if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) { | ||
475 | UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size); | ||
476 | pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n", | ||
477 | cmd->se_cmd); | ||
478 | transport_generic_request_failure(cmd->se_cmd, | ||
479 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE); | ||
480 | cmd->se_cmd = NULL; | ||
481 | kmem_cache_free(tcmu_cmd_cache, cmd); | ||
482 | return; | ||
483 | } | ||
484 | |||
465 | if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) { | 485 | if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) { |
466 | memcpy(se_cmd->sense_buffer, entry->rsp.sense_buffer, | 486 | memcpy(se_cmd->sense_buffer, entry->rsp.sense_buffer, |
467 | se_cmd->scsi_sense_length); | 487 | se_cmd->scsi_sense_length); |
@@ -540,14 +560,16 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) | |||
540 | 560 | ||
541 | tcmu_flush_dcache_range(entry, sizeof(*entry)); | 561 | tcmu_flush_dcache_range(entry, sizeof(*entry)); |
542 | 562 | ||
543 | if (tcmu_hdr_get_op(&entry->hdr) == TCMU_OP_PAD) { | 563 | if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) { |
544 | UPDATE_HEAD(udev->cmdr_last_cleaned, tcmu_hdr_get_len(&entry->hdr), udev->cmdr_size); | 564 | UPDATE_HEAD(udev->cmdr_last_cleaned, |
565 | tcmu_hdr_get_len(entry->hdr.len_op), | ||
566 | udev->cmdr_size); | ||
545 | continue; | 567 | continue; |
546 | } | 568 | } |
547 | WARN_ON(tcmu_hdr_get_op(&entry->hdr) != TCMU_OP_CMD); | 569 | WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD); |
548 | 570 | ||
549 | spin_lock(&udev->commands_lock); | 571 | spin_lock(&udev->commands_lock); |
550 | cmd = idr_find(&udev->commands, entry->cmd_id); | 572 | cmd = idr_find(&udev->commands, entry->hdr.cmd_id); |
551 | if (cmd) | 573 | if (cmd) |
552 | idr_remove(&udev->commands, cmd->cmd_id); | 574 | idr_remove(&udev->commands, cmd->cmd_id); |
553 | spin_unlock(&udev->commands_lock); | 575 | spin_unlock(&udev->commands_lock); |
@@ -560,7 +582,9 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) | |||
560 | 582 | ||
561 | tcmu_handle_completion(cmd, entry); | 583 | tcmu_handle_completion(cmd, entry); |
562 | 584 | ||
563 | UPDATE_HEAD(udev->cmdr_last_cleaned, tcmu_hdr_get_len(&entry->hdr), udev->cmdr_size); | 585 | UPDATE_HEAD(udev->cmdr_last_cleaned, |
586 | tcmu_hdr_get_len(entry->hdr.len_op), | ||
587 | udev->cmdr_size); | ||
564 | 588 | ||
565 | handled++; | 589 | handled++; |
566 | } | 590 | } |
@@ -838,14 +862,14 @@ static int tcmu_configure_device(struct se_device *dev) | |||
838 | udev->data_size = TCMU_RING_SIZE - CMDR_SIZE; | 862 | udev->data_size = TCMU_RING_SIZE - CMDR_SIZE; |
839 | 863 | ||
840 | mb = udev->mb_addr; | 864 | mb = udev->mb_addr; |
841 | mb->version = 1; | 865 | mb->version = TCMU_MAILBOX_VERSION; |
842 | mb->cmdr_off = CMDR_OFF; | 866 | mb->cmdr_off = CMDR_OFF; |
843 | mb->cmdr_size = udev->cmdr_size; | 867 | mb->cmdr_size = udev->cmdr_size; |
844 | 868 | ||
845 | WARN_ON(!PAGE_ALIGNED(udev->data_off)); | 869 | WARN_ON(!PAGE_ALIGNED(udev->data_off)); |
846 | WARN_ON(udev->data_size % PAGE_SIZE); | 870 | WARN_ON(udev->data_size % PAGE_SIZE); |
847 | 871 | ||
848 | info->version = "1"; | 872 | info->version = xstr(TCMU_MAILBOX_VERSION); |
849 | 873 | ||
850 | info->mem[0].name = "tcm-user command & data buffer"; | 874 | info->mem[0].name = "tcm-user command & data buffer"; |
851 | info->mem[0].addr = (phys_addr_t) udev->mb_addr; | 875 | info->mem[0].addr = (phys_addr_t) udev->mb_addr; |
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c index 33ac39bf75e5..a600ff15dcfd 100644 --- a/drivers/target/target_core_xcopy.c +++ b/drivers/target/target_core_xcopy.c | |||
@@ -34,20 +34,12 @@ | |||
34 | #include <target/target_core_fabric.h> | 34 | #include <target/target_core_fabric.h> |
35 | #include <target/target_core_configfs.h> | 35 | #include <target/target_core_configfs.h> |
36 | 36 | ||
37 | #include "target_core_internal.h" | ||
37 | #include "target_core_pr.h" | 38 | #include "target_core_pr.h" |
38 | #include "target_core_ua.h" | 39 | #include "target_core_ua.h" |
39 | #include "target_core_xcopy.h" | 40 | #include "target_core_xcopy.h" |
40 | 41 | ||
41 | static struct workqueue_struct *xcopy_wq = NULL; | 42 | static struct workqueue_struct *xcopy_wq = NULL; |
42 | /* | ||
43 | * From target_core_device.c | ||
44 | */ | ||
45 | extern struct mutex g_device_mutex; | ||
46 | extern struct list_head g_device_list; | ||
47 | /* | ||
48 | * From target_core_configfs.c | ||
49 | */ | ||
50 | extern struct configfs_subsystem *target_core_subsystem[]; | ||
51 | 43 | ||
52 | static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf) | 44 | static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf) |
53 | { | 45 | { |
@@ -433,7 +425,7 @@ static int xcopy_pt_queue_status(struct se_cmd *se_cmd) | |||
433 | return 0; | 425 | return 0; |
434 | } | 426 | } |
435 | 427 | ||
436 | static struct target_core_fabric_ops xcopy_pt_tfo = { | 428 | static const struct target_core_fabric_ops xcopy_pt_tfo = { |
437 | .get_fabric_name = xcopy_pt_get_fabric_name, | 429 | .get_fabric_name = xcopy_pt_get_fabric_name, |
438 | .get_task_tag = xcopy_pt_get_tag, | 430 | .get_task_tag = xcopy_pt_get_tag, |
439 | .get_cmd_state = xcopy_pt_get_cmd_state, | 431 | .get_cmd_state = xcopy_pt_get_cmd_state, |
@@ -548,33 +540,22 @@ static void target_xcopy_setup_pt_port( | |||
548 | } | 540 | } |
549 | } | 541 | } |
550 | 542 | ||
551 | static int target_xcopy_init_pt_lun( | 543 | static void target_xcopy_init_pt_lun(struct se_device *se_dev, |
552 | struct xcopy_pt_cmd *xpt_cmd, | 544 | struct se_cmd *pt_cmd, bool remote_port) |
553 | struct xcopy_op *xop, | ||
554 | struct se_device *se_dev, | ||
555 | struct se_cmd *pt_cmd, | ||
556 | bool remote_port) | ||
557 | { | 545 | { |
558 | /* | 546 | /* |
559 | * Don't allocate + init an pt_cmd->se_lun if honoring local port for | 547 | * Don't allocate + init an pt_cmd->se_lun if honoring local port for |
560 | * reservations. The pt_cmd->se_lun pointer will be setup from within | 548 | * reservations. The pt_cmd->se_lun pointer will be setup from within |
561 | * target_xcopy_setup_pt_port() | 549 | * target_xcopy_setup_pt_port() |
562 | */ | 550 | */ |
563 | if (!remote_port) { | 551 | if (remote_port) { |
564 | pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH; | 552 | pr_debug("Setup emulated se_dev: %p from se_dev\n", |
565 | return 0; | 553 | pt_cmd->se_dev); |
554 | pt_cmd->se_lun = &se_dev->xcopy_lun; | ||
555 | pt_cmd->se_dev = se_dev; | ||
566 | } | 556 | } |
567 | 557 | ||
568 | pt_cmd->se_lun = &se_dev->xcopy_lun; | 558 | pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; |
569 | pt_cmd->se_dev = se_dev; | ||
570 | |||
571 | pr_debug("Setup emulated se_dev: %p from se_dev\n", pt_cmd->se_dev); | ||
572 | pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH; | ||
573 | |||
574 | pr_debug("Setup emulated se_dev: %p to pt_cmd->se_lun->lun_se_dev\n", | ||
575 | pt_cmd->se_lun->lun_se_dev); | ||
576 | |||
577 | return 0; | ||
578 | } | 559 | } |
579 | 560 | ||
580 | static int target_xcopy_setup_pt_cmd( | 561 | static int target_xcopy_setup_pt_cmd( |
@@ -592,11 +573,8 @@ static int target_xcopy_setup_pt_cmd( | |||
592 | * Setup LUN+port to honor reservations based upon xop->op_origin for | 573 | * Setup LUN+port to honor reservations based upon xop->op_origin for |
593 | * X-COPY PUSH or X-COPY PULL based upon where the CDB was received. | 574 | * X-COPY PUSH or X-COPY PULL based upon where the CDB was received. |
594 | */ | 575 | */ |
595 | rc = target_xcopy_init_pt_lun(xpt_cmd, xop, se_dev, cmd, remote_port); | 576 | target_xcopy_init_pt_lun(se_dev, cmd, remote_port); |
596 | if (rc < 0) { | 577 | |
597 | ret = rc; | ||
598 | goto out; | ||
599 | } | ||
600 | xpt_cmd->xcopy_op = xop; | 578 | xpt_cmd->xcopy_op = xop; |
601 | target_xcopy_setup_pt_port(xpt_cmd, xop, remote_port); | 579 | target_xcopy_setup_pt_port(xpt_cmd, xop, remote_port); |
602 | 580 | ||
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h index a0bcfd3e7e7d..881deb3d499a 100644 --- a/drivers/target/tcm_fc/tcm_fc.h +++ b/drivers/target/tcm_fc/tcm_fc.h | |||
@@ -129,7 +129,6 @@ struct ft_cmd { | |||
129 | 129 | ||
130 | extern struct mutex ft_lport_lock; | 130 | extern struct mutex ft_lport_lock; |
131 | extern struct fc4_prov ft_prov; | 131 | extern struct fc4_prov ft_prov; |
132 | extern struct target_fabric_configfs *ft_configfs; | ||
133 | extern unsigned int ft_debug_logging; | 132 | extern unsigned int ft_debug_logging; |
134 | 133 | ||
135 | /* | 134 | /* |
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index efdcb9663a1a..65dce1345966 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c | |||
@@ -48,7 +48,7 @@ | |||
48 | 48 | ||
49 | #include "tcm_fc.h" | 49 | #include "tcm_fc.h" |
50 | 50 | ||
51 | struct target_fabric_configfs *ft_configfs; | 51 | static const struct target_core_fabric_ops ft_fabric_ops; |
52 | 52 | ||
53 | static LIST_HEAD(ft_wwn_list); | 53 | static LIST_HEAD(ft_wwn_list); |
54 | DEFINE_MUTEX(ft_lport_lock); | 54 | DEFINE_MUTEX(ft_lport_lock); |
@@ -337,7 +337,7 @@ static struct se_portal_group *ft_add_tpg( | |||
337 | return NULL; | 337 | return NULL; |
338 | } | 338 | } |
339 | 339 | ||
340 | ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg, | 340 | ret = core_tpg_register(&ft_fabric_ops, wwn, &tpg->se_tpg, |
341 | tpg, TRANSPORT_TPG_TYPE_NORMAL); | 341 | tpg, TRANSPORT_TPG_TYPE_NORMAL); |
342 | if (ret < 0) { | 342 | if (ret < 0) { |
343 | destroy_workqueue(wq); | 343 | destroy_workqueue(wq); |
@@ -507,7 +507,9 @@ static u32 ft_tpg_get_inst_index(struct se_portal_group *se_tpg) | |||
507 | return tpg->index; | 507 | return tpg->index; |
508 | } | 508 | } |
509 | 509 | ||
510 | static struct target_core_fabric_ops ft_fabric_ops = { | 510 | static const struct target_core_fabric_ops ft_fabric_ops = { |
511 | .module = THIS_MODULE, | ||
512 | .name = "fc", | ||
511 | .get_fabric_name = ft_get_fabric_name, | 513 | .get_fabric_name = ft_get_fabric_name, |
512 | .get_fabric_proto_ident = fc_get_fabric_proto_ident, | 514 | .get_fabric_proto_ident = fc_get_fabric_proto_ident, |
513 | .tpg_get_wwn = ft_get_fabric_wwn, | 515 | .tpg_get_wwn = ft_get_fabric_wwn, |
@@ -552,62 +554,10 @@ static struct target_core_fabric_ops ft_fabric_ops = { | |||
552 | .fabric_drop_np = NULL, | 554 | .fabric_drop_np = NULL, |
553 | .fabric_make_nodeacl = &ft_add_acl, | 555 | .fabric_make_nodeacl = &ft_add_acl, |
554 | .fabric_drop_nodeacl = &ft_del_acl, | 556 | .fabric_drop_nodeacl = &ft_del_acl, |
555 | }; | ||
556 | |||
557 | static int ft_register_configfs(void) | ||
558 | { | ||
559 | struct target_fabric_configfs *fabric; | ||
560 | int ret; | ||
561 | |||
562 | /* | ||
563 | * Register the top level struct config_item_type with TCM core | ||
564 | */ | ||
565 | fabric = target_fabric_configfs_init(THIS_MODULE, "fc"); | ||
566 | if (IS_ERR(fabric)) { | ||
567 | pr_err("%s: target_fabric_configfs_init() failed!\n", | ||
568 | __func__); | ||
569 | return PTR_ERR(fabric); | ||
570 | } | ||
571 | fabric->tf_ops = ft_fabric_ops; | ||
572 | |||
573 | /* | ||
574 | * Setup default attribute lists for various fabric->tf_cit_tmpl | ||
575 | */ | ||
576 | fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = ft_wwn_attrs; | ||
577 | fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL; | ||
578 | fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; | ||
579 | fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; | ||
580 | fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; | ||
581 | fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = | ||
582 | ft_nacl_base_attrs; | ||
583 | fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; | ||
584 | fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; | ||
585 | fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; | ||
586 | /* | ||
587 | * register the fabric for use within TCM | ||
588 | */ | ||
589 | ret = target_fabric_configfs_register(fabric); | ||
590 | if (ret < 0) { | ||
591 | pr_debug("target_fabric_configfs_register() for" | ||
592 | " FC Target failed!\n"); | ||
593 | target_fabric_configfs_free(fabric); | ||
594 | return -1; | ||
595 | } | ||
596 | |||
597 | /* | ||
598 | * Setup our local pointer to *fabric. | ||
599 | */ | ||
600 | ft_configfs = fabric; | ||
601 | return 0; | ||
602 | } | ||
603 | 557 | ||
604 | static void ft_deregister_configfs(void) | 558 | .tfc_wwn_attrs = ft_wwn_attrs, |
605 | { | 559 | .tfc_tpg_nacl_base_attrs = ft_nacl_base_attrs, |
606 | if (!ft_configfs) | 560 | }; |
607 | return; | ||
608 | target_fabric_configfs_deregister(ft_configfs); | ||
609 | ft_configfs = NULL; | ||
610 | } | ||
611 | 561 | ||
612 | static struct notifier_block ft_notifier = { | 562 | static struct notifier_block ft_notifier = { |
613 | .notifier_call = ft_lport_notify | 563 | .notifier_call = ft_lport_notify |
@@ -615,15 +565,24 @@ static struct notifier_block ft_notifier = { | |||
615 | 565 | ||
616 | static int __init ft_init(void) | 566 | static int __init ft_init(void) |
617 | { | 567 | { |
618 | if (ft_register_configfs()) | 568 | int ret; |
619 | return -1; | 569 | |
620 | if (fc_fc4_register_provider(FC_TYPE_FCP, &ft_prov)) { | 570 | ret = target_register_template(&ft_fabric_ops); |
621 | ft_deregister_configfs(); | 571 | if (ret) |
622 | return -1; | 572 | goto out; |
623 | } | 573 | |
574 | ret = fc_fc4_register_provider(FC_TYPE_FCP, &ft_prov); | ||
575 | if (ret) | ||
576 | goto out_unregister_template; | ||
577 | |||
624 | blocking_notifier_chain_register(&fc_lport_notifier_head, &ft_notifier); | 578 | blocking_notifier_chain_register(&fc_lport_notifier_head, &ft_notifier); |
625 | fc_lport_iterate(ft_lport_add, NULL); | 579 | fc_lport_iterate(ft_lport_add, NULL); |
626 | return 0; | 580 | return 0; |
581 | |||
582 | out_unregister_template: | ||
583 | target_unregister_template(&ft_fabric_ops); | ||
584 | out: | ||
585 | return ret; | ||
627 | } | 586 | } |
628 | 587 | ||
629 | static void __exit ft_exit(void) | 588 | static void __exit ft_exit(void) |
@@ -632,7 +591,7 @@ static void __exit ft_exit(void) | |||
632 | &ft_notifier); | 591 | &ft_notifier); |
633 | fc_fc4_deregister_provider(FC_TYPE_FCP, &ft_prov); | 592 | fc_fc4_deregister_provider(FC_TYPE_FCP, &ft_prov); |
634 | fc_lport_iterate(ft_lport_del, NULL); | 593 | fc_lport_iterate(ft_lport_del, NULL); |
635 | ft_deregister_configfs(); | 594 | target_unregister_template(&ft_fabric_ops); |
636 | synchronize_rcu(); | 595 | synchronize_rcu(); |
637 | } | 596 | } |
638 | 597 | ||