author     Linus Torvalds <torvalds@linux-foundation.org>   2017-06-01 13:40:41 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2017-06-01 13:40:41 -0400
commit     393bcfaeb8be7f46a4cd7d673e33541ebee76b12 (patch)
tree       380c7a151a3218b6bf7c3e44612f1dced6efea51
parent     a37484638ca5e0aa7c205ecb91c9ace92e83c32c (diff)
parent     5e0cf5e6c43b9e19fc0284f69e5cd2b4a47523b0 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
Pull SCSI target fixes from Nicholas Bellinger:
"Here are the target-pending fixes for v4.12-rc4:
- ibmvscsis ABORT_TASK handling fixes that missed the v4.12 merge
window. (Bryant Ly and Michael Cyr)
- Re-add a target-core check enforcing WRITE overflow reject that was
relaxed in v4.3, to avoid unsupported iscsi-target immediate data
overflow. (nab)
- Fix a target-core-user OOPs during device removal. (MNC + Bryant
Ly)
- Fix a long-standing iscsi-target potential issue where kthread exit
did not wait for kthread_should_stop(). (Jiang Yi)
- Fix an iscsi-target v3.12.y regression OOPs involving initial login
PDU processing during asynchronous TCP connection close. (MNC +
nab)
This is a little larger than usual for an -rc4, primarily due to the
iscsi-target v3.12.y regression OOPs bug-fix.
However, it's an important patch as MNC + Hannes were both able to
trigger it using a reduced iscsi initiator login timeout combined with
a backend taking a long time to complete I/Os during iscsi login
driven session reinstatement"
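The iscsi-target kthread fix and the login-thread change below both rely on the same exit convention: a thread started with kthread_run() must not return from its thread function until kthread_stop() has been called on it, otherwise the stopping side can act on a task that has already exited. A minimal, self-contained sketch of that idiom follows (a hypothetical demo module, not the target code itself):

/* Hypothetical demo module illustrating the wait-for-stop idiom. */
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

static struct task_struct *demo_task;

static int demo_thread_fn(void *arg)
{
	/* ... per-connection work runs here until it finishes or fails ... */

	/* Park instead of returning, so a later kthread_stop() cannot race
	 * with this thread exiting on its own. */
	while (!kthread_should_stop())
		msleep(100);
	return 0;
}

static int __init demo_init(void)
{
	demo_task = kthread_run(demo_thread_fn, NULL, "demo_wait_stop");
	return PTR_ERR_OR_ZERO(demo_task);
}

static void __exit demo_exit(void)
{
	kthread_stop(demo_task);	/* safe: the thread is parked, not gone */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");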
* git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending:
iscsi-target: Always wait for kthread_should_stop() before kthread exit
iscsi-target: Fix initial login PDU asynchronous socket close OOPs
tcmu: fix crash during device removal
target: Re-add check to reject control WRITEs with overflow data
ibmvscsis: Fix the incorrect req_lim_delta
ibmvscsis: Clear left-over abort_cmd pointers
 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c  |  27
 drivers/target/iscsi/iscsi_target.c       |  30
 drivers/target/iscsi/iscsi_target_erl0.c  |   6
 drivers/target/iscsi/iscsi_target_erl0.h  |   2
 drivers/target/iscsi/iscsi_target_login.c |   4
 drivers/target/iscsi/iscsi_target_nego.c  | 194
 drivers/target/target_core_transport.c    |  23
 drivers/target/target_core_user.c         |  46
 include/target/iscsi/iscsi_target_core.h  |   1
 9 files changed, 241 insertions(+), 92 deletions(-)
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index d390325c99ec..abf6026645dd 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -1170,6 +1170,8 @@ static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi)
 	cmd = list_first_entry_or_null(&vscsi->free_cmd,
 				       struct ibmvscsis_cmd, list);
 	if (cmd) {
+		if (cmd->abort_cmd)
+			cmd->abort_cmd = NULL;
 		cmd->flags &= ~(DELAY_SEND);
 		list_del(&cmd->list);
 		cmd->iue = iue;
@@ -1774,6 +1776,7 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
 		if (cmd->abort_cmd) {
 			retry = true;
 			cmd->abort_cmd->flags &= ~(DELAY_SEND);
+			cmd->abort_cmd = NULL;
 		}
 
 		/*
@@ -1788,6 +1791,25 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
 			list_del(&cmd->list);
 			ibmvscsis_free_cmd_resources(vscsi,
 						     cmd);
+			/*
+			 * With a successfully aborted op
+			 * through LIO we want to increment the
+			 * the vscsi credit so that when we dont
+			 * send a rsp to the original scsi abort
+			 * op (h_send_crq), but the tm rsp to
+			 * the abort is sent, the credit is
+			 * correctly sent with the abort tm rsp.
+			 * We would need 1 for the abort tm rsp
+			 * and 1 credit for the aborted scsi op.
+			 * Thus we need to increment here.
+			 * Also we want to increment the credit
+			 * here because we want to make sure
+			 * cmd is actually released first
+			 * otherwise the client will think it
+			 * it can send a new cmd, and we could
+			 * find ourselves short of cmd elements.
+			 */
+			vscsi->credit += 1;
 		} else {
 			iue = cmd->iue;
 
@@ -2962,10 +2984,7 @@ static long srp_build_response(struct scsi_info *vscsi,
 
 	rsp->opcode = SRP_RSP;
 
-	if (vscsi->credit > 0 && vscsi->state == SRP_PROCESSING)
-		rsp->req_lim_delta = cpu_to_be32(vscsi->credit);
-	else
-		rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
+	rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
 	rsp->tag = cmd->rsp.tag;
 	rsp->flags = 0;
 
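The req_lim_delta change above, together with the long comment added in ibmvscsis_send_messages(), is easiest to read as a credit ledger: every SRP response returns the request slot it occupies, plus any credits banked for commands that were freed without a response of their own. A hypothetical restatement of that arithmetic (illustration only, not driver code):

#include <stdint.h>

/* Hypothetical helper mirroring the accounting described in the comment. */
uint32_t demo_req_lim_delta(uint32_t banked_credit)
{
	/* 1 slot for this response itself, plus credits banked for commands
	 * (e.g. ops successfully aborted through LIO) that never got their
	 * own SRP_RSP back to the client. */
	return 1 + banked_credit;
}

/*
 * Example: an aborted SCSI op banks one credit when it is freed, so the
 * TM (abort) response reports demo_req_lim_delta(1) == 2 -- one slot for
 * the abort response and one for the aborted op.
 */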
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 26a9bcd5ee6a..0d8f81591bed 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -3790,6 +3790,8 @@ int iscsi_target_tx_thread(void *arg)
 {
 	int ret = 0;
 	struct iscsi_conn *conn = arg;
+	bool conn_freed = false;
+
 	/*
 	 * Allow ourselves to be interrupted by SIGINT so that a
 	 * connection recovery / failure event can be triggered externally.
@@ -3815,12 +3817,14 @@ get_immediate:
 			goto transport_err;
 
 		ret = iscsit_handle_response_queue(conn);
-		if (ret == 1)
+		if (ret == 1) {
 			goto get_immediate;
-		else if (ret == -ECONNRESET)
+		} else if (ret == -ECONNRESET) {
+			conn_freed = true;
 			goto out;
-		else if (ret < 0)
+		} else if (ret < 0) {
 			goto transport_err;
+		}
 	}
 
 transport_err:
@@ -3830,8 +3834,13 @@ transport_err:
 	 * responsible for cleaning up the early connection failure.
 	 */
 	if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
-		iscsit_take_action_for_connection_exit(conn);
+		iscsit_take_action_for_connection_exit(conn, &conn_freed);
 out:
+	if (!conn_freed) {
+		while (!kthread_should_stop()) {
+			msleep(100);
+		}
+	}
 	return 0;
 }
 
@@ -4004,6 +4013,7 @@ int iscsi_target_rx_thread(void *arg)
 {
 	int rc;
 	struct iscsi_conn *conn = arg;
+	bool conn_freed = false;
 
 	/*
 	 * Allow ourselves to be interrupted by SIGINT so that a
@@ -4016,7 +4026,7 @@ int iscsi_target_rx_thread(void *arg)
 	 */
 	rc = wait_for_completion_interruptible(&conn->rx_login_comp);
 	if (rc < 0 || iscsi_target_check_conn_state(conn))
-		return 0;
+		goto out;
 
 	if (!conn->conn_transport->iscsit_get_rx_pdu)
 		return 0;
@@ -4025,7 +4035,15 @@
 
 	if (!signal_pending(current))
 		atomic_set(&conn->transport_failed, 1);
-	iscsit_take_action_for_connection_exit(conn);
+	iscsit_take_action_for_connection_exit(conn, &conn_freed);
+
+out:
+	if (!conn_freed) {
+		while (!kthread_should_stop()) {
+			msleep(100);
+		}
+	}
+
 	return 0;
 }
 
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 9a96e17bf7cd..7fe2aa73cff6 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -930,8 +930,10 @@ static void iscsit_handle_connection_cleanup(struct iscsi_conn *conn)
 	}
 }
 
-void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
+void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn, bool *conn_freed)
 {
+	*conn_freed = false;
+
 	spin_lock_bh(&conn->state_lock);
 	if (atomic_read(&conn->connection_exit)) {
 		spin_unlock_bh(&conn->state_lock);
@@ -942,6 +944,7 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
 	if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
 		spin_unlock_bh(&conn->state_lock);
 		iscsit_close_connection(conn);
+		*conn_freed = true;
 		return;
 	}
 
@@ -955,4 +958,5 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
 	spin_unlock_bh(&conn->state_lock);
 
 	iscsit_handle_connection_cleanup(conn);
+	*conn_freed = true;
 }
diff --git a/drivers/target/iscsi/iscsi_target_erl0.h b/drivers/target/iscsi/iscsi_target_erl0.h
index 60e69e2af6ed..3822d9cd1230 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.h
+++ b/drivers/target/iscsi/iscsi_target_erl0.h
@@ -15,6 +15,6 @@ extern int iscsit_stop_time2retain_timer(struct iscsi_session *);
 extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *);
 extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
 extern void iscsit_fall_back_to_erl0(struct iscsi_session *);
-extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *);
+extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *, bool *);
 
 #endif /*** ISCSI_TARGET_ERL0_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 66238477137b..92b96b51d506 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1464,5 +1464,9 @@ int iscsi_target_login_thread(void *arg)
 			break;
 	}
 
+	while (!kthread_should_stop()) {
+		msleep(100);
+	}
+
 	return 0;
 }
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 7ccc9c1cbfd1..6f88b31242b0 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -493,14 +493,60 @@ static void iscsi_target_restore_sock_callbacks(struct iscsi_conn *conn)
 
 static int iscsi_target_do_login(struct iscsi_conn *, struct iscsi_login *);
 
-static bool iscsi_target_sk_state_check(struct sock *sk)
+static bool __iscsi_target_sk_check_close(struct sock *sk)
 {
 	if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) {
-		pr_debug("iscsi_target_sk_state_check: TCP_CLOSE_WAIT|TCP_CLOSE,"
+		pr_debug("__iscsi_target_sk_check_close: TCP_CLOSE_WAIT|TCP_CLOSE,"
 			"returning FALSE\n");
-		return false;
+		return true;
 	}
-	return true;
+	return false;
+}
+
+static bool iscsi_target_sk_check_close(struct iscsi_conn *conn)
+{
+	bool state = false;
+
+	if (conn->sock) {
+		struct sock *sk = conn->sock->sk;
+
+		read_lock_bh(&sk->sk_callback_lock);
+		state = (__iscsi_target_sk_check_close(sk) ||
+			 test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
+		read_unlock_bh(&sk->sk_callback_lock);
+	}
+	return state;
+}
+
+static bool iscsi_target_sk_check_flag(struct iscsi_conn *conn, unsigned int flag)
+{
+	bool state = false;
+
+	if (conn->sock) {
+		struct sock *sk = conn->sock->sk;
+
+		read_lock_bh(&sk->sk_callback_lock);
+		state = test_bit(flag, &conn->login_flags);
+		read_unlock_bh(&sk->sk_callback_lock);
+	}
+	return state;
+}
+
+static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned int flag)
+{
+	bool state = false;
+
+	if (conn->sock) {
+		struct sock *sk = conn->sock->sk;
+
+		write_lock_bh(&sk->sk_callback_lock);
+		state = (__iscsi_target_sk_check_close(sk) ||
+			 test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
+		if (!state)
+			clear_bit(flag, &conn->login_flags);
+		write_unlock_bh(&sk->sk_callback_lock);
+	}
+	return state;
 }
 
 static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login)
@@ -540,6 +586,20 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
 
 	pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n",
 			conn, current->comm, current->pid);
+	/*
+	 * If iscsi_target_do_login_rx() has been invoked by ->sk_data_ready()
+	 * before initial PDU processing in iscsi_target_start_negotiation()
+	 * has completed, go ahead and retry until it's cleared.
+	 *
+	 * Otherwise if the TCP connection drops while this is occuring,
+	 * iscsi_target_start_negotiation() will detect the failure, call
+	 * cancel_delayed_work_sync(&conn->login_work), and cleanup the
+	 * remaining iscsi connection resources from iscsi_np process context.
+	 */
+	if (iscsi_target_sk_check_flag(conn, LOGIN_FLAGS_INITIAL_PDU)) {
+		schedule_delayed_work(&conn->login_work, msecs_to_jiffies(10));
+		return;
+	}
 
 	spin_lock(&tpg->tpg_state_lock);
 	state = (tpg->tpg_state == TPG_STATE_ACTIVE);
@@ -547,26 +607,12 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
 
 	if (!state) {
 		pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n");
-		iscsi_target_restore_sock_callbacks(conn);
-		iscsi_target_login_drop(conn, login);
-		iscsit_deaccess_np(np, tpg, tpg_np);
-		return;
+		goto err;
 	}
 
-	if (conn->sock) {
-		struct sock *sk = conn->sock->sk;
-
-		read_lock_bh(&sk->sk_callback_lock);
-		state = iscsi_target_sk_state_check(sk);
-		read_unlock_bh(&sk->sk_callback_lock);
-
-		if (!state) {
-			pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
-			iscsi_target_restore_sock_callbacks(conn);
-			iscsi_target_login_drop(conn, login);
-			iscsit_deaccess_np(np, tpg, tpg_np);
-			return;
-		}
+	if (iscsi_target_sk_check_close(conn)) {
+		pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
+		goto err;
 	}
 
 	conn->login_kworker = current;
@@ -584,34 +630,29 @@
 	flush_signals(current);
 	conn->login_kworker = NULL;
 
-	if (rc < 0) {
-		iscsi_target_restore_sock_callbacks(conn);
-		iscsi_target_login_drop(conn, login);
-		iscsit_deaccess_np(np, tpg, tpg_np);
-		return;
-	}
+	if (rc < 0)
+		goto err;
 
 	pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n",
 			conn, current->comm, current->pid);
 
 	rc = iscsi_target_do_login(conn, login);
 	if (rc < 0) {
-		iscsi_target_restore_sock_callbacks(conn);
-		iscsi_target_login_drop(conn, login);
-		iscsit_deaccess_np(np, tpg, tpg_np);
+		goto err;
 	} else if (!rc) {
-		if (conn->sock) {
-			struct sock *sk = conn->sock->sk;
-
-			write_lock_bh(&sk->sk_callback_lock);
-			clear_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags);
-			write_unlock_bh(&sk->sk_callback_lock);
-		}
+		if (iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_READ_ACTIVE))
+			goto err;
 	} else if (rc == 1) {
 		iscsi_target_nego_release(conn);
 		iscsi_post_login_handler(np, conn, zero_tsih);
 		iscsit_deaccess_np(np, tpg, tpg_np);
 	}
+	return;
+
+err:
+	iscsi_target_restore_sock_callbacks(conn);
+	iscsi_target_login_drop(conn, login);
+	iscsit_deaccess_np(np, tpg, tpg_np);
 }
 
 static void iscsi_target_do_cleanup(struct work_struct *work)
@@ -659,31 +700,54 @@ static void iscsi_target_sk_state_change(struct sock *sk)
 		orig_state_change(sk);
 		return;
 	}
+	state = __iscsi_target_sk_check_close(sk);
+	pr_debug("__iscsi_target_sk_close_change: state: %d\n", state);
+
 	if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) {
 		pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1 sk_state_change"
			" conn: %p\n", conn);
+		if (state)
+			set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
 		write_unlock_bh(&sk->sk_callback_lock);
 		orig_state_change(sk);
 		return;
 	}
-	if (test_and_set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
+	if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
 		pr_debug("Got LOGIN_FLAGS_CLOSED=1 sk_state_change conn: %p\n",
			conn);
 		write_unlock_bh(&sk->sk_callback_lock);
 		orig_state_change(sk);
 		return;
 	}
+	/*
+	 * If the TCP connection has dropped, go ahead and set LOGIN_FLAGS_CLOSED,
+	 * but only queue conn->login_work -> iscsi_target_do_login_rx()
+	 * processing if LOGIN_FLAGS_INITIAL_PDU has already been cleared.
+	 *
+	 * When iscsi_target_do_login_rx() runs, iscsi_target_sk_check_close()
+	 * will detect the dropped TCP connection from delayed workqueue context.
+	 *
+	 * If LOGIN_FLAGS_INITIAL_PDU is still set, which means the initial
+	 * iscsi_target_start_negotiation() is running, iscsi_target_do_login()
+	 * via iscsi_target_sk_check_close() or iscsi_target_start_negotiation()
+	 * via iscsi_target_sk_check_and_clear() is responsible for detecting the
+	 * dropped TCP connection in iscsi_np process context, and cleaning up
+	 * the remaining iscsi connection resources.
+	 */
+	if (state) {
+		pr_debug("iscsi_target_sk_state_change got failed state\n");
+		set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
+		state = test_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
+		write_unlock_bh(&sk->sk_callback_lock);
 
-	state = iscsi_target_sk_state_check(sk);
-	write_unlock_bh(&sk->sk_callback_lock);
-
-	pr_debug("iscsi_target_sk_state_change: state: %d\n", state);
+		orig_state_change(sk);
 
-	if (!state) {
-		pr_debug("iscsi_target_sk_state_change got failed state\n");
-		schedule_delayed_work(&conn->login_cleanup_work, 0);
+		if (!state)
+			schedule_delayed_work(&conn->login_work, 0);
 		return;
 	}
+	write_unlock_bh(&sk->sk_callback_lock);
+
 	orig_state_change(sk);
 }
 
@@ -946,6 +1010,15 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
 		if (iscsi_target_handle_csg_one(conn, login) < 0)
 			return -1;
 		if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
+			/*
+			 * Check to make sure the TCP connection has not
+			 * dropped asynchronously while session reinstatement
+			 * was occuring in this kthread context, before
+			 * transitioning to full feature phase operation.
+			 */
+			if (iscsi_target_sk_check_close(conn))
+				return -1;
+
 			login->tsih = conn->sess->tsih;
 			login->login_complete = 1;
 			iscsi_target_restore_sock_callbacks(conn);
@@ -972,21 +1045,6 @@
 		break;
 	}
 
-	if (conn->sock) {
-		struct sock *sk = conn->sock->sk;
-		bool state;
-
-		read_lock_bh(&sk->sk_callback_lock);
-		state = iscsi_target_sk_state_check(sk);
-		read_unlock_bh(&sk->sk_callback_lock);
-
-		if (!state) {
-			pr_debug("iscsi_target_do_login() failed state for"
-				" conn: %p\n", conn);
-			return -1;
-		}
-	}
-
 	return 0;
 }
 
@@ -1255,10 +1313,22 @@ int iscsi_target_start_negotiation(
 
 		write_lock_bh(&sk->sk_callback_lock);
 		set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
+		set_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
 		write_unlock_bh(&sk->sk_callback_lock);
 	}
-
+	/*
+	 * If iscsi_target_do_login returns zero to signal more PDU
+	 * exchanges are required to complete the login, go ahead and
+	 * clear LOGIN_FLAGS_INITIAL_PDU but only if the TCP connection
+	 * is still active.
+	 *
+	 * Otherwise if TCP connection dropped asynchronously, go ahead
+	 * and perform connection cleanup now.
+	 */
 	ret = iscsi_target_do_login(conn, login);
+	if (!ret && iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU))
+		ret = -1;
+
 	if (ret < 0) {
 		cancel_delayed_work_sync(&conn->login_work);
 		cancel_delayed_work_sync(&conn->login_cleanup_work);
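Taken together, the negotiation changes implement a small handshake between iscsi_np process context and the ->sk_state_change() callback: LOGIN_FLAGS_INITIAL_PDU is set before the first login PDU is processed, and only the side that observes the TCP close while the other side is not busy gets to queue the cleanup. A condensed, hypothetical model of that handshake (names and context simplified, not the patch code; the real callback already runs under sk_callback_lock, which is elided in the second function):

#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <net/sock.h>
#include <net/tcp_states.h>

#define DEMO_FLAG_INITIAL_PDU	0
#define DEMO_FLAG_CLOSED	1

struct demo_login {
	struct sock *sk;
	unsigned long flags;
	struct delayed_work login_work;
};

/* Process context, after the first PDU exchange: clear INITIAL_PDU only
 * while the socket is still open; a true return means "treat this as a
 * failed login and clean up right here". */
static bool demo_clear_initial_pdu(struct demo_login *l)
{
	struct sock *sk = l->sk;
	bool closed;

	write_lock_bh(&sk->sk_callback_lock);
	closed = sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE ||
		 test_bit(DEMO_FLAG_CLOSED, &l->flags);
	if (!closed)
		clear_bit(DEMO_FLAG_INITIAL_PDU, &l->flags);
	write_unlock_bh(&sk->sk_callback_lock);
	return closed;
}

/* ->sk_state_change() path when the TCP connection drops during login:
 * mark it closed, but only queue the rx work if initial PDU handling has
 * already finished; otherwise the process context above owns the cleanup. */
static void demo_state_change_closed(struct demo_login *l)
{
	set_bit(DEMO_FLAG_CLOSED, &l->flags);
	if (!test_bit(DEMO_FLAG_INITIAL_PDU, &l->flags))
		schedule_delayed_work(&l->login_work, 0);
}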
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 37f57357d4a0..6025935036c9 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1160,15 +1160,28 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
 	if (cmd->unknown_data_length) {
 		cmd->data_length = size;
 	} else if (size != cmd->data_length) {
-		pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
+		pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:"
 			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
 			" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
 				cmd->data_length, size, cmd->t_task_cdb[0]);
 
-		if (cmd->data_direction == DMA_TO_DEVICE &&
-		    cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
-			pr_err("Rejecting underflow/overflow WRITE data\n");
-			return TCM_INVALID_CDB_FIELD;
+		if (cmd->data_direction == DMA_TO_DEVICE) {
+			if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
+				pr_err_ratelimited("Rejecting underflow/overflow"
+						   " for WRITE data CDB\n");
+				return TCM_INVALID_CDB_FIELD;
+			}
+			/*
+			 * Some fabric drivers like iscsi-target still expect to
+			 * always reject overflow writes. Reject this case until
+			 * full fabric driver level support for overflow writes
+			 * is introduced tree-wide.
+			 */
+			if (size > cmd->data_length) {
+				pr_err_ratelimited("Rejecting overflow for"
+						   " WRITE control CDB\n");
+				return TCM_INVALID_CDB_FIELD;
+			}
 		}
 		/*
 		 * Reject READ_* or WRITE_* with overflow/underflow for
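In plain terms, the re-added check keeps the existing reject for data-transfer WRITE CDBs with any length mismatch, and additionally rejects overflow on control WRITEs, so a fabric such as iscsi-target never sees more immediate data than the CDB allows. A hypothetical restatement of that decision (illustration only, not the target-core code), where fabric_data_len is the fabric-supplied expected transfer length and cdb_data_len is the length implied by the CDB:

#include <stdbool.h>

bool demo_reject_write(bool is_write, bool is_data_cdb,
		       unsigned int fabric_data_len, unsigned int cdb_data_len)
{
	if (!is_write || fabric_data_len == cdb_data_len)
		return false;
	if (is_data_cdb)
		return true;			/* any underflow or overflow */
	return fabric_data_len > cdb_data_len;	/* overflow-only reject for control CDBs */
}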
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 9045837f748b..beb5f098f32d 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -97,7 +97,7 @@ struct tcmu_hba {
 
 struct tcmu_dev {
 	struct list_head node;
-
+	struct kref kref;
 	struct se_device se_dev;
 
 	char *name;
@@ -969,6 +969,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
 	udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
 	if (!udev)
 		return NULL;
+	kref_init(&udev->kref);
 
 	udev->name = kstrdup(name, GFP_KERNEL);
 	if (!udev->name) {
@@ -1145,6 +1146,24 @@ static int tcmu_open(struct uio_info *info, struct inode *inode)
 	return 0;
 }
 
+static void tcmu_dev_call_rcu(struct rcu_head *p)
+{
+	struct se_device *dev = container_of(p, struct se_device, rcu_head);
+	struct tcmu_dev *udev = TCMU_DEV(dev);
+
+	kfree(udev->uio_info.name);
+	kfree(udev->name);
+	kfree(udev);
+}
+
+static void tcmu_dev_kref_release(struct kref *kref)
+{
+	struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
+	struct se_device *dev = &udev->se_dev;
+
+	call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
+}
+
 static int tcmu_release(struct uio_info *info, struct inode *inode)
 {
 	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
@@ -1152,7 +1171,8 @@ static int tcmu_release(struct uio_info *info, struct inode *inode)
 	clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);
 
 	pr_debug("close\n");
-
+	/* release ref from configure */
+	kref_put(&udev->kref, tcmu_dev_kref_release);
 	return 0;
 }
 
@@ -1272,6 +1292,12 @@ static int tcmu_configure_device(struct se_device *dev)
 	dev->dev_attrib.hw_max_sectors = 128;
 	dev->dev_attrib.hw_queue_depth = 128;
 
+	/*
+	 * Get a ref incase userspace does a close on the uio device before
+	 * LIO has initiated tcmu_free_device.
+	 */
+	kref_get(&udev->kref);
+
 	ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name,
 				 udev->uio_info.uio_dev->minor);
 	if (ret)
@@ -1284,11 +1310,13 @@ static int tcmu_configure_device(struct se_device *dev)
 	return 0;
 
 err_netlink:
+	kref_put(&udev->kref, tcmu_dev_kref_release);
 	uio_unregister_device(&udev->uio_info);
 err_register:
 	vfree(udev->mb_addr);
 err_vzalloc:
 	kfree(info->name);
+	info->name = NULL;
 
 	return ret;
 }
@@ -1302,14 +1330,6 @@ static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
 	return -EINVAL;
 }
 
-static void tcmu_dev_call_rcu(struct rcu_head *p)
-{
-	struct se_device *dev = container_of(p, struct se_device, rcu_head);
-	struct tcmu_dev *udev = TCMU_DEV(dev);
-
-	kfree(udev);
-}
-
 static bool tcmu_dev_configured(struct tcmu_dev *udev)
 {
 	return udev->uio_info.uio_dev ? true : false;
@@ -1364,10 +1384,10 @@ static void tcmu_free_device(struct se_device *dev)
 			   udev->uio_info.uio_dev->minor);
 
 		uio_unregister_device(&udev->uio_info);
-		kfree(udev->uio_info.name);
-		kfree(udev->name);
 	}
-	call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
+
+	/* release ref from init */
+	kref_put(&udev->kref, tcmu_dev_kref_release);
 }
 
 enum {
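The tcmu fix replaces the direct call_rcu() free in tcmu_free_device() with reference counting, so the backing structure survives until both LIO and the userspace uio handle have dropped their references. A hypothetical sketch of the same kref-plus-RCU release pattern (simplified names, not the tcmu code):

#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_dev {
	struct kref kref;
	struct rcu_head rcu_head;
	char *name;
};

static void demo_dev_free_rcu(struct rcu_head *p)
{
	struct demo_dev *d = container_of(p, struct demo_dev, rcu_head);

	kfree(d->name);
	kfree(d);
}

static void demo_dev_release(struct kref *kref)
{
	struct demo_dev *d = container_of(kref, struct demo_dev, kref);

	/* Last reference gone: defer the actual free past the RCU grace period. */
	call_rcu(&d->rcu_head, demo_dev_free_rcu);
}

/*
 * Lifecycle: kref_init() in alloc holds the initial reference for LIO;
 * kref_get() in configure covers the userspace/uio side; the uio release
 * and the free_device path each drop one with
 * kref_put(&d->kref, demo_dev_release), and whichever runs last frees it.
 */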
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 275581d483dd..5f17fb770477 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -557,6 +557,7 @@ struct iscsi_conn {
 #define LOGIN_FLAGS_READ_ACTIVE		1
 #define LOGIN_FLAGS_CLOSED		2
 #define LOGIN_FLAGS_READY		4
+#define LOGIN_FLAGS_INITIAL_PDU		8
 	unsigned long login_flags;
 	struct delayed_work login_work;
 	struct delayed_work login_cleanup_work;