author		Mike Christie <mchristi@redhat.com>	2017-11-28 13:40:37 -0500
committer	Nicholas Bellinger <nab@linux-iscsi.org>	2018-01-12 18:07:17 -0500
commit		6fd0ce79724dabe2cd0bd8aed111cbe94755bf88 (patch)
tree		6598e0d294210bb1e9ca238293c9f86e09baffe6 /drivers/target
parent		3e60913579b2fefa74eeb3269426e864f4afa7e7 (diff)
tcmu: prep queue_cmd_ring to be used by unmap wq
In the next patches, queue_cmd_ring will be called from both the
submitting context and the completion path. This patch changes the
queue_cmd_ring return code so that those later patches can still
report a sense_reason_t to the caller and also signal whether a
command was requeued.
Signed-off-by: Mike Christie <mchristi@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
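For quick reference, the net effect of the new contract on the submitting
path is summarized below; this is a simplified excerpt of the
tcmu_queue_cmd() hunk in the diff that follows, not additional code:

	/* Before: the queueing routine returned the TCM status directly. */
	ret = tcmu_queue_cmd_ring(tcmu_cmd);
	if (ret != TCM_NO_SENSE)
		tcmu_free_cmd(tcmu_cmd);
	return ret;

	/*
	 * After: queue_cmd_ring() returns 0 on success or -1 on failure and
	 * reports the TCM status through the scsi_err out-parameter. Keeping
	 * the return value separate from sense_reason_t leaves room for later
	 * patches to add further codes (e.g. "command was requeued").
	 */
	if (queue_cmd_ring(tcmu_cmd, &scsi_ret) < 0)
		tcmu_free_cmd(tcmu_cmd);
	return scsi_ret;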
Diffstat (limited to 'drivers/target')
-rw-r--r--	drivers/target/target_core_user.c	42
1 file changed, 27 insertions(+), 15 deletions(-)
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 8d0dc471fce8..68d1d7214eeb 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -776,8 +776,16 @@ static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd)
 	return 0;
 }
 
-static sense_reason_t
-tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
+/**
+ * queue_cmd_ring - queue cmd to ring or internally
+ * @tcmu_cmd: cmd to queue
+ * @scsi_err: TCM error code if failure (-1) returned.
+ *
+ * Returns:
+ * -1 we cannot queue internally or to the ring.
+ *  0 success
+ */
+static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err)
 {
 	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
 	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
@@ -791,8 +799,12 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 	bool copy_to_data_area;
 	size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
 
-	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
-		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	*scsi_err = TCM_NO_SENSE;
+
+	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
+		*scsi_err = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		return -1;
+	}
 
 	/*
 	 * Must be a certain minimum size for response sense info, but
@@ -819,7 +831,8 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 			"cmd ring/data area\n", command_size, data_length,
 			udev->cmdr_size, udev->data_size);
 		mutex_unlock(&udev->cmdr_lock);
-		return TCM_INVALID_CDB_FIELD;
+		*scsi_err = TCM_INVALID_CDB_FIELD;
+		return -1;
 	}
 
 	while (!is_ring_space_avail(udev, tcmu_cmd, command_size, data_length)) {
@@ -845,7 +858,8 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 		finish_wait(&udev->wait_cmdr, &__wait);
 		if (!ret) {
 			pr_warn("tcmu: command timed out\n");
-			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+			*scsi_err = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+			return -1;
 		}
 
 		mutex_lock(&udev->cmdr_lock);
@@ -902,7 +916,9 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 	if (ret) {
 		tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
 		mutex_unlock(&udev->cmdr_lock);
-		return TCM_OUT_OF_RESOURCES;
+
+		*scsi_err = TCM_OUT_OF_RESOURCES;
+		return -1;
 	}
 	entry->hdr.cmd_id = tcmu_cmd->cmd_id;
 
@@ -933,27 +949,23 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 		mod_timer(&udev->timeout, round_jiffies_up(jiffies +
 			msecs_to_jiffies(udev->cmd_time_out)));
 
-	return TCM_NO_SENSE;
+	return 0;
 }
 
 static sense_reason_t
 tcmu_queue_cmd(struct se_cmd *se_cmd)
 {
 	struct tcmu_cmd *tcmu_cmd;
-	sense_reason_t ret;
+	sense_reason_t scsi_ret;
 
 	tcmu_cmd = tcmu_alloc_cmd(se_cmd);
 	if (!tcmu_cmd)
 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 
-	ret = tcmu_queue_cmd_ring(tcmu_cmd);
-	if (ret != TCM_NO_SENSE) {
-		pr_err("TCMU: Could not queue command\n");
-
+	if (queue_cmd_ring(tcmu_cmd, &scsi_ret) < 0)
 		tcmu_free_cmd(tcmu_cmd);
-	}
 
-	return ret;
+	return scsi_ret;
 }
 
 static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
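To illustrate why the new -1/0 return plus TCM out-parameter is useful for
the planned unmap workqueue, here is a rough sketch of a deferred caller.
It is not part of this patch: run_qfull_queue(), the qfull_queue list and
the queue_entry member are hypothetical names used only to show how such a
caller would consume the new contract.

	/*
	 * Hypothetical sketch only: drain commands that could not be placed
	 * on the ring earlier and resubmit them via queue_cmd_ring().
	 */
	static void run_qfull_queue(struct tcmu_dev *udev)
	{
		struct tcmu_cmd *tcmu_cmd, *tmp;
		sense_reason_t scsi_ret;

		list_for_each_entry_safe(tcmu_cmd, tmp, &udev->qfull_queue,
					 queue_entry) {
			list_del_init(&tcmu_cmd->queue_entry);

			if (queue_cmd_ring(tcmu_cmd, &scsi_ret) < 0) {
				/* Hard failure: fail the command and log the
				 * TCM status from the out-parameter.
				 */
				pr_debug("tcmu: deferred cmd failed: %d\n",
					 scsi_ret);
				target_complete_cmd(tcmu_cmd->se_cmd,
						    SAM_STAT_CHECK_CONDITION);
				tcmu_free_cmd(tcmu_cmd);
				continue;
			}
			/*
			 * 0 means the command made it onto the ring; the
			 * changelog above anticipates a separate "requeued"
			 * code in later patches.
			 */
		}
	}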