author		Christoph Hellwig <hch@infradead.org>	2012-05-20 14:34:44 -0400
committer	Nicholas Bellinger <nab@linux-iscsi.org>	2012-07-16 20:29:11 -0400
commit		5f41a31d0a49a014adb1588edd0cc7f7e30cc55b (patch)
tree		6ee74b84c59a0473aba1257d2e762fa6935c4066 /drivers/target
parent		a3785c8740c5b56b49ec336b59be996393d83332 (diff)
target: remove the execute list
Since "target: Drop se_device TCQ queue_depth usage from I/O path" we always submit all commands (or back then, tasks) from __transport_execute_tasks. That means the the execute list has lots its purpose, as we can simply submit the commands that are restarted in transport_complete_task_attr directly while we walk the list. In fact doing so also solves a race in the way it currently walks to delayed_cmd_list as well. Signed-off-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Diffstat (limited to 'drivers/target')
-rw-r--r--  drivers/target/target_core_internal.h  |   1
-rw-r--r--  drivers/target/target_core_tmr.c        |   3
-rw-r--r--  drivers/target/target_core_transport.c  | 330
3 files changed, 98 insertions, 236 deletions
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 031c2889f34c..88f69e0ba515 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -93,7 +93,6 @@ void release_se_kmem_caches(void);
 u32 scsi_get_new_index(scsi_index_t);
 void transport_subsystem_check_init(void);
 void transport_cmd_finish_abort(struct se_cmd *, int);
-void __target_remove_from_execute_list(struct se_cmd *);
 unsigned char *transport_dump_cmd_direction(struct se_cmd *);
 void transport_dump_dev_state(struct se_device *, char *, int *);
 void transport_dump_dev_info(struct se_device *, struct se_lun *,
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 84caf1bed9a3..4185db109edf 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -295,9 +295,6 @@ static void core_tmr_drain_state_list(
 
 		list_move_tail(&cmd->state_list, &drain_task_list);
 		cmd->state_active = false;
-
-		if (!list_empty(&cmd->execute_list))
-			__target_remove_from_execute_list(cmd);
 	}
 	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 7cfb519a83f9..1c53fcec1133 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -68,7 +68,6 @@ struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
 
 static int transport_generic_write_pending(struct se_cmd *);
 static int transport_processing_thread(void *param);
-static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *);
 static void transport_complete_task_attr(struct se_cmd *cmd);
 static void transport_handle_queue_full(struct se_cmd *cmd,
 		struct se_device *dev);
@@ -742,65 +741,6 @@ static void target_add_to_state_list(struct se_cmd *cmd)
 	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 }
 
-static void __target_add_to_execute_list(struct se_cmd *cmd)
-{
-	struct se_device *dev = cmd->se_dev;
-	bool head_of_queue = false;
-
-	if (!list_empty(&cmd->execute_list))
-		return;
-
-	if (dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED &&
-	    cmd->sam_task_attr == MSG_HEAD_TAG)
-		head_of_queue = true;
-
-	if (head_of_queue)
-		list_add(&cmd->execute_list, &dev->execute_list);
-	else
-		list_add_tail(&cmd->execute_list, &dev->execute_list);
-
-	atomic_inc(&dev->execute_tasks);
-
-	if (cmd->state_active)
-		return;
-
-	if (head_of_queue)
-		list_add(&cmd->state_list, &dev->state_list);
-	else
-		list_add_tail(&cmd->state_list, &dev->state_list);
-
-	cmd->state_active = true;
-}
-
-static void target_add_to_execute_list(struct se_cmd *cmd)
-{
-	unsigned long flags;
-	struct se_device *dev = cmd->se_dev;
-
-	spin_lock_irqsave(&dev->execute_task_lock, flags);
-	__target_add_to_execute_list(cmd);
-	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
-}
-
-void __target_remove_from_execute_list(struct se_cmd *cmd)
-{
-	list_del_init(&cmd->execute_list);
-	atomic_dec(&cmd->se_dev->execute_tasks);
-}
-
-static void target_remove_from_execute_list(struct se_cmd *cmd)
-{
-	struct se_device *dev = cmd->se_dev;
-	unsigned long flags;
-
-	if (WARN_ON(list_empty(&cmd->execute_list)))
-		return;
-
-	spin_lock_irqsave(&dev->execute_task_lock, flags);
-	__target_remove_from_execute_list(cmd);
-	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
-}
-
 /*
  * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
  */
@@ -874,8 +814,7 @@ void transport_dump_dev_state(
 		break;
 	}
 
-	*bl += sprintf(b + *bl, " Execute/Max Queue Depth: %d/%d",
-		atomic_read(&dev->execute_tasks), dev->queue_depth);
+	*bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth);
 	*bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n",
 		dev->se_sub_dev->se_dev_attrib.block_size,
 		dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
@@ -1222,7 +1161,6 @@ struct se_device *transport_add_device_to_core_hba(
 	INIT_LIST_HEAD(&dev->dev_list);
 	INIT_LIST_HEAD(&dev->dev_sep_list);
 	INIT_LIST_HEAD(&dev->dev_tmr_list);
-	INIT_LIST_HEAD(&dev->execute_list);
 	INIT_LIST_HEAD(&dev->delayed_cmd_list);
 	INIT_LIST_HEAD(&dev->state_list);
 	INIT_LIST_HEAD(&dev->qf_cmd_list);
@@ -1382,7 +1320,6 @@ void transport_init_se_cmd(
 	INIT_LIST_HEAD(&cmd->se_qf_node);
 	INIT_LIST_HEAD(&cmd->se_queue_node);
 	INIT_LIST_HEAD(&cmd->se_cmd_list);
-	INIT_LIST_HEAD(&cmd->execute_list);
 	INIT_LIST_HEAD(&cmd->state_list);
 	init_completion(&cmd->transport_lun_fe_stop_comp);
 	init_completion(&cmd->transport_lun_stop_comp);
@@ -1926,152 +1863,92 @@ queue_full:
 }
 EXPORT_SYMBOL(transport_generic_request_failure);
 
-/*
- * Called from Fabric Module context from transport_execute_tasks()
- *
- * The return of this function determins if the tasks from struct se_cmd
- * get added to the execution queue in transport_execute_tasks(),
- * or are added to the delayed or ordered lists here.
- */
-static inline int transport_execute_task_attr(struct se_cmd *cmd)
+static void __target_execute_cmd(struct se_cmd *cmd)
 {
-	if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
-		return 1;
+	int error;
+
+	spin_lock_irq(&cmd->t_state_lock);
+	cmd->transport_state |= (CMD_T_BUSY|CMD_T_SENT);
+	spin_unlock_irq(&cmd->t_state_lock);
+
+	if (cmd->execute_cmd)
+		error = cmd->execute_cmd(cmd);
+	else {
+		error = cmd->se_dev->transport->execute_cmd(cmd, cmd->t_data_sg,
+				cmd->t_data_nents, cmd->data_direction);
+	}
+
+	if (error) {
+		spin_lock_irq(&cmd->t_state_lock);
+		cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
+		spin_unlock_irq(&cmd->t_state_lock);
+
+		transport_generic_request_failure(cmd);
+	}
+}
+
+static void target_execute_cmd(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+
+	if (transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING))
+		return;
+
+	if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
+		goto execute;
+
 	/*
 	 * Check for the existence of HEAD_OF_QUEUE, and if true return 1
 	 * to allow the passed struct se_cmd list of tasks to the front of the list.
 	 */
-	if (cmd->sam_task_attr == MSG_HEAD_TAG) {
-		pr_debug("Added HEAD_OF_QUEUE for CDB:"
-			" 0x%02x, se_ordered_id: %u\n",
-			cmd->t_task_cdb[0],
-			cmd->se_ordered_id);
-		return 1;
-	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
-		atomic_inc(&cmd->se_dev->dev_ordered_sync);
+	switch (cmd->sam_task_attr) {
+	case MSG_HEAD_TAG:
+		pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, "
+			 "se_ordered_id: %u\n",
+			 cmd->t_task_cdb[0], cmd->se_ordered_id);
+		goto execute;
+	case MSG_ORDERED_TAG:
+		atomic_inc(&dev->dev_ordered_sync);
 		smp_mb__after_atomic_inc();
 
-		pr_debug("Added ORDERED for CDB: 0x%02x to ordered"
-			" list, se_ordered_id: %u\n",
-			cmd->t_task_cdb[0],
-			cmd->se_ordered_id);
+		pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, "
+			 " se_ordered_id: %u\n",
+			 cmd->t_task_cdb[0], cmd->se_ordered_id);
+
 		/*
-		 * Add ORDERED command to tail of execution queue if
-		 * no other older commands exist that need to be
-		 * completed first.
+		 * Execute an ORDERED command if no other older commands
+		 * exist that need to be completed first.
 		 */
-		if (!atomic_read(&cmd->se_dev->simple_cmds))
-			return 1;
-	} else {
+		if (!atomic_read(&dev->simple_cmds))
+			goto execute;
+		break;
+	default:
 		/*
 		 * For SIMPLE and UNTAGGED Task Attribute commands
 		 */
-		atomic_inc(&cmd->se_dev->simple_cmds);
+		atomic_inc(&dev->simple_cmds);
 		smp_mb__after_atomic_inc();
+		break;
 	}
-	/*
-	 * Otherwise if one or more outstanding ORDERED task attribute exist,
-	 * add the dormant task(s) built for the passed struct se_cmd to the
-	 * execution queue and become in Active state for this struct se_device.
-	 */
-	if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) {
-		/*
-		 * Otherwise, add cmd w/ tasks to delayed cmd queue that
-		 * will be drained upon completion of HEAD_OF_QUEUE task.
-		 */
-		spin_lock(&cmd->se_dev->delayed_cmd_lock);
+
+	if (atomic_read(&dev->dev_ordered_sync) != 0) {
+		spin_lock(&dev->delayed_cmd_lock);
 		cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
-		list_add_tail(&cmd->se_delayed_node,
-				&cmd->se_dev->delayed_cmd_list);
-		spin_unlock(&cmd->se_dev->delayed_cmd_lock);
+		list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
+		spin_unlock(&dev->delayed_cmd_lock);
 
 		pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
 			" delayed CMD list, se_ordered_id: %u\n",
 			cmd->t_task_cdb[0], cmd->sam_task_attr,
 			cmd->se_ordered_id);
-		/*
-		 * Return zero to let transport_execute_tasks() know
-		 * not to add the delayed tasks to the execution list.
-		 */
-		return 0;
+		return;
 	}
-	/*
-	 * Otherwise, no ORDERED task attributes exist..
-	 */
-	return 1;
-}
 
-/*
- * Called from fabric module context in transport_generic_new_cmd() and
- * transport_generic_process_write()
- */
-static void transport_execute_tasks(struct se_cmd *cmd)
-{
-	int add_tasks;
-	struct se_device *se_dev = cmd->se_dev;
+execute:
 	/*
-	 * Call transport_cmd_check_stop() to see if a fabric exception
-	 * has occurred that prevents execution.
+	 * Otherwise, no ORDERED task attributes exist..
 	 */
-	if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) {
-		/*
-		 * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
-		 * attribute for the tasks of the received struct se_cmd CDB
-		 */
-		add_tasks = transport_execute_task_attr(cmd);
-		if (add_tasks) {
-			__transport_execute_tasks(se_dev, cmd);
-			return;
-		}
-	}
-	__transport_execute_tasks(se_dev, NULL);
-}
-
-static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd)
-{
-	int error;
-	struct se_cmd *cmd = NULL;
-	unsigned long flags;
-
-check_depth:
-	spin_lock_irq(&dev->execute_task_lock);
-	if (new_cmd != NULL)
-		__target_add_to_execute_list(new_cmd);
-
-	if (list_empty(&dev->execute_list)) {
-		spin_unlock_irq(&dev->execute_task_lock);
-		return 0;
-	}
-	cmd = list_first_entry(&dev->execute_list, struct se_cmd, execute_list);
-	__target_remove_from_execute_list(cmd);
-	spin_unlock_irq(&dev->execute_task_lock);
-
-	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	cmd->transport_state |= CMD_T_BUSY;
-	cmd->transport_state |= CMD_T_SENT;
-
-	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
-	if (cmd->execute_cmd)
-		error = cmd->execute_cmd(cmd);
-	else {
-		error = dev->transport->execute_cmd(cmd, cmd->t_data_sg,
-				cmd->t_data_nents, cmd->data_direction);
-	}
-
-	if (error != 0) {
-		spin_lock_irqsave(&cmd->t_state_lock, flags);
-		cmd->transport_state &= ~CMD_T_BUSY;
-		cmd->transport_state &= ~CMD_T_SENT;
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
-		transport_generic_request_failure(cmd);
-	}
-
-	new_cmd = NULL;
-	goto check_depth;
-
-	return 0;
+	__target_execute_cmd(cmd);
 }
 
 /*
@@ -2130,14 +2007,39 @@ out:
 }
 
 /*
+ * Process all commands up to the last received ORDERED task attribute which
+ * requires another blocking boundary
+ */
+static void target_restart_delayed_cmds(struct se_device *dev)
+{
+	for (;;) {
+		struct se_cmd *cmd;
+
+		spin_lock(&dev->delayed_cmd_lock);
+		if (list_empty(&dev->delayed_cmd_list)) {
+			spin_unlock(&dev->delayed_cmd_lock);
+			break;
+		}
+
+		cmd = list_entry(dev->delayed_cmd_list.next,
+				 struct se_cmd, se_delayed_node);
+		list_del(&cmd->se_delayed_node);
+		spin_unlock(&dev->delayed_cmd_lock);
+
+		__target_execute_cmd(cmd);
+
+		if (cmd->sam_task_attr == MSG_ORDERED_TAG)
+			break;
+	}
+}
+
+/*
  * Called from I/O completion to determine which dormant/delayed
  * and ordered cmds need to have their tasks added to the execution queue.
  */
 static void transport_complete_task_attr(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
-	struct se_cmd *cmd_p, *cmd_tmp;
-	int new_active_tasks = 0;
 
 	if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
 		atomic_dec(&dev->simple_cmds);
@@ -2159,38 +2061,8 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
 		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
 			" %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
 	}
-	/*
-	 * Process all commands up to the last received
-	 * ORDERED task attribute which requires another blocking
-	 * boundary
-	 */
-	spin_lock(&dev->delayed_cmd_lock);
-	list_for_each_entry_safe(cmd_p, cmd_tmp,
-			&dev->delayed_cmd_list, se_delayed_node) {
-
-		list_del(&cmd_p->se_delayed_node);
-		spin_unlock(&dev->delayed_cmd_lock);
-
-		pr_debug("Calling add_tasks() for"
-			" cmd_p: 0x%02x Task Attr: 0x%02x"
-			" Dormant -> Active, se_ordered_id: %u\n",
-			cmd_p->t_task_cdb[0],
-			cmd_p->sam_task_attr, cmd_p->se_ordered_id);
-
-		target_add_to_execute_list(cmd_p);
-		new_active_tasks++;
 
-		spin_lock(&dev->delayed_cmd_lock);
-		if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
-			break;
-	}
-	spin_unlock(&dev->delayed_cmd_lock);
-	/*
-	 * If new tasks have become active, wake up the transport thread
-	 * to do the processing of the Active tasks.
-	 */
-	if (new_active_tasks != 0)
-		wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
+	target_restart_delayed_cmds(dev);
 }
 
 static void transport_complete_qf(struct se_cmd *cmd)
@@ -2612,15 +2484,13 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
 	 *
 	 * The command will be added to the execution queue after its write
 	 * data has arrived.
-	 */
-	if (cmd->data_direction == DMA_TO_DEVICE) {
-		target_add_to_state_list(cmd);
-		return transport_generic_write_pending(cmd);
-	}
-	/*
+	 *
 	 * Everything else but a WRITE, add the command to the execution queue.
 	 */
-	transport_execute_tasks(cmd);
+	target_add_to_state_list(cmd);
+	if (cmd->data_direction == DMA_TO_DEVICE)
+		return transport_generic_write_pending(cmd);
+	target_execute_cmd(cmd);
 	return 0;
 
 out_fail:
@@ -2636,7 +2506,7 @@ EXPORT_SYMBOL(transport_generic_new_cmd);
  */
 void transport_generic_process_write(struct se_cmd *cmd)
 {
-	transport_execute_tasks(cmd);
+	target_execute_cmd(cmd);
 }
 EXPORT_SYMBOL(transport_generic_process_write);
 
@@ -2872,12 +2742,8 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
 	    (cmd->transport_state & CMD_T_SENT)) {
 		if (!target_stop_cmd(cmd, &flags))
 			ret++;
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-	} else {
-		spin_unlock_irqrestore(&cmd->t_state_lock,
-				flags);
-		target_remove_from_execute_list(cmd);
 	}
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 	pr_debug("ConfigFS: cmd: %p stop tasks ret:"
 		" %d\n", cmd, ret);