summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorBart Van Assche <bvanassche@acm.org>2018-11-27 18:52:03 -0500
committerMartin K. Petersen <martin.petersen@oracle.com>2018-12-07 21:22:55 -0500
commit2c9fa49e100f962af988f1c0529231bf14905cda (patch)
tree2b993d49e16686f7366fb7315aa257a145ef1c59
parentaaa00cc93c1d0fd2693a76ea2ba375ea1ac1a7f3 (diff)
scsi: target/core: Make ABORT and LUN RESET handling synchronous
Instead of invoking target driver callback functions from the context that handles an abort or LUN RESET task management function, only set the abort flag from that context and perform the actual abort handling from the context of the regular command processing flow. This approach has the advantage that the task management code becomes much easier to read and to verify since the number of potential race conditions against the command processing flow is strongly reduced. This patch has been tested by running the following two shell commands concurrently for about ten minutes for both the iSCSI and the SRP target drivers ($dev is an initiator device node connected with storage provided by the target driver under test): * fio with data verification enabled on a filesystem mounted on top of $dev. * while true; do sg_reset -d $dev; echo -n .; sleep .1; done Cc: Nicholas Bellinger <nab@linux-iscsi.org> Cc: Mike Christie <mchristi@redhat.com> Cc: Christoph Hellwig <hch@lst.de> Cc: David Disseldorp <ddiss@suse.de> Cc: Hannes Reinecke <hare@suse.de> Signed-off-by: Bart Van Assche <bvanassche@acm.org> Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
-rw-r--r--drivers/target/target_core_internal.h2
-rw-r--r--drivers/target/target_core_tmr.c49
-rw-r--r--drivers/target/target_core_transport.c230
-rw-r--r--include/target/target_core_fabric.h1
4 files changed, 148 insertions(+), 134 deletions(-)
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 0c6635587930..853344415963 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -138,7 +138,6 @@ int init_se_kmem_caches(void);
138void release_se_kmem_caches(void); 138void release_se_kmem_caches(void);
139u32 scsi_get_new_index(scsi_index_t); 139u32 scsi_get_new_index(scsi_index_t);
140void transport_subsystem_check_init(void); 140void transport_subsystem_check_init(void);
141int transport_cmd_finish_abort(struct se_cmd *);
142unsigned char *transport_dump_cmd_direction(struct se_cmd *); 141unsigned char *transport_dump_cmd_direction(struct se_cmd *);
143void transport_dump_dev_state(struct se_device *, char *, int *); 142void transport_dump_dev_state(struct se_device *, char *, int *);
144void transport_dump_dev_info(struct se_device *, struct se_lun *, 143void transport_dump_dev_info(struct se_device *, struct se_lun *,
@@ -148,7 +147,6 @@ int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int);
148int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int); 147int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int);
149int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int); 148int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
150void transport_clear_lun_ref(struct se_lun *); 149void transport_clear_lun_ref(struct se_lun *);
151void transport_send_task_abort(struct se_cmd *);
152sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size); 150sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
153void target_qf_do_work(struct work_struct *work); 151void target_qf_do_work(struct work_struct *work);
154bool target_check_wce(struct se_device *dev); 152bool target_check_wce(struct se_device *dev);
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 71950355074e..ad0061e09d4c 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -171,11 +171,15 @@ void core_tmr_abort_task(
171 171
172 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 172 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
173 173
174 cancel_work_sync(&se_cmd->work); 174 /*
175 transport_wait_for_tasks(se_cmd); 175 * Ensure that this ABORT request is visible to the LU RESET
176 * code.
177 */
178 if (!tmr->tmr_dev)
179 WARN_ON_ONCE(transport_lookup_tmr_lun(tmr->task_cmd,
180 se_cmd->orig_fe_lun) < 0);
176 181
177 if (!transport_cmd_finish_abort(se_cmd)) 182 target_put_cmd_and_wait(se_cmd);
178 target_put_sess_cmd(se_cmd);
179 183
180 printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for" 184 printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
181 " ref_tag: %llu\n", ref_tag); 185 " ref_tag: %llu\n", ref_tag);
@@ -269,14 +273,28 @@ static void core_tmr_drain_tmr_list(
269 (preempt_and_abort_list) ? "Preempt" : "", tmr_p, 273 (preempt_and_abort_list) ? "Preempt" : "", tmr_p,
270 tmr_p->function, tmr_p->response, cmd->t_state); 274 tmr_p->function, tmr_p->response, cmd->t_state);
271 275
272 cancel_work_sync(&cmd->work); 276 target_put_cmd_and_wait(cmd);
273 transport_wait_for_tasks(cmd);
274
275 if (!transport_cmd_finish_abort(cmd))
276 target_put_sess_cmd(cmd);
277 } 277 }
278} 278}
279 279
280/**
281 * core_tmr_drain_state_list() - abort SCSI commands associated with a device
282 *
283 * @dev: Device for which to abort outstanding SCSI commands.
284 * @prout_cmd: Pointer to the SCSI PREEMPT AND ABORT if this function is called
285 * to realize the PREEMPT AND ABORT functionality.
286 * @tmr_sess: Session through which the LUN RESET has been received.
287 * @tas: Task Aborted Status (TAS) bit from the SCSI control mode page.
288 * A quote from SPC-4, paragraph "7.5.10 Control mode page":
289 * "A task aborted status (TAS) bit set to zero specifies that
290 * aborted commands shall be terminated by the device server
291 * without any response to the application client. A TAS bit set
292 * to one specifies that commands aborted by the actions of an I_T
293 * nexus other than the I_T nexus on which the command was
294 * received shall be completed with TASK ABORTED status."
295 * @preempt_and_abort_list: For the PREEMPT AND ABORT functionality, a list
296 * with registrations that will be preempted.
297 */
280static void core_tmr_drain_state_list( 298static void core_tmr_drain_state_list(
281 struct se_device *dev, 299 struct se_device *dev,
282 struct se_cmd *prout_cmd, 300 struct se_cmd *prout_cmd,
@@ -351,18 +369,7 @@ static void core_tmr_drain_state_list(
351 cmd->tag, (preempt_and_abort_list) ? "preempt" : "", 369 cmd->tag, (preempt_and_abort_list) ? "preempt" : "",
352 cmd->pr_res_key); 370 cmd->pr_res_key);
353 371
354 /* 372 target_put_cmd_and_wait(cmd);
355 * If the command may be queued onto a workqueue cancel it now.
356 *
357 * This is equivalent to removal from the execute queue in the
358 * loop above, but we do it down here given that
359 * cancel_work_sync may block.
360 */
361 cancel_work_sync(&cmd->work);
362 transport_wait_for_tasks(cmd);
363
364 if (!transport_cmd_finish_abort(cmd))
365 target_put_sess_cmd(cmd);
366 } 373 }
367} 374}
368 375
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 32457fd7a736..45c8fd13b845 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -707,32 +707,6 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
707 percpu_ref_put(&lun->lun_ref); 707 percpu_ref_put(&lun->lun_ref);
708} 708}
709 709
710int transport_cmd_finish_abort(struct se_cmd *cmd)
711{
712 bool send_tas = cmd->transport_state & CMD_T_TAS;
713 bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
714 int ret = 0;
715
716 if (send_tas)
717 transport_send_task_abort(cmd);
718
719 if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
720 transport_lun_remove_cmd(cmd);
721 /*
722 * Allow the fabric driver to unmap any resources before
723 * releasing the descriptor via TFO->release_cmd()
724 */
725 if (!send_tas)
726 cmd->se_tfo->aborted_task(cmd);
727
728 if (transport_cmd_check_stop_to_fabric(cmd))
729 return 1;
730 if (!send_tas && ack_kref)
731 ret = target_put_sess_cmd(cmd);
732
733 return ret;
734}
735
736static void target_complete_failure_work(struct work_struct *work) 710static void target_complete_failure_work(struct work_struct *work)
737{ 711{
738 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 712 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
@@ -782,12 +756,88 @@ void transport_copy_sense_to_cmd(struct se_cmd *cmd, unsigned char *sense)
782} 756}
783EXPORT_SYMBOL(transport_copy_sense_to_cmd); 757EXPORT_SYMBOL(transport_copy_sense_to_cmd);
784 758
759static void target_handle_abort(struct se_cmd *cmd)
760{
761 bool tas = cmd->transport_state & CMD_T_TAS;
762 bool ack_kref = cmd->se_cmd_flags & SCF_ACK_KREF;
763 int ret;
764
765 pr_debug("tag %#llx: send_abort_response = %d\n", cmd->tag, tas);
766
767 if (tas) {
768 if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
769 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
770 pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
771 cmd->t_task_cdb[0], cmd->tag);
772 trace_target_cmd_complete(cmd);
773 ret = cmd->se_tfo->queue_status(cmd);
774 if (ret) {
775 transport_handle_queue_full(cmd, cmd->se_dev,
776 ret, false);
777 return;
778 }
779 } else {
780 cmd->se_tmr_req->response = TMR_FUNCTION_REJECTED;
781 cmd->se_tfo->queue_tm_rsp(cmd);
782 }
783 } else {
784 /*
785 * Allow the fabric driver to unmap any resources before
786 * releasing the descriptor via TFO->release_cmd().
787 */
788 cmd->se_tfo->aborted_task(cmd);
789 if (ack_kref)
790 WARN_ON_ONCE(target_put_sess_cmd(cmd) != 0);
791 /*
792 * To do: establish a unit attention condition on the I_T
793 * nexus associated with cmd. See also the paragraph "Aborting
794 * commands" in SAM.
795 */
796 }
797
798 WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0);
799
800 transport_lun_remove_cmd(cmd);
801
802 transport_cmd_check_stop_to_fabric(cmd);
803}
804
805static void target_abort_work(struct work_struct *work)
806{
807 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
808
809 target_handle_abort(cmd);
810}
811
812static bool target_cmd_interrupted(struct se_cmd *cmd)
813{
814 int post_ret;
815
816 if (cmd->transport_state & CMD_T_ABORTED) {
817 if (cmd->transport_complete_callback)
818 cmd->transport_complete_callback(cmd, false, &post_ret);
819 INIT_WORK(&cmd->work, target_abort_work);
820 queue_work(target_completion_wq, &cmd->work);
821 return true;
822 } else if (cmd->transport_state & CMD_T_STOP) {
823 if (cmd->transport_complete_callback)
824 cmd->transport_complete_callback(cmd, false, &post_ret);
825 complete_all(&cmd->t_transport_stop_comp);
826 return true;
827 }
828
829 return false;
830}
831
832/* May be called from interrupt context so must not sleep. */
785void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) 833void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
786{ 834{
787 struct se_device *dev = cmd->se_dev;
788 int success; 835 int success;
789 unsigned long flags; 836 unsigned long flags;
790 837
838 if (target_cmd_interrupted(cmd))
839 return;
840
791 cmd->scsi_status = scsi_status; 841 cmd->scsi_status = scsi_status;
792 842
793 spin_lock_irqsave(&cmd->t_state_lock, flags); 843 spin_lock_irqsave(&cmd->t_state_lock, flags);
@@ -803,25 +853,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
803 break; 853 break;
804 } 854 }
805 855
806 /* 856 if (!success) {
807 * Check for case where an explicit ABORT_TASK has been received
808 * and transport_wait_for_tasks() will be waiting for completion..
809 */
810 if (cmd->transport_state & CMD_T_ABORTED ||
811 cmd->transport_state & CMD_T_STOP) {
812 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
813 /*
814 * If COMPARE_AND_WRITE was stopped by __transport_wait_for_tasks(),
815 * release se_device->caw_sem obtained by sbc_compare_and_write()
816 * since target_complete_ok_work() or target_complete_failure_work()
817 * won't be called to invoke the normal CAW completion callbacks.
818 */
819 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
820 up(&dev->caw_sem);
821 }
822 complete_all(&cmd->t_transport_stop_comp);
823 return;
824 } else if (!success) {
825 INIT_WORK(&cmd->work, target_complete_failure_work); 857 INIT_WORK(&cmd->work, target_complete_failure_work);
826 } else { 858 } else {
827 INIT_WORK(&cmd->work, target_complete_ok_work); 859 INIT_WORK(&cmd->work, target_complete_ok_work);
@@ -1805,8 +1837,11 @@ void transport_generic_request_failure(struct se_cmd *cmd,
1805 if (cmd->transport_complete_callback) 1837 if (cmd->transport_complete_callback)
1806 cmd->transport_complete_callback(cmd, false, NULL); 1838 cmd->transport_complete_callback(cmd, false, NULL);
1807 1839
1808 if (cmd->transport_state & CMD_T_ABORTED) 1840 if (cmd->transport_state & CMD_T_ABORTED) {
1841 INIT_WORK(&cmd->work, target_abort_work);
1842 queue_work(target_completion_wq, &cmd->work);
1809 return; 1843 return;
1844 }
1810 1845
1811 switch (sense_reason) { 1846 switch (sense_reason) {
1812 case TCM_NON_EXISTENT_LUN: 1847 case TCM_NON_EXISTENT_LUN:
@@ -2020,20 +2055,10 @@ void target_execute_cmd(struct se_cmd *cmd)
2020 * 2055 *
2021 * If the received CDB has already been aborted stop processing it here. 2056 * If the received CDB has already been aborted stop processing it here.
2022 */ 2057 */
2023 spin_lock_irq(&cmd->t_state_lock); 2058 if (target_cmd_interrupted(cmd))
2024 if (cmd->transport_state & CMD_T_ABORTED) {
2025 spin_unlock_irq(&cmd->t_state_lock);
2026 return;
2027 }
2028 if (cmd->transport_state & CMD_T_STOP) {
2029 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
2030 __func__, __LINE__, cmd->tag);
2031
2032 spin_unlock_irq(&cmd->t_state_lock);
2033 complete_all(&cmd->t_transport_stop_comp);
2034 return; 2059 return;
2035 }
2036 2060
2061 spin_lock_irq(&cmd->t_state_lock);
2037 cmd->t_state = TRANSPORT_PROCESSING; 2062 cmd->t_state = TRANSPORT_PROCESSING;
2038 cmd->transport_state &= ~CMD_T_PRE_EXECUTE; 2063 cmd->transport_state &= ~CMD_T_PRE_EXECUTE;
2039 cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT; 2064 cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
@@ -2647,13 +2672,29 @@ static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
2647} 2672}
2648 2673
2649/* 2674/*
2675 * Call target_put_sess_cmd() and wait until target_release_cmd_kref(@cmd) has
2676 * finished.
2677 */
2678void target_put_cmd_and_wait(struct se_cmd *cmd)
2679{
2680 DECLARE_COMPLETION_ONSTACK(compl);
2681
2682 WARN_ON_ONCE(cmd->abrt_compl);
2683 cmd->abrt_compl = &compl;
2684 target_put_sess_cmd(cmd);
2685 wait_for_completion(&compl);
2686}
2687
2688/*
2650 * This function is called by frontend drivers after processing of a command 2689 * This function is called by frontend drivers after processing of a command
2651 * has finished. 2690 * has finished.
2652 * 2691 *
2653 * The protocol for ensuring that either the regular flow or the TMF 2692 * The protocol for ensuring that either the regular frontend command
2654 * code drops one reference is as follows: 2693 * processing flow or target_handle_abort() code drops one reference is as
2694 * follows:
2655 * - Calling .queue_data_in(), .queue_status() or queue_tm_rsp() will cause 2695 * - Calling .queue_data_in(), .queue_status() or queue_tm_rsp() will cause
2656 * the frontend driver to drop one reference, synchronously or asynchronously. 2696 * the frontend driver to call this function synchronously or asynchronously.
2697 * That will cause one reference to be dropped.
2657 * - During regular command processing the target core sets CMD_T_COMPLETE 2698 * - During regular command processing the target core sets CMD_T_COMPLETE
2658 * before invoking one of the .queue_*() functions. 2699 * before invoking one of the .queue_*() functions.
2659 * - The code that aborts commands skips commands and TMFs for which 2700 * - The code that aborts commands skips commands and TMFs for which
@@ -2665,7 +2706,7 @@ static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
2665 * - For aborted commands for which CMD_T_TAS has been set .queue_status() will 2706 * - For aborted commands for which CMD_T_TAS has been set .queue_status() will
2666 * be called and will drop a reference. 2707 * be called and will drop a reference.
2667 * - For aborted commands for which CMD_T_TAS has not been set .aborted_task() 2708 * - For aborted commands for which CMD_T_TAS has not been set .aborted_task()
2668 * will be called. transport_cmd_finish_abort() will drop the final reference. 2709 * will be called. target_handle_abort() will drop the final reference.
2669 */ 2710 */
2670int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) 2711int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
2671{ 2712{
@@ -2690,8 +2731,7 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
2690 } 2731 }
2691 if (aborted) 2732 if (aborted)
2692 cmd->free_compl = &compl; 2733 cmd->free_compl = &compl;
2693 if (!aborted || tas) 2734 ret = target_put_sess_cmd(cmd);
2694 ret = target_put_sess_cmd(cmd);
2695 if (aborted) { 2735 if (aborted) {
2696 pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag); 2736 pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
2697 wait_for_completion(&compl); 2737 wait_for_completion(&compl);
@@ -3219,6 +3259,8 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
3219{ 3259{
3220 unsigned long flags; 3260 unsigned long flags;
3221 3261
3262 WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB);
3263
3222 spin_lock_irqsave(&cmd->t_state_lock, flags); 3264 spin_lock_irqsave(&cmd->t_state_lock, flags);
3223 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { 3265 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
3224 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3266 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@@ -3235,46 +3277,15 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
3235} 3277}
3236EXPORT_SYMBOL(transport_send_check_condition_and_sense); 3278EXPORT_SYMBOL(transport_send_check_condition_and_sense);
3237 3279
3238void transport_send_task_abort(struct se_cmd *cmd)
3239{
3240 unsigned long flags;
3241 int ret;
3242
3243 spin_lock_irqsave(&cmd->t_state_lock, flags);
3244 if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
3245 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3246 return;
3247 }
3248 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3249
3250 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
3251
3252 transport_lun_remove_cmd(cmd);
3253
3254 pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
3255 cmd->t_task_cdb[0], cmd->tag);
3256
3257 trace_target_cmd_complete(cmd);
3258 ret = cmd->se_tfo->queue_status(cmd);
3259 if (ret)
3260 transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
3261}
3262
3263static void target_tmr_work(struct work_struct *work) 3280static void target_tmr_work(struct work_struct *work)
3264{ 3281{
3265 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 3282 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
3266 struct se_device *dev = cmd->se_dev; 3283 struct se_device *dev = cmd->se_dev;
3267 struct se_tmr_req *tmr = cmd->se_tmr_req; 3284 struct se_tmr_req *tmr = cmd->se_tmr_req;
3268 unsigned long flags;
3269 int ret; 3285 int ret;
3270 3286
3271 spin_lock_irqsave(&cmd->t_state_lock, flags); 3287 if (cmd->transport_state & CMD_T_ABORTED)
3272 if (cmd->transport_state & CMD_T_ABORTED) { 3288 goto aborted;
3273 tmr->response = TMR_FUNCTION_REJECTED;
3274 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3275 goto check_stop;
3276 }
3277 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3278 3289
3279 switch (tmr->function) { 3290 switch (tmr->function) {
3280 case TMR_ABORT_TASK: 3291 case TMR_ABORT_TASK:
@@ -3308,18 +3319,16 @@ static void target_tmr_work(struct work_struct *work)
3308 break; 3319 break;
3309 } 3320 }
3310 3321
3311 spin_lock_irqsave(&cmd->t_state_lock, flags); 3322 if (cmd->transport_state & CMD_T_ABORTED)
3312 if (cmd->transport_state & CMD_T_ABORTED) { 3323 goto aborted;
3313 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3314 goto check_stop;
3315 }
3316 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3317 3324
3318 cmd->se_tfo->queue_tm_rsp(cmd); 3325 cmd->se_tfo->queue_tm_rsp(cmd);
3319 3326
3320check_stop:
3321 transport_lun_remove_cmd(cmd);
3322 transport_cmd_check_stop_to_fabric(cmd); 3327 transport_cmd_check_stop_to_fabric(cmd);
3328 return;
3329
3330aborted:
3331 target_handle_abort(cmd);
3323} 3332}
3324 3333
3325int transport_generic_handle_tmr( 3334int transport_generic_handle_tmr(
@@ -3338,11 +3347,10 @@ int transport_generic_handle_tmr(
3338 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3347 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3339 3348
3340 if (aborted) { 3349 if (aborted) {
3341 pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d" 3350 pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d ref_tag: %llu tag: %llu\n",
3342 "ref_tag: %llu tag: %llu\n", cmd->se_tmr_req->function, 3351 cmd->se_tmr_req->function,
3343 cmd->se_tmr_req->ref_task_tag, cmd->tag); 3352 cmd->se_tmr_req->ref_task_tag, cmd->tag);
3344 transport_lun_remove_cmd(cmd); 3353 target_handle_abort(cmd);
3345 transport_cmd_check_stop_to_fabric(cmd);
3346 return 0; 3354 return 0;
3347 } 3355 }
3348 3356
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 952f84455cef..ee5ddd81cd8d 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -166,6 +166,7 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
166int transport_handle_cdb_direct(struct se_cmd *); 166int transport_handle_cdb_direct(struct se_cmd *);
167sense_reason_t transport_generic_new_cmd(struct se_cmd *); 167sense_reason_t transport_generic_new_cmd(struct se_cmd *);
168 168
169void target_put_cmd_and_wait(struct se_cmd *cmd);
169void target_execute_cmd(struct se_cmd *cmd); 170void target_execute_cmd(struct se_cmd *cmd);
170 171
171int transport_generic_free_cmd(struct se_cmd *, int); 172int transport_generic_free_cmd(struct se_cmd *, int);