-rw-r--r--   drivers/target/target_core_tmr.c        |  10
-rw-r--r--   drivers/target/target_core_transport.c  | 165
-rw-r--r--   include/target/target_core_base.h       |   6
3 files changed, 98 insertions(+), 83 deletions(-)
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 532ce317406a..570b144a1edb 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -255,6 +255,16 @@ static void core_tmr_drain_task_list(
                         atomic_read(&cmd->t_transport_stop),
                         atomic_read(&cmd->t_transport_sent));
 
+               /*
+                * If the command may be queued onto a workqueue, cancel it now.
+                *
+                * This is equivalent to removal from the execute queue in the
+                * loop above, but we do it down here given that
+                * cancel_work_sync may block.
+                */
+               if (cmd->t_state == TRANSPORT_COMPLETE)
+                       cancel_work_sync(&cmd->work);
+
                spin_lock_irqsave(&cmd->t_state_lock, flags);
                target_stop_task(task, &flags);
 
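
Note on the new cancel_work_sync() call above: it waits for any in-flight execution of the work item and may sleep, so it has to run in process context before t_state_lock is taken, exactly as the added comment says. A minimal sketch of that ordering follows; the helper name drain_one_cmd is made up for illustration and is not part of the patch.

#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <target/target_core_base.h>

/* Illustrative only: cancel the deferred completion before touching
 * command state under the spinlock, because cancel_work_sync() may block. */
static void drain_one_cmd(struct se_cmd *cmd)
{
        unsigned long flags;

        if (cmd->t_state == TRANSPORT_COMPLETE)
                cancel_work_sync(&cmd->work);   /* may sleep */

        spin_lock_irqsave(&cmd->t_state_lock, flags);
        /* ... tear down task state while the lock is held ... */
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
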
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 87beae6c76a0..774ff00b1110 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -58,6 +58,7 @@
 
 static int sub_api_initialized;
 
+static struct workqueue_struct *target_completion_wq;
 static struct kmem_cache *se_cmd_cache;
 static struct kmem_cache *se_sess_cache;
 struct kmem_cache *se_tmr_req_cache;
@@ -84,6 +85,8 @@ static int transport_generic_get_mem(struct se_cmd *cmd);
 static void transport_put_cmd(struct se_cmd *cmd);
 static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
 static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
+static void transport_generic_request_failure(struct se_cmd *, int, int);
+static void target_complete_ok_work(struct work_struct *work);
 
 int init_se_kmem_caches(void)
 {
@@ -99,7 +102,7 @@ int init_se_kmem_caches(void)
        if (!se_tmr_req_cache) {
                pr_err("kmem_cache_create() for struct se_tmr_req"
                                " failed\n");
-               goto out;
+               goto out_free_cmd_cache;
        }
        se_sess_cache = kmem_cache_create("se_sess_cache",
                        sizeof(struct se_session), __alignof__(struct se_session),
@@ -107,14 +110,14 @@ int init_se_kmem_caches(void)
        if (!se_sess_cache) {
                pr_err("kmem_cache_create() for struct se_session"
                                " failed\n");
-               goto out;
+               goto out_free_tmr_req_cache;
        }
        se_ua_cache = kmem_cache_create("se_ua_cache",
                        sizeof(struct se_ua), __alignof__(struct se_ua),
                        0, NULL);
        if (!se_ua_cache) {
                pr_err("kmem_cache_create() for struct se_ua failed\n");
-               goto out;
+               goto out_free_sess_cache;
        }
        t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
                        sizeof(struct t10_pr_registration),
@@ -122,7 +125,7 @@ int init_se_kmem_caches(void)
        if (!t10_pr_reg_cache) {
                pr_err("kmem_cache_create() for struct t10_pr_registration"
                                " failed\n");
-               goto out;
+               goto out_free_ua_cache;
        }
        t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
                        sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
@@ -130,7 +133,7 @@ int init_se_kmem_caches(void)
        if (!t10_alua_lu_gp_cache) {
                pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
                                " failed\n");
-               goto out;
+               goto out_free_pr_reg_cache;
        }
        t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
                        sizeof(struct t10_alua_lu_gp_member),
@@ -138,7 +141,7 @@ int init_se_kmem_caches(void)
        if (!t10_alua_lu_gp_mem_cache) {
                pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
                                "cache failed\n");
-               goto out;
+               goto out_free_lu_gp_cache;
        }
        t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
                        sizeof(struct t10_alua_tg_pt_gp),
@@ -146,7 +149,7 @@ int init_se_kmem_caches(void)
        if (!t10_alua_tg_pt_gp_cache) {
                pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
                                "cache failed\n");
-               goto out;
+               goto out_free_lu_gp_mem_cache;
        }
        t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
                        "t10_alua_tg_pt_gp_mem_cache",
@@ -156,34 +159,41 @@ int init_se_kmem_caches(void)
        if (!t10_alua_tg_pt_gp_mem_cache) {
                pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
                                "mem_t failed\n");
-               goto out;
+               goto out_free_tg_pt_gp_cache;
        }
 
+       target_completion_wq = alloc_workqueue("target_completion",
+                                              WQ_MEM_RECLAIM, 0);
+       if (!target_completion_wq)
+               goto out_free_tg_pt_gp_mem_cache;
+
        return 0;
+
+out_free_tg_pt_gp_mem_cache:
+       kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
+out_free_tg_pt_gp_cache:
+       kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
+out_free_lu_gp_mem_cache:
+       kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
+out_free_lu_gp_cache:
+       kmem_cache_destroy(t10_alua_lu_gp_cache);
+out_free_pr_reg_cache:
+       kmem_cache_destroy(t10_pr_reg_cache);
+out_free_ua_cache:
+       kmem_cache_destroy(se_ua_cache);
+out_free_sess_cache:
+       kmem_cache_destroy(se_sess_cache);
+out_free_tmr_req_cache:
+       kmem_cache_destroy(se_tmr_req_cache);
+out_free_cmd_cache:
+       kmem_cache_destroy(se_cmd_cache);
 out:
-       if (se_cmd_cache)
-               kmem_cache_destroy(se_cmd_cache);
-       if (se_tmr_req_cache)
-               kmem_cache_destroy(se_tmr_req_cache);
-       if (se_sess_cache)
-               kmem_cache_destroy(se_sess_cache);
-       if (se_ua_cache)
-               kmem_cache_destroy(se_ua_cache);
-       if (t10_pr_reg_cache)
-               kmem_cache_destroy(t10_pr_reg_cache);
-       if (t10_alua_lu_gp_cache)
-               kmem_cache_destroy(t10_alua_lu_gp_cache);
-       if (t10_alua_lu_gp_mem_cache)
-               kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
-       if (t10_alua_tg_pt_gp_cache)
-               kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
-       if (t10_alua_tg_pt_gp_mem_cache)
-               kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
        return -ENOMEM;
 }
 
 void release_se_kmem_caches(void)
 {
+       destroy_workqueue(target_completion_wq);
        kmem_cache_destroy(se_cmd_cache);
        kmem_cache_destroy(se_tmr_req_cache);
        kmem_cache_destroy(se_sess_cache);
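
Two things worth noting in the rewritten init path. alloc_workqueue() with WQ_MEM_RECLAIM requests a rescuer thread so the completion queue can keep making forward progress under memory pressure, which matters for anything sitting in the I/O path. And the error handling now follows the usual kernel goto-unwind idiom: each failure jumps to a label that destroys only what was already created, in reverse order, so the old "if (cache) kmem_cache_destroy(cache)" checks disappear. A reduced sketch of the same idiom with two hypothetical caches (the names and structs below are made up for illustration):

#include <linux/slab.h>

struct foo { int a; };
struct bar { int b; };

static struct kmem_cache *a_cache;
static struct kmem_cache *b_cache;

static int example_init(void)
{
        a_cache = kmem_cache_create("a_cache", sizeof(struct foo),
                        __alignof__(struct foo), 0, NULL);
        if (!a_cache)
                return -ENOMEM;

        b_cache = kmem_cache_create("b_cache", sizeof(struct bar),
                        __alignof__(struct bar), 0, NULL);
        if (!b_cache)
                goto out_free_a;        /* unwind in reverse order of setup */

        return 0;

out_free_a:
        kmem_cache_destroy(a_cache);
        return -ENOMEM;
}
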
@@ -689,6 +699,33 @@ void transport_complete_sync_cache(struct se_cmd *cmd, int good)
 }
 EXPORT_SYMBOL(transport_complete_sync_cache);
 
+static void target_complete_timeout_work(struct work_struct *work)
+{
+       struct se_cmd *cmd = container_of(work, struct se_cmd, work);
+       unsigned long flags;
+
+       /*
+        * Reset cmd->t_se_count to allow transport_put_cmd()
+        * to allow last call to free memory resources.
+        */
+       spin_lock_irqsave(&cmd->t_state_lock, flags);
+       if (atomic_read(&cmd->t_transport_timeout) > 1) {
+               int tmp = (atomic_read(&cmd->t_transport_timeout) - 1);
+
+               atomic_sub(tmp, &cmd->t_se_count);
+       }
+       spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+       transport_put_cmd(cmd);
+}
+
+static void target_complete_failure_work(struct work_struct *work)
+{
+       struct se_cmd *cmd = container_of(work, struct se_cmd, work);
+
+       transport_generic_request_failure(cmd, 1, 1);
+}
+
 /* transport_complete_task():
  *
  * Called from interrupt and non interrupt context depending
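
The new handlers recover their se_cmd with container_of(), which works because struct se_cmd now embeds the work_struct directly (see the target_core_base.h hunk below): there is no separate allocation, and the conversion is just an offset subtraction. A self-contained sketch of the pattern with hypothetical names:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_cmd {
        int                     status;
        struct work_struct      work;   /* embedded, no separate allocation */
};

static void example_complete_fn(struct work_struct *work)
{
        /* container_of() subtracts offsetof(struct example_cmd, work) */
        struct example_cmd *cmd = container_of(work, struct example_cmd, work);

        pr_debug("completing cmd %p, status %d\n", cmd, cmd->status);
}
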
@@ -698,7 +735,6 @@ void transport_complete_task(struct se_task *task, int success)
 {
        struct se_cmd *cmd = task->task_se_cmd;
        struct se_device *dev = cmd->se_dev;
-       int t_state;
        unsigned long flags;
 #if 0
        pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task,
@@ -749,17 +785,12 @@ void transport_complete_task(struct se_task *task, int success)
         * the processing thread.
         */
        if (task->task_flags & TF_TIMEOUT) {
-               if (!atomic_dec_and_test(
-                               &cmd->t_task_cdbs_timeout_left)) {
-                       spin_unlock_irqrestore(&cmd->t_state_lock,
-                                       flags);
+               if (!atomic_dec_and_test(&cmd->t_task_cdbs_timeout_left)) {
+                       spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                        return;
                }
-               t_state = TRANSPORT_COMPLETE_TIMEOUT;
-               spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
-               transport_add_cmd_to_queue(cmd, t_state, false);
-               return;
+               INIT_WORK(&cmd->work, target_complete_timeout_work);
+               goto out_queue;
        }
        atomic_dec(&cmd->t_task_cdbs_timeout_left);
 
@@ -769,28 +800,29 @@ void transport_complete_task(struct se_task *task, int success)
         * device queue depending upon int success.
         */
        if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
-               if (!success)
-                       cmd->t_tasks_failed = 1;
-
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                return;
        }
 
        if (!success || cmd->t_tasks_failed) {
-               t_state = TRANSPORT_COMPLETE_FAILURE;
                if (!task->task_error_status) {
                        task->task_error_status =
                                PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
                        cmd->transport_error_status =
                                PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
                }
+               INIT_WORK(&cmd->work, target_complete_failure_work);
        } else {
                atomic_set(&cmd->t_transport_complete, 1);
-               t_state = TRANSPORT_COMPLETE_OK;
+               INIT_WORK(&cmd->work, target_complete_ok_work);
        }
+
+out_queue:
+       cmd->t_state = TRANSPORT_COMPLETE;
+       atomic_set(&cmd->t_transport_active, 1);
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
-       transport_add_cmd_to_queue(cmd, t_state, false);
+       queue_work(target_completion_wq, &cmd->work);
 }
 EXPORT_SYMBOL(transport_complete_task);
 
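
On the submission side, the handler is bound with INIT_WORK() while t_state_lock is still held, and the command is only queued once the lock has been dropped; queue_work() itself is safe from hard-interrupt context, which is why transport_complete_task() can call it directly. A minimal sketch of that producer half (every name below is made up for illustration and is not part of the patch):

#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;     /* created with alloc_workqueue() at init time */

static void example_ok_fn(struct work_struct *work) { /* ... */ }
static void example_fail_fn(struct work_struct *work) { /* ... */ }

struct example_cmd {
        spinlock_t              lock;
        int                     state;
        struct work_struct      work;
};

static void example_complete(struct example_cmd *cmd, bool failed)
{
        unsigned long flags;

        spin_lock_irqsave(&cmd->lock, flags);
        /* pick the handler while the state is still protected by the lock */
        INIT_WORK(&cmd->work, failed ? example_fail_fn : example_ok_fn);
        cmd->state = 1;                 /* e.g. a COMPLETE state */
        spin_unlock_irqrestore(&cmd->lock, flags);

        /* safe from interrupt context; the handler runs later in process context */
        queue_work(example_wq, &cmd->work);
}
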
@@ -1642,8 +1674,6 @@ int transport_generic_allocate_tasks(
 }
 EXPORT_SYMBOL(transport_generic_allocate_tasks);
 
-static void transport_generic_request_failure(struct se_cmd *, int, int);
-
 /*
  * Used by fabric module frontends to queue tasks directly.
  * Many only be used from process context only
@@ -1985,25 +2015,6 @@ static void transport_direct_request_timeout(struct se_cmd *cmd)
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 }
 
-static void transport_generic_request_timeout(struct se_cmd *cmd)
-{
-       unsigned long flags;
-
-       /*
-        * Reset cmd->t_se_count to allow transport_put_cmd()
-        * to allow last call to free memory resources.
-        */
-       spin_lock_irqsave(&cmd->t_state_lock, flags);
-       if (atomic_read(&cmd->t_transport_timeout) > 1) {
-               int tmp = (atomic_read(&cmd->t_transport_timeout) - 1);
-
-               atomic_sub(tmp, &cmd->t_se_count);
-       }
-       spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
-       transport_put_cmd(cmd);
-}
-
 static inline u32 transport_lba_21(unsigned char *cdb)
 {
        return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
@@ -2094,10 +2105,12 @@ static void transport_task_timeout_handler(unsigned long data)
        pr_debug("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
                        task, cmd);
 
-       cmd->t_state = TRANSPORT_COMPLETE_FAILURE;
+       INIT_WORK(&cmd->work, target_complete_failure_work);
+       cmd->t_state = TRANSPORT_COMPLETE;
+       atomic_set(&cmd->t_transport_active, 1);
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
-       transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE, false);
+       queue_work(target_completion_wq, &cmd->work);
 }
 
 static void transport_start_task_timer(struct se_task *task)
@@ -2879,7 +2892,7 @@ static int transport_generic_cmd_sequencer(
                if (passthrough)
                        break;
                /*
-                * Setup BIDI XOR callback to be run during transport_generic_complete_ok()
+                * Setup BIDI XOR callback to be run after I/O completion.
                 */
                cmd->transport_complete_callback = &transport_xor_callback;
                cmd->t_tasks_fua = (cdb[1] & 0x8);
@@ -2913,8 +2926,8 @@ static int transport_generic_cmd_sequencer(
                break;
 
                /*
-                * Setup BIDI XOR callback to be run during
-                * transport_generic_complete_ok()
+                * Setup BIDI XOR callback to be run after I/O
+                * completion.
                 */
                cmd->transport_complete_callback = &transport_xor_callback;
                cmd->t_tasks_fua = (cdb[10] & 0x8);
@@ -3311,8 +3324,7 @@ out_invalid_cdb_field:
 }
 
 /*
- * Called from transport_generic_complete_ok() and
- * transport_generic_request_failure() to determine which dormant/delayed
+ * Called from I/O completion to determine which dormant/delayed
  * and ordered cmds need to have their tasks added to the execution queue.
  */
 static void transport_complete_task_attr(struct se_cmd *cmd)
@@ -3433,9 +3445,11 @@ static void transport_handle_queue_full(
        schedule_work(&cmd->se_dev->qf_work_queue);
 }
 
-static void transport_generic_complete_ok(struct se_cmd *cmd)
+static void target_complete_ok_work(struct work_struct *work)
 {
+       struct se_cmd *cmd = container_of(work, struct se_cmd, work);
        int reason = 0, ret;
+
        /*
         * Check if we need to move delayed/dormant tasks from cmds on the
         * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
@@ -4778,21 +4792,12 @@ get_cmd:
                case TRANSPORT_PROCESS_WRITE:
                        transport_generic_process_write(cmd);
                        break;
-               case TRANSPORT_COMPLETE_OK:
-                       transport_generic_complete_ok(cmd);
-                       break;
                case TRANSPORT_FREE_CMD_INTR:
                        transport_generic_free_cmd(cmd, 0);
                        break;
                case TRANSPORT_PROCESS_TMR:
                        transport_generic_do_tmr(cmd);
                        break;
-               case TRANSPORT_COMPLETE_FAILURE:
-                       transport_generic_request_failure(cmd, 1, 1);
-                       break;
-               case TRANSPORT_COMPLETE_TIMEOUT:
-                       transport_generic_request_timeout(cmd);
-                       break;
                case TRANSPORT_COMPLETE_QF_WP:
                        transport_write_pending_qf(cmd);
                        break;
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 07104bf0a9c8..8e2c83d4fbad 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -86,9 +86,7 @@ enum transport_state_table {
        TRANSPORT_WRITE_PENDING = 3,
        TRANSPORT_PROCESS_WRITE = 4,
        TRANSPORT_PROCESSING = 5,
-       TRANSPORT_COMPLETE_OK = 6,
-       TRANSPORT_COMPLETE_FAILURE = 7,
-       TRANSPORT_COMPLETE_TIMEOUT = 8,
+       TRANSPORT_COMPLETE = 6,
        TRANSPORT_PROCESS_TMR = 9,
        TRANSPORT_ISTATE_PROCESSING = 11,
        TRANSPORT_NEW_CMD_MAP = 16,
@@ -492,6 +490,8 @@ struct se_cmd {
        struct completion transport_lun_stop_comp;
        struct scatterlist *t_tasks_sg_chained;
 
+       struct work_struct work;
+
        /*
         * Used for pre-registered fabric SGL passthrough WRITE and READ
         * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop