author	Nicholas Bellinger <nab@linux-iscsi.org>	2011-09-29 00:37:29 -0400
committer	Nicholas Bellinger <nab@linux-iscsi.org>	2011-10-23 23:19:32 -0400
commit	d050ffb922c782f092234611b9019e95024481ab (patch)
tree	a2883666772d23546cda814ff9894ff05edbe005 /drivers/target
parent	79a7fef26431830e22e282053d050af790117db8 (diff)
target: Re-org of core_tmr_lun_reset
This patch is a re-organization of the core_tmr_lun_reset() logic to properly scan the active tmr_list, dev->state_task_list and qobj->qobj_list with the relevant locks held, performing a list_move_tail() onto separate local-scope lists before doing the full drain. This involves breaking the code out into three separate list-specific functions: core_tmr_drain_tmr_list(), core_tmr_drain_task_list() and core_tmr_drain_cmd_list().

(nab: Include the "target: Remove non-active tasks from execute list during LUN_RESET" patch to address the original breakage)

Reported-by: Roland Dreier <roland@purestorage.com>
Cc: Roland Dreier <roland@purestorage.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: stable@kernel.org
Signed-off-by: Nicholas Bellinger <nab@risingtidesystems.com>
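The change replaces the old pattern of dropping and re-taking the device-side lock inside each loop iteration with a two-phase drain: matching entries are spliced onto a function-local list with list_move_tail() while the lock is held, and only after the lock is released are they actually processed. The userspace sketch below illustrates just that pattern; the names in it (demo_dev, demo_req, demo_drain(), the pthread mutex and the hand-rolled list helpers) are illustrative stand-ins for the se_* structures and kernel spinlock/list APIs, not code from this patch.

/*
 * Illustrative userspace sketch (not kernel code): the "drain" pattern used
 * by the new core_tmr_drain_*_list() helpers.  Phase 1 moves entries onto a
 * local list under the lock; phase 2 processes them with the lock dropped.
 * demo_dev/demo_req/demo_drain and the list helpers are made-up names.
 */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name)	{ &(name), &(name) }

static void list_del_node(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_add_tail_node(struct list_head *e, struct list_head *h)
{
	e->prev = h->prev;
	e->next = h;
	h->prev->next = e;
	h->prev = e;
}

/* Same effect as the kernel's list_move_tail(): unlink, re-link at tail. */
static void list_move_tail_node(struct list_head *e, struct list_head *h)
{
	list_del_node(e);
	list_add_tail_node(e, h);
}

#define node_to_req(p)	((struct demo_req *)((char *)(p) - offsetof(struct demo_req, node)))

struct demo_req {
	int id;
	int keep;			/* e.g. the received LUN_RESET TMR */
	struct list_head node;
};

struct demo_dev {
	pthread_mutex_t lock;		/* stands in for dev->se_tmr_lock */
	struct list_head req_list;	/* stands in for dev->dev_tmr_list */
};

static void demo_drain(struct demo_dev *dev)
{
	struct list_head drain_list = LIST_HEAD_INIT(drain_list);
	struct list_head *p, *n;

	/* Phase 1: under the lock, only decide and splice; no heavy work. */
	pthread_mutex_lock(&dev->lock);
	for (p = dev->req_list.next, n = p->next; p != &dev->req_list;
	     p = n, n = p->next) {
		if (node_to_req(p)->keep)
			continue;
		list_move_tail_node(p, &drain_list);
	}
	pthread_mutex_unlock(&dev->lock);

	/* Phase 2: abort/free the drained entries without holding the lock. */
	while (drain_list.next != &drain_list) {
		struct demo_req *req = node_to_req(drain_list.next);

		list_del_node(&req->node);
		printf("draining req %d\n", req->id);
		free(req);
	}
}

int main(void)
{
	struct demo_dev dev = {
		.lock	  = PTHREAD_MUTEX_INITIALIZER,
		.req_list = LIST_HEAD_INIT(dev.req_list),
	};
	int i;

	for (i = 0; i < 4; i++) {
		struct demo_req *req = calloc(1, sizeof(*req));

		req->id = i;
		req->keep = (i == 0);	/* pretend req 0 is the caller's own TMR */
		list_add_tail_node(&req->node, &dev.req_list);
	}

	demo_drain(&dev);		/* drains reqs 1-3; req 0 stays on req_list */
	return 0;
}

Compared with the pre-patch code, which had to unlock and re-lock dev->se_tmr_lock, dev->execute_task_lock or qobj->cmd_queue_lock around every abort inside the loop, phase 2 here runs with no lock held at all, which is what the three new core_tmr_drain_*_list() helpers in the diff below do.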
Diffstat (limited to 'drivers/target')
-rw-r--r--	drivers/target/target_core_tmr.c	197
1 file changed, 125 insertions(+), 72 deletions(-)
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 98b12a8923f0..ed0b1ff99110 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -66,15 +66,16 @@ void core_tmr_release_req(
 	struct se_tmr_req *tmr)
 {
 	struct se_device *dev = tmr->tmr_dev;
+	unsigned long flags;
 
 	if (!dev) {
 		kmem_cache_free(se_tmr_req_cache, tmr);
 		return;
 	}
 
-	spin_lock_irq(&dev->se_tmr_lock);
+	spin_lock_irqsave(&dev->se_tmr_lock, flags);
 	list_del(&tmr->tmr_list);
-	spin_unlock_irq(&dev->se_tmr_lock);
+	spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
 
 	kmem_cache_free(se_tmr_req_cache, tmr);
 }
@@ -99,54 +100,20 @@ static void core_tmr_handle_tas_abort(
 	transport_cmd_finish_abort(cmd, 0);
 }
 
-int core_tmr_lun_reset(
+static void core_tmr_drain_tmr_list(
 	struct se_device *dev,
 	struct se_tmr_req *tmr,
-	struct list_head *preempt_and_abort_list,
-	struct se_cmd *prout_cmd)
+	struct list_head *preempt_and_abort_list)
 {
-	struct se_cmd *cmd, *tcmd;
-	struct se_node_acl *tmr_nacl = NULL;
-	struct se_portal_group *tmr_tpg = NULL;
-	struct se_queue_obj *qobj = &dev->dev_queue_obj;
+	LIST_HEAD(drain_tmr_list);
 	struct se_tmr_req *tmr_p, *tmr_pp;
-	struct se_task *task, *task_tmp;
+	struct se_cmd *cmd;
 	unsigned long flags;
-	int fe_count, tas;
-	/*
-	 * TASK_ABORTED status bit, this is configurable via ConfigFS
-	 * struct se_device attributes. spc4r17 section 7.4.6 Control mode page
-	 *
-	 * A task aborted status (TAS) bit set to zero specifies that aborted
-	 * tasks shall be terminated by the device server without any response
-	 * to the application client. A TAS bit set to one specifies that tasks
-	 * aborted by the actions of an I_T nexus other than the I_T nexus on
-	 * which the command was received shall be completed with TASK ABORTED
-	 * status (see SAM-4).
-	 */
-	tas = dev->se_sub_dev->se_dev_attrib.emulate_tas;
-	/*
-	 * Determine if this se_tmr is coming from a $FABRIC_MOD
-	 * or struct se_device passthrough..
-	 */
-	if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
-		tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
-		tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
-		if (tmr_nacl && tmr_tpg) {
-			pr_debug("LUN_RESET: TMR caller fabric: %s"
-				" initiator port %s\n",
-				tmr_tpg->se_tpg_tfo->get_fabric_name(),
-				tmr_nacl->initiatorname);
-		}
-	}
-	pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
-		(preempt_and_abort_list) ? "Preempt" : "TMR",
-		dev->transport->name, tas);
 	/*
 	 * Release all pending and outgoing TMRs aside from the received
 	 * LUN_RESET tmr..
 	 */
-	spin_lock_irq(&dev->se_tmr_lock);
+	spin_lock_irqsave(&dev->se_tmr_lock, flags);
 	list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
 		/*
 		 * Allow the received TMR to return with FUNCTION_COMPLETE.
@@ -168,29 +135,48 @@ int core_tmr_lun_reset(
 		    (core_scsi3_check_cdb_abort_and_preempt(
 				preempt_and_abort_list, cmd) != 0))
 			continue;
-		spin_unlock_irq(&dev->se_tmr_lock);
 
-		spin_lock_irqsave(&cmd->t_state_lock, flags);
+		spin_lock(&cmd->t_state_lock);
 		if (!atomic_read(&cmd->t_transport_active)) {
-			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-			spin_lock_irq(&dev->se_tmr_lock);
+			spin_unlock(&cmd->t_state_lock);
 			continue;
 		}
 		if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
-			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-			spin_lock_irq(&dev->se_tmr_lock);
+			spin_unlock(&cmd->t_state_lock);
 			continue;
 		}
+		spin_unlock(&cmd->t_state_lock);
+
+		list_move_tail(&tmr->tmr_list, &drain_tmr_list);
+	}
+	spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
+
+	while (!list_empty(&drain_tmr_list)) {
+		tmr = list_entry(drain_tmr_list.next, struct se_tmr_req, tmr_list);
+		list_del(&tmr->tmr_list);
+		cmd = tmr_p->task_cmd;
+
 		pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
 			" Response: 0x%02x, t_state: %d\n",
-			(preempt_and_abort_list) ? "Preempt" : "", tmr_p,
-			tmr_p->function, tmr_p->response, cmd->t_state);
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+			(preempt_and_abort_list) ? "Preempt" : "", tmr,
+			tmr->function, tmr->response, cmd->t_state);
 
 		transport_cmd_finish_abort_tmr(cmd);
-		spin_lock_irq(&dev->se_tmr_lock);
 	}
-	spin_unlock_irq(&dev->se_tmr_lock);
+}
+
+static void core_tmr_drain_task_list(
+	struct se_device *dev,
+	struct se_cmd *prout_cmd,
+	struct se_node_acl *tmr_nacl,
+	int tas,
+	struct list_head *preempt_and_abort_list)
+{
+	LIST_HEAD(drain_task_list);
+	struct se_cmd *cmd;
+	struct se_task *task, *task_tmp;
+	unsigned long flags;
+	int fe_count;
 	/*
 	 * Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status.
 	 * This is following sam4r17, section 5.6 Aborting commands, Table 38
@@ -235,9 +221,23 @@ int core_tmr_lun_reset(
 		if (prout_cmd == cmd)
 			continue;
 
-		list_del(&task->t_state_list);
+		list_move_tail(&task->t_state_list, &drain_task_list);
 		atomic_set(&task->task_state_active, 0);
-		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+		/*
+		 * Remove from task execute list before processing drain_task_list
+		 */
+		if (atomic_read(&task->task_execute_queue) != 0) {
+			list_del(&task->t_execute_list);
+			atomic_set(&task->task_execute_queue, 0);
+			atomic_dec(&dev->execute_tasks);
+		}
+	}
+	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+
+	while (!list_empty(&drain_task_list)) {
+		task = list_entry(drain_task_list.next, struct se_task, t_state_list);
+		list_del(&task->t_state_list);
+		cmd = task->task_se_cmd;
 
 		spin_lock_irqsave(&cmd->t_state_lock, flags);
 		pr_debug("LUN_RESET: %s cmd: %p task: %p"
@@ -274,20 +274,14 @@ int core_tmr_lun_reset(
 
 			atomic_set(&task->task_active, 0);
 			atomic_set(&task->task_stop, 0);
-		} else {
-			if (atomic_read(&task->task_execute_queue) != 0)
-				transport_remove_task_from_execute_queue(task, dev);
 		}
 		__transport_stop_task_timer(task, &flags);
 
 		if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) {
-			spin_unlock_irqrestore(
-				&cmd->t_state_lock, flags);
+			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 			pr_debug("LUN_RESET: Skipping task: %p, dev: %p for"
 				" t_task_cdbs_ex_left: %d\n", task, dev,
 				atomic_read(&cmd->t_task_cdbs_ex_left));
-
-			spin_lock_irqsave(&dev->execute_task_lock, flags);
 			continue;
 		}
 		fe_count = atomic_read(&cmd->t_fe_count);
@@ -297,22 +291,31 @@ int core_tmr_lun_reset(
 				" task: %p, t_fe_count: %d dev: %p\n", task,
 				fe_count, dev);
 			atomic_set(&cmd->t_transport_aborted, 1);
-			spin_unlock_irqrestore(&cmd->t_state_lock,
-				flags);
-			core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
+			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
-			spin_lock_irqsave(&dev->execute_task_lock, flags);
+			core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
 			continue;
 		}
 		pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
 			" t_fe_count: %d dev: %p\n", task, fe_count, dev);
 		atomic_set(&cmd->t_transport_aborted, 1);
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
 
-		spin_lock_irqsave(&dev->execute_task_lock, flags);
+		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
 	}
-	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+}
+
+static void core_tmr_drain_cmd_list(
+	struct se_device *dev,
+	struct se_cmd *prout_cmd,
+	struct se_node_acl *tmr_nacl,
+	int tas,
+	struct list_head *preempt_and_abort_list)
+{
+	LIST_HEAD(drain_cmd_list);
+	struct se_queue_obj *qobj = &dev->dev_queue_obj;
+	struct se_cmd *cmd, *tcmd;
+	unsigned long flags;
 	/*
 	 * Release all commands remaining in the struct se_device cmd queue.
 	 *
@@ -337,10 +340,15 @@ int core_tmr_lun_reset(
 		if (prout_cmd == cmd)
 			continue;
 
-		atomic_dec(&cmd->t_transport_queue_active);
+		atomic_set(&cmd->t_transport_queue_active, 0);
 		atomic_dec(&qobj->queue_cnt);
+		list_move_tail(&cmd->se_queue_node, &drain_cmd_list);
+	}
+	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+
+	while (!list_empty(&drain_cmd_list)) {
+		cmd = list_entry(drain_cmd_list.next, struct se_cmd, se_queue_node);
 		list_del_init(&cmd->se_queue_node);
-		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 
 		pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
 			" %d t_fe_count: %d\n", (preempt_and_abort_list) ?
@@ -353,9 +361,53 @@ int core_tmr_lun_reset(
 
 		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
 				atomic_read(&cmd->t_fe_count));
-		spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
 	}
-	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+}
+
+int core_tmr_lun_reset(
+	struct se_device *dev,
+	struct se_tmr_req *tmr,
+	struct list_head *preempt_and_abort_list,
+	struct se_cmd *prout_cmd)
+{
+	struct se_node_acl *tmr_nacl = NULL;
+	struct se_portal_group *tmr_tpg = NULL;
+	int tas;
+	/*
+	 * TASK_ABORTED status bit, this is configurable via ConfigFS
+	 * struct se_device attributes. spc4r17 section 7.4.6 Control mode page
+	 *
+	 * A task aborted status (TAS) bit set to zero specifies that aborted
+	 * tasks shall be terminated by the device server without any response
+	 * to the application client. A TAS bit set to one specifies that tasks
+	 * aborted by the actions of an I_T nexus other than the I_T nexus on
+	 * which the command was received shall be completed with TASK ABORTED
+	 * status (see SAM-4).
+	 */
+	tas = dev->se_sub_dev->se_dev_attrib.emulate_tas;
+	/*
+	 * Determine if this se_tmr is coming from a $FABRIC_MOD
+	 * or struct se_device passthrough..
+	 */
+	if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
+		tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
+		tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
+		if (tmr_nacl && tmr_tpg) {
+			pr_debug("LUN_RESET: TMR caller fabric: %s"
+				" initiator port %s\n",
+				tmr_tpg->se_tpg_tfo->get_fabric_name(),
+				tmr_nacl->initiatorname);
+		}
+	}
+	pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
+		(preempt_and_abort_list) ? "Preempt" : "TMR",
+		dev->transport->name, tas);
+
+	core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
+	core_tmr_drain_task_list(dev, prout_cmd, tmr_nacl, tas,
+				preempt_and_abort_list);
+	core_tmr_drain_cmd_list(dev, prout_cmd, tmr_nacl, tas,
+				preempt_and_abort_list);
 	/*
 	 * Clear any legacy SPC-2 reservation when called during
 	 * LOGICAL UNIT RESET
@@ -378,3 +430,4 @@ int core_tmr_lun_reset(
 		dev->transport->name);
 	return 0;
 }
+