author	Christoph Hellwig <hch@infradead.org>	2011-10-12 11:07:03 -0400
committer	Nicholas Bellinger <nab@linux-iscsi.org>	2011-10-23 23:21:06 -0400
commit	6c76bf951cb099f5573954b1f56c1121c3a41c72 (patch)
tree	e9b739b4fb28ae7e2be2fdf400dc4772d63360b8
parent	42bf829eee0e36371a3df43978b14572c716cbe7 (diff)
target: make more use of the task_flags field in se_task
Replace various atomic_t variables that were mostly under t_state_lock
with new flags in task_flags.  Note that the execution error path didn't
take t_state_lock before, so add it there.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
-rw-r--r--	drivers/target/target_core_tmr.c	 9
-rw-r--r--	drivers/target/target_core_transport.c	64
-rw-r--r--	include/target/target_core_base.h	14
3 files changed, 43 insertions(+), 44 deletions(-)
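The change follows a common kernel conversion: several single-bit atomic_t booleans on struct se_task are folded into one task_flags word, and each read-modify-write of that word is done under the spinlock (cmd->t_state_lock) that already serializes the surrounding state. Below is a minimal sketch of that pattern, using hypothetical example_task/EX_TF_* names rather than the real se_task fields:

#include <linux/spinlock.h>

/* Hypothetical flag bits; the driver itself defines TF_ACTIVE, TF_SENT, etc. */
enum example_task_flags {
	EX_TF_ACTIVE		= (1 << 0),
	EX_TF_REQUEST_STOP	= (1 << 1),
};

struct example_task {
	unsigned long	flags;	/* replaces several one-bit atomic_t fields */
	spinlock_t	lock;	/* stands in for se_cmd->t_state_lock */
};

/* Roughly what atomic_set(&task->task_stop, 1) becomes: set a bit while
 * holding the lock that already protects the rest of the task state. */
static void example_request_stop(struct example_task *t)
{
	unsigned long irqflags;

	spin_lock_irqsave(&t->lock, irqflags);
	if (t->flags & EX_TF_ACTIVE)
		t->flags |= EX_TF_REQUEST_STOP;
	spin_unlock_irqrestore(&t->lock, irqflags);
}

/* Two atomic_set(..., 0) calls collapse into one masked clear under the lock. */
static void example_task_done(struct example_task *t)
{
	unsigned long irqflags;

	spin_lock_irqsave(&t->lock, irqflags);
	t->flags &= ~(EX_TF_ACTIVE | EX_TF_REQUEST_STOP);
	spin_unlock_irqrestore(&t->lock, irqflags);
}

Folding the bits into one word also lets callers test several conditions in a single read, as the patch does with !(task->task_flags & (TF_ACTIVE | TF_SENT)).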
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index b8dc10fd4ef..b5c18648fa2 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -259,8 +259,8 @@ static void core_tmr_drain_task_list(
 			atomic_read(&cmd->t_transport_stop),
 			atomic_read(&cmd->t_transport_sent));
 
-		if (atomic_read(&task->task_active)) {
-			atomic_set(&task->task_stop, 1);
+		if (task->task_flags & TF_ACTIVE) {
+			task->task_flags |= TF_REQUEST_STOP;
 			spin_unlock_irqrestore(
 				&cmd->t_state_lock, flags);
 
@@ -269,11 +269,10 @@ static void core_tmr_drain_task_list(
 			wait_for_completion(&task->task_stop_comp);
 			pr_debug("LUN_RESET Completed task: %p shutdown for"
 				" dev: %p\n", task, dev);
+
 			spin_lock_irqsave(&cmd->t_state_lock, flags);
 			atomic_dec(&cmd->t_task_cdbs_left);
-
-			atomic_set(&task->task_active, 0);
-			atomic_set(&task->task_stop, 0);
+			task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP);
 		}
 		__transport_stop_task_timer(task, &flags);
 
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index c935c72da7b..165a60c875a 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -440,7 +440,7 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
 		return;
 
 	list_for_each_entry(task, &cmd->t_task_list, t_list) {
-		if (atomic_read(&task->task_active))
+		if (task->task_flags & TF_ACTIVE)
 			continue;
 
 		if (!atomic_read(&task->task_state_active))
@@ -718,7 +718,7 @@ void transport_complete_task(struct se_task *task, int success)
 	atomic_inc(&dev->depth_left);
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	atomic_set(&task->task_active, 0);
+	task->task_flags &= ~TF_ACTIVE;
 
 	/*
 	 * See if any sense data exists, if so set the TASK_SENSE flag.
@@ -737,14 +737,14 @@ void transport_complete_task(struct se_task *task, int success)
 	 * See if we are waiting for outstanding struct se_task
 	 * to complete for an exception condition
 	 */
-	if (atomic_read(&task->task_stop)) {
+	if (task->task_flags & TF_REQUEST_STOP) {
 		/*
 		 * Decrement cmd->t_se_count if this task had
 		 * previously thrown its timeout exception handler.
 		 */
-		if (atomic_read(&task->task_timeout)) {
+		if (task->task_flags & TF_TIMEOUT) {
 			atomic_dec(&cmd->t_se_count);
-			atomic_set(&task->task_timeout, 0);
+			task->task_flags &= ~TF_TIMEOUT;
 		}
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
@@ -756,7 +756,7 @@ void transport_complete_task(struct se_task *task, int success)
 	 * left counter to determine when the struct se_cmd is ready to be queued to
 	 * the processing thread.
 	 */
-	if (atomic_read(&task->task_timeout)) {
+	if (task->task_flags & TF_TIMEOUT) {
 		if (!atomic_dec_and_test(
 				&cmd->t_task_cdbs_timeout_left)) {
 			spin_unlock_irqrestore(&cmd->t_state_lock,
@@ -1793,8 +1793,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
 		 * If the struct se_task has not been sent and is not active,
 		 * remove the struct se_task from the execution queue.
 		 */
-		if (!atomic_read(&task->task_sent) &&
-		    !atomic_read(&task->task_active)) {
+		if (!(task->task_flags & (TF_ACTIVE | TF_SENT))) {
 			spin_unlock_irqrestore(&cmd->t_state_lock,
 					flags);
 			transport_remove_task_from_execute_queue(task,
@@ -1810,8 +1809,8 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
 		 * If the struct se_task is active, sleep until it is returned
 		 * from the plugin.
 		 */
-		if (atomic_read(&task->task_active)) {
-			atomic_set(&task->task_stop, 1);
+		if (task->task_flags & TF_ACTIVE) {
+			task->task_flags |= TF_REQUEST_STOP;
 			spin_unlock_irqrestore(&cmd->t_state_lock,
 					flags);
 
@@ -1823,9 +1822,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
 
 			spin_lock_irqsave(&cmd->t_state_lock, flags);
 			atomic_dec(&cmd->t_task_cdbs_left);
-
-			atomic_set(&task->task_active, 0);
-			atomic_set(&task->task_stop, 0);
+			task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP);
 		} else {
 			pr_debug("task_no[%d] - Did nothing\n", task->task_no);
 			ret++;
@@ -2074,18 +2071,18 @@ static void transport_task_timeout_handler(unsigned long data)
 	pr_debug("transport task timeout fired! task: %p cmd: %p\n", task, cmd);
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	if (task->task_flags & TF_STOP) {
+	if (task->task_flags & TF_TIMER_STOP) {
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		return;
 	}
-	task->task_flags &= ~TF_RUNNING;
+	task->task_flags &= ~TF_TIMER_RUNNING;
 
 	/*
 	 * Determine if transport_complete_task() has already been called.
 	 */
-	if (!atomic_read(&task->task_active)) {
-		pr_debug("transport task: %p cmd: %p timeout task_active"
-			" == 0\n", task, cmd);
+	if (!(task->task_flags & TF_ACTIVE)) {
+		pr_debug("transport task: %p cmd: %p timeout !TF_ACTIVE\n",
+			task, cmd);
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		return;
 	}
@@ -2094,12 +2091,12 @@ static void transport_task_timeout_handler(unsigned long data)
 	atomic_inc(&cmd->t_transport_timeout);
 	cmd->t_tasks_failed = 1;
 
-	atomic_set(&task->task_timeout, 1);
+	task->task_flags |= TF_TIMEOUT;
 	task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT;
 	task->task_scsi_status = 1;
 
-	if (atomic_read(&task->task_stop)) {
-		pr_debug("transport task: %p cmd: %p timeout task_stop"
+	if (task->task_flags & TF_REQUEST_STOP) {
+		pr_debug("transport task: %p cmd: %p timeout TF_REQUEST_STOP"
 			" == 1\n", task, cmd);
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		complete(&task->task_stop_comp);
@@ -2129,7 +2126,7 @@ static void transport_start_task_timer(struct se_task *task)
 	struct se_device *dev = task->task_se_cmd->se_dev;
 	int timeout;
 
-	if (task->task_flags & TF_RUNNING)
+	if (task->task_flags & TF_TIMER_RUNNING)
 		return;
 	/*
 	 * If the task_timeout is disabled, exit now.
@@ -2143,7 +2140,7 @@ static void transport_start_task_timer(struct se_task *task)
 	task->task_timer.data = (unsigned long) task;
 	task->task_timer.function = transport_task_timeout_handler;
 
-	task->task_flags |= TF_RUNNING;
+	task->task_flags |= TF_TIMER_RUNNING;
 	add_timer(&task->task_timer);
 #if 0
 	pr_debug("Starting task timer for cmd: %p task: %p seconds:"
@@ -2158,17 +2155,17 @@ void __transport_stop_task_timer(struct se_task *task, unsigned long *flags)
 {
 	struct se_cmd *cmd = task->task_se_cmd;
 
-	if (!task->task_flags & TF_RUNNING)
+	if (!(task->task_flags & TF_TIMER_RUNNING))
 		return;
 
-	task->task_flags |= TF_STOP;
+	task->task_flags |= TF_TIMER_STOP;
 	spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
 
 	del_timer_sync(&task->task_timer);
 
 	spin_lock_irqsave(&cmd->t_state_lock, *flags);
-	task->task_flags &= ~TF_RUNNING;
-	task->task_flags &= ~TF_STOP;
+	task->task_flags &= ~TF_TIMER_RUNNING;
+	task->task_flags &= ~TF_TIMER_STOP;
 }
 
 static void transport_stop_all_task_timers(struct se_cmd *cmd)
@@ -2360,8 +2357,7 @@ check_depth:
 	cmd = task->task_se_cmd;
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	atomic_set(&task->task_active, 1);
-	atomic_set(&task->task_sent, 1);
+	task->task_flags |= (TF_ACTIVE | TF_SENT);
 	atomic_inc(&cmd->t_task_cdbs_sent);
 
 	if (atomic_read(&cmd->t_task_cdbs_sent) ==
@@ -2379,7 +2375,9 @@ check_depth:
 		error = cmd->transport_emulate_cdb(cmd);
 		if (error != 0) {
 			cmd->transport_error_status = error;
-			atomic_set(&task->task_active, 0);
+			spin_lock_irqsave(&cmd->t_state_lock, flags);
+			task->task_flags &= ~TF_ACTIVE;
+			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 			atomic_set(&cmd->transport_sent, 0);
 			transport_stop_tasks_for_cmd(cmd);
 			transport_generic_request_failure(cmd, dev, 0, 1);
@@ -2415,7 +2413,9 @@ check_depth:
 
 	if (error != 0) {
 		cmd->transport_error_status = error;
-		atomic_set(&task->task_active, 0);
+		spin_lock_irqsave(&cmd->t_state_lock, flags);
+		task->task_flags &= ~TF_ACTIVE;
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		atomic_set(&cmd->transport_sent, 0);
 		transport_stop_tasks_for_cmd(cmd);
 		transport_generic_request_failure(cmd, dev, 0, 1);
@@ -3613,7 +3613,7 @@ static void transport_free_dev_tasks(struct se_cmd *cmd)
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	list_for_each_entry_safe(task, task_tmp,
 				&cmd->t_task_list, t_list) {
-		if (atomic_read(&task->task_active))
+		if (task->task_flags & TF_ACTIVE)
 			continue;
 
 		kfree(task->task_sg_bidi);
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 6c49db40320..5e3dd1418ba 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -72,9 +72,13 @@ enum transport_tpg_type_table {
 };
 
 /* Used for generate timer flags */
-enum timer_flags_table {
-	TF_RUNNING = 0x01,
-	TF_STOP = 0x02,
+enum se_task_flags {
+	TF_ACTIVE		= (1 << 0),
+	TF_SENT			= (1 << 1),
+	TF_TIMEOUT		= (1 << 2),
+	TF_REQUEST_STOP		= (1 << 3),
+	TF_TIMER_RUNNING	= (1 << 4),
+	TF_TIMER_STOP		= (1 << 5),
 };
 
 /* Special transport agnostic struct se_cmd->t_states */
@@ -413,11 +417,7 @@ struct se_task {
 	enum dma_data_direction	task_data_direction;
 	struct se_cmd		*task_se_cmd;
 	struct completion	task_stop_comp;
-	atomic_t		task_active;
 	atomic_t		task_execute_queue;
-	atomic_t		task_timeout;
-	atomic_t		task_sent;
-	atomic_t		task_stop;
 	atomic_t		task_state_active;
 	struct timer_list	task_timer;
 	struct list_head	t_list;