diff options
author | Christoph Hellwig <hch@infradead.org> | 2011-10-17 13:56:48 -0400 |
---|---|---|
committer | Nicholas Bellinger <nab@linux-iscsi.org> | 2011-10-23 23:21:44 -0400 |
commit | cc5d0f0f61645ca43d9a7320ec2f268bad5016c5 (patch) | |
tree | 4e6857339fed505ed1b373941126ff1e1e37a0ba /drivers/target | |
parent | e99d48a62bfc6e64548e0d5085240c5024eca471 (diff) |
target: stop task timers earlier
Currently we stop the timers for all tasks in a command fairly late during
I/O completion, which is fairly pointless and requires all kinds of safety
checks.
Instead delete pending timers early on in transport_complete_task, thus
ensuring no new timers fire after that. We take t_state_lock a bit later
in that function thus making sure currently running timers are out of the
critical section. To be completely sure the timer has finished we also
add another del_timer_sync call when freeing the task.
This also allows removing TF_TIMER_RUNNING as it would be equivalent
to TF_ACTIVE now.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Diffstat (limited to 'drivers/target')
-rw-r--r-- | drivers/target/target_core_transport.c | 64 |
1 file changed, 14 insertions, 50 deletions
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index a36f4451e5db..19ea1c38d70a 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
@@ -84,7 +84,6 @@ static int transport_generic_get_mem(struct se_cmd *cmd); | |||
84 | static void transport_put_cmd(struct se_cmd *cmd); | 84 | static void transport_put_cmd(struct se_cmd *cmd); |
85 | static void transport_remove_cmd_from_queue(struct se_cmd *cmd); | 85 | static void transport_remove_cmd_from_queue(struct se_cmd *cmd); |
86 | static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); | 86 | static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); |
87 | static void transport_stop_all_task_timers(struct se_cmd *cmd); | ||
88 | 87 | ||
89 | int init_se_kmem_caches(void) | 88 | int init_se_kmem_caches(void) |
90 | { | 89 | { |
@@ -712,6 +711,8 @@ void transport_complete_task(struct se_task *task, int success) | |||
712 | if (dev) | 711 | if (dev) |
713 | atomic_inc(&dev->depth_left); | 712 | atomic_inc(&dev->depth_left); |
714 | 713 | ||
714 | del_timer(&task->task_timer); | ||
715 | |||
715 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 716 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
716 | task->task_flags &= ~TF_ACTIVE; | 717 | task->task_flags &= ~TF_ACTIVE; |
717 | 718 | ||
@@ -1507,6 +1508,7 @@ transport_generic_get_task(struct se_cmd *cmd, | |||
1507 | INIT_LIST_HEAD(&task->t_list); | 1508 | INIT_LIST_HEAD(&task->t_list); |
1508 | INIT_LIST_HEAD(&task->t_execute_list); | 1509 | INIT_LIST_HEAD(&task->t_execute_list); |
1509 | INIT_LIST_HEAD(&task->t_state_list); | 1510 | INIT_LIST_HEAD(&task->t_state_list); |
1511 | init_timer(&task->task_timer); | ||
1510 | init_completion(&task->task_stop_comp); | 1512 | init_completion(&task->task_stop_comp); |
1511 | task->task_se_cmd = cmd; | 1513 | task->task_se_cmd = cmd; |
1512 | task->task_data_direction = data_direction; | 1514 | task->task_data_direction = data_direction; |
@@ -1776,6 +1778,7 @@ bool target_stop_task(struct se_task *task, unsigned long *flags) | |||
1776 | spin_unlock_irqrestore(&cmd->t_state_lock, *flags); | 1778 | spin_unlock_irqrestore(&cmd->t_state_lock, *flags); |
1777 | 1779 | ||
1778 | pr_debug("Task %p waiting to complete\n", task); | 1780 | pr_debug("Task %p waiting to complete\n", task); |
1781 | del_timer_sync(&task->task_timer); | ||
1779 | wait_for_completion(&task->task_stop_comp); | 1782 | wait_for_completion(&task->task_stop_comp); |
1780 | pr_debug("Task %p stopped successfully\n", task); | 1783 | pr_debug("Task %p stopped successfully\n", task); |
1781 | 1784 | ||
@@ -1785,7 +1788,6 @@ bool target_stop_task(struct se_task *task, unsigned long *flags) | |||
1785 | was_active = true; | 1788 | was_active = true; |
1786 | } | 1789 | } |
1787 | 1790 | ||
1788 | __transport_stop_task_timer(task, flags); | ||
1789 | return was_active; | 1791 | return was_active; |
1790 | } | 1792 | } |
1791 | 1793 | ||
@@ -1860,8 +1862,6 @@ static void transport_generic_request_failure( | |||
1860 | atomic_read(&cmd->t_transport_stop), | 1862 | atomic_read(&cmd->t_transport_stop), |
1861 | atomic_read(&cmd->t_transport_sent)); | 1863 | atomic_read(&cmd->t_transport_sent)); |
1862 | 1864 | ||
1863 | transport_stop_all_task_timers(cmd); | ||
1864 | |||
1865 | if (dev) | 1865 | if (dev) |
1866 | atomic_inc(&dev->depth_left); | 1866 | atomic_inc(&dev->depth_left); |
1867 | /* | 1867 | /* |
@@ -2066,7 +2066,6 @@ static void transport_task_timeout_handler(unsigned long data) | |||
2066 | pr_debug("transport task timeout fired! task: %p cmd: %p\n", task, cmd); | 2066 | pr_debug("transport task timeout fired! task: %p cmd: %p\n", task, cmd); |
2067 | 2067 | ||
2068 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 2068 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
2069 | task->task_flags &= ~TF_TIMER_RUNNING; | ||
2070 | 2069 | ||
2071 | /* | 2070 | /* |
2072 | * Determine if transport_complete_task() has already been called. | 2071 | * Determine if transport_complete_task() has already been called. |
@@ -2109,16 +2108,11 @@ static void transport_task_timeout_handler(unsigned long data) | |||
2109 | transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE, false); | 2108 | transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE, false); |
2110 | } | 2109 | } |
2111 | 2110 | ||
2112 | /* | ||
2113 | * Called with cmd->t_state_lock held. | ||
2114 | */ | ||
2115 | static void transport_start_task_timer(struct se_task *task) | 2111 | static void transport_start_task_timer(struct se_task *task) |
2116 | { | 2112 | { |
2117 | struct se_device *dev = task->task_se_cmd->se_dev; | 2113 | struct se_device *dev = task->task_se_cmd->se_dev; |
2118 | int timeout; | 2114 | int timeout; |
2119 | 2115 | ||
2120 | if (task->task_flags & TF_TIMER_RUNNING) | ||
2121 | return; | ||
2122 | /* | 2116 | /* |
2123 | * If the task_timeout is disabled, exit now. | 2117 | * If the task_timeout is disabled, exit now. |
2124 | */ | 2118 | */ |
@@ -2126,47 +2120,10 @@ static void transport_start_task_timer(struct se_task *task) | |||
2126 | if (!timeout) | 2120 | if (!timeout) |
2127 | return; | 2121 | return; |
2128 | 2122 | ||
2129 | init_timer(&task->task_timer); | ||
2130 | task->task_timer.expires = (get_jiffies_64() + timeout * HZ); | 2123 | task->task_timer.expires = (get_jiffies_64() + timeout * HZ); |
2131 | task->task_timer.data = (unsigned long) task; | 2124 | task->task_timer.data = (unsigned long) task; |
2132 | task->task_timer.function = transport_task_timeout_handler; | 2125 | task->task_timer.function = transport_task_timeout_handler; |
2133 | |||
2134 | task->task_flags |= TF_TIMER_RUNNING; | ||
2135 | add_timer(&task->task_timer); | 2126 | add_timer(&task->task_timer); |
2136 | #if 0 | ||
2137 | pr_debug("Starting task timer for cmd: %p task: %p seconds:" | ||
2138 | " %d\n", task->task_se_cmd, task, timeout); | ||
2139 | #endif | ||
2140 | } | ||
2141 | |||
2142 | /* | ||
2143 | * Called with spin_lock_irq(&cmd->t_state_lock) held. | ||
2144 | */ | ||
2145 | void __transport_stop_task_timer(struct se_task *task, unsigned long *flags) | ||
2146 | { | ||
2147 | struct se_cmd *cmd = task->task_se_cmd; | ||
2148 | |||
2149 | if (!(task->task_flags & TF_TIMER_RUNNING)) | ||
2150 | return; | ||
2151 | |||
2152 | spin_unlock_irqrestore(&cmd->t_state_lock, *flags); | ||
2153 | |||
2154 | del_timer_sync(&task->task_timer); | ||
2155 | |||
2156 | spin_lock_irqsave(&cmd->t_state_lock, *flags); | ||
2157 | task->task_flags &= ~TF_TIMER_RUNNING; | ||
2158 | } | ||
2159 | |||
2160 | static void transport_stop_all_task_timers(struct se_cmd *cmd) | ||
2161 | { | ||
2162 | struct se_task *task = NULL, *task_tmp; | ||
2163 | unsigned long flags; | ||
2164 | |||
2165 | spin_lock_irqsave(&cmd->t_state_lock, flags); | ||
2166 | list_for_each_entry_safe(task, task_tmp, | ||
2167 | &cmd->t_task_list, t_list) | ||
2168 | __transport_stop_task_timer(task, &flags); | ||
2169 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
2170 | } | 2127 | } |
2171 | 2128 | ||
2172 | static inline int transport_tcq_window_closed(struct se_device *dev) | 2129 | static inline int transport_tcq_window_closed(struct se_device *dev) |
@@ -2365,6 +2322,7 @@ check_depth: | |||
2365 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 2322 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
2366 | task->task_flags &= ~TF_ACTIVE; | 2323 | task->task_flags &= ~TF_ACTIVE; |
2367 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 2324 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
2325 | del_timer_sync(&task->task_timer); | ||
2368 | atomic_set(&cmd->transport_sent, 0); | 2326 | atomic_set(&cmd->transport_sent, 0); |
2369 | transport_stop_tasks_for_cmd(cmd); | 2327 | transport_stop_tasks_for_cmd(cmd); |
2370 | transport_generic_request_failure(cmd, dev, 0, 1); | 2328 | transport_generic_request_failure(cmd, dev, 0, 1); |
@@ -2403,6 +2361,7 @@ check_depth: | |||
2403 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 2361 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
2404 | task->task_flags &= ~TF_ACTIVE; | 2362 | task->task_flags &= ~TF_ACTIVE; |
2405 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 2363 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
2364 | del_timer_sync(&task->task_timer); | ||
2406 | atomic_set(&cmd->transport_sent, 0); | 2365 | atomic_set(&cmd->transport_sent, 0); |
2407 | transport_stop_tasks_for_cmd(cmd); | 2366 | transport_stop_tasks_for_cmd(cmd); |
2408 | transport_generic_request_failure(cmd, dev, 0, 1); | 2367 | transport_generic_request_failure(cmd, dev, 0, 1); |
@@ -3431,7 +3390,6 @@ static void transport_complete_qf(struct se_cmd *cmd) | |||
3431 | { | 3390 | { |
3432 | int ret = 0; | 3391 | int ret = 0; |
3433 | 3392 | ||
3434 | transport_stop_all_task_timers(cmd); | ||
3435 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) | 3393 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) |
3436 | transport_complete_task_attr(cmd); | 3394 | transport_complete_task_attr(cmd); |
3437 | 3395 | ||
@@ -3601,6 +3559,14 @@ static void transport_free_dev_tasks(struct se_cmd *cmd) | |||
3601 | while (!list_empty(&dispose_list)) { | 3559 | while (!list_empty(&dispose_list)) { |
3602 | task = list_first_entry(&dispose_list, struct se_task, t_list); | 3560 | task = list_first_entry(&dispose_list, struct se_task, t_list); |
3603 | 3561 | ||
3562 | /* | ||
3563 | * We already cancelled all pending timers in | ||
3564 | * transport_complete_task, but that was just a pure del_timer, | ||
3565 | * so do a full del_timer_sync here to make sure any handler | ||
3566 | * that was running at that point has finished execution. | ||
3567 | */ | ||
3568 | del_timer_sync(&task->task_timer); | ||
3569 | |||
3604 | kfree(task->task_sg_bidi); | 3570 | kfree(task->task_sg_bidi); |
3605 | kfree(task->task_sg); | 3571 | kfree(task->task_sg); |
3606 | 3572 | ||
@@ -4820,7 +4786,6 @@ get_cmd: | |||
4820 | transport_generic_process_write(cmd); | 4786 | transport_generic_process_write(cmd); |
4821 | break; | 4787 | break; |
4822 | case TRANSPORT_COMPLETE_OK: | 4788 | case TRANSPORT_COMPLETE_OK: |
4823 | transport_stop_all_task_timers(cmd); | ||
4824 | transport_generic_complete_ok(cmd); | 4789 | transport_generic_complete_ok(cmd); |
4825 | break; | 4790 | break; |
4826 | case TRANSPORT_REMOVE: | 4791 | case TRANSPORT_REMOVE: |
@@ -4836,7 +4801,6 @@ get_cmd: | |||
4836 | transport_generic_request_failure(cmd, NULL, 1, 1); | 4801 | transport_generic_request_failure(cmd, NULL, 1, 1); |
4837 | break; | 4802 | break; |
4838 | case TRANSPORT_COMPLETE_TIMEOUT: | 4803 | case TRANSPORT_COMPLETE_TIMEOUT: |
4839 | transport_stop_all_task_timers(cmd); | ||
4840 | transport_generic_request_timeout(cmd); | 4804 | transport_generic_request_timeout(cmd); |
4841 | break; | 4805 | break; |
4842 | case TRANSPORT_COMPLETE_QF_WP: | 4806 | case TRANSPORT_COMPLETE_QF_WP: |