author     Peter Zijlstra <a.p.zijlstra@chello.nl>          2008-02-01 18:23:08 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2008-02-01 22:27:45 -0500
commit     ed50d6cbc394cd0966469d3e249353c9dd1d38b9
tree       8df8f5d8b4ce057bf0df7ee2996603fdd18e0e7e   /kernel/softlockup.c
parent     aa6299926950c8dfe2fea638276cad6def092bc9
debug: softlockup looping fix
Rafael J. Wysocki reported weird, multi-second delays during
suspend/resume and bisected them back to:
commit 82a1fcb90287052aabfa235e7ffc693ea003fe69
Author: Ingo Molnar <mingo@elte.hu>
Date: Fri Jan 25 21:08:02 2008 +0100
softlockup: automatically detect hung TASK_UNINTERRUPTIBLE tasks
Fix it:
- restore the old wakeup mechanism (a user-space sketch of the pattern
  follows the diff)
- fix break usage in do_each_thread() { } while_each_thread() (illustrated
  below)
- fix the hotplug switch statement; a fall-through case was broken
Bisected-by: Rafael J. Wysocki <rjw@sisk.pl>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Tested-by: Rafael J. Wysocki <rjw@sisk.pl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Rafael J. Wysocki <rjw@sisk.pl>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
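
[Editor's note] The second fix deserves spelling out: do_each_thread() { }
while_each_thread() expands to a for loop wrapping a do-while, i.e. two
nested loops, so a bare break only leaves the inner one. Worse, once
max_count has passed zero, !--max_count stays false, so the bailout never
fires again. A minimal user-space sketch reproduces the looping; the macros
below are simplified stand-ins, not the kernel's real definitions:

#include <stdio.h>

/*
 * Illustrative stand-ins for the 2.6.24-era do_each_thread() /
 * while_each_thread() macros: like the real ones, the pair expands
 * to a for loop wrapping a do-while -- two nested loops.
 */
#define do_each_thread(g, t) \
        for ((g) = 0; (g) < 4; (g)++, (t) = 0) do
#define while_each_thread(g, t) \
        while (++(t) < 4)

int main(void)
{
        int g, t = 0, visited = 0;
        int max_count = 3;      /* meant to visit at most 3 "threads" */

        do_each_thread(g, t) {
                visited++;
                if (!--max_count)
                        break;  /* BUG: exits only the inner do-while */
        } while_each_thread(g, t);

        /*
         * The outer for loop kept running: visited ends up at 15, and
         * max_count went negative, so !--max_count stayed false from
         * then on. In the kernel the counter would have to wrap all the
         * way around before the test fired again -- hence the stalls,
         * and hence the patch's goto unlock past both loops.
         */
        printf("visited=%d max_count=%d\n", visited, max_count);
        return 0;
}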
Diffstat (limited to 'kernel/softlockup.c')
-rw-r--r--   kernel/softlockup.c | 30 ++++++++++++++++++++----------
1 file changed, 20 insertions(+), 10 deletions(-)
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index c1d76552446e..7c2da88db4ed 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -101,6 +101,10 @@ void softlockup_tick(void)
 
         now = get_timestamp(this_cpu);
 
+        /* Wake up the high-prio watchdog task every second: */
+        if (now > (touch_timestamp + 1))
+                wake_up_process(per_cpu(watchdog_task, this_cpu));
+
         /* Warn about unreasonable delays: */
         if (now <= (touch_timestamp + softlockup_thresh))
                 return;
@@ -191,11 +195,11 @@ static void check_hung_uninterruptible_tasks(int this_cpu)
         read_lock(&tasklist_lock);
         do_each_thread(g, t) {
                 if (!--max_count)
-                        break;
+                        goto unlock;
                 if (t->state & TASK_UNINTERRUPTIBLE)
                         check_hung_task(t, now);
         } while_each_thread(g, t);
-
+ unlock:
         read_unlock(&tasklist_lock);
 }
 
@@ -218,14 +222,19 @@ static int watchdog(void *__bind_cpu)
          * debug-printout triggers in softlockup_tick().
          */
         while (!kthread_should_stop()) {
+                set_current_state(TASK_INTERRUPTIBLE);
                 touch_softlockup_watchdog();
-                msleep_interruptible(10000);
+                schedule();
+
+                if (kthread_should_stop())
+                        break;
 
                 if (this_cpu != check_cpu)
                         continue;
 
                 if (sysctl_hung_task_timeout_secs)
                         check_hung_uninterruptible_tasks(this_cpu);
+
         }
 
         return 0;
@@ -259,13 +268,6 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
                 wake_up_process(per_cpu(watchdog_task, hotcpu));
                 break;
 #ifdef CONFIG_HOTPLUG_CPU
-        case CPU_UP_CANCELED:
-        case CPU_UP_CANCELED_FROZEN:
-                if (!per_cpu(watchdog_task, hotcpu))
-                        break;
-                /* Unbind so it can run.  Fall thru. */
-                kthread_bind(per_cpu(watchdog_task, hotcpu),
-                             any_online_cpu(cpu_online_map));
         case CPU_DOWN_PREPARE:
         case CPU_DOWN_PREPARE_FROZEN:
                 if (hotcpu == check_cpu) {
@@ -275,6 +277,14 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
                         check_cpu = any_online_cpu(temp_cpu_online_map);
                 }
                 break;
+
+        case CPU_UP_CANCELED:
+        case CPU_UP_CANCELED_FROZEN:
+                if (!per_cpu(watchdog_task, hotcpu))
+                        break;
+                /* Unbind so it can run.  Fall thru. */
+                kthread_bind(per_cpu(watchdog_task, hotcpu),
+                             any_online_cpu(cpu_online_map));
         case CPU_DEAD:
         case CPU_DEAD_FROZEN:
                 p = per_cpu(watchdog_task, hotcpu);
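
[Editor's note] The first fix replaces a 10-second poll (msleep_interruptible(10000))
with the old event-driven scheme: softlockup_tick() calls wake_up_process()
once a second, and the watchdog kthread parks itself via
set_current_state(TASK_INTERRUPTIBLE) + schedule(). A rough user-space
analogue of that shape, sketched with pthreads; names and structure here are
illustrative, not kernel API:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  poke = PTHREAD_COND_INITIALIZER;
static bool pending, should_stop;

/* Counterpart of wake_up_process(): called from the "tick" side. */
static void wake_watchdog(void)
{
        pthread_mutex_lock(&lock);
        pending = true;
        pthread_cond_signal(&poke);
        pthread_mutex_unlock(&lock);
}

static void *watchdog(void *unused)
{
        for (;;) {
                pthread_mutex_lock(&lock);
                /* "set_current_state + schedule": sleep until poked. */
                while (!pending && !should_stop)
                        pthread_cond_wait(&poke, &lock);
                pending = false;
                pthread_mutex_unlock(&lock);

                if (should_stop)        /* kthread_should_stop() analogue */
                        break;

                puts("watchdog: checking for hung tasks");
        }
        return NULL;
}

int main(void)
{
        pthread_t tid;

        pthread_create(&tid, NULL, watchdog, NULL);
        for (int i = 0; i < 3; i++) {   /* pretend timer ticks */
                sleep(1);
                wake_watchdog();
        }
        pthread_mutex_lock(&lock);
        should_stop = true;
        pthread_cond_signal(&poke);
        pthread_mutex_unlock(&lock);
        pthread_join(tid, NULL);
        return 0;
}

The third fix is pure ordering: the CPU_UP_CANCELED cases deliberately fall
through into the CPU_DEAD teardown after unbinding the kthread, so the patch
moves that block directly above CPU_DEAD; in its old position it fell through
into CPU_DOWN_PREPARE instead.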