diff options
author | Ingo Molnar <mingo@elte.hu> | 2006-03-24 06:18:41 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-03-24 10:33:30 -0500 |
commit | 6687a97d4041f996f725902d2990e5de6ef5cbe5 (patch) | |
tree | 6ab982091cde7179d94cf592f9c669fd22d93a23 /kernel/softlockup.c | |
parent | 6a4d11c2abc57ed7ca42041e5f68ae4f7f640a81 (diff) |
[PATCH] timer-irq-driven soft-watchdog, cleanups
Make the softlockup detector purely timer-interrupt driven, removing
softirq-context (timer) dependencies. This means that if the softlockup
watchdog triggers, it has truly observed a scheduling delay of more than
10 seconds for a SCHED_FIFO prio 99 task.
(the patch also turns off the softlockup detector during the initial bootup
phase and does small style fixes)
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel/softlockup.c')
-rw-r--r-- | kernel/softlockup.c | 54 |
1 file changed, 30 insertions(+), 24 deletions(-)
diff --git a/kernel/softlockup.c b/kernel/softlockup.c index c67189a25d52..dd9524fa649a 100644 --- a/kernel/softlockup.c +++ b/kernel/softlockup.c | |||
@@ -1,12 +1,11 @@ | |||
1 | /* | 1 | /* |
2 | * Detect Soft Lockups | 2 | * Detect Soft Lockups |
3 | * | 3 | * |
4 | * started by Ingo Molnar, (C) 2005, Red Hat | 4 | * started by Ingo Molnar, Copyright (C) 2005, 2006 Red Hat, Inc. |
5 | * | 5 | * |
6 | * this code detects soft lockups: incidents in where on a CPU | 6 | * this code detects soft lockups: incidents in where on a CPU |
7 | * the kernel does not reschedule for 10 seconds or more. | 7 | * the kernel does not reschedule for 10 seconds or more. |
8 | */ | 8 | */ |
9 | |||
10 | #include <linux/mm.h> | 9 | #include <linux/mm.h> |
11 | #include <linux/cpu.h> | 10 | #include <linux/cpu.h> |
12 | #include <linux/init.h> | 11 | #include <linux/init.h> |
@@ -17,13 +16,14 @@ | |||
17 | 16 | ||
18 | static DEFINE_SPINLOCK(print_lock); | 17 | static DEFINE_SPINLOCK(print_lock); |
19 | 18 | ||
20 | static DEFINE_PER_CPU(unsigned long, timestamp) = 0; | 19 | static DEFINE_PER_CPU(unsigned long, touch_timestamp); |
21 | static DEFINE_PER_CPU(unsigned long, print_timestamp) = 0; | 20 | static DEFINE_PER_CPU(unsigned long, print_timestamp); |
22 | static DEFINE_PER_CPU(struct task_struct *, watchdog_task); | 21 | static DEFINE_PER_CPU(struct task_struct *, watchdog_task); |
23 | 22 | ||
24 | static int did_panic = 0; | 23 | static int did_panic = 0; |
25 | static int softlock_panic(struct notifier_block *this, unsigned long event, | 24 | |
26 | void *ptr) | 25 | static int |
26 | softlock_panic(struct notifier_block *this, unsigned long event, void *ptr) | ||
27 | { | 27 | { |
28 | did_panic = 1; | 28 | did_panic = 1; |
29 | 29 | ||
@@ -36,7 +36,7 @@ static struct notifier_block panic_block = { | |||
36 | 36 | ||
37 | void touch_softlockup_watchdog(void) | 37 | void touch_softlockup_watchdog(void) |
38 | { | 38 | { |
39 | per_cpu(timestamp, raw_smp_processor_id()) = jiffies; | 39 | per_cpu(touch_timestamp, raw_smp_processor_id()) = jiffies; |
40 | } | 40 | } |
41 | EXPORT_SYMBOL(touch_softlockup_watchdog); | 41 | EXPORT_SYMBOL(touch_softlockup_watchdog); |
42 | 42 | ||
@@ -44,25 +44,35 @@ EXPORT_SYMBOL(touch_softlockup_watchdog); | |||
44 | * This callback runs from the timer interrupt, and checks | 44 | * This callback runs from the timer interrupt, and checks |
45 | * whether the watchdog thread has hung or not: | 45 | * whether the watchdog thread has hung or not: |
46 | */ | 46 | */ |
47 | void softlockup_tick(struct pt_regs *regs) | 47 | void softlockup_tick(void) |
48 | { | 48 | { |
49 | int this_cpu = smp_processor_id(); | 49 | int this_cpu = smp_processor_id(); |
50 | unsigned long timestamp = per_cpu(timestamp, this_cpu); | 50 | unsigned long touch_timestamp = per_cpu(touch_timestamp, this_cpu); |
51 | 51 | ||
52 | if (per_cpu(print_timestamp, this_cpu) == timestamp) | 52 | /* prevent double reports: */ |
53 | if (per_cpu(print_timestamp, this_cpu) == touch_timestamp || | ||
54 | did_panic || | ||
55 | !per_cpu(watchdog_task, this_cpu)) | ||
53 | return; | 56 | return; |
54 | 57 | ||
55 | /* Do not cause a second panic when there already was one */ | 58 | /* do not print during early bootup: */ |
56 | if (did_panic) | 59 | if (unlikely(system_state != SYSTEM_RUNNING)) { |
60 | touch_softlockup_watchdog(); | ||
57 | return; | 61 | return; |
62 | } | ||
58 | 63 | ||
59 | if (time_after(jiffies, timestamp + 10*HZ)) { | 64 | /* Wake up the high-prio watchdog task every second: */ |
60 | per_cpu(print_timestamp, this_cpu) = timestamp; | 65 | if (time_after(jiffies, touch_timestamp + HZ)) |
66 | wake_up_process(per_cpu(watchdog_task, this_cpu)); | ||
67 | |||
68 | /* Warn about unreasonable 10+ seconds delays: */ | ||
69 | if (time_after(jiffies, touch_timestamp + 10*HZ)) { | ||
70 | per_cpu(print_timestamp, this_cpu) = touch_timestamp; | ||
61 | 71 | ||
62 | spin_lock(&print_lock); | 72 | spin_lock(&print_lock); |
63 | printk(KERN_ERR "BUG: soft lockup detected on CPU#%d!\n", | 73 | printk(KERN_ERR "BUG: soft lockup detected on CPU#%d!\n", |
64 | this_cpu); | 74 | this_cpu); |
65 | show_regs(regs); | 75 | dump_stack(); |
66 | spin_unlock(&print_lock); | 76 | spin_unlock(&print_lock); |
67 | } | 77 | } |
68 | } | 78 | } |
@@ -77,18 +87,16 @@ static int watchdog(void * __bind_cpu) | |||
77 | sched_setscheduler(current, SCHED_FIFO, ¶m); | 87 | sched_setscheduler(current, SCHED_FIFO, ¶m); |
78 | current->flags |= PF_NOFREEZE; | 88 | current->flags |= PF_NOFREEZE; |
79 | 89 | ||
80 | set_current_state(TASK_INTERRUPTIBLE); | ||
81 | |||
82 | /* | 90 | /* |
83 | * Run briefly once per second - if this gets delayed for | 91 | * Run briefly once per second to reset the softlockup timestamp. |
84 | * more than 10 seconds then the debug-printout triggers | 92 | * If this gets delayed for more than 10 seconds then the |
85 | * in softlockup_tick(): | 93 | * debug-printout triggers in softlockup_tick(). |
86 | */ | 94 | */ |
87 | while (!kthread_should_stop()) { | 95 | while (!kthread_should_stop()) { |
88 | msleep_interruptible(1000); | 96 | set_current_state(TASK_INTERRUPTIBLE); |
89 | touch_softlockup_watchdog(); | 97 | touch_softlockup_watchdog(); |
98 | schedule(); | ||
90 | } | 99 | } |
91 | __set_current_state(TASK_RUNNING); | ||
92 | 100 | ||
93 | return 0; | 101 | return 0; |
94 | } | 102 | } |
@@ -114,7 +122,6 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
114 | kthread_bind(p, hotcpu); | 122 | kthread_bind(p, hotcpu); |
115 | break; | 123 | break; |
116 | case CPU_ONLINE: | 124 | case CPU_ONLINE: |
117 | |||
118 | wake_up_process(per_cpu(watchdog_task, hotcpu)); | 125 | wake_up_process(per_cpu(watchdog_task, hotcpu)); |
119 | break; | 126 | break; |
120 | #ifdef CONFIG_HOTPLUG_CPU | 127 | #ifdef CONFIG_HOTPLUG_CPU |
@@ -146,4 +153,3 @@ __init void spawn_softlockup_task(void) | |||
146 | 153 | ||
147 | notifier_chain_register(&panic_notifier_list, &panic_block); | 154 | notifier_chain_register(&panic_notifier_list, &panic_block); |
148 | } | 155 | } |
149 | |||