author		Ingo Molnar <mingo@elte.hu>	2008-10-15 07:46:29 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-10-15 07:46:29 -0400
commit		b2aaf8f74cdc84a9182f6cabf198b7763bcb9d40 (patch)
tree		53ccb1c2c14751fe69cf93102e76e97021f6df07 /kernel/softlockup.c
parent		4f962d4d65923d7b722192e729840cfb79af0a5a (diff)
parent		278429cff8809958d25415ba0ed32b59866ab1a8 (diff)
Merge branch 'linus' into stackprotector
Conflicts:
	arch/x86/kernel/Makefile
	include/asm-x86/pda.h
Diffstat (limited to 'kernel/softlockup.c')
-rw-r--r--	kernel/softlockup.c	74
1 file changed, 63 insertions(+), 11 deletions(-)
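In kernel/softlockup.c this merge picks up the upstream softlockup rework: softlockup_thresh becomes a signed int so that a value <= 0 switches detection off, a softlockup_panic= boot parameter (defaulting to CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE) optionally panics on a detected lockup, the watchdog task is woken twice per threshold timespan instead of every second, hung-task checking matches t->state == TASK_UNINTERRUPTIBLE so TASK_KILLABLE waiters (e.g. on NFS) are skipped, a nosoftlockup boot parameter suppresses the watchdog threads, and spawn_softlockup_task() becomes an early_initcall(). A sketch of the resulting tick logic follows the diff.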
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index c828c2339cc9..cb838ee93a82 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -13,6 +13,7 @@
 #include <linux/delay.h>
 #include <linux/freezer.h>
 #include <linux/kthread.h>
+#include <linux/lockdep.h>
 #include <linux/notifier.h>
 #include <linux/module.h>
 
@@ -25,7 +26,22 @@ static DEFINE_PER_CPU(unsigned long, print_timestamp);
 static DEFINE_PER_CPU(struct task_struct *, watchdog_task);
 
 static int __read_mostly did_panic;
-unsigned long __read_mostly softlockup_thresh = 60;
+int __read_mostly softlockup_thresh = 60;
+
+/*
+ * Should we panic (and reboot, if panic_timeout= is set) when a
+ * soft-lockup occurs:
+ */
+unsigned int __read_mostly softlockup_panic =
+                        CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
+
+static int __init softlockup_panic_setup(char *str)
+{
+        softlockup_panic = simple_strtoul(str, NULL, 0);
+
+        return 1;
+}
+__setup("softlockup_panic=", softlockup_panic_setup);
 
 static int
 softlock_panic(struct notifier_block *this, unsigned long event, void *ptr)
@@ -84,6 +100,14 @@ void softlockup_tick(void)
         struct pt_regs *regs = get_irq_regs();
         unsigned long now;
 
+        /* Is detection switched off? */
+        if (!per_cpu(watchdog_task, this_cpu) || softlockup_thresh <= 0) {
+                /* Be sure we don't false trigger if switched back on */
+                if (touch_timestamp)
+                        per_cpu(touch_timestamp, this_cpu) = 0;
+                return;
+        }
+
         if (touch_timestamp == 0) {
                 __touch_softlockup_watchdog();
                 return;
@@ -92,11 +116,8 @@
         print_timestamp = per_cpu(print_timestamp, this_cpu);
 
         /* report at most once a second */
-        if ((print_timestamp >= touch_timestamp &&
-                        print_timestamp < (touch_timestamp + 1)) ||
-                        did_panic || !per_cpu(watchdog_task, this_cpu)) {
+        if (print_timestamp == touch_timestamp || did_panic)
                 return;
-        }
 
         /* do not print during early bootup: */
         if (unlikely(system_state != SYSTEM_RUNNING)) {
@@ -106,8 +127,11 @@
 
         now = get_timestamp(this_cpu);
 
-        /* Wake up the high-prio watchdog task every second: */
-        if (now > (touch_timestamp + 1))
+        /*
+         * Wake up the high-prio watchdog task twice per
+         * threshold timespan.
+         */
+        if (now > touch_timestamp + softlockup_thresh/2)
                 wake_up_process(per_cpu(watchdog_task, this_cpu));
 
         /* Warn about unreasonable delays: */
@@ -120,11 +144,16 @@
         printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n",
                         this_cpu, now - touch_timestamp,
                         current->comm, task_pid_nr(current));
+        print_modules();
+        print_irqtrace_events(current);
         if (regs)
                 show_regs(regs);
         else
                 dump_stack();
         spin_unlock(&print_lock);
+
+        if (softlockup_panic)
+                panic("softlockup: hung tasks");
 }
 
 /*
@@ -177,6 +206,9 @@ static void check_hung_task(struct task_struct *t, unsigned long now)
 
         t->last_switch_timestamp = now;
         touch_nmi_watchdog();
+
+        if (softlockup_panic)
+                panic("softlockup: blocked tasks");
 }
 
 /*
@@ -201,7 +233,8 @@ static void check_hung_uninterruptible_tasks(int this_cpu)
         do_each_thread(g, t) {
                 if (!--max_count)
                         goto unlock;
-                if (t->state & TASK_UNINTERRUPTIBLE)
+                /* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
+                if (t->state == TASK_UNINTERRUPTIBLE)
                         check_hung_task(t, now);
         } while_each_thread(g, t);
 unlock:
@@ -306,14 +339,33 @@ static struct notifier_block __cpuinitdata cpu_nfb = {
         .notifier_call = cpu_callback
 };
 
-__init void spawn_softlockup_task(void)
+static int __initdata nosoftlockup;
+
+static int __init nosoftlockup_setup(char *str)
+{
+        nosoftlockup = 1;
+        return 1;
+}
+__setup("nosoftlockup", nosoftlockup_setup);
+
+static int __init spawn_softlockup_task(void)
 {
         void *cpu = (void *)(long)smp_processor_id();
-        int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
+        int err;
+
+        if (nosoftlockup)
+                return 0;
 
-        BUG_ON(err == NOTIFY_BAD);
+        err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
+        if (err == NOTIFY_BAD) {
+                BUG();
+                return 1;
+        }
         cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
         register_cpu_notifier(&cpu_nfb);
 
         atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
+
+        return 0;
 }
+early_initcall(spawn_softlockup_task);
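
Taken together, the softlockup.c changes give the watchdog a configurable policy: detection can be switched off entirely (softlockup_thresh <= 0, or nosoftlockup at boot), the per-CPU watchdog task is woken once the touch timestamp is more than half a threshold old, and a report past the full threshold can optionally panic the box. Below is a minimal userspace sketch of that tick-time decision flow, for illustration only: tick_action(), have_watchdog_task, and delta are hypothetical names, and the final warn comparison against the full softlockup_thresh is an assumption from context, since the diff shows only the printk that follows it.

/*
 * Minimal userspace sketch of the softlockup_tick() decision flow after
 * this merge -- an illustration, not kernel code. The "warn" comparison
 * against the full softlockup_thresh is assumed; the diff above shows
 * only the printk that follows it.
 */
#include <stdio.h>

static int softlockup_thresh = 60;      /* seconds; <= 0 disables detection */
static unsigned int softlockup_panic;   /* set via the softlockup_panic= parameter */

/* What would one tick do for a CPU last touched @delta seconds ago? */
static const char *tick_action(unsigned long delta, int have_watchdog_task)
{
        if (!have_watchdog_task || softlockup_thresh <= 0)
                return "detection off: reset touch_timestamp and return";
        if (delta > (unsigned long)softlockup_thresh)
                return softlockup_panic ? "report lockup, then panic()"
                                        : "report lockup";
        if (delta > (unsigned long)softlockup_thresh / 2)
                return "wake the per-CPU watchdog task";
        return "nothing to do";
}

int main(void)
{
        unsigned long deltas[] = { 10, 31, 61 };

        for (int i = 0; i < 3; i++)
                printf("delta=%2lus -> %s\n", deltas[i],
                       tick_action(deltas[i], 1));
        return 0;
}

Run with the default threshold of 60, the sketch prints "nothing to do" at 10s, "wake the per-CPU watchdog task" at 31s (just past softlockup_thresh/2 = 30), and a report at 61s. On a kernel built from this tree, the corresponding knobs are the boot parameters added above: softlockup_panic=1 panics on a detected lockup (and reboots if panic_timeout= is set), while nosoftlockup skips spawning the watchdog threads entirely.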