Diffstat (limited to 'kernel/watchdog.c')
-rw-r--r--	kernel/watchdog.c	41
1 file changed, 39 insertions(+), 2 deletions(-)
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 516203e665fc..c3319bd1b040 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -31,6 +31,12 @@
 
 int watchdog_user_enabled = 1;
 int __read_mostly watchdog_thresh = 10;
+#ifdef CONFIG_SMP
+int __read_mostly sysctl_softlockup_all_cpu_backtrace;
+#else
+#define sysctl_softlockup_all_cpu_backtrace 0
+#endif
+
 static int __read_mostly watchdog_running;
 static u64 __read_mostly sample_period;
 
@@ -47,6 +53,7 @@ static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
 static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
 static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
 #endif
+static unsigned long soft_lockup_nmi_warn;
 
 /* boot commands */
 /*
@@ -95,6 +102,15 @@ static int __init nosoftlockup_setup(char *str)
 }
 __setup("nosoftlockup", nosoftlockup_setup);
 /*  */
+#ifdef CONFIG_SMP
+static int __init softlockup_all_cpu_backtrace_setup(char *str)
+{
+	sysctl_softlockup_all_cpu_backtrace =
+		!!simple_strtol(str, NULL, 0);
+	return 1;
+}
+__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
+#endif
 
 /*
  * Hard-lockup warnings should be triggered after just a few seconds. Soft-
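Note on the hunk above: the __setup() handler turns whatever string follows "softlockup_all_cpu_backtrace=" on the kernel command line into a 0/1 flag via !!simple_strtol(str, NULL, 0). A minimal userspace sketch of that parsing, using the standard strtol() in place of the kernel's simple_strtol() (the helper name below is illustrative only, not part of the patch):

#include <stdio.h>
#include <stdlib.h>

/* Userspace stand-in for !!simple_strtol(str, NULL, 0): base 0 accepts
 * decimal, octal and hex, and the double negation collapses any
 * non-zero value to 1. */
static int parse_backtrace_flag(const char *str)
{
	return !!strtol(str, NULL, 0);
}

int main(void)
{
	printf("%d %d %d\n",
	       parse_backtrace_flag("0"),     /* prints 0 */
	       parse_backtrace_flag("1"),     /* prints 1 */
	       parse_backtrace_flag("0x10")); /* prints 1 */
	return 0;
}
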
@@ -271,6 +287,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
 	struct pt_regs *regs = get_irq_regs();
 	int duration;
+	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
 
 	/* kick the hardlockup detector */
 	watchdog_interrupt_count();
@@ -317,6 +334,17 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 		if (__this_cpu_read(soft_watchdog_warn) == true)
 			return HRTIMER_RESTART;
 
+		if (softlockup_all_cpu_backtrace) {
+			/* Prevent multiple soft-lockup reports if one cpu is already
+			 * engaged in dumping cpu back traces
+			 */
+			if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
+				/* Someone else will report us. Let's give up */
+				__this_cpu_write(soft_watchdog_warn, true);
+				return HRTIMER_RESTART;
+			}
+		}
+
 		printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
 			smp_processor_id(), duration,
 			current->comm, task_pid_nr(current));
@@ -327,6 +355,17 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 		else
 			dump_stack();
 
+		if (softlockup_all_cpu_backtrace) {
+			/* Avoid generating two back traces for current
+			 * given that one is already made above
+			 */
+			trigger_allbutself_cpu_backtrace();
+
+			clear_bit(0, &soft_lockup_nmi_warn);
+			/* Barrier to sync with other cpus */
+			smp_mb__after_atomic();
+		}
+
 		if (softlockup_panic)
 			panic("softlockup: hung tasks");
 		__this_cpu_write(soft_watchdog_warn, true);
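The two hunks above serialize the all-CPU dump: the first CPU that detects a soft lockup claims bit 0 of soft_lockup_nmi_warn with test_and_set_bit(), any CPU that races in afterwards just marks itself warned and backs off, and the winner clears the bit once trigger_allbutself_cpu_backtrace() has run. A minimal userspace analogue of that single-reporter gate, using C11 atomic_flag instead of the kernel bitops (names and structure are illustrative, not taken from the patch):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for bit 0 of soft_lockup_nmi_warn. */
static atomic_flag report_in_progress = ATOMIC_FLAG_INIT;

/* Mirrors test_and_set_bit(): returns true only for the first caller,
 * who thereby wins the right to dump the backtraces. */
static bool try_become_reporter(void)
{
	return !atomic_flag_test_and_set(&report_in_progress);
}

/* Mirrors clear_bit() + smp_mb__after_atomic(): atomic_flag_clear()
 * defaults to sequentially consistent ordering, so this sketch needs
 * no separate barrier. */
static void finish_reporting(void)
{
	atomic_flag_clear(&report_in_progress);
}

int main(void)
{
	if (try_become_reporter()) {
		printf("this caller dumps backtraces for everyone\n");
		finish_reporting();
	} else {
		printf("someone else is already reporting\n");
	}
	return 0;
}
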
@@ -527,10 +566,8 @@ static void update_timers_all_cpus(void)
 	int cpu;
 
 	get_online_cpus();
-	preempt_disable();
 	for_each_online_cpu(cpu)
 		update_timers(cpu);
-	preempt_enable();
 	put_online_cpus();
 }
 