Diffstat (limited to 'kernel/softlockup.c'):
 kernel/softlockup.c | 48 +++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 41 insertions(+), 7 deletions(-)
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 01b6522fd92b..6b682d86bddf 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -25,7 +25,22 @@ static DEFINE_PER_CPU(unsigned long, print_timestamp);
 static DEFINE_PER_CPU(struct task_struct *, watchdog_task);
 
 static int __read_mostly did_panic;
-unsigned long __read_mostly softlockup_thresh = 60;
+int __read_mostly softlockup_thresh = 60;
+
+/*
+ * Should we panic (and reboot, if panic_timeout= is set) when a
+ * soft-lockup occurs:
+ */
+unsigned int __read_mostly softlockup_panic =
+				CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
+
+static int __init softlockup_panic_setup(char *str)
+{
+	softlockup_panic = simple_strtoul(str, NULL, 0);
+
+	return 1;
+}
+__setup("softlockup_panic=", softlockup_panic_setup);
 
 static int
 softlock_panic(struct notifier_block *this, unsigned long event, void *ptr)
@@ -49,12 +64,17 @@ static unsigned long get_timestamp(int this_cpu)
 	return cpu_clock(this_cpu) >> 30LL;  /* 2^30 ~= 10^9 */
 }
 
-void touch_softlockup_watchdog(void)
+static void __touch_softlockup_watchdog(void)
 {
 	int this_cpu = raw_smp_processor_id();
 
 	__raw_get_cpu_var(touch_timestamp) = get_timestamp(this_cpu);
 }
+
+void touch_softlockup_watchdog(void)
+{
+	__raw_get_cpu_var(touch_timestamp) = 0;
+}
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
 void touch_all_softlockup_watchdogs(void)
@@ -79,8 +99,16 @@ void softlockup_tick(void)
 	struct pt_regs *regs = get_irq_regs();
 	unsigned long now;
 
+	/* Is detection switched off? */
+	if (!per_cpu(watchdog_task, this_cpu) || softlockup_thresh <= 0) {
+		/* Be sure we don't false trigger if switched back on */
+		if (touch_timestamp)
+			per_cpu(touch_timestamp, this_cpu) = 0;
+		return;
+	}
+
 	if (touch_timestamp == 0) {
-		touch_softlockup_watchdog();
+		__touch_softlockup_watchdog();
 		return;
 	}
 
@@ -89,13 +117,13 @@ void softlockup_tick(void)
 	/* report at most once a second */
 	if ((print_timestamp >= touch_timestamp &&
 			print_timestamp < (touch_timestamp + 1)) ||
-			did_panic || !per_cpu(watchdog_task, this_cpu)) {
+			did_panic) {
 		return;
 	}
 
 	/* do not print during early bootup: */
 	if (unlikely(system_state != SYSTEM_RUNNING)) {
-		touch_softlockup_watchdog();
+		__touch_softlockup_watchdog();
 		return;
 	}
 
@@ -120,6 +148,9 @@ void softlockup_tick(void)
 	else
 		dump_stack();
 	spin_unlock(&print_lock);
+
+	if (softlockup_panic)
+		panic("softlockup: hung tasks");
 }
 
 /*
@@ -172,6 +203,9 @@ static void check_hung_task(struct task_struct *t, unsigned long now)
 
 	t->last_switch_timestamp = now;
 	touch_nmi_watchdog();
+
+	if (softlockup_panic)
+		panic("softlockup: blocked tasks");
 }
 
 /*
@@ -214,7 +248,7 @@ static int watchdog(void *__bind_cpu)
 	sched_setscheduler(current, SCHED_FIFO, &param);
 
 	/* initialize timestamp */
-	touch_softlockup_watchdog();
+	__touch_softlockup_watchdog();
 
 	set_current_state(TASK_INTERRUPTIBLE);
 	/*
@@ -223,7 +257,7 @@ static int watchdog(void *__bind_cpu)
 	 * debug-printout triggers in softlockup_tick().
 	 */
 	while (!kthread_should_stop()) {
-		touch_softlockup_watchdog();
+		__touch_softlockup_watchdog();
 		schedule();
 
 		if (kthread_should_stop())
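
Note on the new boot parameter: the softlockup_panic_setup() handler added above parses its argument with simple_strtoul() and base 0, so decimal, octal ("0...") and hex ("0x...") forms are all accepted (e.g. booting with softlockup_panic=1). Below is a minimal user-space sketch of that parsing behaviour only; parse_softlockup_panic() is a hypothetical stand-in for the __setup() handler, and the standard strtoul() replaces the kernel's simple_strtoul().

	/*
	 * Sketch only: mirrors the base-0 parsing done by
	 * softlockup_panic_setup() above, outside the kernel.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	static unsigned int softlockup_panic;

	static int parse_softlockup_panic(const char *str)
	{
		/* base 0: accepts "1", "017", "0x1", ... like simple_strtoul() */
		softlockup_panic = strtoul(str, NULL, 0);
		return 1;
	}

	int main(void)
	{
		parse_softlockup_panic("1");	/* as in softlockup_panic=1 */
		printf("softlockup_panic = %u\n", softlockup_panic);
		return 0;
	}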