 kernel/watchdog.c | 35 +++++++++++++++++------------------
 1 file changed, 17 insertions(+), 18 deletions(-)
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index e53622c1465e..91b0b26adc67 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -115,7 +115,7 @@ static unsigned long get_sample_period(void)
 /* Commands for resetting the watchdog */
 static void __touch_watchdog(void)
 {
-        int this_cpu = raw_smp_processor_id();
+        int this_cpu = smp_processor_id();
 
         __get_cpu_var(watchdog_touch_ts) = get_timestamp(this_cpu);
 }
@@ -157,21 +157,21 @@ void touch_softlockup_watchdog_sync(void)
 
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
 /* watchdog detector functions */
-static int is_hardlockup(int cpu)
+static int is_hardlockup(void)
 {
-        unsigned long hrint = per_cpu(hrtimer_interrupts, cpu);
+        unsigned long hrint = __get_cpu_var(hrtimer_interrupts);
 
-        if (per_cpu(hrtimer_interrupts_saved, cpu) == hrint)
+        if (__get_cpu_var(hrtimer_interrupts_saved) == hrint)
                 return 1;
 
-        per_cpu(hrtimer_interrupts_saved, cpu) = hrint;
+        __get_cpu_var(hrtimer_interrupts_saved) = hrint;
         return 0;
 }
 #endif
 
-static int is_softlockup(unsigned long touch_ts, int cpu)
+static int is_softlockup(unsigned long touch_ts)
 {
-        unsigned long now = get_timestamp(cpu);
+        unsigned long now = get_timestamp(smp_processor_id());
 
         /* Warn about unreasonable delays: */
         if (time_after(now, touch_ts + softlockup_thresh))
@@ -206,8 +206,6 @@ void watchdog_overflow_callback(struct perf_event *event, int nmi,
                  struct perf_sample_data *data,
                  struct pt_regs *regs)
 {
-        int this_cpu = smp_processor_id();
-
         if (__get_cpu_var(watchdog_nmi_touch) == true) {
                 __get_cpu_var(watchdog_nmi_touch) = false;
                 return;
@@ -219,7 +217,9 @@ void watchdog_overflow_callback(struct perf_event *event, int nmi,
          * fired multiple times before we overflow'd. If it hasn't
          * then this is a good indication the cpu is stuck
          */
-        if (is_hardlockup(this_cpu)) {
+        if (is_hardlockup()) {
+                int this_cpu = smp_processor_id();
+
                 /* only print hardlockups once */
                 if (__get_cpu_var(hard_watchdog_warn) == true)
                         return;
@@ -247,7 +247,6 @@ static inline void watchdog_interrupt_count(void) { return; }
 /* watchdog kicker functions */
 static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 {
-        int this_cpu = smp_processor_id();
         unsigned long touch_ts = __get_cpu_var(watchdog_touch_ts);
         struct pt_regs *regs = get_irq_regs();
         int duration;
@@ -262,12 +261,12 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
         hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period()));
 
         if (touch_ts == 0) {
-                if (unlikely(per_cpu(softlockup_touch_sync, this_cpu))) {
+                if (unlikely(__get_cpu_var(softlockup_touch_sync))) {
                         /*
                          * If the time stamp was touched atomically
                          * make sure the scheduler tick is up to date.
                          */
-                        per_cpu(softlockup_touch_sync, this_cpu) = false;
+                        __get_cpu_var(softlockup_touch_sync) = false;
                         sched_clock_tick();
                 }
                 __touch_watchdog();
@@ -280,14 +279,14 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
          * indicate it is getting cpu time. If it hasn't then
          * this is a good indication some task is hogging the cpu
          */
-        duration = is_softlockup(touch_ts, this_cpu);
+        duration = is_softlockup(touch_ts);
         if (unlikely(duration)) {
                 /* only warn once */
                 if (__get_cpu_var(soft_watchdog_warn) == true)
                         return HRTIMER_RESTART;
 
                 printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
-                        this_cpu, duration,
+                        smp_processor_id(), duration,
                         current->comm, task_pid_nr(current));
                 print_modules();
                 print_irqtrace_events(current);
@@ -309,10 +308,10 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 /*
  * The watchdog thread - touches the timestamp.
  */
-static int watchdog(void *__bind_cpu)
+static int watchdog(void *unused)
 {
         struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
-        struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, (unsigned long)__bind_cpu);
+        struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
 
         sched_setscheduler(current, SCHED_FIFO, &param);
@@ -328,7 +327,7 @@ static int watchdog(void *__bind_cpu)
         /*
          * Run briefly once per second to reset the softlockup timestamp.
          * If this gets delayed for more than 60 seconds then the
-         * debug-printout triggers in softlockup_tick().
+         * debug-printout triggers in watchdog_timer_fn().
          */
         while (!kthread_should_stop()) {
                 __touch_watchdog();
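
Taken together, the hunks above apply one mechanical conversion: per-CPU state that is only ever touched from the local CPU stops being addressed as per_cpu(var, cpu) with an explicitly threaded CPU argument and instead uses the local-CPU accessor __get_cpu_var(var), which is what lets the this_cpu/__bind_cpu parameters disappear. A minimal kernel-context sketch of the before/after shape (example_counter and the bump_example_* helpers are hypothetical names; the accessors are the 2.6.3x-era percpu API, later superseded by the this_cpu_*() family):

#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(unsigned long, example_counter);

/* Before: a CPU number is passed around even though callers
 * always run on that CPU. */
static void bump_example_old(int cpu)
{
        per_cpu(example_counter, cpu)++;
}

/* After: local-only code uses the local-CPU accessor and drops
 * the redundant argument; smp_processor_id() is fetched on demand
 * in the few paths (such as the lockup printk) that still need
 * the number itself. */
static void bump_example_new(void)
{
        __get_cpu_var(example_counter)++;
}

Besides shedding an argument, the local accessor documents that the data is CPU-local and avoids indexing the per-CPU area with a separately computed CPU id.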