path: root/arch/x86/kernel/nmi_64.c
author		Cyrill Gorcunov <gorcunov@gmail.com>	2008-05-24 11:36:41 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2008-05-25 16:32:51 -0400
commit		1798bc22b2790bf2a956588e6b17c36ef79ceff7 (patch)
tree		68dd3275aa52221fa4edeca4982f05be8e21a437 /arch/x86/kernel/nmi_64.c
parent		fd5cea02de100197a4c26d9e103508cf09b50a82 (diff)
x86: nmi_32/64.c - merge down nmi_32.c and nmi_64.c to nmi.c
Signed-off-by: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: hpa@zytor.com
Cc: mingo@redhat.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/kernel/nmi_64.c')
-rw-r--r--	arch/x86/kernel/nmi_64.c	477
1 file changed, 0 insertions(+), 477 deletions(-)
diff --git a/arch/x86/kernel/nmi_64.c b/arch/x86/kernel/nmi_64.c
deleted file mode 100644
index c6802447262c..000000000000
--- a/arch/x86/kernel/nmi_64.c
+++ /dev/null
@@ -1,477 +0,0 @@
-/*
- * NMI watchdog support on APIC systems
- *
- * Started by Ingo Molnar <mingo@redhat.com>
- *
- * Fixes:
- * Mikael Pettersson : AMD K7 support for local APIC NMI watchdog.
- * Mikael Pettersson : Power Management for local APIC NMI watchdog.
- * Pavel Machek and
- * Mikael Pettersson : PM converted to driver model. Disable/enable API.
- */
-
-#include <linux/nmi.h>
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/sysdev.h>
-#include <linux/sysctl.h>
-#include <linux/kprobes.h>
-#include <linux/cpumask.h>
-#include <linux/kdebug.h>
-
-#include <asm/smp.h>
-#include <asm/nmi.h>
-#include <asm/proto.h>
-#include <asm/mce.h>
-
-#include <mach_traps.h>
-
-int unknown_nmi_panic;
-int nmi_watchdog_enabled;
-
-static cpumask_t backtrace_mask = CPU_MASK_NONE;
-
-/* nmi_active:
- * >0: the lapic NMI watchdog is active, but can be disabled
- * <0: the lapic NMI watchdog has not been set up, and cannot
- *     be enabled
- *  0: the lapic NMI watchdog is disabled, but can be enabled
- */
-atomic_t nmi_active = ATOMIC_INIT(0);	/* oprofile uses this */
-static int panic_on_timeout;
-
-unsigned int nmi_watchdog = NMI_DEFAULT;
-static unsigned int nmi_hz = HZ;
-
-static DEFINE_PER_CPU(short, wd_enabled);
-
-static int endflag __initdata = 0;
-
-static inline unsigned int get_nmi_count(int cpu)
-{
-        return cpu_pda(cpu)->__nmi_count;
-}
-
-static inline int mce_in_progress(void)
-{
-#ifdef CONFIG_X86_MCE
-        return atomic_read(&mce_entry) > 0;
-#endif
-        return 0;
-}
-
-/*
- * Take the local apic timer and PIT/HPET into account. We don't
- * know which one is active, when we have highres/dyntick on
- */
-static inline unsigned int get_timer_irqs(int cpu)
-{
-        return read_pda(apic_timer_irqs) + read_pda(irq0_irqs);
-}
-
-/* Run after command line and cpu_init init, but before all other checks */
-void nmi_watchdog_default(void)
-{
-        if (nmi_watchdog != NMI_DEFAULT)
-                return;
-        nmi_watchdog = NMI_NONE;
-}
-
-#ifdef CONFIG_SMP
-/* The performance counters used by NMI_LOCAL_APIC don't trigger when
- * the CPU is idle. To make sure the NMI watchdog really ticks on all
- * CPUs during the test make them busy.
- */
-static __init void nmi_cpu_busy(void *data)
-{
-        local_irq_enable_in_hardirq();
-        /* Intentionally don't use cpu_relax here. This is
-           to make sure that the performance counter really ticks,
-           even if there is a simulator or similar that catches the
-           pause instruction. On a real HT machine this is fine because
-           all other CPUs are busy with "useless" delay loops and don't
-           care if they get somewhat less cycles. */
-        while (endflag == 0)
-                mb();
-}
-#endif
-
-int __init check_nmi_watchdog(void)
-{
-        unsigned int *prev_nmi_count;
-        int cpu;
-
-        if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DISABLED))
-                return 0;
-
-        if (!atomic_read(&nmi_active))
-                return 0;
-
-        prev_nmi_count = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
-        if (!prev_nmi_count)
-                return -1;
-
-        printk(KERN_INFO "Testing NMI watchdog ... ");
-
-#ifdef CONFIG_SMP
-        if (nmi_watchdog == NMI_LOCAL_APIC)
-                smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
-#endif
-
-        for_each_possible_cpu(cpu)
-                prev_nmi_count[cpu] = get_nmi_count(cpu);
-        local_irq_enable();
-        mdelay((20*1000)/nmi_hz); // wait 20 ticks
-
-        for_each_online_cpu(cpu) {
-                if (!per_cpu(wd_enabled, cpu))
-                        continue;
-                if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5) {
-                        printk(KERN_WARNING "WARNING: CPU#%d: NMI "
-                                "appears to be stuck (%d->%d)!\n",
-                                cpu,
-                                prev_nmi_count[cpu],
-                                get_nmi_count(cpu));
-                        per_cpu(wd_enabled, cpu) = 0;
-                        atomic_dec(&nmi_active);
-                }
-        }
-        endflag = 1;
-        if (!atomic_read(&nmi_active)) {
-                kfree(prev_nmi_count);
-                atomic_set(&nmi_active, -1);
-                return -1;
-        }
-        printk("OK.\n");
-
-        /* now that we know it works we can reduce NMI frequency to
-           something more reasonable; makes a difference in some configs */
-        if (nmi_watchdog == NMI_LOCAL_APIC)
-                nmi_hz = lapic_adjust_nmi_hz(1);
-
-        kfree(prev_nmi_count);
-        return 0;
-}
-
-static int __init setup_nmi_watchdog(char *str)
-{
-        int nmi;
-
-        if (!strncmp(str,"panic",5)) {
-                panic_on_timeout = 1;
-                str = strchr(str, ',');
-                if (!str)
-                        return 1;
-                ++str;
-        }
-
-        get_option(&str, &nmi);
-
-        if ((nmi >= NMI_INVALID) || (nmi < NMI_NONE))
-                return 0;
-
-        nmi_watchdog = nmi;
-        return 1;
-}
-
-__setup("nmi_watchdog=", setup_nmi_watchdog);
-
-#ifdef CONFIG_PM
-
-static int nmi_pm_active; /* nmi_active before suspend */
-
-static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
-{
-        /* only CPU0 goes here, other CPUs should be offline */
-        nmi_pm_active = atomic_read(&nmi_active);
-        stop_apic_nmi_watchdog(NULL);
-        BUG_ON(atomic_read(&nmi_active) != 0);
-        return 0;
-}
-
-static int lapic_nmi_resume(struct sys_device *dev)
-{
-        /* only CPU0 goes here, other CPUs should be offline */
-        if (nmi_pm_active > 0) {
-                setup_apic_nmi_watchdog(NULL);
-                touch_nmi_watchdog();
-        }
-        return 0;
-}
-
-static struct sysdev_class nmi_sysclass = {
-        .name		= "lapic_nmi",
-        .resume		= lapic_nmi_resume,
-        .suspend	= lapic_nmi_suspend,
-};
-
-static struct sys_device device_lapic_nmi = {
-        .id	= 0,
-        .cls	= &nmi_sysclass,
-};
-
-static int __init init_lapic_nmi_sysfs(void)
-{
-        int error;
-
-        /* should really be a BUG_ON but b/c this is an
-         * init call, it just doesn't work. -dcz
-         */
-        if (nmi_watchdog != NMI_LOCAL_APIC)
-                return 0;
-
-        if (atomic_read(&nmi_active) < 0)
-                return 0;
-
-        error = sysdev_class_register(&nmi_sysclass);
-        if (!error)
-                error = sysdev_register(&device_lapic_nmi);
-        return error;
-}
-/* must come after the local APIC's device_initcall() */
-late_initcall(init_lapic_nmi_sysfs);
-
-#endif	/* CONFIG_PM */
-
-static void __acpi_nmi_enable(void *__unused)
-{
-        apic_write_around(APIC_LVT0, APIC_DM_NMI);
-}
-
-/*
- * Enable timer based NMIs on all CPUs:
- */
-void acpi_nmi_enable(void)
-{
-        if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
-                on_each_cpu(__acpi_nmi_enable, NULL, 0, 1);
-}
-
-static void __acpi_nmi_disable(void *__unused)
-{
-        apic_write_around(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
-}
-
-/*
- * Disable timer based NMIs on all CPUs:
- */
-void acpi_nmi_disable(void)
-{
-        if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
-                on_each_cpu(__acpi_nmi_disable, NULL, 0, 1);
-}
-
-void setup_apic_nmi_watchdog(void *unused)
-{
-        if (__get_cpu_var(wd_enabled))
-                return;
-
-        /* cheap hack to support suspend/resume */
-        /* if cpu0 is not active neither should the other cpus */
-        if ((smp_processor_id() != 0) && (atomic_read(&nmi_active) <= 0))
-                return;
-
-        switch (nmi_watchdog) {
-        case NMI_LOCAL_APIC:
-                __get_cpu_var(wd_enabled) = 1;
-                if (lapic_watchdog_init(nmi_hz) < 0) {
-                        __get_cpu_var(wd_enabled) = 0;
-                        return;
-                }
-                /* FALL THROUGH */
-        case NMI_IO_APIC:
-                __get_cpu_var(wd_enabled) = 1;
-                atomic_inc(&nmi_active);
-        }
-}
-
-void stop_apic_nmi_watchdog(void *unused)
-{
-        /* only support LOCAL and IO APICs for now */
-        if ((nmi_watchdog != NMI_LOCAL_APIC) &&
-            (nmi_watchdog != NMI_IO_APIC))
-                return;
-        if (__get_cpu_var(wd_enabled) == 0)
-                return;
-        if (nmi_watchdog == NMI_LOCAL_APIC)
-                lapic_watchdog_stop();
-        __get_cpu_var(wd_enabled) = 0;
-        atomic_dec(&nmi_active);
-}
-
-/*
- * the best way to detect whether a CPU has a 'hard lockup' problem
- * is to check it's local APIC timer IRQ counts. If they are not
- * changing then that CPU has some problem.
- *
- * as these watchdog NMI IRQs are generated on every CPU, we only
- * have to check the current processor.
- */
-
-static DEFINE_PER_CPU(unsigned, last_irq_sum);
-static DEFINE_PER_CPU(local_t, alert_counter);
-static DEFINE_PER_CPU(int, nmi_touch);
-
-void touch_nmi_watchdog(void)
-{
-        if (nmi_watchdog > 0) {
-                unsigned cpu;
-
-                /*
-                 * Tell other CPUs to reset their alert counters. We cannot
-                 * do it ourselves because the alert count increase is not
-                 * atomic.
-                 */
-                for_each_present_cpu(cpu) {
-                        if (per_cpu(nmi_touch, cpu) != 1)
-                                per_cpu(nmi_touch, cpu) = 1;
-                }
-        }
-
-        touch_softlockup_watchdog();
-}
-EXPORT_SYMBOL(touch_nmi_watchdog);
-
-notrace __kprobes int
-nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
-{
-        unsigned int sum;
-        int touched = 0;
-        int cpu = smp_processor_id();
-        int rc = 0;
-
-        /* check for other users first */
-        if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
-                        == NOTIFY_STOP) {
-                rc = 1;
-                touched = 1;
-        }
-
-        sum = get_timer_irqs(cpu);
-
-        if (__get_cpu_var(nmi_touch)) {
-                __get_cpu_var(nmi_touch) = 0;
-                touched = 1;
-        }
-
-        if (cpu_isset(cpu, backtrace_mask)) {
-                static DEFINE_SPINLOCK(lock);	/* Serialise the printks */
-
-                spin_lock(&lock);
-                printk("NMI backtrace for cpu %d\n", cpu);
-                dump_stack();
-                spin_unlock(&lock);
-                cpu_clear(cpu, backtrace_mask);
-        }
-
-        if (mce_in_progress())
-                touched = 1;
-
-        /* if the apic timer isn't firing, this cpu isn't doing much */
-        if (!touched && __get_cpu_var(last_irq_sum) == sum) {
-                /*
-                 * Ayiee, looks like this CPU is stuck ...
-                 * wait a few IRQs (5 seconds) before doing the oops ...
-                 */
-                local_inc(&__get_cpu_var(alert_counter));
-                if (local_read(&__get_cpu_var(alert_counter)) == 5*nmi_hz)
-                        die_nmi("NMI Watchdog detected LOCKUP",
-                                regs, panic_on_timeout);
-        } else {
-                __get_cpu_var(last_irq_sum) = sum;
-                local_set(&__get_cpu_var(alert_counter), 0);
-        }
-
-        /* see if the nmi watchdog went off */
-        if (!__get_cpu_var(wd_enabled))
-                return rc;
-        switch (nmi_watchdog) {
-        case NMI_LOCAL_APIC:
-                rc |= lapic_wd_event(nmi_hz);
-                break;
-        case NMI_IO_APIC:
-                /* don't know how to accurately check for this.
-                 * just assume it was a watchdog timer interrupt
-                 * This matches the old behaviour.
-                 */
-                rc = 1;
-                break;
-        }
-        return rc;
-}
-
-#ifdef CONFIG_SYSCTL
-
-static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
-{
-        unsigned char reason = get_nmi_reason();
-        char buf[64];
-
-        sprintf(buf, "NMI received for unknown reason %02x\n", reason);
-        die_nmi(buf, regs, 1); /* Always panic here */
-        return 0;
-}
-
-/*
- * proc handler for /proc/sys/kernel/nmi
- */
-int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
-                        void __user *buffer, size_t *length, loff_t *ppos)
-{
-        int old_state;
-
-        nmi_watchdog_enabled = (atomic_read(&nmi_active) > 0) ? 1 : 0;
-        old_state = nmi_watchdog_enabled;
-        proc_dointvec(table, write, file, buffer, length, ppos);
-        if (!!old_state == !!nmi_watchdog_enabled)
-                return 0;
-
-        if (atomic_read(&nmi_active) < 0 || nmi_watchdog == NMI_DISABLED) {
-                printk( KERN_WARNING "NMI watchdog is permanently disabled\n");
-                return -EIO;
-        }
-
-        /* if nmi_watchdog is not set yet, then set it */
-        nmi_watchdog_default();
-
-        if (nmi_watchdog == NMI_LOCAL_APIC) {
-                if (nmi_watchdog_enabled)
-                        enable_lapic_nmi_watchdog();
-                else
-                        disable_lapic_nmi_watchdog();
-        } else {
-                printk( KERN_WARNING
-                        "NMI watchdog doesn't know what hardware to touch\n");
-                return -EIO;
-        }
-        return 0;
-}
-
-#endif
-
-int do_nmi_callback(struct pt_regs *regs, int cpu)
-{
-#ifdef CONFIG_SYSCTL
-        if (unknown_nmi_panic)
-                return unknown_nmi_panic_callback(regs, cpu);
-#endif
-        return 0;
-}
-
-void __trigger_all_cpu_backtrace(void)
-{
-        int i;
-
-        backtrace_mask = cpu_online_map;
-        /* Wait for up to 10 seconds for all CPUs to do the backtrace */
-        for (i = 0; i < 10 * 1000; i++) {
-                if (cpus_empty(backtrace_mask))
-                        break;
-                mdelay(1);
-        }
-}
-
-EXPORT_SYMBOL(nmi_active);
-EXPORT_SYMBOL(nmi_watchdog);