author		Cyrill Gorcunov <gorcunov@gmail.com>	2008-05-24 11:36:40 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2008-05-25 16:32:51 -0400
commit		fd5cea02de100197a4c26d9e103508cf09b50a82 (patch)
tree		92f56b9deab1524fb09d1ab4eef7a4d0520031ba /arch/x86/kernel/nmi_64.c
parent		7c2ba83f9a479eee6f302147767a30f3187fbd4b (diff)

x86: nmi_32/64.c - add helper functions to hide arch specific data

Signed-off-by: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: hpa@zytor.com
Cc: mingo@redhat.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/kernel/nmi_64.c')
-rw-r--r--	arch/x86/kernel/nmi_64.c	42
1 files changed, 31 insertions, 11 deletions
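
The diffstat above covers only the 64-bit file; per the subject line, the nmi_32.c half of the same commit adds helpers with the same names over i386's own per-cpu data, which is what "hide arch specific data" refers to. As a rough, illustrative sketch (assuming the i386 irq_stat layout of this kernel series, not quoted from the actual nmi_32.c hunks):

/*
 * Illustrative 32-bit counterparts (not part of this diff): same helper
 * names as in nmi_64.c, but backed by i386's per-cpu irq_stat rather
 * than the x86-64 PDA.
 */
static inline unsigned int get_nmi_count(int cpu)
{
	return nmi_count(cpu);		/* __nmi_count lives in irq_stat */
}

static inline int mce_in_progress(void)
{
	return 0;			/* no mce_entry counter on 32-bit */
}

static inline unsigned int get_timer_irqs(int cpu)
{
	return per_cpu(irq_stat, cpu).apic_timer_irqs +
		per_cpu(irq_stat, cpu).irq0_irqs;
}

With the arch-specific reads behind get_nmi_count(), mce_in_progress() and get_timer_irqs(), the callers in check_nmi_watchdog() and nmi_watchdog_tick() read the same on both architectures, which is what makes a later nmi_32/64.c unification straightforward.
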
diff --git a/arch/x86/kernel/nmi_64.c b/arch/x86/kernel/nmi_64.c
index d98b21cd4928..c6802447262c 100644
--- a/arch/x86/kernel/nmi_64.c
+++ b/arch/x86/kernel/nmi_64.c
@@ -47,6 +47,30 @@ static unsigned int nmi_hz = HZ;
 
 static DEFINE_PER_CPU(short, wd_enabled);
 
+static int endflag __initdata = 0;
+
+static inline unsigned int get_nmi_count(int cpu)
+{
+	return cpu_pda(cpu)->__nmi_count;
+}
+
+static inline int mce_in_progress(void)
+{
+#ifdef CONFIG_X86_MCE
+	return atomic_read(&mce_entry) > 0;
+#endif
+	return 0;
+}
+
+/*
+ * Take the local apic timer and PIT/HPET into account. We don't
+ * know which one is active, when we have highres/dyntick on
+ */
+static inline unsigned int get_timer_irqs(int cpu)
+{
+	return read_pda(apic_timer_irqs) + read_pda(irq0_irqs);
+}
+
 /* Run after command line and cpu_init init, but before all other checks */
 void nmi_watchdog_default(void)
 {
@@ -55,8 +79,6 @@ void nmi_watchdog_default(void)
 		nmi_watchdog = NMI_NONE;
 }
 
-static int endflag __initdata = 0;
-
 #ifdef CONFIG_SMP
 /* The performance counters used by NMI_LOCAL_APIC don't trigger when
  * the CPU is idle. To make sure the NMI watchdog really ticks on all
@@ -99,19 +121,19 @@ int __init check_nmi_watchdog(void)
 #endif
 
 	for_each_possible_cpu(cpu)
-		prev_nmi_count[cpu] = cpu_pda(cpu)->__nmi_count;
+		prev_nmi_count[cpu] = get_nmi_count(cpu);
 	local_irq_enable();
 	mdelay((20*1000)/nmi_hz); // wait 20 ticks
 
 	for_each_online_cpu(cpu) {
 		if (!per_cpu(wd_enabled, cpu))
 			continue;
-		if (cpu_pda(cpu)->__nmi_count - prev_nmi_count[cpu] <= 5) {
+		if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5) {
 			printk(KERN_WARNING "WARNING: CPU#%d: NMI "
 				"appears to be stuck (%d->%d)!\n",
 				cpu,
 				prev_nmi_count[cpu],
-				cpu_pda(cpu)->__nmi_count);
+				get_nmi_count(cpu));
 			per_cpu(wd_enabled, cpu) = 0;
 			atomic_dec(&nmi_active);
 		}
@@ -327,7 +349,8 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 		touched = 1;
 	}
 
-	sum = read_pda(apic_timer_irqs) + read_pda(irq0_irqs);
+	sum = get_timer_irqs(cpu);
+
 	if (__get_cpu_var(nmi_touch)) {
 		__get_cpu_var(nmi_touch) = 0;
 		touched = 1;
@@ -343,12 +366,9 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 		cpu_clear(cpu, backtrace_mask);
 	}
 
-#ifdef CONFIG_X86_MCE
-	/* Could check oops_in_progress here too, but it's safer
-	   not too */
-	if (atomic_read(&mce_entry) > 0)
-		touched = 1;
-#endif
+	if (mce_in_progress())
+		touched = 1;
+
 	/* if the apic timer isn't firing, this cpu isn't doing much */
 	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
 		/*