diff options
author | Cyrill Gorcunov <gorcunov@gmail.com> | 2008-05-24 11:36:40 -0400 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2008-05-25 16:32:51 -0400 |
commit | fd5cea02de100197a4c26d9e103508cf09b50a82 (patch) | |
tree | 92f56b9deab1524fb09d1ab4eef7a4d0520031ba /arch/x86/kernel/nmi_32.c | |
parent | 7c2ba83f9a479eee6f302147767a30f3187fbd4b (diff) |
x86: nmi_32/64.c - add helper functions to hide arch specific data
Signed-off-by: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: hpa@zytor.com
Cc: mingo@redhat.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/kernel/nmi_32.c')
-rw-r--r-- | arch/x86/kernel/nmi_32.c | 44 |
1 file changed, 32 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kernel/nmi_32.c b/arch/x86/kernel/nmi_32.c index c6d0777aaf90..a94dbedf2b74 100644 --- a/arch/x86/kernel/nmi_32.c +++ b/arch/x86/kernel/nmi_32.c | |||
@@ -51,6 +51,26 @@ static DEFINE_PER_CPU(short, wd_enabled); | |||
51 | 51 | ||
52 | static int endflag __initdata = 0; | 52 | static int endflag __initdata = 0; |
53 | 53 | ||
54 | static inline unsigned int get_nmi_count(int cpu) | ||
55 | { | ||
56 | return nmi_count(cpu); | ||
57 | } | ||
58 | |||
59 | static inline int mce_in_progress(void) | ||
60 | { | ||
61 | return 0; | ||
62 | } | ||
63 | |||
64 | /* | ||
65 | * Take the local apic timer and PIT/HPET into account. We don't | ||
66 | * know which one is active, when we have highres/dyntick on | ||
67 | */ | ||
68 | static inline unsigned int get_timer_irqs(int cpu) | ||
69 | { | ||
70 | return per_cpu(irq_stat, cpu).apic_timer_irqs + | ||
71 | per_cpu(irq_stat, cpu).irq0_irqs; | ||
72 | } | ||
73 | |||
54 | /* Run after command line and cpu_init init, but before all other checks */ | 74 | /* Run after command line and cpu_init init, but before all other checks */ |
55 | void nmi_watchdog_default(void) | 75 | void nmi_watchdog_default(void) |
56 | { | 76 | { |
@@ -104,19 +124,19 @@ int __init check_nmi_watchdog(void) | |||
104 | #endif | 124 | #endif |
105 | 125 | ||
106 | for_each_possible_cpu(cpu) | 126 | for_each_possible_cpu(cpu) |
107 | prev_nmi_count[cpu] = nmi_count(cpu); | 127 | prev_nmi_count[cpu] = get_nmi_count(cpu); |
108 | local_irq_enable(); | 128 | local_irq_enable(); |
109 | mdelay((20*1000)/nmi_hz); // wait 20 ticks | 129 | mdelay((20*1000)/nmi_hz); // wait 20 ticks |
110 | 130 | ||
111 | for_each_online_cpu(cpu) { | 131 | for_each_online_cpu(cpu) { |
112 | if (!per_cpu(wd_enabled, cpu)) | 132 | if (!per_cpu(wd_enabled, cpu)) |
113 | continue; | 133 | continue; |
114 | if (nmi_count(cpu) - prev_nmi_count[cpu] <= 5) { | 134 | if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5) { |
115 | printk(KERN_WARNING "WARNING: CPU#%d: NMI " | 135 | printk(KERN_WARNING "WARNING: CPU#%d: NMI " |
116 | "appears to be stuck (%d->%d)!\n", | 136 | "appears to be stuck (%d->%d)!\n", |
117 | cpu, | 137 | cpu, |
118 | prev_nmi_count[cpu], | 138 | prev_nmi_count[cpu], |
119 | nmi_count(cpu)); | 139 | get_nmi_count(cpu)); |
120 | per_cpu(wd_enabled, cpu) = 0; | 140 | per_cpu(wd_enabled, cpu) = 0; |
121 | atomic_dec(&nmi_active); | 141 | atomic_dec(&nmi_active); |
122 | } | 142 | } |
@@ -355,6 +375,13 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason) | |||
355 | touched = 1; | 375 | touched = 1; |
356 | } | 376 | } |
357 | 377 | ||
378 | sum = get_timer_irqs(cpu); | ||
379 | |||
380 | if (__get_cpu_var(nmi_touch)) { | ||
381 | __get_cpu_var(nmi_touch) = 0; | ||
382 | touched = 1; | ||
383 | } | ||
384 | |||
358 | if (cpu_isset(cpu, backtrace_mask)) { | 385 | if (cpu_isset(cpu, backtrace_mask)) { |
359 | static DEFINE_SPINLOCK(lock); /* Serialise the printks */ | 386 | static DEFINE_SPINLOCK(lock); /* Serialise the printks */ |
360 | 387 | ||
@@ -365,16 +392,9 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason) | |||
365 | cpu_clear(cpu, backtrace_mask); | 392 | cpu_clear(cpu, backtrace_mask); |
366 | } | 393 | } |
367 | 394 | ||
368 | /* | 395 | /* Could check oops_in_progress here too, but it's safer not to */ |
369 | * Take the local apic timer and PIT/HPET into account. We don't | 396 | if (mce_in_progress()) |
370 | * know which one is active, when we have highres/dyntick on | ||
371 | */ | ||
372 | sum = per_cpu(irq_stat, cpu).apic_timer_irqs + | ||
373 | per_cpu(irq_stat, cpu).irq0_irqs; | ||
374 | if (__get_cpu_var(nmi_touch)) { | ||
375 | __get_cpu_var(nmi_touch) = 0; | ||
376 | touched = 1; | 397 | touched = 1; |
377 | } | ||
378 | 398 | ||
379 | /* if the none of the timers isn't firing, this cpu isn't doing much */ | 399 | /* if the none of the timers isn't firing, this cpu isn't doing much */ |
380 | if (!touched && __get_cpu_var(last_irq_sum) == sum) { | 400 | if (!touched && __get_cpu_var(last_irq_sum) == sum) { |