 arch/x86/kernel/apic/hw_nmi.c | 58 --------------------------------------------
 1 file changed, 0 insertions(+), 58 deletions(-)
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index 8c3edfb89c2b..3b40082f0371 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -9,74 +9,16 @@
  *
  */
 
-#include <asm/apic.h>
-#include <linux/smp.h>
 #include <linux/cpumask.h>
-#include <linux/sched.h>
-#include <linux/percpu.h>
-#include <linux/cpumask.h>
-#include <linux/kernel_stat.h>
-#include <asm/mce.h>
 #include <linux/kdebug.h>
 #include <linux/notifier.h>
 #include <linux/kprobes.h>
-
-
 #include <linux/nmi.h>
 #include <linux/module.h>
 
 /* For reliability, we're prepared to waste bits here. */
 static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
 
-static DEFINE_PER_CPU(unsigned, last_irq_sum);
-
-/*
- * Take the local apic timer and PIT/HPET into account. We don't
- * know which one is active, when we have highres/dyntick on
- */
-static inline unsigned int get_timer_irqs(int cpu)
-{
-	unsigned int irqs = per_cpu(irq_stat, cpu).irq0_irqs;
-
-#if defined(CONFIG_X86_LOCAL_APIC)
-	irqs += per_cpu(irq_stat, cpu).apic_timer_irqs;
-#endif
-
-	return irqs;
-}
-
-static inline int mce_in_progress(void)
-{
-#if defined(CONFIG_X86_MCE)
-	return atomic_read(&mce_entry) > 0;
-#endif
-	return 0;
-}
-
-int hw_nmi_is_cpu_stuck(struct pt_regs *regs)
-{
-	unsigned int sum;
-	int cpu = smp_processor_id();
-
-	/* if we are doing an mce, just assume the cpu is not stuck */
-	/* Could check oops_in_progress here too, but it's safer not to */
-	if (mce_in_progress())
-		return 0;
-
-	/* We determine if the cpu is stuck by checking whether any
-	 * interrupts have happened since we last checked. Of course
-	 * an nmi storm could create false positives, but the higher
-	 * level logic should account for that
-	 */
-	sum = get_timer_irqs(cpu);
-	if (__get_cpu_var(last_irq_sum) == sum) {
-		return 1;
-	} else {
-		__get_cpu_var(last_irq_sum) = sum;
-		return 0;
-	}
-}
-
 u64 hw_nmi_get_sample_period(void)
 {
 	return (u64)(cpu_khz) * 1000 * 60;
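
For context, the check being deleted is small enough to restate outside the kernel. Below is a minimal standalone C sketch of the idea behind the removed hw_nmi_is_cpu_stuck(): a CPU is presumed stuck if its timer-interrupt count has not advanced between two watchdog samples. This is an illustration only, not kernel code; cpu_looks_stuck() and the hand-fed counter values are invented stand-ins for the per-CPU irq_stat counters (irq0_irqs plus, when CONFIG_X86_LOCAL_APIC is set, apic_timer_irqs) that the real function summed.

#include <stdio.h>

#define NR_CPUS 4

/* Stands in for the removed per-CPU last_irq_sum variable. */
static unsigned int last_irq_sum[NR_CPUS];

/*
 * Mirror of the removed check: a CPU is presumed stuck when its
 * timer-interrupt count has not advanced since the previous sample.
 * The real code summed the PIT/HPET and local APIC timer counts
 * because it cannot know which one is the active tick source when
 * highres/dyntick is on.
 */
static int cpu_looks_stuck(int cpu, unsigned int timer_irqs)
{
	if (last_irq_sum[cpu] == timer_irqs)
		return 1;	/* no timer interrupts since last check */
	last_irq_sum[cpu] = timer_irqs;
	return 0;
}

int main(void)
{
	/* Two watchdog samples of CPU 0: count advances, then stalls. */
	printf("%d\n", cpu_looks_stuck(0, 1000));	/* 0: progressed */
	printf("%d\n", cpu_looks_stuck(0, 1000));	/* 1: looks stuck */
	return 0;
}

As the removed comment noted, an NMI storm that starves timer interrupts can produce false positives, which the higher-level logic was expected to absorb; the mce_in_progress() bail-out likewise assumed a CPU handling a machine check is busy rather than stuck.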

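The arithmetic in the surviving hw_nmi_get_sample_period() is worth unpacking: cpu_khz * 1000 converts the CPU clock from kHz to Hz (cycles per second), and the final * 60 scales that to one minute's worth of cycles. On a 2 GHz CPU, for example, cpu_khz is 2,000,000, so the function returns 2,000,000 * 1000 * 60 = 1.2e11. Used as the sample period of a cycle-counting perf event, which is how the perf-based NMI watchdog consumes it, that makes the watchdog NMI fire roughly once per 60 seconds of CPU activity.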