about summary refs log tree commit diff stats
path: root/arch/x86/kernel/apic/hw_nmi.c
diff options
context:
space:
mode:
authorGlenn Elliott <gelliott@cs.unc.edu>2012-03-04 19:47:13 -0500
committerGlenn Elliott <gelliott@cs.unc.edu>2012-03-04 19:47:13 -0500
commitc71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
treeecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /arch/x86/kernel/apic/hw_nmi.c
parentea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlpwip-k-fmlp
Conflicts: litmus/sched_cedf.c
Diffstat (limited to 'arch/x86/kernel/apic/hw_nmi.c')
-rw-r--r--arch/x86/kernel/apic/hw_nmi.c48
1 files changed, 24 insertions, 24 deletions
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index cefd6942f0e9..d5e57db0f7be 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -16,20 +16,33 @@
16#include <linux/kprobes.h> 16#include <linux/kprobes.h>
17#include <linux/nmi.h> 17#include <linux/nmi.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/delay.h>
19 20
21#ifdef CONFIG_HARDLOCKUP_DETECTOR
22u64 hw_nmi_get_sample_period(int watchdog_thresh)
23{
24 return (u64)(cpu_khz) * 1000 * watchdog_thresh;
25}
26#endif
27
28#ifdef arch_trigger_all_cpu_backtrace
20/* For reliability, we're prepared to waste bits here. */ 29/* For reliability, we're prepared to waste bits here. */
21static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly; 30static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
22 31
23u64 hw_nmi_get_sample_period(void) 32/* "in progress" flag of arch_trigger_all_cpu_backtrace */
24{ 33static unsigned long backtrace_flag;
25 return (u64)(cpu_khz) * 1000 * 60;
26}
27 34
28#ifdef ARCH_HAS_NMI_WATCHDOG
29void arch_trigger_all_cpu_backtrace(void) 35void arch_trigger_all_cpu_backtrace(void)
30{ 36{
31 int i; 37 int i;
32 38
39 if (test_and_set_bit(0, &backtrace_flag))
40 /*
41 * If there is already a trigger_all_cpu_backtrace() in progress
42 * (backtrace_flag == 1), don't output double cpu dump infos.
43 */
44 return;
45
33 cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask); 46 cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);
34 47
35 printk(KERN_INFO "sending NMI to all CPUs:\n"); 48 printk(KERN_INFO "sending NMI to all CPUs:\n");
@@ -41,6 +54,9 @@ void arch_trigger_all_cpu_backtrace(void)
41 break; 54 break;
42 mdelay(1); 55 mdelay(1);
43 } 56 }
57
58 clear_bit(0, &backtrace_flag);
59 smp_mb__after_clear_bit();
44} 60}
45 61
46static int __kprobes 62static int __kprobes
@@ -49,11 +65,10 @@ arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self,
49{ 65{
50 struct die_args *args = __args; 66 struct die_args *args = __args;
51 struct pt_regs *regs; 67 struct pt_regs *regs;
52 int cpu = smp_processor_id(); 68 int cpu;
53 69
54 switch (cmd) { 70 switch (cmd) {
55 case DIE_NMI: 71 case DIE_NMI:
56 case DIE_NMI_IPI:
57 break; 72 break;
58 73
59 default: 74 default:
@@ -61,6 +76,7 @@ arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self,
61 } 76 }
62 77
63 regs = args->regs; 78 regs = args->regs;
79 cpu = smp_processor_id();
64 80
65 if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) { 81 if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
66 static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED; 82 static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
@@ -68,7 +84,6 @@ arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self,
68 arch_spin_lock(&lock); 84 arch_spin_lock(&lock);
69 printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu); 85 printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
70 show_regs(regs); 86 show_regs(regs);
71 dump_stack();
72 arch_spin_unlock(&lock); 87 arch_spin_unlock(&lock);
73 cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask)); 88 cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
74 return NOTIFY_STOP; 89 return NOTIFY_STOP;
@@ -80,7 +95,7 @@ arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self,
80static __read_mostly struct notifier_block backtrace_notifier = { 95static __read_mostly struct notifier_block backtrace_notifier = {
81 .notifier_call = arch_trigger_all_cpu_backtrace_handler, 96 .notifier_call = arch_trigger_all_cpu_backtrace_handler,
82 .next = NULL, 97 .next = NULL,
83 .priority = 1 98 .priority = NMI_LOCAL_LOW_PRIOR,
84}; 99};
85 100
86static int __init register_trigger_all_cpu_backtrace(void) 101static int __init register_trigger_all_cpu_backtrace(void)
@@ -90,18 +105,3 @@ static int __init register_trigger_all_cpu_backtrace(void)
90} 105}
91early_initcall(register_trigger_all_cpu_backtrace); 106early_initcall(register_trigger_all_cpu_backtrace);
92#endif 107#endif
93
94/* STUB calls to mimic old nmi_watchdog behaviour */
95#if defined(CONFIG_X86_LOCAL_APIC)
96unsigned int nmi_watchdog = NMI_NONE;
97EXPORT_SYMBOL(nmi_watchdog);
98void acpi_nmi_enable(void) { return; }
99void acpi_nmi_disable(void) { return; }
100#endif
101atomic_t nmi_active = ATOMIC_INIT(0); /* oprofile uses this */
102EXPORT_SYMBOL(nmi_active);
103int unknown_nmi_panic;
104void cpu_nmi_set_wd_enabled(void) { return; }
105void stop_apic_nmi_watchdog(void *unused) { return; }
106void setup_apic_nmi_watchdog(void *unused) { return; }
107int __init check_nmi_watchdog(void) { return 0; }