Diffstat (limited to 'arch/x86_64/kernel/nmi.c')
-rw-r--r--	arch/x86_64/kernel/nmi.c	38
1 files changed, 33 insertions, 5 deletions
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 7af9cb3e2d99..186aebbae32d 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -12,14 +12,15 @@
  * Mikael Pettersson	: PM converted to driver model. Disable/enable API.
  */
 
+#include <linux/nmi.h>
 #include <linux/mm.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/sysdev.h>
-#include <linux/nmi.h>
 #include <linux/sysctl.h>
 #include <linux/kprobes.h>
+#include <linux/cpumask.h>
 
 #include <asm/smp.h>
 #include <asm/nmi.h>
@@ -41,6 +42,8 @@ int panic_on_unrecovered_nmi;
 static DEFINE_PER_CPU(unsigned, perfctr_nmi_owner);
 static DEFINE_PER_CPU(unsigned, evntsel_nmi_owner[2]);
 
+static cpumask_t backtrace_mask = CPU_MASK_NONE;
+
 /* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's
  * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now)
  */
@@ -190,6 +193,8 @@ void nmi_watchdog_default(void)
 		nmi_watchdog = NMI_IO_APIC;
 }
 
+static int endflag __initdata = 0;
+
 #ifdef CONFIG_SMP
 /* The performance counters used by NMI_LOCAL_APIC don't trigger when
  * the CPU is idle. To make sure the NMI watchdog really ticks on all
@@ -197,7 +202,6 @@ void nmi_watchdog_default(void)
  */
 static __init void nmi_cpu_busy(void *data)
 {
-	volatile int *endflag = data;
 	local_irq_enable_in_hardirq();
 	/* Intentionally don't use cpu_relax here. This is
 	   to make sure that the performance counter really ticks,
@@ -205,14 +209,13 @@ static __init void nmi_cpu_busy(void *data)
 	   pause instruction. On a real HT machine this is fine because
 	   all other CPUs are busy with "useless" delay loops and don't
 	   care if they get somewhat less cycles. */
-	while (*endflag == 0)
-		barrier();
+	while (endflag == 0)
+		mb();
 }
 #endif
 
 int __init check_nmi_watchdog (void)
 {
-	volatile int endflag = 0;
 	int *counts;
 	int cpu;
 
@@ -253,6 +256,7 @@ int __init check_nmi_watchdog (void)
 	if (!atomic_read(&nmi_active)) {
 		kfree(counts);
 		atomic_set(&nmi_active, -1);
+		endflag = 1;
 		return -1;
 	}
 	endflag = 1;
@@ -782,6 +786,7 @@ int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
 {
 	int sum;
 	int touched = 0;
+	int cpu = smp_processor_id();
 	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
 	u64 dummy;
 	int rc=0;
@@ -799,6 +804,16 @@ int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
 		touched = 1;
 	}
 
+	if (cpu_isset(cpu, backtrace_mask)) {
+		static DEFINE_SPINLOCK(lock);	/* Serialise the printks */
+
+		spin_lock(&lock);
+		printk("NMI backtrace for cpu %d\n", cpu);
+		dump_stack();
+		spin_unlock(&lock);
+		cpu_clear(cpu, backtrace_mask);
+	}
+
 #ifdef CONFIG_X86_MCE
 	/* Could check oops_in_progress here too, but it's safer
 	   not too */
@@ -931,6 +946,19 @@ int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
 
 #endif
 
+void __trigger_all_cpu_backtrace(void)
+{
+	int i;
+
+	backtrace_mask = cpu_online_map;
+	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
+	for (i = 0; i < 10 * 1000; i++) {
+		if (cpus_empty(backtrace_mask))
+			break;
+		mdelay(1);
+	}
+}
+
 EXPORT_SYMBOL(nmi_active);
 EXPORT_SYMBOL(nmi_watchdog);
 EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
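
The backtrace facility added by this patch works in two halves: __trigger_all_cpu_backtrace() copies cpu_online_map into backtrace_mask and then polls for up to 10 seconds until the mask drains, while each CPU's nmi_watchdog_tick() notices its bit in the mask, prints its stack under a spinlock so the output is not interleaved, and clears its own bit. A minimal caller sketch follows; the extern declaration and the dump_all_cpu_stacks() helper name are assumptions for illustration only and are not part of this diff (in the tree the declaration would come from a header such as <asm/nmi.h>).

/* Sketch only: assumes __trigger_all_cpu_backtrace() is declared elsewhere. */
#include <linux/kernel.h>

extern void __trigger_all_cpu_backtrace(void);

static void dump_all_cpu_stacks(void)
{
	/*
	 * Arms backtrace_mask with every online CPU; each CPU's next NMI
	 * watchdog tick prints its stack and clears its own bit.  Returns
	 * once the mask is empty or after roughly 10 seconds.
	 */
	printk(KERN_INFO "Requesting NMI backtrace on all CPUs\n");
	__trigger_all_cpu_backtrace();
}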