Diffstat (limited to 'arch/x86_64/kernel/nmi.c')
 arch/x86_64/kernel/nmi.c | 29 ++++++++++++++++++++++++++++-
 1 file changed, 28 insertions(+), 1 deletion(-)
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 7af9cb3e2d99..27e95e7922c1 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -12,14 +12,15 @@
  * Mikael Pettersson : PM converted to driver model. Disable/enable API.
  */
 
+#include <linux/nmi.h>
 #include <linux/mm.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/sysdev.h>
-#include <linux/nmi.h>
 #include <linux/sysctl.h>
 #include <linux/kprobes.h>
+#include <linux/cpumask.h>
 
 #include <asm/smp.h>
 #include <asm/nmi.h>
@@ -41,6 +42,8 @@ int panic_on_unrecovered_nmi;
 static DEFINE_PER_CPU(unsigned, perfctr_nmi_owner);
 static DEFINE_PER_CPU(unsigned, evntsel_nmi_owner[2]);
 
+static cpumask_t backtrace_mask = CPU_MASK_NONE;
+
 /* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's
  * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now)
  */
@@ -782,6 +785,7 @@ int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
 {
 	int sum;
 	int touched = 0;
+	int cpu = smp_processor_id();
 	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
 	u64 dummy;
 	int rc=0;
@@ -799,6 +803,16 @@ int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
 		touched = 1;
 	}
 
+	if (cpu_isset(cpu, backtrace_mask)) {
+		static DEFINE_SPINLOCK(lock);	/* Serialise the printks */
+
+		spin_lock(&lock);
+		printk("NMI backtrace for cpu %d\n", cpu);
+		dump_stack();
+		spin_unlock(&lock);
+		cpu_clear(cpu, backtrace_mask);
+	}
+
 #ifdef CONFIG_X86_MCE
 	/* Could check oops_in_progress here too, but it's safer
 	   not too */
@@ -931,6 +945,19 @@ int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
 
 #endif
 
+void __trigger_all_cpu_backtrace(void)
+{
+	int i;
+
+	backtrace_mask = cpu_online_map;
+	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
+	for (i = 0; i < 10 * 1000; i++) {
+		if (cpus_empty(backtrace_mask))
+			break;
+		mdelay(1);
+	}
+}
+
 EXPORT_SYMBOL(nmi_active);
 EXPORT_SYMBOL(nmi_watchdog);
 EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
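
Note (not part of the commit): the patch only adds the mechanism — a request mask
(backtrace_mask), a check in nmi_watchdog_tick() that prints the local CPU's stack
under a spinlock and clears its bit, and __trigger_all_cpu_backtrace(), which arms
the mask for all online CPUs and polls for up to 10 seconds while each CPU's next
NMI watchdog tick delivers the dump. The sketch below shows how a caller might use
it, assuming the matching <linux/nmi.h> change (not in this diff) provides a
trigger_all_cpu_backtrace() wrapper; the sysrq hook, its names, and its signature
are illustrative only, modeled on sysrq handlers of this kernel era, and are not
added by this patch.

#include <linux/nmi.h>
#include <linux/sysrq.h>

/* Illustrative caller: ask every online CPU to print its stack.  This only
 * works while the NMI watchdog is running, since the actual dump happens
 * from nmi_watchdog_tick() on each CPU's next watchdog NMI. */
static void example_sysrq_show_all_cpus(int key, struct tty_struct *tty)
{
	trigger_all_cpu_backtrace();
}

static struct sysrq_key_op example_showallcpus_op = {
	.handler	= example_sysrq_show_all_cpus,
	.help_msg	= "showallcpus(L)",
	.action_msg	= "Show backtrace of all active CPUs",
};

The spinlock in nmi_watchdog_tick() only serialises the printk output so traces
from different CPUs do not interleave; the 10-second mdelay() poll gives a simple
upper bound so a CPU that never takes its watchdog NMI cannot stall the caller
indefinitely.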