diff options
author | Andrew Morton <akpm@osdl.org> | 2006-12-06 20:14:01 -0500 |
---|---|---|
committer | Andi Kleen <andi@basil.nowhere.org> | 2006-12-06 20:14:01 -0500 |
commit | bb81a09e55eaf7e5f798468ab971469b6f66a259 (patch) | |
tree | cf1ed6b0ad75137361228955535044fd4630a57b /arch/i386/kernel/nmi.c | |
parent | e5e3a0428968dcc1f9318ce1c941a918e99f8b84 (diff) |
[PATCH] x86: all cpu backtrace
When a spinlock lockup occurs, arrange for the NMI code to emit an all-cpu
backtrace, so we get to see which CPU is holding the lock, and where.
Cc: Andi Kleen <ak@muc.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Badari Pulavarty <pbadari@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Andi Kleen <ak@suse.de>
Diffstat (limited to 'arch/i386/kernel/nmi.c')
-rw-r--r-- | arch/i386/kernel/nmi.c | 26 |
1 file changed, 26 insertions, 0 deletions
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c index eaafe233a5da..171194ccb7bc 100644 --- a/arch/i386/kernel/nmi.c +++ b/arch/i386/kernel/nmi.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/percpu.h> | 22 | #include <linux/percpu.h> |
23 | #include <linux/dmi.h> | 23 | #include <linux/dmi.h> |
24 | #include <linux/kprobes.h> | 24 | #include <linux/kprobes.h> |
25 | #include <linux/cpumask.h> | ||
25 | 26 | ||
26 | #include <asm/smp.h> | 27 | #include <asm/smp.h> |
27 | #include <asm/nmi.h> | 28 | #include <asm/nmi.h> |
@@ -42,6 +43,8 @@ int nmi_watchdog_enabled; | |||
42 | static DEFINE_PER_CPU(unsigned long, perfctr_nmi_owner); | 43 | static DEFINE_PER_CPU(unsigned long, perfctr_nmi_owner); |
43 | static DEFINE_PER_CPU(unsigned long, evntsel_nmi_owner[3]); | 44 | static DEFINE_PER_CPU(unsigned long, evntsel_nmi_owner[3]); |
44 | 45 | ||
46 | static cpumask_t backtrace_mask = CPU_MASK_NONE; | ||
47 | |||
45 | /* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's | 48 | /* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's |
46 | * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now) | 49 | * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now) |
47 | */ | 50 | */ |
@@ -907,6 +910,16 @@ __kprobes int nmi_watchdog_tick(struct pt_regs * regs, unsigned reason) | |||
907 | touched = 1; | 910 | touched = 1; |
908 | } | 911 | } |
909 | 912 | ||
913 | if (cpu_isset(cpu, backtrace_mask)) { | ||
914 | static DEFINE_SPINLOCK(lock); /* Serialise the printks */ | ||
915 | |||
916 | spin_lock(&lock); | ||
917 | printk("NMI backtrace for cpu %d\n", cpu); | ||
918 | dump_stack(); | ||
919 | spin_unlock(&lock); | ||
920 | cpu_clear(cpu, backtrace_mask); | ||
921 | } | ||
922 | |||
910 | sum = per_cpu(irq_stat, cpu).apic_timer_irqs; | 923 | sum = per_cpu(irq_stat, cpu).apic_timer_irqs; |
911 | 924 | ||
912 | /* if the apic timer isn't firing, this cpu isn't doing much */ | 925 | /* if the apic timer isn't firing, this cpu isn't doing much */ |
@@ -1033,6 +1046,19 @@ int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file, | |||
1033 | 1046 | ||
1034 | #endif | 1047 | #endif |
1035 | 1048 | ||
1049 | void __trigger_all_cpu_backtrace(void) | ||
1050 | { | ||
1051 | int i; | ||
1052 | |||
1053 | backtrace_mask = cpu_online_map; | ||
1054 | /* Wait for up to 10 seconds for all CPUs to do the backtrace */ | ||
1055 | for (i = 0; i < 10 * 1000; i++) { | ||
1056 | if (cpus_empty(backtrace_mask)) | ||
1057 | break; | ||
1058 | mdelay(1); | ||
1059 | } | ||
1060 | } | ||
1061 | |||
1036 | EXPORT_SYMBOL(nmi_active); | 1062 | EXPORT_SYMBOL(nmi_active); |
1037 | EXPORT_SYMBOL(nmi_watchdog); | 1063 | EXPORT_SYMBOL(nmi_watchdog); |
1038 | EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi); | 1064 | EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi); |