path: root/arch/i386/kernel
author	Eric W. Biederman <ebiederm@xmission.com>	2005-06-25 17:57:58 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-06-25 19:24:49 -0400
commit	c4ac4263a019c791e906f284bb03891d3c25a845 (patch)
tree	9bfd0f45824748cbf3287a82b4449e6e18f25332	/arch/i386/kernel
parent	5033cba087f6ac773002123aafbea1aad4267682 (diff)
[PATCH] crashdump: x86: add NMI handler to capture other CPUs
One of the dangers when switching from one kernel to another is what happens to all of the other cpus that were running in the crashed kernel.  In an attempt to avoid that problem, this patch adds an NMI handler and attempts to shoot down the other cpus by sending them non-maskable interrupts.

The code then waits for one second, or until all known cpus have stopped running, and then jumps from the running kernel that has crashed to the kernel in reserved memory.  The kernel's mdelay() spin loop is used for the delay, as it should continue to be safe even after a crash.

Signed-off-by: Eric Biederman <ebiederm@xmission.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
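(For illustration only, not part of the patch.)  A minimal C sketch of the sequence the message describes: send NMIs to every other cpu, then busy-wait with mdelay() for at most one second or until each cpu has checked in by decrementing a shared counter.  The counter and the 1000 x 1 ms loop mirror the hunk below; send_nmi_to_others() is an assumed stand-in for the arch-specific IPI helper.

	static atomic_t cpus_left;		/* other cpus that have not stopped yet */

	static void wait_for_other_cpus(void)
	{
		unsigned long msecs = 1000;	/* upper bound: one second */

		atomic_set(&cpus_left, num_online_cpus() - 1);
		send_nmi_to_others();		/* assumed helper: NMI IPI to all but self */

		/* mdelay() is a calibrated busy-wait, so it stays usable after the crash;
		 * each cpu's NMI handler does atomic_dec(&cpus_left) and then halts. */
		while (atomic_read(&cpus_left) > 0 && msecs--)
			mdelay(1);
	}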
Diffstat (limited to 'arch/i386/kernel')
-rw-r--r--	arch/i386/kernel/crash.c	56
1 file changed, 56 insertions, 0 deletions
diff --git a/arch/i386/kernel/crash.c b/arch/i386/kernel/crash.c
index fa27a6c2abb6..882779c07874 100644
--- a/arch/i386/kernel/crash.c
+++ b/arch/i386/kernel/crash.c
@@ -23,12 +23,65 @@
 #include <asm/hardirq.h>
 #include <asm/nmi.h>
 #include <asm/hw_irq.h>
+#include <mach_ipi.h>
 
 #define MAX_NOTE_BYTES 1024
 typedef u32 note_buf_t[MAX_NOTE_BYTES/4];
 
 note_buf_t crash_notes[NR_CPUS];
 
+#ifdef CONFIG_SMP
+static atomic_t waiting_for_crash_ipi;
+
+static int crash_nmi_callback(struct pt_regs *regs, int cpu)
+{
+	local_irq_disable();
+	atomic_dec(&waiting_for_crash_ipi);
+	/* Assume hlt works */
+	__asm__("hlt");
+	for(;;);
+	return 1;
+}
+
+/*
+ * By using the NMI code instead of a vector we just sneak thru the
+ * word generator coming out with just what we want.  AND it does
+ * not matter if clustered_apic_mode is set or not.
+ */
+static void smp_send_nmi_allbutself(void)
+{
+	send_IPI_allbutself(APIC_DM_NMI);
+}
+
+static void nmi_shootdown_cpus(void)
+{
+	unsigned long msecs;
+	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
+
+	/* Would it be better to replace the trap vector here? */
+	set_nmi_callback(crash_nmi_callback);
+	/* Ensure the new callback function is set before sending
+	 * out the NMI
+	 */
+	wmb();
+
+	smp_send_nmi_allbutself();
+
+	msecs = 1000; /* Wait at most a second for the other cpus to stop */
+	while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
+		mdelay(1);
+		msecs--;
+	}
+
+	/* Leave the nmi callback set */
+}
+#else
+static void nmi_shootdown_cpus(void)
+{
+	/* There are no cpus to shootdown */
+}
+#endif
+
 void machine_crash_shutdown(void)
 {
 	/* This function is only called after the system
@@ -39,4 +92,7 @@ void machine_crash_shutdown(void)
 	 * In practice this means shooting down the other cpus in
 	 * an SMP system.
 	 */
+	/* The kernel is broken so disable interrupts */
+	local_irq_disable();
+	nmi_shootdown_cpus();
 }
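For context (not part of this diff): a hedged sketch of where machine_crash_shutdown() sits in the crash path of this patch series.  The generic kexec code hands control from the dying kernel to the crash kernel that was loaded into reserved memory; the names below follow kernel/kexec.c of this era, but the exact signatures are assumptions.

	/* Illustrative only -- roughly what the generic crash path does. */
	void crash_kexec_sketch(void)
	{
		struct kimage *image = xchg(&kexec_crash_image, NULL);

		if (image) {
			machine_crash_shutdown();	/* disable irqs, NMI-shootdown the other cpus */
			machine_kexec(image);		/* jump to the kernel in reserved memory */
		}
	}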