author     Hiroshi Shimamoto <h-shimamoto@ct.jp.nec.com>   2007-10-19 21:24:20 -0400
committer  Thomas Gleixner <tglx@linutronix.de>            2007-10-23 16:37:23 -0400
commit     62a31a03b3d2a9d20e7a073e2cd9b27bfb7d6a3f (patch)
tree       70610b045d737169ec834d64d185818c87f38d3b /arch/x86/kernel/crash.c
parent     92f98b19bcce8b56ec6fc067702e211c36f19e88 (diff)
x86: unify crash_32/64.c
Most of the contents of crash_32.c and crash_64.c are the same; unify them into a single crash.c.
Signed-off-by: Hiroshi Shimamoto <h-shimamoto@ct.jp.nec.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/kernel/crash.c')
-rw-r--r--   arch/x86/kernel/crash.c   144
1 file changed, 144 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
new file mode 100644
index 000000000000..af0253f94a9a
--- /dev/null
+++ b/arch/x86/kernel/crash.c
@@ -0,0 +1,144 @@
/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 *
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <linux/kdebug.h>
#include <asm/smp.h>

#ifdef CONFIG_X86_32
#include <mach_ipi.h>
#else
#include <asm/mach_apic.h>
#endif

/* This keeps track of which cpu is the crashing cpu. */
static int crashing_cpu;

#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
static atomic_t waiting_for_crash_ipi;

static int crash_nmi_callback(struct notifier_block *self,
                              unsigned long val, void *data)
{
        struct pt_regs *regs;
#ifdef CONFIG_X86_32
        struct pt_regs fixed_regs;
#endif
        int cpu;

        if (val != DIE_NMI_IPI)
                return NOTIFY_OK;

        regs = ((struct die_args *)data)->regs;
        cpu = raw_smp_processor_id();

        /* Don't do anything if this handler is invoked on the crashing cpu.
         * Otherwise the system will completely hang. The crashing cpu can get
         * an NMI if the system was initially booted with the nmi_watchdog parameter.
         */
        if (cpu == crashing_cpu)
                return NOTIFY_STOP;
        local_irq_disable();

#ifdef CONFIG_X86_32
        if (!user_mode_vm(regs)) {
                crash_fixup_ss_esp(&fixed_regs, regs);
                regs = &fixed_regs;
        }
#endif
        crash_save_cpu(regs, cpu);
        disable_local_APIC();
        atomic_dec(&waiting_for_crash_ipi);
        /* Assume hlt works */
        halt();
        for (;;)
                cpu_relax();

        return 1;
}

static void smp_send_nmi_allbutself(void)
{
        cpumask_t mask = cpu_online_map;
        cpu_clear(safe_smp_processor_id(), mask);
        if (!cpus_empty(mask))
                send_IPI_mask(mask, NMI_VECTOR);
}

static struct notifier_block crash_nmi_nb = {
        .notifier_call = crash_nmi_callback,
};

static void nmi_shootdown_cpus(void)
{
        unsigned long msecs;

        atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
        /* Would it be better to replace the trap vector here? */
        if (register_die_notifier(&crash_nmi_nb))
                return;         /* return what? */
        /* Ensure the new callback function is set before sending
         * out the NMI
         */
        wmb();

        smp_send_nmi_allbutself();

        msecs = 1000; /* Wait at most a second for the other cpus to stop */
        while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
                mdelay(1);
                msecs--;
        }

        /* Leave the nmi callback set */
        disable_local_APIC();
}
#else
static void nmi_shootdown_cpus(void)
{
        /* There are no cpus to shootdown */
}
#endif

void machine_crash_shutdown(struct pt_regs *regs)
{
        /* This function is only called after the system
         * has panicked or is otherwise in a critical state.
         * The minimum amount of code to allow a kexec'd kernel
         * to run successfully needs to happen here.
         *
         * In practice this means shooting down the other cpus in
         * an SMP system.
         */
        /* The kernel is broken so disable interrupts */
        local_irq_disable();

        /* Make a note of the crashing cpu. Will be used in the NMI callback. */
        crashing_cpu = safe_smp_processor_id();
        nmi_shootdown_cpus();
        lapic_shutdown();
#if defined(CONFIG_X86_IO_APIC)
        disable_IO_APIC();
#endif
        crash_save_cpu(regs, safe_smp_processor_id());
}
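
Note on the mechanism: instead of patching the NMI trap vector at crash time (the "Would it be better to replace the trap vector here?" comment above), the unified code hooks the kernel's die-notifier chain, so crash_nmi_callback() is invoked for every die event and only acts on DIE_NMI_IPI. The fragment below is a minimal, illustrative sketch of that registration pattern against the same 2.6.24-era API; it is not part of this commit, and the names example_die_callback, example_die_nb, example_init and example_exit are invented for illustration.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/kdebug.h>

/* Illustrative only: act on NMI-IPI die events, pass everything else on. */
static int example_die_callback(struct notifier_block *self,
                                unsigned long val, void *data)
{
        if (val != DIE_NMI_IPI)
                return NOTIFY_OK;       /* not ours; let other handlers run */

        printk(KERN_INFO "example: NMI IPI seen on cpu %d\n",
               raw_smp_processor_id());
        return NOTIFY_STOP;             /* consume the event, stop the chain */
}

static struct notifier_block example_die_nb = {
        .notifier_call = example_die_callback,
};

static int __init example_init(void)
{
        /* A non-zero return here means the hook was not installed. */
        return register_die_notifier(&example_die_nb);
}

static void __exit example_exit(void)
{
        unregister_die_notifier(&example_die_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");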