Diffstat (limited to 'arch/x86_64/kernel/crash_64.c')
-rw-r--r--  arch/x86_64/kernel/crash_64.c  135
1 file changed, 0 insertions, 135 deletions
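
The diff below removes the x86_64 crash-dump helpers. The file hooks the kernel's die-notifier chain so that the NMI sent at crash time reaches crash_nmi_callback() on every other cpu. For readers unfamiliar with that chain, here is a minimal registration sketch; the module wrapper and every demo_* name are invented for illustration, and only register_die_notifier(), unregister_die_notifier(), and the notifier_block callback signature correspond to what the removed file uses.

/*
 * Hedged sketch of the die-notifier registration pattern used by the
 * file removed in the diff below. The module boilerplate and the demo_*
 * names are assumptions for illustration only.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>

static int demo_die_callback(struct notifier_block *self,
			     unsigned long val, void *data)
{
	struct die_args *args = data;

	/* crash_64.c filters on DIE_NMI_IPI here; this sketch just logs. */
	printk(KERN_INFO "die event %lu, regs at %p\n", val, args->regs);

	/* Let the rest of the notifier chain run. */
	return NOTIFY_OK;
}

static struct notifier_block demo_die_nb = {
	.notifier_call = demo_die_callback,
};

static int __init demo_init(void)
{
	/* Same registration call nmi_shootdown_cpus() makes in the diff. */
	return register_die_notifier(&demo_die_nb);
}

static void __exit demo_exit(void)
{
	unregister_die_notifier(&demo_die_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The real code additionally checks the return value of register_die_notifier() and skips the shootdown if registration fails, as visible in nmi_shootdown_cpus() below.
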
diff --git a/arch/x86_64/kernel/crash_64.c b/arch/x86_64/kernel/crash_64.c
deleted file mode 100644
index 13432a1ae904..000000000000
--- a/arch/x86_64/kernel/crash_64.c
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Architecture specific (x86_64) functions for kexec based crash dumps.
- *
- * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
- *
- * Copyright (C) IBM Corporation, 2004. All rights reserved.
- *
- */
-
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/smp.h>
-#include <linux/irq.h>
-#include <linux/reboot.h>
-#include <linux/kexec.h>
-#include <linux/delay.h>
-#include <linux/elf.h>
-#include <linux/elfcore.h>
-#include <linux/kdebug.h>
-
-#include <asm/processor.h>
-#include <asm/hardirq.h>
-#include <asm/nmi.h>
-#include <asm/hw_irq.h>
-#include <asm/mach_apic.h>
-
-/* This keeps a track of which one is crashing cpu. */
-static int crashing_cpu;
-
-#ifdef CONFIG_SMP
-static atomic_t waiting_for_crash_ipi;
-
-static int crash_nmi_callback(struct notifier_block *self,
-				unsigned long val, void *data)
-{
-	struct pt_regs *regs;
-	int cpu;
-
-	if (val != DIE_NMI_IPI)
-		return NOTIFY_OK;
-
-	regs = ((struct die_args *)data)->regs;
-	cpu = raw_smp_processor_id();
-
-	/*
-	 * Don't do anything if this handler is invoked on crashing cpu.
-	 * Otherwise, system will completely hang. Crashing cpu can get
-	 * an NMI if system was initially booted with nmi_watchdog parameter.
-	 */
-	if (cpu == crashing_cpu)
-		return NOTIFY_STOP;
-	local_irq_disable();
-
-	crash_save_cpu(regs, cpu);
-	disable_local_APIC();
-	atomic_dec(&waiting_for_crash_ipi);
-	/* Assume hlt works */
-	for(;;)
-		halt();
-
-	return 1;
-}
-
-static void smp_send_nmi_allbutself(void)
-{
-	send_IPI_allbutself(NMI_VECTOR);
-}
-
-/*
- * This code is a best effort heuristic to get the
- * other cpus to stop executing. So races with
- * cpu hotplug shouldn't matter.
- */
-
-static struct notifier_block crash_nmi_nb = {
-	.notifier_call = crash_nmi_callback,
-};
-
-static void nmi_shootdown_cpus(void)
-{
-	unsigned long msecs;
-
-	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
-	if (register_die_notifier(&crash_nmi_nb))
-		return;		/* return what? */
-
-	/*
-	 * Ensure the new callback function is set before sending
-	 * out the NMI
-	 */
-	wmb();
-
-	smp_send_nmi_allbutself();
-
-	msecs = 1000; /* Wait at most a second for the other cpus to stop */
-	while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
-		mdelay(1);
-		msecs--;
-	}
-	/* Leave the nmi callback set */
-	disable_local_APIC();
-}
-#else
-static void nmi_shootdown_cpus(void)
-{
-	/* There are no cpus to shootdown */
-}
-#endif
-
-void machine_crash_shutdown(struct pt_regs *regs)
-{
-	/*
-	 * This function is only called after the system
-	 * has panicked or is otherwise in a critical state.
-	 * The minimum amount of code to allow a kexec'd kernel
-	 * to run successfully needs to happen here.
-	 *
-	 * In practice this means shooting down the other cpus in
-	 * an SMP system.
-	 */
-	/* The kernel is broken so disable interrupts */
-	local_irq_disable();
-
-	/* Make a note of crashing cpu. Will be used in NMI callback.*/
-	crashing_cpu = smp_processor_id();
-	nmi_shootdown_cpus();
-
-	if(cpu_has_apic)
-		disable_local_APIC();
-
-	disable_IO_APIC();
-
-	crash_save_cpu(regs, smp_processor_id());
-}
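
For readers who want to experiment with the handshake that nmi_shootdown_cpus() and crash_nmi_callback() implement, here is a user-space model under stated assumptions: pthreads stand in for the other cpus, a C11 atomic counter plays the role of waiting_for_crash_ipi, and usleep(1000) replaces mdelay(1). None of the names or the threading setup come from the kernel; they exist only to show the counting-and-timeout idea.

/*
 * User-space model of the nmi_shootdown_cpus() handshake, for
 * illustration only: pthreads stand in for cpus, pause() for hlt(),
 * and usleep() for mdelay(). All names below are invented for this
 * sketch and do not appear in the kernel.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NCPUS 4

static atomic_int waiting;		/* stands in for waiting_for_crash_ipi */

static void *other_cpu(void *arg)
{
	(void)arg;
	/* Mirrors crash_nmi_callback(): check in once, then park forever. */
	atomic_fetch_sub(&waiting, 1);
	for (;;)
		pause();		/* stands in for the hlt() loop */
	return NULL;
}

int main(void)
{
	pthread_t cpus[NCPUS - 1];
	int msecs = 1000;
	int i;

	/* Mirrors atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1). */
	atomic_store(&waiting, NCPUS - 1);

	/* Mirrors smp_send_nmi_allbutself(): wake every other "cpu". */
	for (i = 0; i < NCPUS - 1; i++)
		pthread_create(&cpus[i], NULL, other_cpu, NULL);

	/* Mirrors the mdelay(1) loop: wait at most about one second. */
	while (atomic_load(&waiting) > 0 && msecs) {
		usleep(1000);
		msecs--;
	}

	printf("cpus that never checked in: %d\n", atomic_load(&waiting));
	return 0;
}

Built with something like cc -pthread model.c, the program normally prints 0, because every worker decrements the counter well within the timeout. The model keeps the two details that matter at crash time: the initiating cpu never blocks indefinitely (it gives up after roughly a second, exactly like the mdelay() loop), and each parked cpu checks in exactly once after saving its state, so a counter of zero means every responding cpu has recorded its registers.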