/*
 * Architecture specific (x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 *
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/mach_apic.h>

/* This keeps track of which cpu is the crashing cpu. */
static int crashing_cpu;

#ifdef CONFIG_SMP
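/* Count of cpus that have not yet parked themselves in the crash NMI handler. */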
static atomic_t waiting_for_crash_ipi;

static int crash_nmi_callback(struct pt_regs *regs, int cpu)
{
	/*
	 * Don't do anything if this handler is invoked on the crashing
	 * cpu. Otherwise the system will completely hang. The crashing
	 * cpu can get an NMI if the system was initially booted with the
	 * nmi_watchdog parameter.
	 */
	if (cpu == crashing_cpu)
		return 1;
	local_irq_disable();

	disable_local_APIC();
	atomic_dec(&waiting_for_crash_ipi);

	/* Assume hlt works */
	for (;;)
		asm("hlt");

	return 1;
}
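
/* Send an NMI to every cpu except the one we are running on. */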
static void smp_send_nmi_allbutself(void)
{
	send_IPI_allbutself(APIC_DM_NMI);
}

/*
 * This code is a best effort heuristic to get the
 * other cpus to stop executing. So races with
 * cpu hotplug shouldn't matter.
 */
static void nmi_shootdown_cpus(void)
{
	unsigned long msecs;

	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
	set_nmi_callback(crash_nmi_callback);

	/*
	 * Ensure the new callback function is set before sending
	 * out the NMI
	 */
	wmb();

	smp_send_nmi_allbutself();

	msecs = 1000; /* Wait at most a second for the other cpus to stop */
	while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
		mdelay(1);
		msecs--;
	}

	/* Leave the nmi callback set */
	disable_local_APIC();
}
#else
static void nmi_shootdown_cpus(void)
{
	/* There are no cpus to shoot down */
}
#endif

void machine_crash_shutdown(struct pt_regs *regs)
{
	/*
	 * This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */

	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	/* Make a note of the crashing cpu; it will be used in the NMI callback. */
	crashing_cpu = smp_processor_id();
	nmi_shootdown_cpus();

	if (cpu_has_apic)
		disable_local_APIC();
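
	/*
	 * Tear down the IO-APIC as well so the kexec'd kernel starts
	 * from a sane interrupt state.
	 */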
#if defined(CONFIG_X86_IO_APIC)
	disable_IO_APIC();
#endif
}