author	David Wilder <dwilder@us.ibm.com>	2006-06-23 18:29:34 -0400
committer	Paul Mackerras <paulus@samba.org>	2006-06-28 01:18:52 -0400
commit	c0ce7d0886cf0c2579c604eac41a7e125bc0e96d (patch)
tree	f31448371b4295e98753f1551178fdddb8d18b0e /arch/powerpc
parent	2cd90bc8fba8720ef7f3fdfd1e0c1a5397a18271 (diff)
[POWERPC] Add the use of the firmware soft-reset-nmi to kdump.
With this patch, kdump uses the firmware soft-reset NMI for two purposes:

1) Initiate the kdump (take a crash dump) by issuing a soft-reset.

2) Break a CPU out of a deadlock condition that is detected during kdump processing.

When a soft-reset is initiated, each CPU enters system_reset_exception(), sets its corresponding bit in the global bit-array cpus_in_sr, and then calls die(). When die() finds the CPU's bit set in cpus_in_sr, crash_kexec() is called to initiate a crash dump. The first CPU to enter crash_kexec() is called the "crashing CPU". All other CPUs are "secondary CPUs". The secondary CPUs pass through to crash_kexec_secondary() and sleep. The crashing CPU waits for all CPUs to enter via soft-reset and then boots the kdump kernel (see crash_soft_reset_check()).

When the system crashes due to a panic or exception, crash_kexec() is called by panic() or die(). The crashing CPU sends an IPI to all other CPUs to notify them of the pending shutdown. If a CPU is in a deadlock or hung state with interrupts disabled, the IPI will not be delivered, and as a result the kdump kernel is not booted. This problem is solved with the use of a firmware-generated soft-reset. After the crashing CPU has issued the IPI, it waits 10 seconds for all CPUs to enter crash_ipi_callback(). A CPU signifies its entry to crash_ipi_callback() by setting its corresponding bit in the cpus_in_crash bit array. After 10 seconds, if one or more CPUs have not set their bit in cpus_in_crash, we assume that the CPU(s) is deadlocked. The operator is then prompted to generate a soft-reset to break the deadlock. Each CPU enters the soft-reset handler as described above.

Two conditions must be handled at this point:

1) The system crashed because the operator generated a soft-reset.

2) The system had crashed before the soft-reset was generated (in the case of a panic or oops).

The first CPU to enter crash_kexec() uses the state of the kexec_lock to distinguish these cases. If kexec_lock is already held, condition 2 is true and crash_kexec_secondary() is called; otherwise this CPU is flagged as the crashing CPU, the kexec_lock is acquired, and crash_kexec() proceeds as described above. Each additional CPU responding to the soft-reset passes through crash_kexec() to crash_kexec_secondary(). All secondary CPUs call crash_ipi_callback(), readying themselves for the shutdown. When ready, they clear their bit in cpus_in_sr. The crashing CPU waits in crash_kexec_secondary() until all other CPUs have cleared their bits in cpus_in_sr, and then the kexec kernel boot is started.

Signed-off-by: Haren Myneni <haren@us.ibm.com>
Signed-off-by: David Wilder <dwilder@us.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
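To make the IPI-then-timeout-then-soft-reset coordination easier to follow before reading the patch itself, here is a minimal user-space sketch of that flow. It is not the kernel code: threads stand in for CPUs, and only the names cpus_in_crash, crashing_cpu and the 10-second timeout mirror the patch; the helper names and the overall harness are hypothetical stand-ins.

/*
 * Illustrative sketch of the commit's CPU coordination, assuming POSIX
 * threads as stand-ins for CPUs.  Build with:  cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define NCPUS 4

static atomic_int  cpus_in_crash;   /* CPUs that reached the crash handler */
static atomic_bool ipi_sent;        /* stands in for crash_send_ipi()      */
static int crashing_cpu = -1;

/* Secondary "CPU": respond to the IPI, mark itself, then park forever. */
static void *secondary(void *arg)
{
	int cpu = (int)(intptr_t)arg;

	while (!atomic_load(&ipi_sent))           /* wait for the "IPI"     */
		usleep(1000);
	atomic_fetch_add(&cpus_in_crash, 1);      /* crash_ipi_callback()   */
	printf("cpu %d entered crash handler\n", cpu);
	for (;;)                                  /* kexec_smp_wait()       */
		pause();
	return NULL;
}

int main(void)
{
	pthread_t tid;
	int msecs = 10000;                        /* 10 s, as in the patch  */

	crashing_cpu = 0;
	for (int cpu = 1; cpu < NCPUS; cpu++)
		pthread_create(&tid, NULL, secondary, (void *)(intptr_t)cpu);

	atomic_store(&ipi_sent, true);            /* crash_send_ipi()       */
	while (atomic_load(&cpus_in_crash) < NCPUS - 1 && --msecs > 0)
		usleep(1000);                     /* mdelay(1) equivalent   */

	if (atomic_load(&cpus_in_crash) < NCPUS - 1)
		printf("done waiting: %d cpu(s) not responding, "
		       "operator would trigger a soft-reset here\n",
		       NCPUS - 1 - atomic_load(&cpus_in_crash));
	else
		printf("all cpus checked in, kdump kernel would boot now\n");
	return 0;                                 /* machine_kexec() in reality */
}

In the kernel, the soft-reset fallback path (cpus_in_sr, crash_soft_reset_check(), crash_kexec_secondary()) replaces the "operator would trigger a soft-reset here" branch above; the actual implementation follows in the diff.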
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/kernel/crash.c	147
-rw-r--r--	arch/powerpc/kernel/traps.c	27
2 files changed, 132 insertions, 42 deletions
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index e253a45dcf10..b537cfa4e09b 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -24,9 +24,11 @@
 #include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/types.h>
+#include <linux/irq.h>
 
 #include <asm/processor.h>
 #include <asm/machdep.h>
+#include <asm/kexec.h>
 #include <asm/kdump.h>
 #include <asm/lmb.h>
 #include <asm/firmware.h>
@@ -41,6 +43,7 @@
 
 /* This keeps a track of which one is crashing cpu. */
 int crashing_cpu = -1;
+static cpumask_t cpus_in_crash = CPU_MASK_NONE;
 
 static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
			size_t data_len)
@@ -98,34 +101,66 @@ static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
 }
 
 #ifdef CONFIG_SMP
-static atomic_t waiting_for_crash_ipi;
+static atomic_t enter_on_soft_reset = ATOMIC_INIT(0);
 
 void crash_ipi_callback(struct pt_regs *regs)
 {
 	int cpu = smp_processor_id();
 
-	if (cpu == crashing_cpu)
-		return;
-
 	if (!cpu_online(cpu))
 		return;
 
-	if (ppc_md.kexec_cpu_down)
-		ppc_md.kexec_cpu_down(1, 1);
-
 	local_irq_disable();
+	if (!cpu_isset(cpu, cpus_in_crash))
+		crash_save_this_cpu(regs, cpu);
+	cpu_set(cpu, cpus_in_crash);
 
-	crash_save_this_cpu(regs, cpu);
-	atomic_dec(&waiting_for_crash_ipi);
+	/*
+	 * Entered via soft-reset - could be the kdump
+	 * process is invoked using soft-reset or user activated
+	 * it if some CPU did not respond to an IPI.
+	 * For soft-reset, the secondary CPU can enter this func
+	 * twice. 1 - using IPI, and 2. soft-reset.
+	 * Tell the kexec CPU that entered via soft-reset and ready
+	 * to go down.
+	 */
+	if (cpu_isset(cpu, cpus_in_sr)) {
+		cpu_clear(cpu, cpus_in_sr);
+		atomic_inc(&enter_on_soft_reset);
+	}
+
+	/*
+	 * Starting the kdump boot.
+	 * This barrier is needed to make sure that all CPUs are stopped.
+	 * If not, soft-reset will be invoked to bring other CPUs.
+	 */
+	while (!cpu_isset(crashing_cpu, cpus_in_crash))
+		cpu_relax();
+
+	if (ppc_md.kexec_cpu_down)
+		ppc_md.kexec_cpu_down(1, 1);
 	kexec_smp_wait();
 	/* NOTREACHED */
 }
 
-static void crash_kexec_prepare_cpus(void)
+/*
+ * Wait until all CPUs are entered via soft-reset.
+ */
+static void crash_soft_reset_check(int cpu)
+{
+	unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
+
+	cpu_clear(cpu, cpus_in_sr);
+	while (atomic_read(&enter_on_soft_reset) != ncpus)
+		cpu_relax();
+}
+
+
+static void crash_kexec_prepare_cpus(int cpu)
 {
 	unsigned int msecs;
 
-	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
+	unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
 
 	crash_send_ipi(crash_ipi_callback);
 	smp_wmb();
@@ -133,14 +168,13 @@ static void crash_kexec_prepare_cpus(void)
 	/*
 	 * FIXME: Until we will have the way to stop other CPUSs reliabally,
 	 * the crash CPU will send an IPI and wait for other CPUs to
-	 * respond. If not, proceed the kexec boot even though we failed to
-	 * capture other CPU states.
+	 * respond.
 	 * Delay of at least 10 seconds.
 	 */
-	printk(KERN_ALERT "Sending IPI to other cpus...\n");
+	printk(KERN_EMERG "Sending IPI to other cpus...\n");
 	msecs = 10000;
-	while ((atomic_read(&waiting_for_crash_ipi) > 0) && (--msecs > 0)) {
-		barrier();
+	while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) {
+		cpu_relax();
 		mdelay(1);
 	}
 
@@ -149,18 +183,71 @@ static void crash_kexec_prepare_cpus(void)
 	/*
 	 * FIXME: In case if we do not get all CPUs, one possibility: ask the
 	 * user to do soft reset such that we get all.
-	 * IPI handler is already set by the panic cpu initially. Therefore,
-	 * all cpus could invoke this handler from die() and the panic CPU
-	 * will call machine_kexec() directly from this handler to do
-	 * kexec boot.
+	 * Soft-reset will be used until better mechanism is implemented.
+	 */
+	if (cpus_weight(cpus_in_crash) < ncpus) {
+		printk(KERN_EMERG "done waiting: %d cpu(s) not responding\n",
+			ncpus - cpus_weight(cpus_in_crash));
+		printk(KERN_EMERG "Activate soft-reset to stop other cpu(s)\n");
+		cpus_in_sr = CPU_MASK_NONE;
+		atomic_set(&enter_on_soft_reset, 0);
+		while (cpus_weight(cpus_in_crash) < ncpus)
+			cpu_relax();
+	}
+	/*
+	 * Make sure all CPUs are entered via soft-reset if the kdump is
+	 * invoked using soft-reset.
 	 */
-	if (atomic_read(&waiting_for_crash_ipi))
-		printk(KERN_ALERT "done waiting: %d cpus not responding\n",
-			atomic_read(&waiting_for_crash_ipi));
+	if (cpu_isset(cpu, cpus_in_sr))
+		crash_soft_reset_check(cpu);
 	/* Leave the IPI callback set */
 }
+
+/*
+ * This function will be called by secondary cpus or by kexec cpu
+ * if soft-reset is activated to stop some CPUs.
+ */
+void crash_kexec_secondary(struct pt_regs *regs)
+{
+	int cpu = smp_processor_id();
+	unsigned long flags;
+	int msecs = 5;
+
+	local_irq_save(flags);
+	/* Wait 5ms if the kexec CPU is not entered yet. */
+	while (crashing_cpu < 0) {
+		if (--msecs < 0) {
+			/*
+			 * Either kdump image is not loaded or
+			 * kdump process is not started - Probably xmon
+			 * exited using 'x'(exit and recover) or
+			 * kexec_should_crash() failed for all running tasks.
+			 */
+			cpu_clear(cpu, cpus_in_sr);
+			local_irq_restore(flags);
+			return;
+		}
+		mdelay(1);
+		cpu_relax();
+	}
+	if (cpu == crashing_cpu) {
+		/*
+		 * Panic CPU will enter this func only via soft-reset.
+		 * Wait until all secondary CPUs entered and
+		 * then start kexec boot.
+		 */
+		crash_soft_reset_check(cpu);
+		cpu_set(crashing_cpu, cpus_in_crash);
+		if (ppc_md.kexec_cpu_down)
+			ppc_md.kexec_cpu_down(1, 0);
+		machine_kexec(kexec_crash_image);
+		/* NOTREACHED */
+	}
+	crash_ipi_callback(regs);
+}
+
 #else
-static void crash_kexec_prepare_cpus(void)
+static void crash_kexec_prepare_cpus(int cpu)
 {
 	/*
 	 * move the secondarys to us so that we can copy
@@ -171,6 +258,10 @@ static void crash_kexec_prepare_cpus(void)
 	smp_release_cpus();
 }
 
+void crash_kexec_secondary(struct pt_regs *regs)
+{
+	cpus_in_sr = CPU_MASK_NONE;
+}
 #endif
 
 void default_machine_crash_shutdown(struct pt_regs *regs)
@@ -199,14 +290,14 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
 			desc->handler->disable(irq);
 	}
 
-	if (ppc_md.kexec_cpu_down)
-		ppc_md.kexec_cpu_down(1, 0);
-
 	/*
 	 * Make a note of crashing cpu. Will be used in machine_kexec
 	 * such that another IPI will not be sent.
 	 */
 	crashing_cpu = smp_processor_id();
-	crash_kexec_prepare_cpus();
 	crash_save_this_cpu(regs, crashing_cpu);
+	crash_kexec_prepare_cpus(crashing_cpu);
+	cpu_set(crashing_cpu, cpus_in_crash);
+	if (ppc_md.kexec_cpu_down)
+		ppc_md.kexec_cpu_down(1, 0);
 }
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 52f5659534f4..fa6bd97b6b9d 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -52,9 +52,13 @@
 #include <asm/firmware.h>
 #include <asm/processor.h>
 #endif
+#include <asm/kexec.h>
 
 #ifdef CONFIG_PPC64 /* XXX */
 #define _IO_BASE pci_io_base
+#ifdef CONFIG_KEXEC
+cpumask_t cpus_in_sr = CPU_MASK_NONE;
+#endif
 #endif
 
 #ifdef CONFIG_DEBUGGER
@@ -97,7 +101,7 @@ static DEFINE_SPINLOCK(die_lock);
 
 int die(const char *str, struct pt_regs *regs, long err)
 {
-	static int die_counter, crash_dump_start = 0;
+	static int die_counter;
 
 	if (debugger(regs))
 		return 1;
@@ -137,21 +141,12 @@ int die(const char *str, struct pt_regs *regs, long err)
 	print_modules();
 	show_regs(regs);
 	bust_spinlocks(0);
+	spin_unlock_irq(&die_lock);
 
-	if (!crash_dump_start && kexec_should_crash(current)) {
-		crash_dump_start = 1;
-		spin_unlock_irq(&die_lock);
+	if (kexec_should_crash(current) ||
+		kexec_sr_activated(smp_processor_id()))
 		crash_kexec(regs);
-		/* NOTREACHED */
-	}
-	spin_unlock_irq(&die_lock);
-	if (crash_dump_start)
-		/*
-		 * Only for soft-reset: Other CPUs will be responded to an IPI
-		 * sent by first kexec CPU.
-		 */
-		for(;;)
-			;
+	crash_kexec_secondary(regs);
 
 	if (in_interrupt())
 		panic("Fatal exception in interrupt");
@@ -215,6 +210,10 @@ void system_reset_exception(struct pt_regs *regs)
 		return;
 	}
 
+#ifdef CONFIG_KEXEC
+	cpu_set(smp_processor_id(), cpus_in_sr);
+#endif
+
 	die("System Reset", regs, SIGABRT);
 
 	/* Must die if the interrupt is not recoverable */