Diffstat (limited to 'arch/powerpc/kernel/crash.c')
 arch/powerpc/kernel/crash.c | 153 ++++++++++++++++++++++++++++++++++---------
 1 file changed, 122 insertions(+), 31 deletions(-)
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index e253a45dcf10..358cecdc6aef 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -24,9 +24,11 @@
 #include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/types.h>
+#include <linux/irq.h>
 
 #include <asm/processor.h>
 #include <asm/machdep.h>
+#include <asm/kexec.h>
 #include <asm/kdump.h>
 #include <asm/lmb.h>
 #include <asm/firmware.h>
@@ -41,6 +43,7 @@
 
 /* This keeps a track of which one is crashing cpu. */
 int crashing_cpu = -1;
+static cpumask_t cpus_in_crash = CPU_MASK_NONE;
 
 static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
 		size_t data_len)
@@ -98,34 +101,66 @@ static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
 }
 
 #ifdef CONFIG_SMP
-static atomic_t waiting_for_crash_ipi;
+static atomic_t enter_on_soft_reset = ATOMIC_INIT(0);
 
 void crash_ipi_callback(struct pt_regs *regs)
 {
 	int cpu = smp_processor_id();
 
-	if (cpu == crashing_cpu)
-		return;
-
 	if (!cpu_online(cpu))
 		return;
 
-	if (ppc_md.kexec_cpu_down)
-		ppc_md.kexec_cpu_down(1, 1);
-
 	local_irq_disable();
+	if (!cpu_isset(cpu, cpus_in_crash))
+		crash_save_this_cpu(regs, cpu);
+	cpu_set(cpu, cpus_in_crash);
 
-	crash_save_this_cpu(regs, cpu);
-	atomic_dec(&waiting_for_crash_ipi);
+	/*
+	 * Entered via soft-reset - could be the kdump
+	 * process is invoked using soft-reset or user activated
+	 * it if some CPU did not respond to an IPI.
+	 * For soft-reset, the secondary CPU can enter this func
+	 * twice. 1 - using IPI, and 2. soft-reset.
+	 * Tell the kexec CPU that entered via soft-reset and ready
+	 * to go down.
+	 */
+	if (cpu_isset(cpu, cpus_in_sr)) {
+		cpu_clear(cpu, cpus_in_sr);
+		atomic_inc(&enter_on_soft_reset);
+	}
+
+	/*
+	 * Starting the kdump boot.
+	 * This barrier is needed to make sure that all CPUs are stopped.
+	 * If not, soft-reset will be invoked to bring other CPUs.
+	 */
+	while (!cpu_isset(crashing_cpu, cpus_in_crash))
+		cpu_relax();
+
+	if (ppc_md.kexec_cpu_down)
+		ppc_md.kexec_cpu_down(1, 1);
 	kexec_smp_wait();
 	/* NOTREACHED */
 }
 
-static void crash_kexec_prepare_cpus(void)
+/*
+ * Wait until all CPUs are entered via soft-reset.
+ */
+static void crash_soft_reset_check(int cpu)
+{
+	unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
+
+	cpu_clear(cpu, cpus_in_sr);
+	while (atomic_read(&enter_on_soft_reset) != ncpus)
+		cpu_relax();
+}
+
+
+static void crash_kexec_prepare_cpus(int cpu)
 {
 	unsigned int msecs;
 
-	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
+	unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
 
 	crash_send_ipi(crash_ipi_callback);
 	smp_wmb();
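Editor's note: the hunk above replaces the atomic countdown `waiting_for_crash_ipi` with a membership mask. Each secondary saves its registers at most once, marks itself in `cpus_in_crash`, and then spins until the crashing CPU's bit appears before parking in `kexec_smp_wait()`. The following is a minimal user-space sketch of that rendezvous pattern, using C11 atomics and pthreads instead of kernel cpumask primitives; the names (`cpus_in_crash`, `save_this_cpu`, `CRASHING_CPU`) are illustrative only, not kernel API.

/* Hypothetical user-space analogue of the cpus_in_crash rendezvous.
 * Build with: cc -std=c11 -pthread rendezvous.c */
#define _POSIX_C_SOURCE 200809L
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <sched.h>

#define NCPUS 4                    /* secondaries, excluding the panic cpu */
#define CRASHING_CPU NCPUS         /* highest bit stands for the panic cpu */

static atomic_ulong cpus_in_crash; /* analogue of the kernel cpumask */

static void save_this_cpu(int cpu) { printf("cpu %d: state saved\n", cpu); }

static void *secondary(void *arg)
{
	int cpu = (int)(long)arg;
	unsigned long bit = 1UL << cpu;

	/* Save state only once, then mark ourselves as "in crash". */
	if (!(atomic_load(&cpus_in_crash) & bit))
		save_this_cpu(cpu);
	atomic_fetch_or(&cpus_in_crash, bit);

	/* Spin until the crashing cpu has also checked in. */
	while (!(atomic_load(&cpus_in_crash) & (1UL << CRASHING_CPU)))
		sched_yield();

	printf("cpu %d: released, would call kexec_smp_wait()\n", cpu);
	return NULL;
}

int main(void)
{
	pthread_t tid[NCPUS];

	for (long i = 0; i < NCPUS; i++)
		pthread_create(&tid[i], NULL, secondary, (void *)i);

	/* "Crashing cpu": wait until every secondary is accounted for ...
	 * (__builtin_popcountl is a GCC/Clang builtin). */
	while (__builtin_popcountl(atomic_load(&cpus_in_crash)) < NCPUS)
		sched_yield();

	/* ... then check in ourselves, which releases the secondaries. */
	atomic_fetch_or(&cpus_in_crash, 1UL << CRASHING_CPU);

	for (int i = 0; i < NCPUS; i++)
		pthread_join(tid[i], NULL);
	return 0;
}

Compared with a plain countdown, a mask tolerates a CPU entering the crash path twice (IPI and then soft-reset) and lets the crashing CPU see exactly how many CPUs are still missing, which is what the soft-reset fallback in the later hunk relies on.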
@@ -133,14 +168,13 @@ static void crash_kexec_prepare_cpus(void)
 	/*
 	 * FIXME: Until we will have the way to stop other CPUSs reliabally,
 	 * the crash CPU will send an IPI and wait for other CPUs to
-	 * respond. If not, proceed the kexec boot even though we failed to
-	 * capture other CPU states.
+	 * respond.
 	 * Delay of at least 10 seconds.
 	 */
-	printk(KERN_ALERT "Sending IPI to other cpus...\n");
+	printk(KERN_EMERG "Sending IPI to other cpus...\n");
 	msecs = 10000;
-	while ((atomic_read(&waiting_for_crash_ipi) > 0) && (--msecs > 0)) {
-		barrier();
+	while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) {
+		cpu_relax();
 		mdelay(1);
 	}
 
@@ -149,18 +183,71 @@ static void crash_kexec_prepare_cpus(void)
 	/*
 	 * FIXME: In case if we do not get all CPUs, one possibility: ask the
 	 * user to do soft reset such that we get all.
-	 * IPI handler is already set by the panic cpu initially. Therefore,
-	 * all cpus could invoke this handler from die() and the panic CPU
-	 * will call machine_kexec() directly from this handler to do
-	 * kexec boot.
+	 * Soft-reset will be used until better mechanism is implemented.
+	 */
+	if (cpus_weight(cpus_in_crash) < ncpus) {
+		printk(KERN_EMERG "done waiting: %d cpu(s) not responding\n",
+			ncpus - cpus_weight(cpus_in_crash));
+		printk(KERN_EMERG "Activate soft-reset to stop other cpu(s)\n");
+		cpus_in_sr = CPU_MASK_NONE;
+		atomic_set(&enter_on_soft_reset, 0);
+		while (cpus_weight(cpus_in_crash) < ncpus)
+			cpu_relax();
+	}
+	/*
+	 * Make sure all CPUs are entered via soft-reset if the kdump is
+	 * invoked using soft-reset.
 	 */
-	if (atomic_read(&waiting_for_crash_ipi))
-		printk(KERN_ALERT "done waiting: %d cpus not responding\n",
-			atomic_read(&waiting_for_crash_ipi));
+	if (cpu_isset(cpu, cpus_in_sr))
+		crash_soft_reset_check(cpu);
 	/* Leave the IPI callback set */
 }
+
+/*
+ * This function will be called by secondary cpus or by kexec cpu
+ * if soft-reset is activated to stop some CPUs.
+ */
+void crash_kexec_secondary(struct pt_regs *regs)
+{
+	int cpu = smp_processor_id();
+	unsigned long flags;
+	int msecs = 5;
+
+	local_irq_save(flags);
+	/* Wait 5ms if the kexec CPU is not entered yet. */
+	while (crashing_cpu < 0) {
+		if (--msecs < 0) {
+			/*
+			 * Either kdump image is not loaded or
+			 * kdump process is not started - Probably xmon
+			 * exited using 'x'(exit and recover) or
+			 * kexec_should_crash() failed for all running tasks.
+			 */
+			cpu_clear(cpu, cpus_in_sr);
+			local_irq_restore(flags);
+			return;
+		}
+		mdelay(1);
+		cpu_relax();
+	}
+	if (cpu == crashing_cpu) {
+		/*
+		 * Panic CPU will enter this func only via soft-reset.
+		 * Wait until all secondary CPUs entered and
+		 * then start kexec boot.
+		 */
+		crash_soft_reset_check(cpu);
+		cpu_set(crashing_cpu, cpus_in_crash);
+		if (ppc_md.kexec_cpu_down)
+			ppc_md.kexec_cpu_down(1, 0);
+		machine_kexec(kexec_crash_image);
+		/* NOTREACHED */
+	}
+	crash_ipi_callback(regs);
+}
+
 #else
-static void crash_kexec_prepare_cpus(void)
+static void crash_kexec_prepare_cpus(int cpu)
 {
 	/*
 	 * move the secondarys to us so that we can copy
@@ -171,6 +258,10 @@ static void crash_kexec_prepare_cpus(void)
 	smp_release_cpus();
 }
 
+void crash_kexec_secondary(struct pt_regs *regs)
+{
+	cpus_in_sr = CPU_MASK_NONE;
+}
 #endif
 
 void default_machine_crash_shutdown(struct pt_regs *regs)
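Editor's note: `crash_kexec_secondary()`, added above, is the soft-reset entry point. It polls for up to 5 ms waiting for `crashing_cpu` to be set, bails out if kdump never started (for example, no crash image is loaded or xmon was exited with 'x'), and otherwise either runs the panic-CPU path or falls through to `crash_ipi_callback()`. Below is a minimal user-space sketch of that bounded-poll-then-bail pattern, assuming POSIX `nanosleep()`; the names are made up and nothing here is kernel API.

/* Illustrative sketch of the bounded polling in crash_kexec_secondary(). */
#define _POSIX_C_SOURCE 199309L
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

static atomic_int crashing_cpu = -1;   /* -1 means "kdump not started" */

static void sleep_ms(int ms)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = (long)ms * 1000000L };
	nanosleep(&ts, NULL);
}

/* Analogue of the entry check: wait briefly, then give up cleanly. */
static void secondary_entry(int cpu)
{
	int msecs = 5;

	while (atomic_load(&crashing_cpu) < 0) {
		if (--msecs < 0) {
			printf("cpu %d: kdump never started, returning\n", cpu);
			return;
		}
		sleep_ms(1);
	}
	printf("cpu %d: joining crash shutdown led by cpu %d\n",
	       cpu, atomic_load(&crashing_cpu));
}

int main(void)
{
	secondary_entry(1);              /* times out: crashing_cpu still -1 */
	atomic_store(&crashing_cpu, 0);  /* pretend cpu 0 started kdump */
	secondary_entry(2);              /* proceeds immediately */
	return 0;
}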
@@ -190,23 +281,23 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
 	local_irq_disable();
 
 	for_each_irq(irq) {
-		struct irq_desc *desc = irq_descp(irq);
+		struct irq_desc *desc = irq_desc + irq;
 
 		if (desc->status & IRQ_INPROGRESS)
-			desc->handler->end(irq);
+			desc->chip->end(irq);
 
 		if (!(desc->status & IRQ_DISABLED))
-			desc->handler->disable(irq);
 	}
 
-	if (ppc_md.kexec_cpu_down)
-		ppc_md.kexec_cpu_down(1, 0);
-
+			desc->chip->disable(irq);
 	/*
 	 * Make a note of crashing cpu. Will be used in machine_kexec
 	 * such that another IPI will not be sent.
 	 */
 	crashing_cpu = smp_processor_id();
-	crash_kexec_prepare_cpus();
 	crash_save_this_cpu(regs, crashing_cpu);
+	crash_kexec_prepare_cpus(crashing_cpu);
+	cpu_set(crashing_cpu, cpus_in_crash);
+	if (ppc_md.kexec_cpu_down)
+		ppc_md.kexec_cpu_down(1, 0);
 }
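Editor's note: a secondary CPU can reach the crash path twice, once via the IPI and once via soft-reset, so the patch counts a CPU toward `enter_on_soft_reset` only while its bit is still set in `cpus_in_sr`, and `crash_soft_reset_check()` waits for that counter to reach the number of secondaries. The sketch below models just that "count each CPU at most once even if it enters twice" accounting with a user-space test-and-clear on a bit mask plus a counter; it is an analogue under those assumptions (the kernel does the test and clear as separate cpumask operations with interrupts disabled), not the kernel implementation.

/* User-space analogue of the enter_on_soft_reset accounting. */
#define _POSIX_C_SOURCE 200809L
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <sched.h>

#define NCPUS 4

/* Start with every secondary flagged as "entered via soft-reset". */
static atomic_ulong cpus_in_sr = (1UL << NCPUS) - 1;
static atomic_int enter_on_soft_reset;

static void crash_entry(int cpu)
{
	unsigned long bit = 1UL << cpu;

	/* Atomically test-and-clear this CPU's flag; only the first
	 * entry that still sees the bit set bumps the counter, which
	 * mirrors the cpu_isset()/cpu_clear()/atomic_inc() sequence. */
	if (atomic_fetch_and(&cpus_in_sr, ~bit) & bit)
		atomic_fetch_add(&enter_on_soft_reset, 1);
}

static void *secondary(void *arg)
{
	int cpu = (int)(long)arg;

	crash_entry(cpu);   /* first entry (e.g. via the IPI handler) */
	crash_entry(cpu);   /* second entry (via soft-reset) - no-op  */
	return NULL;
}

int main(void)
{
	pthread_t tid[NCPUS];

	for (long i = 0; i < NCPUS; i++)
		pthread_create(&tid[i], NULL, secondary, (void *)i);

	/* Panic CPU: wait until every secondary has been counted once. */
	while (atomic_load(&enter_on_soft_reset) != NCPUS)
		sched_yield();
	printf("all %d secondaries accounted for\n", NCPUS);

	for (int i = 0; i < NCPUS; i++)
		pthread_join(tid[i], NULL);
	return 0;
}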