author      Matt Evans <matt@ozlabs.org>                        2010-07-29 14:47:17 -0400
committer   Benjamin Herrenschmidt <benh@kernel.crashing.org>   2010-07-31 01:05:21 -0400
commit      e2f7f73717c0a2927bbe7551d90b1ec47a094361 (patch)
tree        e07dac6b21235a92ba6c456e3c471749a2396191 /arch/powerpc
parent      2c48a7d615b82e030196e8b61ab0c7933be16dff (diff)
powerpc/kexec: Add to and tidy debug/comments in machine_kexec64.c
Tidies some typos, KERN_INFO-ises an info msg, and adds a debug msg showing
when the final sequence starts. Also adds a comment to
kexec_prepare_cpus_wait() noting a possible problem: the need for kexec to
deal with CPUs that failed to start up originally.

Signed-off-by: Matt Evans <matt@ozlabs.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc')
-rw-r--r--   arch/powerpc/kernel/machine_kexec_64.c   29
1 file changed, 24 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index 022d2f613b7b..a0bcec3614e1 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -15,6 +15,7 @@
 #include <linux/thread_info.h>
 #include <linux/init_task.h>
 #include <linux/errno.h>
+#include <linux/kernel.h>
 
 #include <asm/page.h>
 #include <asm/current.h>
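[Note: the new #include of linux/kernel.h is presumably what pulls in pr_debug() for the message added in the final hunk below; in kernels of this vintage the pr_* helpers live in linux/kernel.h rather than a separate printk header.]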
@@ -184,7 +185,20 @@ static void kexec_prepare_cpus_wait(int wait_state)
 
 	hw_breakpoint_disable();
 	my_cpu = get_cpu();
-	/* Make sure each CPU has atleast made it to the state we need */
+	/* Make sure each CPU has at least made it to the state we need.
+	 *
+	 * FIXME: There is a (slim) chance of a problem if not all of the CPUs
+	 * are correctly onlined. If somehow we start a CPU on boot with RTAS
+	 * start-cpu, but somehow that CPU doesn't write callin_cpu_map[] in
+	 * time, the boot CPU will timeout. If it does eventually execute
+	 * stuff, the secondary will start up (paca[].cpu_start was written) and
+	 * get into a peculiar state. If the platform supports
+	 * smp_ops->take_timebase(), the secondary CPU will probably be spinning
+	 * in there. If not (i.e. pseries), the secondary will continue on and
+	 * try to online itself/idle/etc. If it survives that, we need to find
+	 * these possible-but-not-online-but-should-be CPUs and chaperone them
+	 * into kexec_smp_wait().
+	 */
 	for_each_online_cpu(i) {
 		if (i == my_cpu)
 			continue;
@@ -192,9 +206,9 @@ static void kexec_prepare_cpus_wait(int wait_state)
 		while (paca[i].kexec_state < wait_state) {
 			barrier();
 			if (i != notified) {
-				printk( "kexec: waiting for cpu %d (physical"
-						" %d) to enter %i state\n",
+				printk(KERN_INFO "kexec: waiting for cpu %d "
+				       "(physical %d) to enter %i state\n",
 					i, paca[i].hw_cpu_id, wait_state);
 				notified = i;
 			}
 		}
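
Taken together, the two hunks above leave the busy-wait in kexec_prepare_cpus_wait() reading as follows. This is only a reconstruction from the hunks for readability; the declarations of i, my_cpu and notified come from the unchanged part of the function:

	/* Poll every other online CPU's kexec_state until it reaches
	 * wait_state, logging once per CPU (tracked via 'notified') so
	 * the console shows which CPU is holding things up.
	 */
	for_each_online_cpu(i) {
		if (i == my_cpu)
			continue;

		while (paca[i].kexec_state < wait_state) {
			barrier();	/* force a re-read of paca[i].kexec_state */
			if (i != notified) {
				printk(KERN_INFO "kexec: waiting for cpu %d "
				       "(physical %d) to enter %i state\n",
					i, paca[i].hw_cpu_id, wait_state);
				notified = i;
			}
		}
	}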
@@ -218,7 +232,10 @@ static void kexec_prepare_cpus(void)
 	if (ppc_md.kexec_cpu_down)
 		ppc_md.kexec_cpu_down(0, 0);
 
-	/* Before removing MMU mapings make sure all CPUs have entered real mode */
+	/*
+	 * Before removing MMU mappings make sure all CPUs have entered real
+	 * mode:
+	 */
 	kexec_prepare_cpus_wait(KEXEC_STATE_REAL_MODE);
 
 	put_cpu();
@@ -287,6 +304,8 @@ void default_machine_kexec(struct kimage *image)
 	if (crashing_cpu == -1)
 		kexec_prepare_cpus();
 
+	pr_debug("kexec: Starting switchover sequence.\n");
+
 	/* switch to a staticly allocated stack. Based on irq stack code.
 	 * XXX: the task struct will likely be invalid once we do the copy!
 	 */
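
One note on the new message: pr_debug() only produces output when the file is built with DEBUG defined, or when CONFIG_DYNAMIC_DEBUG is enabled and the callsite is switched on at runtime; otherwise it compiles away entirely, so the switchover message costs nothing in production builds. A minimal sketch of the compile-time enable, assuming the usual kernel convention (the example function is hypothetical):

	/* Defining DEBUG before the includes turns pr_debug() into a real
	 * printk(KERN_DEBUG ...) for this translation unit only; without it
	 * (and without dynamic debug) the call below compiles to nothing.
	 */
	#define DEBUG
	#include <linux/kernel.h>

	static void example(void)
	{
		pr_debug("kexec: Starting switchover sequence.\n");
	}

With CONFIG_DYNAMIC_DEBUG the same callsite can instead be enabled at runtime, e.g. echo 'file machine_kexec_64.c +p' > /sys/kernel/debug/dynamic_debug/control.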