author     Alok Kataria <akataria@vmware.com>      2010-10-11 17:37:08 -0400
committer  H. Peter Anvin <hpa@linux.intel.com>    2010-10-21 16:30:44 -0400
commit     76fac077db6b34e2c6383a7b4f3f4f7b7d06d8ce (patch)
tree       7195772847cfaaafe5839279138fd9a3204028d3 /arch/x86
parent     03f1a17cd5c69deccd3cfe1b954b9426d7a686e3 (diff)
x86, kexec: Make sure to stop all CPUs before exiting the kernel
x86 smp_ops now has a new op, stop_other_cpus, which takes a parameter
"wait". This allows the caller to specify whether it wants to wait until
all the cpus have processed the stop IPI. This is required specifically
for the kexec case, where we should wait for all the cpus to be stopped
before starting the new kernel. We now wait for the cpus to stop in all
cases except for panic/kdump, where we expect things to be broken and we
are doing our best to make things work anyway.

This patch fixes a legitimate regression, which was introduced during
2.6.30, by commit id 4ef702c10b5df18ab04921fc252c26421d4d6c75.

Signed-off-by: Alok N Kataria <akataria@vmware.com>
LKML-Reference: <1286833028.1372.20.camel@ank32.eng.vmware.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: <stable@kernel.org> v2.6.30-36
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
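As a quick illustration of the new semantics, the snippets below restate the relevant hunks from the diff that follows (a sketch only, not a substitute for the full kernel sources): the existing entry point keeps its non-waiting behaviour, the new wrapper waits, and the native implementation's timeout only applies when the caller did not ask to wait.

    /* From the arch/x86/include/asm/smp.h hunk below. */
    static inline void smp_send_stop(void)
    {
            smp_ops.stop_other_cpus(0);     /* best effort, do not wait */
    }

    static inline void stop_other_cpus(void)
    {
            smp_ops.stop_other_cpus(1);     /* kexec/shutdown: wait for all cpus */
    }

    /* From the arch/x86/kernel/smp.c hunk below: spin until the other
     * cpus are offline, but give up after ~1s unless "wait" was set. */
    while (num_online_cpus() > 1 && (wait || timeout--))
            udelay(1);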
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/smp.h   9
-rw-r--r--  arch/x86/kernel/reboot.c     2
-rw-r--r--  arch/x86/kernel/smp.c       15
-rw-r--r--  arch/x86/xen/enlighten.c     2
-rw-r--r--  arch/x86/xen/smp.c           6
5 files changed, 21 insertions(+), 13 deletions(-)
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 4cfc90824068..4c2f63c7fc1b 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -50,7 +50,7 @@ struct smp_ops {
 	void (*smp_prepare_cpus)(unsigned max_cpus);
 	void (*smp_cpus_done)(unsigned max_cpus);
 
-	void (*smp_send_stop)(void);
+	void (*stop_other_cpus)(int wait);
 	void (*smp_send_reschedule)(int cpu);
 
 	int (*cpu_up)(unsigned cpu);
@@ -73,7 +73,12 @@ extern struct smp_ops smp_ops;
 
 static inline void smp_send_stop(void)
 {
-	smp_ops.smp_send_stop();
+	smp_ops.stop_other_cpus(0);
+}
+
+static inline void stop_other_cpus(void)
+{
+	smp_ops.stop_other_cpus(1);
 }
 
 static inline void smp_prepare_boot_cpu(void)
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index e3af342fe83a..76a0d715a031 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -641,7 +641,7 @@ void native_machine_shutdown(void)
 	/* O.K Now that I'm on the appropriate processor,
 	 * stop all of the others.
 	 */
-	smp_send_stop();
+	stop_other_cpus();
 #endif
 
 	lapic_shutdown();
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index d801210945d6..513deac7228d 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -159,10 +159,10 @@ asmlinkage void smp_reboot_interrupt(void)
 	irq_exit();
 }
 
-static void native_smp_send_stop(void)
+static void native_stop_other_cpus(int wait)
 {
 	unsigned long flags;
-	unsigned long wait;
+	unsigned long timeout;
 
 	if (reboot_force)
 		return;
@@ -179,9 +179,12 @@ static void native_smp_send_stop(void)
 	if (num_online_cpus() > 1) {
 		apic->send_IPI_allbutself(REBOOT_VECTOR);
 
-		/* Don't wait longer than a second */
-		wait = USEC_PER_SEC;
-		while (num_online_cpus() > 1 && wait--)
+		/*
+		 * Don't wait longer than a second if the caller
+		 * didn't ask us to wait.
+		 */
+		timeout = USEC_PER_SEC;
+		while (num_online_cpus() > 1 && (wait || timeout--))
 			udelay(1);
 	}
 
@@ -227,7 +230,7 @@ struct smp_ops smp_ops = {
 	.smp_prepare_cpus	= native_smp_prepare_cpus,
 	.smp_cpus_done		= native_smp_cpus_done,
 
-	.smp_send_stop		= native_smp_send_stop,
+	.stop_other_cpus	= native_stop_other_cpus,
 	.smp_send_reschedule	= native_smp_send_reschedule,
 
 	.cpu_up			= native_cpu_up,
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 7d46c8441418..44f80861382f 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1018,7 +1018,7 @@ static void xen_reboot(int reason)
 	struct sched_shutdown r = { .reason = reason };
 
 #ifdef CONFIG_SMP
-	smp_send_stop();
+	stop_other_cpus();
 #endif
 
 	if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 25f232b18a82..f4d010031465 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -400,9 +400,9 @@ static void stop_self(void *v)
 	BUG();
 }
 
-static void xen_smp_send_stop(void)
+static void xen_stop_other_cpus(int wait)
 {
-	smp_call_function(stop_self, NULL, 0);
+	smp_call_function(stop_self, NULL, wait);
 }
 
 static void xen_smp_send_reschedule(int cpu)
@@ -470,7 +470,7 @@ static const struct smp_ops xen_smp_ops __initdata = {
 	.cpu_disable		= xen_cpu_disable,
 	.play_dead		= xen_play_dead,
 
-	.smp_send_stop		= xen_smp_send_stop,
+	.stop_other_cpus	= xen_stop_other_cpus,
 	.smp_send_reschedule	= xen_smp_send_reschedule,
 
 	.send_call_func_ipi	= xen_smp_send_call_function_ipi,