aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kernel/smp.c
diff options
context:
space:
mode:
authorDon Zickus <dzickus@redhat.com>2012-05-11 14:41:13 -0400
committerIngo Molnar <mingo@kernel.org>2012-05-14 05:49:37 -0400
commit5d2b86d90f7cc4a41316cef3d41560da6141f45c (patch)
tree2cc9cc6c83d432e5e805b82d682d320fd264195a /arch/x86/kernel/smp.c
parent144d102b926f887d3d9f909b69a5c4f504ae0d40 (diff)
Revert "x86, reboot: Use NMI instead of REBOOT_VECTOR to stop cpus"
This reverts commit 3603a2512f9e69dc87914ba922eb4a0812b21cd6. Originally I wanted a better hammer to shutdown cpus during panic. However, this really steps on the toes of various spinlocks in the panic path. Sometimes it is easier to wait for the IRQ to become re-enabled to indicate the cpu left the critical region and then shutdown the cpu. The next patch moves the NMI addition after the IRQ part. To make it easier to see the logic of everything, revert this patch and apply the next simpler patch. Signed-off-by: Don Zickus <dzickus@redhat.com> Cc: Peter Zijlstra <peterz@infradead.org> Link: http://lkml.kernel.org/r/1336761675-24296-2-git-send-email-dzickus@redhat.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/kernel/smp.c')
-rw-r--r--arch/x86/kernel/smp.c59
1 files changed, 2 insertions, 57 deletions
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 66c74f481cab..6d20f523bc4e 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -29,7 +29,6 @@
29#include <asm/mmu_context.h> 29#include <asm/mmu_context.h>
30#include <asm/proto.h> 30#include <asm/proto.h>
31#include <asm/apic.h> 31#include <asm/apic.h>
32#include <asm/nmi.h>
33/* 32/*
34 * Some notes on x86 processor bugs affecting SMP operation: 33 * Some notes on x86 processor bugs affecting SMP operation:
35 * 34 *
@@ -149,60 +148,6 @@ void native_send_call_func_ipi(const struct cpumask *mask)
149 free_cpumask_var(allbutself); 148 free_cpumask_var(allbutself);
150} 149}
151 150
152static atomic_t stopping_cpu = ATOMIC_INIT(-1);
153
154static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
155{
156 /* We are registered on stopping cpu too, avoid spurious NMI */
157 if (raw_smp_processor_id() == atomic_read(&stopping_cpu))
158 return NMI_HANDLED;
159
160 stop_this_cpu(NULL);
161
162 return NMI_HANDLED;
163}
164
165static void native_nmi_stop_other_cpus(int wait)
166{
167 unsigned long flags;
168 unsigned long timeout;
169
170 if (reboot_force)
171 return;
172
173 /*
174 * Use an own vector here because smp_call_function
175 * does lots of things not suitable in a panic situation.
176 */
177 if (num_online_cpus() > 1) {
178 /* did someone beat us here? */
179 if (atomic_cmpxchg(&stopping_cpu, -1, safe_smp_processor_id()) != -1)
180 return;
181
182 if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
183 NMI_FLAG_FIRST, "smp_stop"))
184 /* Note: we ignore failures here */
185 return;
186
187 /* sync above data before sending NMI */
188 wmb();
189
190 apic->send_IPI_allbutself(NMI_VECTOR);
191
192 /*
193 * Don't wait longer than a second if the caller
194 * didn't ask us to wait.
195 */
196 timeout = USEC_PER_SEC;
197 while (num_online_cpus() > 1 && (wait || timeout--))
198 udelay(1);
199 }
200
201 local_irq_save(flags);
202 disable_local_APIC();
203 local_irq_restore(flags);
204}
205
206/* 151/*
207 * this function calls the 'stop' function on all other CPUs in the system. 152 * this function calls the 'stop' function on all other CPUs in the system.
208 */ 153 */
@@ -215,7 +160,7 @@ asmlinkage void smp_reboot_interrupt(void)
215 irq_exit(); 160 irq_exit();
216} 161}
217 162
218static void native_irq_stop_other_cpus(int wait) 163static void native_stop_other_cpus(int wait)
219{ 164{
220 unsigned long flags; 165 unsigned long flags;
221 unsigned long timeout; 166 unsigned long timeout;
@@ -298,7 +243,7 @@ struct smp_ops smp_ops = {
298 .smp_prepare_cpus = native_smp_prepare_cpus, 243 .smp_prepare_cpus = native_smp_prepare_cpus,
299 .smp_cpus_done = native_smp_cpus_done, 244 .smp_cpus_done = native_smp_cpus_done,
300 245
301 .stop_other_cpus = native_nmi_stop_other_cpus, 246 .stop_other_cpus = native_stop_other_cpus,
302 .smp_send_reschedule = native_smp_send_reschedule, 247 .smp_send_reschedule = native_smp_send_reschedule,
303 248
304 .cpu_up = native_cpu_up, 249 .cpu_up = native_cpu_up,