Diffstat (limited to 'arch/x86/kernel/smp.c')

 arch/x86/kernel/smp.c | 100 ++++++++++++++++++++++++++++++++++++++++++++++------------------------------------------------------
 1 file changed, 46 insertions(+), 54 deletions(-)
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 66c74f481cab..48d2b7ded422 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -109,6 +109,9 @@
  * about nothing of note with C stepping upwards.
  */
 
+static atomic_t stopping_cpu = ATOMIC_INIT(-1);
+static bool smp_no_nmi_ipi = false;
+
 /*
  * this function sends a 'reschedule' IPI to another CPU.
  * it goes straight through and wastes no time serializing
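The two file-scope controls introduced here drive the rest of the patch: stopping_cpu records which CPU won the race to shut the others down, and smp_no_nmi_ipi lets the nonmi_ipi boot option veto the NMI fallback added below. For readers outside the kernel tree, a rough user-space analogue of the "did someone beat us here?" claim, using C11 <stdatomic.h> (a sketch with illustrative names, not kernel code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int stopping_cpu = ATOMIC_VAR_INIT(-1);	/* -1: no stopper yet */

/* Returns true if the calling "cpu" won the right to stop the others;
 * mirrors atomic_cmpxchg(&stopping_cpu, -1, safe_smp_processor_id()). */
static bool claim_stop(int this_cpu)
{
	int expected = -1;

	return atomic_compare_exchange_strong(&stopping_cpu, &expected, this_cpu);
}

int main(void)
{
	printf("cpu 0 claims: %d\n", claim_stop(0));	/* 1: first claim wins */
	printf("cpu 1 claims: %d\n", claim_stop(1));	/* 0: beaten to it */
	return 0;
}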
@@ -149,8 +152,6 @@ void native_send_call_func_ipi(const struct cpumask *mask)
 	free_cpumask_var(allbutself);
 }
 
-static atomic_t stopping_cpu = ATOMIC_INIT(-1);
-
 static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
 {
 	/* We are registered on stopping cpu too, avoid spurious NMI */
@@ -162,7 +163,19 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
 	return NMI_HANDLED;
 }
 
-static void native_nmi_stop_other_cpus(int wait)
+/*
+ * this function calls the 'stop' function on all other CPUs in the system.
+ */
+
+asmlinkage void smp_reboot_interrupt(void)
+{
+	ack_APIC_irq();
+	irq_enter();
+	stop_this_cpu(NULL);
+	irq_exit();
+}
+
+static void native_stop_other_cpus(int wait)
 {
 	unsigned long flags;
 	unsigned long timeout;
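smp_reboot_interrupt() is moved above its new caller but is otherwise unchanged; the REBOOT_VECTOR gate that invokes it is wired up outside this file (in arch/x86/kernel/irqinit.c in kernels of this vintage). The property the new ordering exploits is that this is an ordinary maskable interrupt: it cannot preempt a CPU that is holding interrupts off, so it acts as the sync point described in the comment below. A user-space sketch of that delivery behaviour, with a blockable POSIX signal standing in for the maskable IPI (illustrative names; compile with -pthread):

#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t stopped;

static void stop_handler(int sig)
{
	(void)sig;
	stopped = 1;	/* analogue of stop_this_cpu() */
}

static void *worker(void *arg)
{
	sigset_t mask;

	(void)arg;
	sigemptyset(&mask);
	sigaddset(&mask, SIGUSR1);

	pthread_sigmask(SIG_BLOCK, &mask, NULL);	/* "local_irq_disable()" */
	sleep(1);					/* critical section */
	pthread_sigmask(SIG_UNBLOCK, &mask, NULL);	/* "local_irq_enable()" */
	/* a pending SIGUSR1 is delivered here, never inside the critical section */

	while (!stopped)
		pause();
	return NULL;
}

int main(void)
{
	pthread_t t;

	signal(SIGUSR1, stop_handler);
	pthread_create(&t, NULL, worker, NULL);
	usleep(100 * 1000);		/* crude ordering; fine for a sketch */
	pthread_kill(t, SIGUSR1);	/* analogue of the REBOOT_VECTOR IPI */
	pthread_join(t, NULL);
	printf("worker stopped after leaving its critical section\n");
	return 0;
}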
@@ -174,20 +187,25 @@ static void native_nmi_stop_other_cpus(int wait)
 	 * Use an own vector here because smp_call_function
 	 * does lots of things not suitable in a panic situation.
 	 */
+
+	/*
+	 * We start by using the REBOOT_VECTOR irq.
+	 * The irq is treated as a sync point to allow critical
+	 * regions of code on other cpus to release their spin locks
+	 * and re-enable irqs. Jumping straight to an NMI might
+	 * accidentally cause deadlocks with further shutdown/panic
+	 * code. By syncing, we give the cpus up to one second to
+	 * finish their work before we force them off with the NMI.
+	 */
 	if (num_online_cpus() > 1) {
 		/* did someone beat us here? */
 		if (atomic_cmpxchg(&stopping_cpu, -1, safe_smp_processor_id()) != -1)
 			return;
 
-		if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
-					 NMI_FLAG_FIRST, "smp_stop"))
-			/* Note: we ignore failures here */
-			return;
-
-		/* sync above data before sending NMI */
+		/* sync above data before sending IRQ */
 		wmb();
 
-		apic->send_IPI_allbutself(NMI_VECTOR);
+		apic->send_IPI_allbutself(REBOOT_VECTOR);
 
 		/*
 		 * Don't wait longer than a second if the caller
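The wmb() before the IPI pairs with the read in smp_stop_nmi_callback(): the store to stopping_cpu must be globally visible before any other CPU can observe the interrupt, otherwise the "avoid spurious NMI" check earlier in the file could see stale data. A C11 sketch of the same publish-then-signal pairing (a simplified release/acquire model, not the kernel's wmb() semantics):

#include <stdatomic.h>

static atomic_int stopping_cpu = ATOMIC_VAR_INIT(-1);
static atomic_int ipi_sent = ATOMIC_VAR_INIT(0);

/* Stopper side: publish identity, fence, then "send the IPI". */
static void stopper(int this_cpu)
{
	atomic_store_explicit(&stopping_cpu, this_cpu, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* the wmb() analogue */
	atomic_store_explicit(&ipi_sent, 1, memory_order_relaxed);
}

/* Handler side: once the "IPI" is seen, the stopper's identity is visible. */
static int is_spurious(int my_cpu)
{
	if (!atomic_load_explicit(&ipi_sent, memory_order_relaxed))
		return 0;
	atomic_thread_fence(memory_order_acquire);
	return atomic_load_explicit(&stopping_cpu, memory_order_relaxed) == my_cpu;
}

int main(void)
{
	stopper(0);
	return is_spurious(0) ? 0 : 1;	/* single-threaded smoke test */
}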
@@ -197,63 +215,37 @@ static void native_nmi_stop_other_cpus(int wait)
 		while (num_online_cpus() > 1 && (wait || timeout--))
 			udelay(1);
 	}
+
+	/* if the REBOOT_VECTOR didn't work, try with the NMI */
+	if ((num_online_cpus() > 1) && (!smp_no_nmi_ipi)) {
+		if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
+					 NMI_FLAG_FIRST, "smp_stop"))
+			/* Note: we ignore failures here */
+			/* Hope the REBOOT_IRQ is good enough */
+			goto finish;
 
-	local_irq_save(flags);
-	disable_local_APIC();
-	local_irq_restore(flags);
-}
-
-/*
- * this function calls the 'stop' function on all other CPUs in the system.
- */
-
-asmlinkage void smp_reboot_interrupt(void)
-{
-	ack_APIC_irq();
-	irq_enter();
-	stop_this_cpu(NULL);
-	irq_exit();
-}
-
-static void native_irq_stop_other_cpus(int wait)
-{
-	unsigned long flags;
-	unsigned long timeout;
+		/* sync above data before sending IRQ */
+		wmb();
 
-	if (reboot_force)
-		return;
+		pr_emerg("Shutting down cpus with NMI\n");
 
-	/*
-	 * Use an own vector here because smp_call_function
-	 * does lots of things not suitable in a panic situation.
-	 * On most systems we could also use an NMI here,
-	 * but there are a few systems around where NMI
-	 * is problematic so stay with an non NMI for now
-	 * (this implies we cannot stop CPUs spinning with irq off
-	 * currently)
-	 */
-	if (num_online_cpus() > 1) {
-		apic->send_IPI_allbutself(REBOOT_VECTOR);
+		apic->send_IPI_allbutself(NMI_VECTOR);
 
 		/*
-		 * Don't wait longer than a second if the caller
+		 * Don't wait longer than 10 ms if the caller
 		 * didn't ask us to wait.
 		 */
-		timeout = USEC_PER_SEC;
+		timeout = USEC_PER_MSEC * 10;
 		while (num_online_cpus() > 1 && (wait || timeout--))
 			udelay(1);
 	}
 
+finish:
 	local_irq_save(flags);
 	disable_local_APIC();
 	local_irq_restore(flags);
 }
 
-static void native_smp_disable_nmi_ipi(void)
-{
-	smp_ops.stop_other_cpus = native_irq_stop_other_cpus;
-}
-
 /*
  * Reschedule call back.
  */
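Taken together, the reworked native_stop_other_cpus() now escalates: ask via REBOOT_VECTOR and poll for up to a second (USEC_PER_SEC iterations of udelay(1)), then, unless nonmi_ipi was given, register the NMI handler and retry with NMI_VECTOR for at most 10 ms; if handler registration fails it jumps to finish so the local APIC is still disabled. The same ask-then-force shape in a self-contained pthreads sketch (pthread_cancel() standing in for the NMI; names are illustrative; compile with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int stop_requested = ATOMIC_VAR_INIT(0);
static atomic_int online = ATOMIC_VAR_INIT(1);

static void *worker(void *arg)
{
	(void)arg;
	for (;;) {
		if (atomic_load(&stop_requested)) {	/* cooperative exit */
			atomic_fetch_sub(&online, 1);
			return NULL;
		}
		usleep(10);	/* pretend work; also a cancellation point */
	}
}

int main(void)
{
	pthread_t t;
	long timeout;

	pthread_create(&t, NULL, worker, NULL);

	/* Stage 1: the polite "REBOOT_VECTOR" request, bounded wait
	 * (roughly a second, like the kernel's udelay(1) poll loop). */
	atomic_store(&stop_requested, 1);
	for (timeout = 1000000; atomic_load(&online) > 0 && timeout--; )
		usleep(1);

	/* Stage 2: the "NMI" hammer, only if someone is still running. */
	if (atomic_load(&online) > 0) {
		fprintf(stderr, "forcing remaining workers off\n");
		pthread_cancel(t);
	}
	pthread_join(t, NULL);
	return 0;
}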
@@ -287,8 +279,8 @@ void smp_call_function_single_interrupt(struct pt_regs *regs)
 
 static int __init nonmi_ipi_setup(char *str)
 {
-	native_smp_disable_nmi_ipi();
+	smp_no_nmi_ipi = true;
 	return 1;
 }
 
 __setup("nonmi_ipi", nonmi_ipi_setup);
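With both stop paths folded into one function, the nonmi_ipi boot option no longer swaps the smp_ops.stop_other_cpus pointer; it simply records policy in smp_no_nmi_ipi, which stage two consults. A minimal sketch of that design choice, a policy flag instead of duplicate implementations (hypothetical names, not the kernel API):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool no_forceful_stage = false;	/* plays the role of smp_no_nmi_ipi */

/* One stop routine consulting a policy flag, rather than two variants
 * selected by swapping a function pointer (the pattern this patch removes). */
static void stop_all(void)
{
	puts("stage 1: polite request, bounded wait");
	if (!no_forceful_stage)
		puts("stage 2: forceful fallback");
}

/* Analogue of the __setup("nonmi_ipi", ...) hook. */
static void parse_boot_option(const char *opt)
{
	if (opt && strcmp(opt, "nonmi_ipi") == 0)
		no_forceful_stage = true;
}

int main(int argc, char **argv)
{
	parse_boot_option(argc > 1 ? argv[1] : NULL);
	stop_all();
	return 0;
}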
@@ -298,7 +290,7 @@ struct smp_ops smp_ops = {
 	.smp_prepare_cpus	= native_smp_prepare_cpus,
 	.smp_cpus_done		= native_smp_cpus_done,
 
-	.stop_other_cpus	= native_nmi_stop_other_cpus,
+	.stop_other_cpus	= native_stop_other_cpus,
 	.smp_send_reschedule	= native_smp_send_reschedule,
 
 	.cpu_up			= native_cpu_up,