Diffstat (limited to 'arch/x86/kernel/reboot.c')
-rw-r--r--  arch/x86/kernel/reboot.c  73
1 file changed, 64 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 61f718df6eec..2b46eb41643b 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -12,6 +12,8 @@
 #include <asm/proto.h>
 #include <asm/reboot_fixups.h>
 #include <asm/reboot.h>
+#include <asm/pci_x86.h>
+#include <asm/virtext.h>
 
 #ifdef CONFIG_X86_32
 # include <linux/dmi.h>
@@ -23,7 +25,6 @@
 
 #include <mach_ipi.h>
 
-
 /*
  * Power off function, if any
  */
@@ -39,6 +40,12 @@ int reboot_force;
 static int reboot_cpu = -1;
 #endif
 
+/* This is set if we need to go through the 'emergency' path.
+ * When machine_emergency_restart() is called, we may be in
+ * an inconsistent state and won't be able to do a clean cleanup.
+ */
+static int reboot_emergency;
+
 /* This is set by the PCI code if either type 1 or type 2 PCI is detected */
 bool port_cf9_safe = false;
 
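The flag has to be a file-static rather than a function argument because the emergency_restart hook in struct machine_ops takes no parameters. For reference, the ops structure from asm/reboot.h looks roughly like this (reproduced from memory of the 2.6.28-era tree, so treat it as a sketch):

struct machine_ops {
	void (*restart)(char *cmd);
	void (*halt)(void);
	void (*power_off)(void);
	void (*shutdown)(void);
	void (*crash_shutdown)(struct pt_regs *);
	void (*emergency_restart)(void);	/* void signature: no way to pass a flag */
};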
@@ -368,6 +375,48 @@ static inline void kb_wait(void)
 	}
 }
 
+static void vmxoff_nmi(int cpu, struct die_args *args)
+{
+	cpu_emergency_vmxoff();
+}
+
+/* Use NMIs as IPIs to tell all CPUs to disable virtualization
+ */
+static void emergency_vmx_disable_all(void)
+{
+	/* Just make sure we won't change CPUs while doing this */
+	local_irq_disable();
+
+	/* We need to disable VMX on all CPUs before rebooting, otherwise
+	 * we risk hanging up the machine, because the CPU ignores INIT
+	 * signals when VMX is enabled.
+	 *
+	 * We can't take any locks and we may be in an inconsistent
+	 * state, so we use NMIs as IPIs to tell the other CPUs to disable
+	 * VMX and halt.
+	 *
+	 * For safety, we will avoid running the nmi_shootdown_cpus()
+	 * stuff unnecessarily, but we don't have a way to check
+	 * if other CPUs have VMX enabled. So we will call it only if the
+	 * CPU we are running on has VMX enabled.
+	 *
+	 * We will miss cases where VMX is not enabled on all CPUs. This
+	 * shouldn't do much harm, because KVM always enables VMX on all
+	 * CPUs anyway. But we can miss it in the small window where KVM
+	 * is still enabling VMX.
+	 */
+	if (cpu_has_vmx() && cpu_vmx_enabled()) {
+		/* Disable VMX on this CPU.
+		 */
+		cpu_vmxoff();
+
+		/* Halt and disable VMX on the other CPUs */
+		nmi_shootdown_cpus(vmxoff_nmi);
+
+	}
+}
+
+
 void __attribute__((weak)) mach_reboot_fixups(void)
 {
 }
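For context, cpu_has_vmx(), cpu_vmx_enabled(), cpu_vmxoff() and cpu_emergency_vmxoff() come from the <asm/virtext.h> header added earlier in this series. A minimal sketch of what they amount to, assuming the usual CPUID/CR4 conventions (the exact in-tree definitions may differ in detail):

static inline int cpu_has_vmx(void)
{
	unsigned long ecx = cpuid_ecx(1);
	return test_bit(5, &ecx);		/* CPUID.1:ECX.VMX[bit 5] */
}

static inline int cpu_vmx_enabled(void)
{
	return read_cr4() & X86_CR4_VMXE;	/* has someone done VMXON here? */
}

static inline void cpu_vmxoff(void)
{
	asm volatile (ASM_VMX_VMXOFF : : : "cc");	/* leave VMX operation */
	write_cr4(read_cr4() & ~X86_CR4_VMXE);
}

/* NMI-safe teardown: only executes VMXOFF if VMX looks enabled */
static inline void cpu_emergency_vmxoff(void)
{
	if (cpu_has_vmx() && cpu_vmx_enabled())
		cpu_vmxoff();
}

The cpu_vmx_enabled() check matters because executing VMXOFF on a CPU that is not in VMX operation raises #UD; CR4.VMXE is used here as a lock-free proxy for that state.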
@@ -376,6 +425,9 @@ static void native_machine_emergency_restart(void)
 {
 	int i;
 
+	if (reboot_emergency)
+		emergency_vmx_disable_all();
+
 	/* Tell the BIOS if we want cold or warm reboot */
 	*((unsigned short *)__va(0x472)) = reboot_mode;
 
@@ -449,7 +501,7 @@ void native_machine_shutdown(void)
 
 #ifdef CONFIG_X86_32
 	/* See if there has been given a command line override */
-	if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) &&
+	if ((reboot_cpu != -1) && (reboot_cpu < nr_cpu_ids) &&
 	    cpu_online(reboot_cpu))
 		reboot_cpu_id = reboot_cpu;
 #endif
@@ -459,7 +511,7 @@ void native_machine_shutdown(void)
 	reboot_cpu_id = smp_processor_id();
 
 	/* Make certain I only run on the appropriate processor */
-	set_cpus_allowed_ptr(current, &cpumask_of_cpu(reboot_cpu_id));
+	set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id));
 
 	/* O.K Now that I'm on the appropriate processor,
 	 * stop all of the others.
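These two hunks piggyback on the cpumask API conversion happening around the same time: nr_cpu_ids is the runtime number of possible CPU ids (never larger than the compile-time NR_CPUS), and cpumask_of() returns a const struct cpumask * into a shared read-only table instead of materializing a whole cpumask_t. A minimal sketch of the difference, for some illustrative CPU id cpu:

	/* Old style: build a full cpumask_t just to name one CPU.
	 * With NR_CPUS=4096 that is a 512-byte stack object. */
	cpumask_t mask = cpumask_of_cpu(cpu);
	set_cpus_allowed_ptr(current, &mask);

	/* New style: point at a precomputed single-CPU mask, no copy */
	set_cpus_allowed_ptr(current, cpumask_of(cpu));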
@@ -482,13 +534,19 @@ void native_machine_shutdown(void)
 #endif
 }
 
+static void __machine_emergency_restart(int emergency)
+{
+	reboot_emergency = emergency;
+	machine_ops.emergency_restart();
+}
+
 static void native_machine_restart(char *__unused)
 {
 	printk("machine restart\n");
 
 	if (!reboot_force)
 		machine_shutdown();
-	machine_emergency_restart();
+	__machine_emergency_restart(0);
 }
 
 static void native_machine_halt(void)
@@ -532,7 +590,7 @@ void machine_shutdown(void)
 
 void machine_emergency_restart(void)
 {
-	machine_ops.emergency_restart();
+	__machine_emergency_restart(1);
 }
 
 void machine_restart(char *cmd)
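Putting the pieces together, the two reboot paths now differ only in whether the VMX shootdown runs. A sketch of the call flow (emergency_restart() is the generic helper in kernel/sys.c used by panic and SysRq-b):

	emergency_restart()                      panic, SysRq-b, ...
	  machine_emergency_restart()
	    __machine_emergency_restart(1)       reboot_emergency = 1
	      machine_ops.emergency_restart()
	        native_machine_emergency_restart()
	          emergency_vmx_disable_all()    runs: flag is set

	sys_reboot() -> machine_restart()        ordinary reboot
	  native_machine_restart()
	    machine_shutdown()                   other CPUs taken down cleanly
	    __machine_emergency_restart(0)       VMX shootdown skipped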
@@ -592,10 +650,7 @@ static int crash_nmi_callback(struct notifier_block *self,
 
 static void smp_send_nmi_allbutself(void)
 {
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(safe_smp_processor_id(), mask);
-	if (!cpus_empty(mask))
-		send_IPI_mask(mask, NMI_VECTOR);
+	send_IPI_allbutself(NMI_VECTOR);
 }
 
 static struct notifier_block crash_nmi_nb = {
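This simplification is safe because send_IPI_allbutself() already excludes the calling CPU, which is all the removed open-coded mask fiddling did. For reference, smp_send_nmi_allbutself() is the delivery half of nmi_shootdown_cpus(), the function emergency_vmx_disable_all() calls above; in outline it works like this (a sketch of the 2.6.28-era code in this same file, with details such as the timeout reproduced from memory):

static atomic_t waiting_for_crash_ipi;
static nmi_shootdown_cb shootdown_callback;

void nmi_shootdown_cpus(nmi_shootdown_cb callback)
{
	unsigned long msecs;

	local_irq_disable();

	shootdown_callback = callback;
	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);

	/* Every CPU that takes the NMI enters crash_nmi_callback(),
	 * which runs the callback (vmxoff_nmi here), decrements the
	 * counter and halts with interrupts off. */
	register_die_notifier(&crash_nmi_nb);
	smp_send_nmi_allbutself();

	/* Don't wait forever; a wedged CPU may never take the NMI */
	msecs = 1000;
	while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
		mdelay(1);
		msecs--;
	}
}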