author     Mark Brown <broonie@linaro.org>    2014-04-29 13:01:28 -0400
committer  Mark Brown <broonie@linaro.org>    2014-04-29 13:01:28 -0400
commit     3e93457b45a1a8c69227ce596ee2005fa06f20dd (patch)
tree       248c27e432533b1af80a9b2240eaa8e48e3b87cc /arch/x86/kernel
parent     290414499cf94284a97cc3c33214d13ccfcd896a (diff)
parent     c42ba72ec3a7a1b6aa30122931f1f4b91b601c31 (diff)
Merge tag 'ib-mfd-regulator-3.16' of git://git.kernel.org/pub/scm/linux/kernel/git/lee/mfd into regulator-tps65090
Immutable branch between MFD and Regulator due for v3.16 merge-window.
Diffstat (limited to 'arch/x86/kernel')
 -rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c             |  4
 -rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_intel.c       | 18
 -rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_rapl.c  | 45
 -rw-r--r--  arch/x86/kernel/early-quirks.c               |  2
 -rw-r--r--  arch/x86/kernel/kprobes/core.c               | 16
 -rw-r--r--  arch/x86/kernel/reboot.c                     | 72
 6 files changed, 94 insertions(+), 63 deletions(-)
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index eeee23ff75ef..68317c80de7f 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -598,7 +598,6 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 {
 	struct mce m;
 	int i;
-	unsigned long *v;
 
 	this_cpu_inc(mce_poll_count);
 
@@ -618,8 +617,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 		if (!(m.status & MCI_STATUS_VAL))
 			continue;
 
-		v = &get_cpu_var(mce_polled_error);
-		set_bit(0, v);
+		this_cpu_write(mce_polled_error, 1);
 		/*
 		 * Uncorrected or signalled events are handled by the exception
 		 * handler when it is enabled, so don't process those here.
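
The mce.c hunk above replaces a get_cpu_var()/set_bit() pair with a single this_cpu_write(). Below is a minimal, kernel-context sketch (not a standalone program) of the two shapes of that per-CPU flag update; the variable name poll_flag is made up for illustration and is not the kernel's mce_polled_error. Note that the removed lines take the per-CPU reference with get_cpu_var() but, at least within this hunk, never drop it with put_cpu_var(), whereas this_cpu_write() needs no such pairing.

```c
#include <linux/percpu.h>
#include <linux/bitops.h>

static DEFINE_PER_CPU(unsigned long, poll_flag);	/* hypothetical per-CPU flag */

static void mark_polled_old_style(void)
{
	unsigned long *v;

	v = &get_cpu_var(poll_flag);	/* disables preemption, returns this CPU's copy */
	set_bit(0, v);
	put_cpu_var(poll_flag);		/* must be paired with get_cpu_var() */
}

static void mark_polled_new_style(void)
{
	/* single preemption-safe store to this CPU's copy, no pairing needed */
	this_cpu_write(poll_flag, 1);
}
```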
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 3bdb95ae8c43..9a316b21df8b 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -42,7 +42,7 @@ static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned);
  * cmci_discover_lock protects against parallel discovery attempts
  * which could race against each other.
  */
-static DEFINE_RAW_SPINLOCK(cmci_discover_lock);
+static DEFINE_SPINLOCK(cmci_discover_lock);
 
 #define CMCI_THRESHOLD		1
 #define CMCI_POLL_INTERVAL	(30 * HZ)
@@ -144,14 +144,14 @@ static void cmci_storm_disable_banks(void)
 	int bank;
 	u64 val;
 
-	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+	spin_lock_irqsave(&cmci_discover_lock, flags);
 	owned = __get_cpu_var(mce_banks_owned);
 	for_each_set_bit(bank, owned, MAX_NR_BANKS) {
 		rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
 		val &= ~MCI_CTL2_CMCI_EN;
 		wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
 	}
-	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+	spin_unlock_irqrestore(&cmci_discover_lock, flags);
 }
 
 static bool cmci_storm_detect(void)
@@ -211,7 +211,7 @@ static void cmci_discover(int banks)
 	int i;
 	int bios_wrong_thresh = 0;
 
-	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+	spin_lock_irqsave(&cmci_discover_lock, flags);
 	for (i = 0; i < banks; i++) {
 		u64 val;
 		int bios_zero_thresh = 0;
@@ -266,7 +266,7 @@ static void cmci_discover(int banks)
 			WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
 		}
 	}
-	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+	spin_unlock_irqrestore(&cmci_discover_lock, flags);
 	if (mca_cfg.bios_cmci_threshold && bios_wrong_thresh) {
 		pr_info_once(
 			"bios_cmci_threshold: Some banks do not have valid thresholds set\n");
@@ -316,10 +316,10 @@ void cmci_clear(void)
 
 	if (!cmci_supported(&banks))
 		return;
-	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+	spin_lock_irqsave(&cmci_discover_lock, flags);
 	for (i = 0; i < banks; i++)
 		__cmci_disable_bank(i);
-	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+	spin_unlock_irqrestore(&cmci_discover_lock, flags);
 }
 
 static void cmci_rediscover_work_func(void *arg)
@@ -360,9 +360,9 @@ void cmci_disable_bank(int bank)
 	if (!cmci_supported(&banks))
 		return;
 
-	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+	spin_lock_irqsave(&cmci_discover_lock, flags);
 	__cmci_disable_bank(bank);
-	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+	spin_unlock_irqrestore(&cmci_discover_lock, flags);
 }
 
 static void intel_init_cmci(void)
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
index 059218ed5208..7c87424d4140 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -59,7 +59,7 @@
 #define INTEL_RAPL_PKG		0x2	/* pseudo-encoding */
 #define RAPL_IDX_RAM_NRG_STAT	2	/* DRAM */
 #define INTEL_RAPL_RAM		0x3	/* pseudo-encoding */
-#define RAPL_IDX_PP1_NRG_STAT	3	/* DRAM */
+#define RAPL_IDX_PP1_NRG_STAT	3	/* gpu */
 #define INTEL_RAPL_PP1		0x4	/* pseudo-encoding */
 
 /* Clients have PP0, PKG */
@@ -72,6 +72,12 @@
 			 1<<RAPL_IDX_PKG_NRG_STAT|\
 			 1<<RAPL_IDX_RAM_NRG_STAT)
 
+/* Servers have PP0, PKG, RAM, PP1 */
+#define RAPL_IDX_HSW	(1<<RAPL_IDX_PP0_NRG_STAT|\
+			 1<<RAPL_IDX_PKG_NRG_STAT|\
+			 1<<RAPL_IDX_RAM_NRG_STAT|\
+			 1<<RAPL_IDX_PP1_NRG_STAT)
+
 /*
  * event code: LSB 8 bits, passed in attr->config
  * any other bit is reserved
@@ -425,6 +431,24 @@ static struct attribute *rapl_events_cln_attr[] = {
 	NULL,
 };
 
+static struct attribute *rapl_events_hsw_attr[] = {
+	EVENT_PTR(rapl_cores),
+	EVENT_PTR(rapl_pkg),
+	EVENT_PTR(rapl_gpu),
+	EVENT_PTR(rapl_ram),
+
+	EVENT_PTR(rapl_cores_unit),
+	EVENT_PTR(rapl_pkg_unit),
+	EVENT_PTR(rapl_gpu_unit),
+	EVENT_PTR(rapl_ram_unit),
+
+	EVENT_PTR(rapl_cores_scale),
+	EVENT_PTR(rapl_pkg_scale),
+	EVENT_PTR(rapl_gpu_scale),
+	EVENT_PTR(rapl_ram_scale),
+	NULL,
+};
+
 static struct attribute_group rapl_pmu_events_group = {
 	.name = "events",
 	.attrs = NULL, /* patched at runtime */
@@ -511,6 +535,7 @@ static int rapl_cpu_prepare(int cpu)
 	struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu);
 	int phys_id = topology_physical_package_id(cpu);
 	u64 ms;
+	u64 msr_rapl_power_unit_bits;
 
 	if (pmu)
 		return 0;
@@ -518,6 +543,9 @@ static int rapl_cpu_prepare(int cpu)
 	if (phys_id < 0)
 		return -1;
 
+	if (!rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
+		return -1;
+
 	pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
 	if (!pmu)
 		return -1;
@@ -531,8 +559,7 @@ static int rapl_cpu_prepare(int cpu)
 	 *
 	 * we cache in local PMU instance
 	 */
-	rdmsrl(MSR_RAPL_POWER_UNIT, pmu->hw_unit);
-	pmu->hw_unit = (pmu->hw_unit >> 8) & 0x1FULL;
+	pmu->hw_unit = (msr_rapl_power_unit_bits >> 8) & 0x1FULL;
 	pmu->pmu = &rapl_pmu_class;
 
 	/*
@@ -631,11 +658,14 @@ static int __init rapl_pmu_init(void)
 	switch (boot_cpu_data.x86_model) {
 	case 42: /* Sandy Bridge */
 	case 58: /* Ivy Bridge */
-	case 60: /* Haswell */
-	case 69: /* Haswell-Celeron */
 		rapl_cntr_mask = RAPL_IDX_CLN;
 		rapl_pmu_events_group.attrs = rapl_events_cln_attr;
 		break;
+	case 60: /* Haswell */
+	case 69: /* Haswell-Celeron */
+		rapl_cntr_mask = RAPL_IDX_HSW;
+		rapl_pmu_events_group.attrs = rapl_events_hsw_attr;
+		break;
 	case 45: /* Sandy Bridge-EP */
 	case 62: /* IvyTown */
 		rapl_cntr_mask = RAPL_IDX_SRV;
@@ -650,7 +680,9 @@ static int __init rapl_pmu_init(void)
 	cpu_notifier_register_begin();
 
 	for_each_online_cpu(cpu) {
-		rapl_cpu_prepare(cpu);
+		ret = rapl_cpu_prepare(cpu);
+		if (ret)
+			goto out;
 		rapl_cpu_init(cpu);
 	}
 
@@ -673,6 +705,7 @@ static int __init rapl_pmu_init(void)
		hweight32(rapl_cntr_mask),
		ktime_to_ms(pmu->timer_interval));
 
+out:
 	cpu_notifier_register_done();
 
 	return 0;
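
The rapl_cpu_prepare() change reads MSR_RAPL_POWER_UNIT once via rdmsrl_safe() and extracts bits 12:8 with (raw >> 8) & 0x1F, exactly as the new pmu->hw_unit line shows. Here is a minimal user-space sketch of that decoding step; the raw value is a made-up (but typical-looking) sample rather than a real readout, and the interpretation of the field as an energy unit of 1/2^N Joules comes from Intel's RAPL documentation, not from this patch.

```c
#include <stdio.h>
#include <stdint.h>

/* Decode the energy-status-unit field the same way the patch does:
 * bits 12:8 of MSR_RAPL_POWER_UNIT select a unit of 1/2^N Joules. */
static double rapl_energy_unit(uint64_t msr_rapl_power_unit)
{
	unsigned int hw_unit = (msr_rapl_power_unit >> 8) & 0x1F;

	return 1.0 / (double)(1ULL << hw_unit);
}

int main(void)
{
	uint64_t raw = 0x000a1003;	/* hypothetical sample MSR value */
	unsigned int hw_unit = (raw >> 8) & 0x1F;

	printf("energy status unit field: %u -> %.9f J per count\n",
	       hw_unit, rapl_energy_unit(raw));
	return 0;
}
```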
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index b0cc3809723d..6e2537c32190 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -240,7 +240,7 @@ static u32 __init intel_stolen_base(int num, int slot, int func, size_t stolen_s
 	return base;
 }
 
-#define KB(x)	((x) * 1024)
+#define KB(x)	((x) * 1024UL)
 #define MB(x)	(KB (KB (x)))
 #define GB(x)	(MB (KB (x)))
 
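
The early-quirks hunk only appends a UL suffix, but it changes the arithmetic type of the whole KB/MB/GB chain: with plain int arithmetic, GB(x) wraps once the result passes the 32-bit range. A small, self-contained demonstration in ordinary user-space C (on a typical LP64 build); to keep the "old" variant well-defined for the demo it uses 1024U, since signed overflow would be undefined behaviour, and the macro names here are local stand-ins rather than the kernel's:

```c
#include <stdio.h>

/* Old shape: 32-bit arithmetic, wraps for sizes of 4 GB and up. */
#define KB_OLD(x) ((x) * 1024U)
#define MB_OLD(x) (KB_OLD(KB_OLD(x)))
#define GB_OLD(x) (MB_OLD(KB_OLD(x)))

/* Patched shape: unsigned long arithmetic, 64-bit on LP64. */
#define KB(x) ((x) * 1024UL)
#define MB(x) (KB(KB(x)))
#define GB(x) (MB(KB(x)))

int main(void)
{
	/* GB_OLD(4) wraps to 0 in 32 bits; GB(4) stays 4294967296. */
	printf("32-bit GB(4) = %u\n", GB_OLD(4));
	printf("UL     GB(4) = %lu\n", GB(4));
	return 0;
}
```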
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 79a3f9682871..61b17dc2c277 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -897,9 +897,10 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 	struct kprobe *cur = kprobe_running();
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 
-	switch (kcb->kprobe_status) {
-	case KPROBE_HIT_SS:
-	case KPROBE_REENTER:
+	if (unlikely(regs->ip == (unsigned long)cur->ainsn.insn)) {
+		/* This must happen on single-stepping */
+		WARN_ON(kcb->kprobe_status != KPROBE_HIT_SS &&
+			kcb->kprobe_status != KPROBE_REENTER);
 		/*
 		 * We are here because the instruction being single
 		 * stepped caused a page fault. We reset the current
@@ -914,9 +915,8 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 		else
 			reset_current_kprobe();
 		preempt_enable_no_resched();
-		break;
-	case KPROBE_HIT_ACTIVE:
-	case KPROBE_HIT_SSDONE:
+	} else if (kcb->kprobe_status == KPROBE_HIT_ACTIVE ||
+		   kcb->kprobe_status == KPROBE_HIT_SSDONE) {
 		/*
 		 * We increment the nmissed count for accounting,
 		 * we can also use npre/npostfault count for accounting
@@ -945,10 +945,8 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 		 * fixup routine could not handle it,
 		 * Let do_page_fault() fix it.
 		 */
-		break;
-	default:
-		break;
 	}
+
 	return 0;
 }
 
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 654b46574b91..3399d3a99730 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -114,8 +114,8 @@ EXPORT_SYMBOL(machine_real_restart);
  */
 static int __init set_pci_reboot(const struct dmi_system_id *d)
 {
-	if (reboot_type != BOOT_CF9) {
-		reboot_type = BOOT_CF9;
+	if (reboot_type != BOOT_CF9_FORCE) {
+		reboot_type = BOOT_CF9_FORCE;
 		pr_info("%s series board detected. Selecting %s-method for reboots.\n",
 			d->ident, "PCI");
 	}
@@ -458,20 +458,23 @@ void __attribute__((weak)) mach_reboot_fixups(void)
 }
 
 /*
- * Windows compatible x86 hardware expects the following on reboot:
+ * To the best of our knowledge Windows compatible x86 hardware expects
+ * the following on reboot:
  *
  * 1) If the FADT has the ACPI reboot register flag set, try it
 * 2) If still alive, write to the keyboard controller
 * 3) If still alive, write to the ACPI reboot register again
 * 4) If still alive, write to the keyboard controller again
 * 5) If still alive, call the EFI runtime service to reboot
- * 6) If still alive, write to the PCI IO port 0xCF9 to reboot
- * 7) If still alive, inform BIOS to do a proper reboot
+ * 6) If no EFI runtime service, call the BIOS to do a reboot
 *
- * If the machine is still alive at this stage, it gives up. We default to
- * following the same pattern, except that if we're still alive after (7) we'll
- * try to force a triple fault and then cycle between hitting the keyboard
- * controller and doing that
+ * We default to following the same pattern. We also have
+ * two other reboot methods: 'triple fault' and 'PCI', which
+ * can be triggered via the reboot= kernel boot option or
+ * via quirks.
+ *
+ * This means that this function can never return, it can misbehave
+ * by not rebooting properly and hanging.
 */
 static void native_machine_emergency_restart(void)
 {
@@ -492,6 +495,11 @@ static void native_machine_emergency_restart(void)
 	for (;;) {
 		/* Could also try the reset bit in the Hammer NB */
 		switch (reboot_type) {
+		case BOOT_ACPI:
+			acpi_reboot();
+			reboot_type = BOOT_KBD;
+			break;
+
 		case BOOT_KBD:
 			mach_reboot_fixups(); /* For board specific fixups */
 
@@ -509,43 +517,29 @@ static void native_machine_emergency_restart(void)
 			}
 			break;
 
-		case BOOT_TRIPLE:
-			load_idt(&no_idt);
-			__asm__ __volatile__("int3");
-
-			/* We're probably dead after this, but... */
-			reboot_type = BOOT_KBD;
-			break;
-
-		case BOOT_BIOS:
-			machine_real_restart(MRR_BIOS);
-
-			/* We're probably dead after this, but... */
-			reboot_type = BOOT_TRIPLE;
-			break;
-
-		case BOOT_ACPI:
-			acpi_reboot();
-			reboot_type = BOOT_KBD;
-			break;
-
 		case BOOT_EFI:
 			if (efi_enabled(EFI_RUNTIME_SERVICES))
 				efi.reset_system(reboot_mode == REBOOT_WARM ?
						 EFI_RESET_WARM :
						 EFI_RESET_COLD,
						 EFI_SUCCESS, 0, NULL);
-			reboot_type = BOOT_CF9_COND;
+			reboot_type = BOOT_BIOS;
+			break;
+
+		case BOOT_BIOS:
+			machine_real_restart(MRR_BIOS);
+
+			/* We're probably dead after this, but... */
+			reboot_type = BOOT_CF9_SAFE;
 			break;
 
-		case BOOT_CF9:
+		case BOOT_CF9_FORCE:
 			port_cf9_safe = true;
 			/* Fall through */
 
-		case BOOT_CF9_COND:
+		case BOOT_CF9_SAFE:
 			if (port_cf9_safe) {
-				u8 reboot_code = reboot_mode == REBOOT_WARM ?
-					0x06 : 0x0E;
+				u8 reboot_code = reboot_mode == REBOOT_WARM ? 0x06 : 0x0E;
 				u8 cf9 = inb(0xcf9) & ~reboot_code;
 				outb(cf9|2, 0xcf9); /* Request hard reset */
 				udelay(50);
@@ -553,7 +547,15 @@ static void native_machine_emergency_restart(void)
 				outb(cf9|reboot_code, 0xcf9);
 				udelay(50);
 			}
-			reboot_type = BOOT_BIOS;
+			reboot_type = BOOT_TRIPLE;
+			break;
+
+		case BOOT_TRIPLE:
+			load_idt(&no_idt);
+			__asm__ __volatile__("int3");
+
+			/* We're probably dead after this, but... */
+			reboot_type = BOOT_KBD;
 			break;
 		}
 	}
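
After the reboot.c reordering, each method that fails simply rewrites reboot_type and lets the surrounding for (;;) loop try the next one. A compact way to read the new ordering out of the hunks above is as a state-transition table. The sketch below is only an illustration of those transitions (it obviously performs no real reset) and uses a local enum rather than the kernel's; the BOOT_KBD fallback is not visible in this diff, so it is left as a terminal state here.

```c
#include <stdio.h>

/* Local stand-ins for the kernel's reboot_type values, illustration only. */
enum boot_method { BOOT_ACPI, BOOT_KBD, BOOT_EFI, BOOT_BIOS,
		   BOOT_CF9_FORCE, BOOT_CF9_SAFE, BOOT_TRIPLE };

static const char *name[] = { "ACPI", "KBD", "EFI", "BIOS",
			      "CF9_FORCE", "CF9_SAFE", "TRIPLE" };

/* Fallback transitions visible in the patched switch statement:
 * ACPI -> KBD, EFI -> BIOS, BIOS -> CF9_SAFE, CF9_FORCE falls through
 * to CF9_SAFE, CF9_SAFE -> TRIPLE, TRIPLE -> KBD. */
static enum boot_method next(enum boot_method m)
{
	switch (m) {
	case BOOT_ACPI:		return BOOT_KBD;
	case BOOT_EFI:		return BOOT_BIOS;
	case BOOT_BIOS:		return BOOT_CF9_SAFE;
	case BOOT_CF9_FORCE:	/* fall through */
	case BOOT_CF9_SAFE:	return BOOT_TRIPLE;
	case BOOT_TRIPLE:	return BOOT_KBD;
	case BOOT_KBD:		/* fallback not shown in this diff */
	default:		return BOOT_KBD;
	}
}

int main(void)
{
	enum boot_method m = BOOT_EFI;	/* e.g. starting from the EFI method */

	while (m != BOOT_KBD) {
		printf("%s -> ", name[m]);
		m = next(m);
	}
	printf("%s\n", name[m]);
	return 0;
}
```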