author     Andi Kleen <ak@linux.intel.com>         2009-04-28 13:07:31 -0400
committer  H. Peter Anvin <hpa@zytor.com>          2009-05-28 12:24:13 -0400
commit     4efc0670baf4b14bc95502e54a83ccf639146125 (patch)
tree       e2a4c61f303701d967b0d3fa9eccb4dcb576c690 /arch/x86
parent     d896a940ef4f12a0a6bc432853b249dcfbacabf0 (diff)
x86, mce: use 64bit machine check code on 32bit
The 64bit machine check code is in many ways much better than
the 32bit machine check code: it is more specification-compliant,
it is cleaner, it has a single code base versus one per CPU family,
it has better infrastructure for recovery, and it has a cleaner way
to communicate with user space.
Use the 64bit code for 32bit too.
This is the second attempt to do this. There was one a couple of years
ago to unify this code for 32bit and 64bit. Back then this ran into some
trouble with K7s and was reverted.
I believe this time the K7 problems (and some others) are addressed.
I went over the old handlers and was very careful to retain
all quirks.
But of course this needs a lot of testing on old systems. On newer
64bit-capable systems I don't expect many problems, because they have
already been tested with the 64bit kernel.
I made this a CONFIG option for now that still allows selecting the old
machine check code. This is mostly to make testing easier:
if someone runs into a problem we can ask them to retry
with the CONFIG switched.
The new code is default y for more coverage.
Once there is confidence that the 64bit code works well on older hardware
too, CONFIG_X86_OLD_MCE and the associated code can be easily
removed.
This causes a behaviour change for 32bit installations. They now
have to install the mcelog package to be able to log
corrected machine checks.
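
For illustration only (not part of this patch): with the 64bit code, corrected
errors are buffered by the kernel and a user space consumer has to drain
/dev/mcelog, which is normally the job of the mcelog(8) daemon. The sketch
below just opens the device and reads raw bytes; the device name is real, but
the fixed buffer size and the lack of struct mce decoding are simplifying
assumptions -- real mcelog queries the record and log sizes from the kernel
and decodes each record.

/* Hypothetical minimal /dev/mcelog reader -- a sketch, not mcelog itself. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        static char buf[64 * 1024];     /* assumed large enough for a full log */
        ssize_t n;
        int fd = open("/dev/mcelog", O_RDONLY);

        if (fd < 0) {
                perror("open /dev/mcelog");
                return 1;
        }
        /* The kernel may insist on the whole log being read in one call. */
        n = read(fd, buf, sizeof(buf));
        if (n < 0)
                perror("read");
        else
                printf("drained %zd bytes of machine check records\n", n);
        close(fd);
        return 0;
}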
The 64bit machine check code only handles CPUs which support the
standard Intel machine check architecture described in the IA32 SDM.
The 32bit code has special support for some older CPUs which
have non-standard machine check architectures, in particular
WinChip C3 and Intel P5. I made those a separate CONFIG option
and kept them for now. The WinChip variant could probably be
removed without too much pain; it doesn't really do anything
interesting. P5 is also disabled by default (as it
was before) because many motherboards have it miswired, but
according to Alan Cox a few embedded setups use that one.
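
To make the mechanism behind these quirky handlers concrete: the 32bit code
dispatches #MC through the machine_check_vector function pointer, and each
non-standard CPU installs its own handler at init time (see the mce.h and
p5.c hunks below). The following standalone program is only a user-space
sketch of that pattern; the handler names mirror the kernel's, but the
program itself is illustrative, not kernel code.

/* User-space sketch of the machine_check_vector dispatch pattern. */
#include <stdio.h>

struct pt_regs;                 /* opaque here; the kernel passes the trap frame */

static void unexpected_machine_check(struct pt_regs *regs, long error_code)
{
        printf("CPU: unexpected machine check, code %ld\n", error_code);
}

static void pentium_machine_check(struct pt_regs *regs, long error_code)
{
        printf("P5-style machine check, code %ld\n", error_code);
}

/* Default handler, analogous to the one in arch/x86/kernel/cpu/mcheck/mce.c */
static void (*machine_check_vector)(struct pt_regs *, long) =
        unexpected_machine_check;

int main(void)
{
        /* what intel_p5_mcheck_init() does once an enabled P5 is detected */
        machine_check_vector = pentium_machine_check;

        /* what the #MC trap entry ends up calling */
        machine_check_vector(NULL, 0);
        return 0;
}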
This is a forward-ported and heavily changed version of an old patch; the
original patch included review/fixes from Thomas Gleixner and Bert Wesarg.
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                     | 33
-rw-r--r--  arch/x86/include/asm/entry_arch.h    |  2
-rw-r--r--  arch/x86/kernel/apic/apic.c          |  4
-rw-r--r--  arch/x86/kernel/apic/nmi.c           |  2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/Makefile  |  3
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c     | 32
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.h     | 18
-rw-r--r--  arch/x86/kernel/cpu/mcheck/p5.c      |  5
-rw-r--r--  arch/x86/kernel/irq.c                |  4
-rw-r--r--  arch/x86/kernel/irqinit_32.c         |  2
-rw-r--r--  arch/x86/kernel/signal.c             |  4
-rw-r--r--  arch/x86/kernel/traps.c              |  4
12 files changed, 92 insertions(+), 21 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index a6efe0a2e9ae..c1c5ccd1937f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -789,6 +789,22 @@ config X86_MCE
           to disable it. MCE support simply ignores non-MCE processors like
           the 386 and 486, so nearly everyone can say Y here.
 
+config X86_OLD_MCE
+        depends on X86_32 && X86_MCE
+        bool "Use legacy machine check code (will go away)"
+        default n
+        select X86_ANCIENT_MCE
+        ---help---
+          Use the old i386 machine check code. This is merely intended for
+          testing in a transition period. Try this if you run into any machine
+          check related software problems, but report the problem to
+          linux-kernel. When in doubt say no.
+
+config X86_NEW_MCE
+        depends on X86_MCE
+        bool
+        default y if (!X86_OLD_MCE && X86_32) || X86_64
+
 config X86_MCE_INTEL
         def_bool y
         prompt "Intel MCE features"
@@ -805,6 +821,15 @@ config X86_MCE_AMD
           Additional support for AMD specific MCE features such as
           the DRAM Error Threshold.
 
+config X86_ANCIENT_MCE
+        def_bool n
+        depends on X86_32
+        prompt "Support for old Pentium 5 / WinChip machine checks"
+        ---help---
+          Include support for machine check handling on old Pentium 5 or WinChip
+          systems. These typically need to be enabled explicitely on the command
+          line.
+
 config X86_MCE_THRESHOLD
         depends on X86_MCE_AMD || X86_MCE_INTEL
         bool
@@ -812,7 +837,7 @@ config X86_MCE_THRESHOLD
 
 config X86_MCE_NONFATAL
         tristate "Check for non-fatal errors on AMD Athlon/Duron / Intel Pentium 4"
-        depends on X86_32 && X86_MCE
+        depends on X86_OLD_MCE
         ---help---
           Enabling this feature starts a timer that triggers every 5 seconds which
           will look at the machine check registers to see if anything happened.
@@ -825,11 +850,15 @@ config X86_MCE_NONFATAL
 
 config X86_MCE_P4THERMAL
         bool "check for P4 thermal throttling interrupt."
-        depends on X86_32 && X86_MCE && (X86_UP_APIC || SMP)
+        depends on X86_OLD_MCE && X86_MCE && (X86_UP_APIC || SMP)
         ---help---
           Enabling this feature will cause a message to be printed when the P4
           enters thermal throttling.
 
+config X86_THERMAL_VECTOR
+        def_bool y
+        depends on X86_MCE_P4THERMAL || X86_MCE_INTEL
+
 config VM86
         bool "Enable VM86 support" if EMBEDDED
         default y
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index c2e6bedaf258..486c9e946f5c 100644
--- a/arch/x86/include/asm/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -52,7 +52,7 @@ BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
 BUILD_INTERRUPT(perf_counter_interrupt, LOCAL_PERF_VECTOR)
 #endif
 
-#ifdef CONFIG_X86_MCE_P4THERMAL
+#ifdef CONFIG_X86_THERMAL_VECTOR
 BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR)
 #endif
 
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index f2870920f246..ad532289ef2e 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -843,7 +843,7 @@ void clear_local_APIC(void)
         }
 
         /* lets not touch this if we didn't frob it */
-#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL)
+#ifdef CONFIG_X86_THERMAL_VECTOR
         if (maxlvt >= 5) {
                 v = apic_read(APIC_LVTTHMR);
                 apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED);
@@ -1962,7 +1962,7 @@ static int lapic_suspend(struct sys_device *dev, pm_message_t state)
         apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
         apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
         apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
-#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL)
+#ifdef CONFIG_X86_THERMAL_VECTOR
         if (maxlvt >= 5)
                 apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
 #endif
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index ce4fbfa315a1..c4762276c17e 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -66,7 +66,7 @@ static inline unsigned int get_nmi_count(int cpu)
 
 static inline int mce_in_progress(void)
 {
-#if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE)
+#if defined(CONFIG_X86_NEW_MCE)
         return atomic_read(&mce_entry) > 0;
 #endif
         return 0;
diff --git a/arch/x86/kernel/cpu/mcheck/Makefile b/arch/x86/kernel/cpu/mcheck/Makefile
index 55f01b39a105..5f8b09425d39 100644
--- a/arch/x86/kernel/cpu/mcheck/Makefile
+++ b/arch/x86/kernel/cpu/mcheck/Makefile
@@ -1,6 +1,7 @@
 obj-y = mce.o therm_throt.o
 
-obj-$(CONFIG_X86_32) += k7.o p4.o p5.o p6.o winchip.o
+obj-$(CONFIG_X86_OLD_MCE) += k7.o p4.o p6.o
+obj-$(CONFIG_X86_ANCIENT_MCE) += winchip.o p5.o
 obj-$(CONFIG_X86_MCE_P4THERMAL) += mce_intel.o
 obj-$(CONFIG_X86_MCE_INTEL) += mce_intel_64.o mce_intel.o
 obj-$(CONFIG_X86_MCE_AMD) += mce_amd_64.o
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index f4d6841d2bdf..e193de44ef19 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -52,7 +52,7 @@ void (*machine_check_vector)(struct pt_regs *, long error_code) =
 
 int mce_disabled;
 
-#ifdef CONFIG_X86_64
+#ifdef CONFIG_X86_NEW_MCE
 
 #define MISC_MCELOG_MINOR 227
 
@@ -662,6 +662,21 @@ static void mce_cpu_quirks(struct cpuinfo_x86 *c)
         }
 }
 
+static void __cpuinit mce_ancient_init(struct cpuinfo_x86 *c)
+{
+        if (c->x86 != 5)
+                return;
+        switch (c->x86_vendor) {
+        case X86_VENDOR_INTEL:
+                if (mce_p5_enabled())
+                        intel_p5_mcheck_init(c);
+                break;
+        case X86_VENDOR_CENTAUR:
+                winchip_mcheck_init(c);
+                break;
+        }
+}
+
 static void mce_cpu_features(struct cpuinfo_x86 *c)
 {
         switch (c->x86_vendor) {
@@ -695,6 +710,11 @@ static void mce_init_timer(void)
  */
 void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
 {
+        if (mce_disabled)
+                return;
+
+        mce_ancient_init(c);
+
         if (!mce_available(c))
                 return;
 
@@ -893,6 +913,10 @@ static struct miscdevice mce_log_device = {
  */
 static int __init mcheck_enable(char *str)
 {
+        if (*str == 0)
+                enable_p5_mce();
+        if (*str == '=')
+                str++;
         if (!strcmp(str, "off"))
                 mce_disabled = 1;
         else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
@@ -900,13 +924,13 @@ static int __init mcheck_enable(char *str)
         else if (isdigit(str[0]))
                 get_option(&str, &tolerant);
         else {
-                printk(KERN_INFO "mce= argument %s ignored. Please use /sys\n",
+                printk(KERN_INFO "mce argument %s ignored. Please use /sys\n",
                        str);
                 return 0;
         }
         return 1;
 }
-__setup("mce=", mcheck_enable);
+__setup("mce", mcheck_enable);
 
 /*
  * Sysfs support
@@ -1259,7 +1283,7 @@ static __init int mce_init_device(void)
 
 device_initcall(mce_init_device);
 
-#else /* CONFIG_X86_32: */
+#else /* CONFIG_X86_OLD_MCE: */
 
 int nr_mce_banks;
 EXPORT_SYMBOL_GPL(nr_mce_banks); /* non-fatal.o */
diff --git a/arch/x86/kernel/cpu/mcheck/mce.h b/arch/x86/kernel/cpu/mcheck/mce.h
index 966ae3c5cb11..84a552b458c8 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.h
+++ b/arch/x86/kernel/cpu/mcheck/mce.h
@@ -1,17 +1,29 @@
 #include <linux/init.h>
 #include <asm/mce.h>
 
+#ifdef CONFIG_X86_OLD_MCE
 void amd_mcheck_init(struct cpuinfo_x86 *c);
 void intel_p4_mcheck_init(struct cpuinfo_x86 *c);
-void intel_p5_mcheck_init(struct cpuinfo_x86 *c);
 void intel_p6_mcheck_init(struct cpuinfo_x86 *c);
-void winchip_mcheck_init(struct cpuinfo_x86 *c);
+#endif
 
+#ifdef CONFIG_X86_ANCIENT_MCE
+void intel_p5_mcheck_init(struct cpuinfo_x86 *c);
+void winchip_mcheck_init(struct cpuinfo_x86 *c);
+extern int mce_p5_enable;
+static inline int mce_p5_enabled(void) { return mce_p5_enable; }
+static inline void enable_p5_mce(void) { mce_p5_enable = 1; }
+#else
+static inline void intel_p5_mcheck_init(struct cpuinfo_x86 *c) {}
+static inline void winchip_mcheck_init(struct cpuinfo_x86 *c) {}
+static inline int mce_p5_enabled(void) { return 0; }
+static inline void enable_p5_mce(void) { }
+#endif
 
 /* Call the installed machine check handler for this CPU setup. */
 extern void (*machine_check_vector)(struct pt_regs *, long error_code);
 
-#ifdef CONFIG_X86_32
+#ifdef CONFIG_X86_OLD_MCE
 
 extern int nr_mce_banks;
 
diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
index 8812f5441830..015f481ab1b0 100644
--- a/arch/x86/kernel/cpu/mcheck/p5.c
+++ b/arch/x86/kernel/cpu/mcheck/p5.c
@@ -14,6 +14,9 @@
 
 #include "mce.h"
 
+/* By default disabled */
+int mce_p5_enable;
+
 /* Machine check handler for Pentium class Intel CPUs: */
 static void pentium_machine_check(struct pt_regs *regs, long error_code)
 {
@@ -44,9 +47,11 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
         if (!cpu_has(c, X86_FEATURE_MCE))
                 return;
 
+#ifdef CONFIG_X86_OLD_MCE
         /* Default P5 to off as its often misconnected: */
         if (mce_disabled != -1)
                 return;
+#endif
 
         machine_check_vector = pentium_machine_check;
         /* Make sure the vector pointer is visible before we enable MCEs: */
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index c3fe010d74c8..35eddc9ec99e 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -89,7 +89,7 @@ static int show_other_interrupts(struct seq_file *p, int prec)
         for_each_online_cpu(j)
                 seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
         seq_printf(p, " Thermal event interrupts\n");
-# ifdef CONFIG_X86_64
+# ifdef CONFIG_X86_MCE_THRESHOLD
         seq_printf(p, "%*s: ", prec, "THR");
         for_each_online_cpu(j)
                 seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
@@ -176,7 +176,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
 #endif
 #ifdef CONFIG_X86_MCE
         sum += irq_stats(cpu)->irq_thermal_count;
-# ifdef CONFIG_X86_64
+# ifdef CONFIG_X86_MCE_THRESHOLD
         sum += irq_stats(cpu)->irq_threshold_count;
 #endif
 #endif
diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c
index 368b0a8836f9..98846e03211e 100644
--- a/arch/x86/kernel/irqinit_32.c
+++ b/arch/x86/kernel/irqinit_32.c
@@ -181,7 +181,7 @@ void __init native_init_IRQ(void)
         alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
 #endif
 
-#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_MCE_P4THERMAL)
+#ifdef CONFIG_X86_THERMAL_VECTOR
         /* thermal monitor LVT interrupt */
         alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
 #endif
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 14425166b8e3..d0851e3f77eb 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -25,11 +25,11 @@
 #include <asm/ucontext.h>
 #include <asm/i387.h>
 #include <asm/vdso.h>
+#include <asm/mce.h>
 
 #ifdef CONFIG_X86_64
 #include <asm/proto.h>
 #include <asm/ia32_unistd.h>
-#include <asm/mce.h>
 #endif /* CONFIG_X86_64 */
 
 #include <asm/syscall.h>
@@ -857,7 +857,7 @@ static void do_signal(struct pt_regs *regs)
 void
 do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
 {
-#if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE)
+#ifdef CONFIG_X86_NEW_MCE
         /* notify userspace of pending MCEs */
         if (thread_info_flags & _TIF_MCE_NOTIFY)
                 mce_notify_user();
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index a1d288327ff0..ad771f15bddd 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -798,7 +798,8 @@ unsigned long patch_espfix_desc(unsigned long uesp, unsigned long kesp)
 
         return new_kesp;
 }
-#else
+#endif
+
 asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
 {
 }
@@ -806,7 +807,6 @@ asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
 asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
 {
 }
-#endif
 
 /*
  * 'math_state_restore()' saves the current math information in the