author		Ingo Molnar <mingo@elte.hu>	2009-03-05 15:49:25 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-03-05 15:49:25 -0500
commit		caab36b593b44c97e3c7707c6a8054b320f8d622 (patch)
tree		70c8d67d51c616c357529d761a82ad382481dad7
parent		a1413c89ae6a4b7a9a43f7768934a81ffb5c629a (diff)
parent		73af76dfd1f998dba71d8e8e785cbe77a990bf17 (diff)

Merge branch 'x86/mce2' into x86/core
-rw-r--r--	arch/x86/Kconfig                          |   5
-rw-r--r--	arch/x86/include/asm/apicdef.h            |   1
-rw-r--r--	arch/x86/include/asm/mce.h                |  35
-rw-r--r--	arch/x86/include/asm/msr-index.h          |   5
-rw-r--r--	arch/x86/kernel/alternative.c             |  17
-rw-r--r--	arch/x86/kernel/apic/apic.c               |  15
-rw-r--r--	arch/x86/kernel/cpu/mcheck/Makefile       |   1
-rw-r--r--	arch/x86/kernel/cpu/mcheck/mce_32.c       |  14
-rw-r--r--	arch/x86/kernel/cpu/mcheck/mce_64.c       | 530
-rw-r--r--	arch/x86/kernel/cpu/mcheck/mce_amd_64.c   |  22
-rw-r--r--	arch/x86/kernel/cpu/mcheck/mce_intel_64.c | 207
-rw-r--r--	arch/x86/kernel/cpu/mcheck/threshold.c    |  29
12 files changed, 709 insertions(+), 172 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index f5cef3fbf9a5..31758378bcd2 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -783,6 +783,11 @@ config X86_MCE_AMD
 	  Additional support for AMD specific MCE features such as
 	  the DRAM Error Threshold.
 
+config X86_MCE_THRESHOLD
+	depends on X86_MCE_AMD || X86_MCE_INTEL
+	bool
+	default y
+
 config X86_MCE_NONFATAL
 	tristate "Check for non-fatal errors on AMD Athlon/Duron / Intel Pentium 4"
 	depends on X86_32 && X86_MCE
diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h
index 63134e31e8b9..bc9514fb3b13 100644
--- a/arch/x86/include/asm/apicdef.h
+++ b/arch/x86/include/asm/apicdef.h
@@ -53,6 +53,7 @@
 #define APIC_ESR_SENDILL	0x00020
 #define APIC_ESR_RECVILL	0x00040
 #define APIC_ESR_ILLREGA	0x00080
+#define APIC_LVTCMCI	0x2f0
 #define APIC_ICR	0x300
 #define APIC_DEST_SELF	0x40000
 #define APIC_DEST_ALLINC	0x80000
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 32c6e17b960b..563933e06a35 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -11,6 +11,8 @@
  */
 
 #define MCG_CTL_P	 (1UL<<8)   /* MCG_CAP register available */
+#define MCG_EXT_P	 (1ULL<<9)  /* Extended registers available */
+#define MCG_CMCI_P	 (1ULL<<10) /* CMCI supported */
 
 #define MCG_STATUS_RIPV  (1UL<<0)   /* restart ip valid */
 #define MCG_STATUS_EIPV  (1UL<<1)   /* ip points to correct instruction */
@@ -90,14 +92,29 @@ extern int mce_disabled;
 
 #include <asm/atomic.h>
 
+void mce_setup(struct mce *m);
 void mce_log(struct mce *m);
 DECLARE_PER_CPU(struct sys_device, device_mce);
 extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
 
+/*
+ * To support more than 128 would need to escape the predefined
+ * Linux defined extended banks first.
+ */
+#define MAX_NR_BANKS (MCE_EXTENDED_BANK - 1)
+
 #ifdef CONFIG_X86_MCE_INTEL
 void mce_intel_feature_init(struct cpuinfo_x86 *c);
+void cmci_clear(void);
+void cmci_reenable(void);
+void cmci_rediscover(int dying);
+void cmci_recheck(void);
 #else
 static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { }
+static inline void cmci_clear(void) {}
+static inline void cmci_reenable(void) {}
+static inline void cmci_rediscover(int dying) {}
+static inline void cmci_recheck(void) {}
 #endif
 
 #ifdef CONFIG_X86_MCE_AMD
@@ -106,11 +123,23 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c);
 static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { }
 #endif
 
-void mce_log_therm_throt_event(unsigned int cpu, __u64 status);
+extern int mce_available(struct cpuinfo_x86 *c);
+
+void mce_log_therm_throt_event(__u64 status);
 
 extern atomic_t mce_entry;
 
 extern void do_machine_check(struct pt_regs *, long);
+
+typedef DECLARE_BITMAP(mce_banks_t, MAX_NR_BANKS);
+DECLARE_PER_CPU(mce_banks_t, mce_poll_banks);
+
+enum mcp_flags {
+	MCP_TIMESTAMP = (1 << 0),	/* log time stamp */
+	MCP_UC = (1 << 1),		/* log uncorrected errors */
+};
+extern void machine_check_poll(enum mcp_flags flags, mce_banks_t *b);
+
 extern int mce_notify_user(void);
 
 #endif /* !CONFIG_X86_32 */
@@ -120,8 +149,8 @@ extern void mcheck_init(struct cpuinfo_x86 *c);
 #else
 #define mcheck_init(c) do { } while (0)
 #endif
-extern void stop_mce(void);
-extern void restart_mce(void);
+
+extern void (*mce_threshold_vector)(void);
 
 #endif /* __KERNEL__ */
 #endif /* _ASM_X86_MCE_H */
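
Taken together, the header additions above define the whole new polling interface: a per-CPU bank bitmap type plus machine_check_poll(). As a rough illustration (not part of the patch itself), a caller that wants to scan every bank for left-over events would fill a bank mask and pass it in, which is essentially what the reworked mce_init() in mce_64.c further down does:

    /* Illustrative only: driving the polling API declared above. */
    static void scan_all_banks(void)
    {
        mce_banks_t all_banks;

        bitmap_fill(all_banks, MAX_NR_BANKS);   /* claim interest in every bank */
        /* log time stamps and include uncorrected errors */
        machine_check_poll(MCP_TIMESTAMP | MCP_UC, &all_banks);
    }
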
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 358acc59ae04..2dbd2314139e 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -77,6 +77,11 @@
 #define MSR_IA32_MC0_ADDR	0x00000402
 #define MSR_IA32_MC0_MISC	0x00000403
 
+/* These are consecutive and not in the normal 4er MCE bank block */
+#define MSR_IA32_MC0_CTL2	0x00000280
+#define CMCI_EN			(1ULL << 30)
+#define CMCI_THRESHOLD_MASK	0xffffULL
+
 #define MSR_P6_PERFCTR0		0x000000c1
 #define MSR_P6_PERFCTR1		0x000000c2
 #define MSR_P6_EVNTSEL0		0x00000186
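
As the comment notes, the per-bank CTL2 registers sit at consecutive addresses from 0x280, unlike the CTL/STATUS/ADDR/MISC quadruplets at 0x400. A hedged sketch of how bank i's corrected-error interrupt would be armed with these constants — the same read-modify-write that cmci_discover() in mce_intel_64.c below performs:

    /* Illustrative sketch: arm CMCI on bank i, firing after 1 event. */
    u64 val;

    rdmsrl(MSR_IA32_MC0_CTL2 + i, val);   /* CTL2 MSRs are consecutive */
    val &= ~CMCI_THRESHOLD_MASK;          /* drop any stale threshold */
    val |= CMCI_EN | 1;                   /* enable, threshold of 1 */
    wrmsrl(MSR_IA32_MC0_CTL2 + i, val);
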
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 6907b8e85d52..4c80f1557433 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -414,9 +414,17 @@ void __init alternative_instructions(void)
 	   that might execute the to be patched code.
 	   Other CPUs are not running. */
 	stop_nmi();
-#ifdef CONFIG_X86_MCE
-	stop_mce();
-#endif
+
+	/*
+	 * Don't stop machine check exceptions while patching.
+	 * MCEs only happen when something got corrupted and in this
+	 * case we must do something about the corruption.
+	 * Ignoring it is worse than a unlikely patching race.
+	 * Also machine checks tend to be broadcast and if one CPU
+	 * goes into machine check the others follow quickly, so we don't
+	 * expect a machine check to cause undue problems during to code
+	 * patching.
+	 */
 
 	apply_alternatives(__alt_instructions, __alt_instructions_end);
 
@@ -456,9 +464,6 @@ void __init alternative_instructions(void)
 				(unsigned long)__smp_locks_end);
 
 	restart_nmi();
-#ifdef CONFIG_X86_MCE
-	restart_mce();
-#endif
 }
 
 /**
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index f9cecdfd05c5..30909a258d0f 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -46,6 +46,7 @@
 #include <asm/idle.h>
 #include <asm/mtrr.h>
 #include <asm/smp.h>
+#include <asm/mce.h>
 
 unsigned int num_processors;
 
@@ -842,6 +843,14 @@ void clear_local_APIC(void)
 		apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED);
 	}
 #endif
+#ifdef CONFIG_X86_MCE_INTEL
+	if (maxlvt >= 6) {
+		v = apic_read(APIC_LVTCMCI);
+		if (!(v & APIC_LVT_MASKED))
+			apic_write(APIC_LVTCMCI, v | APIC_LVT_MASKED);
+	}
+#endif
+
 	/*
 	 * Clean APIC state for other OSs:
 	 */
@@ -1241,6 +1250,12 @@ void __cpuinit setup_local_APIC(void)
 	apic_write(APIC_LVT1, value);
 
 	preempt_enable();
+
+#ifdef CONFIG_X86_MCE_INTEL
+	/* Recheck CMCI information after local APIC is up on CPU #0 */
+	if (smp_processor_id() == 0)
+		cmci_recheck();
+#endif
 }
 
 void __cpuinit end_local_APIC_setup(void)
diff --git a/arch/x86/kernel/cpu/mcheck/Makefile b/arch/x86/kernel/cpu/mcheck/Makefile
index d7d2323bbb69..b2f89829bbe8 100644
--- a/arch/x86/kernel/cpu/mcheck/Makefile
+++ b/arch/x86/kernel/cpu/mcheck/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_X86_32) += k7.o p4.o p5.o p6.o winchip.o
 obj-$(CONFIG_X86_MCE_INTEL)	+= mce_intel_64.o
 obj-$(CONFIG_X86_MCE_AMD)	+= mce_amd_64.o
 obj-$(CONFIG_X86_MCE_NONFATAL)	+= non-fatal.o
+obj-$(CONFIG_X86_MCE_THRESHOLD) += threshold.o
diff --git a/arch/x86/kernel/cpu/mcheck/mce_32.c b/arch/x86/kernel/cpu/mcheck/mce_32.c
index dfaebce3633e..3552119b091d 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_32.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_32.c
@@ -60,20 +60,6 @@ void mcheck_init(struct cpuinfo_x86 *c)
 	}
 }
 
-static unsigned long old_cr4 __initdata;
-
-void __init stop_mce(void)
-{
-	old_cr4 = read_cr4();
-	clear_in_cr4(X86_CR4_MCE);
-}
-
-void __init restart_mce(void)
-{
-	if (old_cr4 & X86_CR4_MCE)
-		set_in_cr4(X86_CR4_MCE);
-}
-
 static int __init mcheck_disable(char *str)
 {
 	mce_disabled = 1;
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index fe79985ce0f2..bfbd5323a635 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -3,6 +3,8 @@
  * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
  * Rest from unknown author(s).
  * 2004 Andi Kleen. Rewrote most of it.
+ * Copyright 2008 Intel Corporation
+ * Author: Andi Kleen
  */
 
 #include <linux/init.h>
@@ -24,6 +26,9 @@
 #include <linux/ctype.h>
 #include <linux/kmod.h>
 #include <linux/kdebug.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/ratelimit.h>
 #include <asm/processor.h>
 #include <asm/msr.h>
 #include <asm/mce.h>
@@ -32,7 +37,6 @@
 #include <asm/idle.h>
 
 #define MISC_MCELOG_MINOR 227
-#define NR_SYSFS_BANKS 6
 
 atomic_t mce_entry;
 
@@ -47,7 +51,7 @@ static int mce_dont_init;
  */
 static int tolerant = 1;
 static int banks;
-static unsigned long bank[NR_SYSFS_BANKS] = { [0 ... NR_SYSFS_BANKS-1] = ~0UL };
+static u64 *bank;
 static unsigned long notify_user;
 static int rip_msr;
 static int mce_bootlog = -1;
@@ -58,6 +62,19 @@ static char *trigger_argv[2] = { trigger, NULL };
 
 static DECLARE_WAIT_QUEUE_HEAD(mce_wait);
 
+/* MCA banks polled by the period polling timer for corrected events */
+DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
+	[0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
+};
+
+/* Do initial initialization of a struct mce */
+void mce_setup(struct mce *m)
+{
+	memset(m, 0, sizeof(struct mce));
+	m->cpu = smp_processor_id();
+	rdtscll(m->tsc);
+}
+
 /*
  * Lockless MCE logging infrastructure.
  * This avoids deadlocks on printk locks without having to break locks. Also
@@ -119,11 +136,11 @@ static void print_mce(struct mce *m)
 		print_symbol("{%s}", m->ip);
 		printk("\n");
 	}
-	printk(KERN_EMERG "TSC %Lx ", m->tsc);
+	printk(KERN_EMERG "TSC %llx ", m->tsc);
 	if (m->addr)
-		printk("ADDR %Lx ", m->addr);
+		printk("ADDR %llx ", m->addr);
 	if (m->misc)
-		printk("MISC %Lx ", m->misc);
+		printk("MISC %llx ", m->misc);
 	printk("\n");
 	printk(KERN_EMERG "This is not a software problem!\n");
 	printk(KERN_EMERG "Run through mcelog --ascii to decode "
@@ -149,8 +166,10 @@ static void mce_panic(char *msg, struct mce *backup, unsigned long start)
 	panic(msg);
 }
 
-static int mce_available(struct cpuinfo_x86 *c)
+int mce_available(struct cpuinfo_x86 *c)
 {
+	if (mce_dont_init)
+		return 0;
 	return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
 }
 
@@ -172,7 +191,77 @@ static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
 }
 
 /*
- * The actual machine check handler
+ * Poll for corrected events or events that happened before reset.
+ * Those are just logged through /dev/mcelog.
+ *
+ * This is executed in standard interrupt context.
+ */
+void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
+{
+	struct mce m;
+	int i;
+
+	mce_setup(&m);
+
+	rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus);
+	for (i = 0; i < banks; i++) {
+		if (!bank[i] || !test_bit(i, *b))
+			continue;
+
+		m.misc = 0;
+		m.addr = 0;
+		m.bank = i;
+		m.tsc = 0;
+
+		barrier();
+		rdmsrl(MSR_IA32_MC0_STATUS + i*4, m.status);
+		if (!(m.status & MCI_STATUS_VAL))
+			continue;
+
+		/*
+		 * Uncorrected events are handled by the exception handler
+		 * when it is enabled. But when the exception is disabled log
+		 * everything.
+		 *
+		 * TBD do the same check for MCI_STATUS_EN here?
+		 */
+		if ((m.status & MCI_STATUS_UC) && !(flags & MCP_UC))
+			continue;
+
+		if (m.status & MCI_STATUS_MISCV)
+			rdmsrl(MSR_IA32_MC0_MISC + i*4, m.misc);
+		if (m.status & MCI_STATUS_ADDRV)
+			rdmsrl(MSR_IA32_MC0_ADDR + i*4, m.addr);
+
+		if (!(flags & MCP_TIMESTAMP))
+			m.tsc = 0;
+		/*
+		 * Don't get the IP here because it's unlikely to
+		 * have anything to do with the actual error location.
+		 */
+
+		mce_log(&m);
+		add_taint(TAINT_MACHINE_CHECK);
+
+		/*
+		 * Clear state for this bank.
+		 */
+		wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
+	}
+
+	/*
+	 * Don't clear MCG_STATUS here because it's only defined for
+	 * exceptions.
+	 */
+}
+
+/*
+ * The actual machine check handler. This only handles real
+ * exceptions when something got corrupted coming in through int 18.
+ *
+ * This is executed in NMI context not subject to normal locking rules. This
+ * implies that most kernel services cannot be safely used. Don't even
+ * think about putting a printk in there!
  */
 void do_machine_check(struct pt_regs * regs, long error_code)
 {
@@ -190,17 +279,18 @@
 	 * error.
 	 */
 	int kill_it = 0;
+	DECLARE_BITMAP(toclear, MAX_NR_BANKS);
 
 	atomic_inc(&mce_entry);
 
-	if ((regs
-	     && notify_die(DIE_NMI, "machine check", regs, error_code,
+	if (notify_die(DIE_NMI, "machine check", regs, error_code,
 			   18, SIGKILL) == NOTIFY_STOP)
-	    || !banks)
+		goto out2;
+	if (!banks)
 		goto out2;
 
-	memset(&m, 0, sizeof(struct mce));
-	m.cpu = smp_processor_id();
+	mce_setup(&m);
+
 	rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus);
 	/* if the restart IP is not valid, we're done for */
 	if (!(m.mcgstatus & MCG_STATUS_RIPV))
@@ -210,18 +300,32 @@ void do_machine_check(struct pt_regs * regs, long error_code)
 	barrier();
 
 	for (i = 0; i < banks; i++) {
-		if (i < NR_SYSFS_BANKS && !bank[i])
+		__clear_bit(i, toclear);
+		if (!bank[i])
 			continue;
 
 		m.misc = 0;
 		m.addr = 0;
 		m.bank = i;
-		m.tsc = 0;
 
 		rdmsrl(MSR_IA32_MC0_STATUS + i*4, m.status);
 		if ((m.status & MCI_STATUS_VAL) == 0)
 			continue;
 
+		/*
+		 * Non uncorrected errors are handled by machine_check_poll
+		 * Leave them alone.
+		 */
+		if ((m.status & MCI_STATUS_UC) == 0)
+			continue;
+
+		/*
+		 * Set taint even when machine check was not enabled.
+		 */
+		add_taint(TAINT_MACHINE_CHECK);
+
+		__set_bit(i, toclear);
+
 		if (m.status & MCI_STATUS_EN) {
 			/* if PCC was set, there's no way out */
 			no_way_out |= !!(m.status & MCI_STATUS_PCC);
@@ -235,6 +339,12 @@ void do_machine_check(struct pt_regs * regs, long error_code)
 				no_way_out = 1;
 				kill_it = 1;
 			}
+		} else {
+			/*
+			 * Machine check event was not enabled. Clear, but
+			 * ignore.
+			 */
+			continue;
 		}
 
 		if (m.status & MCI_STATUS_MISCV)
@@ -243,10 +353,7 @@ void do_machine_check(struct pt_regs * regs, long error_code)
 			rdmsrl(MSR_IA32_MC0_ADDR + i*4, m.addr);
 
 		mce_get_rip(&m, regs);
-		if (error_code >= 0)
-			rdtscll(m.tsc);
-		if (error_code != -2)
-			mce_log(&m);
+		mce_log(&m);
 
 		/* Did this bank cause the exception? */
 		/* Assume that the bank with uncorrectable errors did it,
@@ -255,14 +362,8 @@ void do_machine_check(struct pt_regs * regs, long error_code)
 			panicm = m;
 			panicm_found = 1;
 		}
-
-		add_taint(TAINT_MACHINE_CHECK);
 	}
 
-	/* Never do anything final in the polling timer */
-	if (!regs)
-		goto out;
-
 	/* If we didn't find an uncorrectable error, pick
 	   the last one (shouldn't happen, just being safe). */
 	if (!panicm_found)
@@ -309,10 +410,11 @@ void do_machine_check(struct pt_regs * regs, long error_code)
 	/* notify userspace ASAP */
 	set_thread_flag(TIF_MCE_NOTIFY);
 
- out:
 	/* the last thing we do is clear state */
-	for (i = 0; i < banks; i++)
-		wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
+	for (i = 0; i < banks; i++) {
+		if (test_bit(i, toclear))
+			wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
+	}
 	wrmsrl(MSR_IA32_MCG_STATUS, 0);
  out2:
 	atomic_dec(&mce_entry);
@@ -332,15 +434,13 @@ void do_machine_check(struct pt_regs * regs, long error_code)
  * and historically has been the register value of the
  * MSR_IA32_THERMAL_STATUS (Intel) msr.
  */
-void mce_log_therm_throt_event(unsigned int cpu, __u64 status)
+void mce_log_therm_throt_event(__u64 status)
 {
 	struct mce m;
 
-	memset(&m, 0, sizeof(m));
-	m.cpu = cpu;
+	mce_setup(&m);
 	m.bank = MCE_THERMAL_BANK;
 	m.status = status;
-	rdtscll(m.tsc);
 	mce_log(&m);
 }
 #endif /* CONFIG_X86_MCE_INTEL */
@@ -353,18 +453,18 @@ void mce_log_therm_throt_event(unsigned int cpu, __u64 status)
 
 static int check_interval = 5 * 60; /* 5 minutes */
 static int next_interval; /* in jiffies */
-static void mcheck_timer(struct work_struct *work);
-static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer);
+static void mcheck_timer(unsigned long);
+static DEFINE_PER_CPU(struct timer_list, mce_timer);
 
-static void mcheck_check_cpu(void *info)
+static void mcheck_timer(unsigned long data)
 {
-	if (mce_available(&current_cpu_data))
-		do_machine_check(NULL, 0);
-}
+	struct timer_list *t = &per_cpu(mce_timer, data);
 
-static void mcheck_timer(struct work_struct *work)
-{
-	on_each_cpu(mcheck_check_cpu, NULL, 1);
+	WARN_ON(smp_processor_id() != data);
+
+	if (mce_available(&current_cpu_data))
+		machine_check_poll(MCP_TIMESTAMP,
+				   &__get_cpu_var(mce_poll_banks));
 
 	/*
 	 * Alert userspace if needed. If we logged an MCE, reduce the
@@ -377,31 +477,41 @@ static void mcheck_timer(unsigned long data)
 		      (int)round_jiffies_relative(check_interval*HZ));
 	}
 
-	schedule_delayed_work(&mcheck_work, next_interval);
+	t->expires = jiffies + next_interval;
+	add_timer(t);
+}
+
+static void mce_do_trigger(struct work_struct *work)
+{
+	call_usermodehelper(trigger, trigger_argv, NULL, UMH_NO_WAIT);
 }
 
+static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
+
 /*
- * This is only called from process context. This is where we do
- * anything we need to alert userspace about new MCEs. This is called
- * directly from the poller and also from entry.S and idle, thanks to
- * TIF_MCE_NOTIFY.
+ * Notify the user(s) about new machine check events.
+ * Can be called from interrupt context, but not from machine check/NMI
+ * context.
  */
 int mce_notify_user(void)
 {
+	/* Not more than two messages every minute */
+	static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
+
 	clear_thread_flag(TIF_MCE_NOTIFY);
 	if (test_and_clear_bit(0, &notify_user)) {
-		static unsigned long last_print;
-		unsigned long now = jiffies;
-
 		wake_up_interruptible(&mce_wait);
-		if (trigger[0])
-			call_usermodehelper(trigger, trigger_argv, NULL,
-						UMH_NO_WAIT);
 
-		if (time_after_eq(now, last_print + (check_interval*HZ))) {
-			last_print = now;
+		/*
+		 * There is no risk of missing notifications because
+		 * work_pending is always cleared before the function is
+		 * executed.
+		 */
+		if (trigger[0] && !work_pending(&mce_trigger_work))
+			schedule_work(&mce_trigger_work);
+
+		if (__ratelimit(&ratelimit))
 			printk(KERN_INFO "Machine check events logged\n");
-		}
 
 		return 1;
 	}
@@ -425,63 +535,78 @@ static struct notifier_block mce_idle_notifier = {
 
 static __init int periodic_mcheck_init(void)
 {
-	next_interval = check_interval * HZ;
-	if (next_interval)
-		schedule_delayed_work(&mcheck_work,
-				      round_jiffies_relative(next_interval));
-	idle_notifier_register(&mce_idle_notifier);
-	return 0;
+	idle_notifier_register(&mce_idle_notifier);
+	return 0;
 }
 __initcall(periodic_mcheck_init);
 
-
 /*
  * Initialize Machine Checks for a CPU.
  */
-static void mce_init(void *dummy)
+static int mce_cap_init(void)
 {
 	u64 cap;
-	int i;
+	unsigned b;
 
 	rdmsrl(MSR_IA32_MCG_CAP, cap);
-	banks = cap & 0xff;
-	if (banks > MCE_EXTENDED_BANK) {
-		banks = MCE_EXTENDED_BANK;
-		printk(KERN_INFO "MCE: warning: using only %d banks\n",
-		       MCE_EXTENDED_BANK);
+	b = cap & 0xff;
+	if (b > MAX_NR_BANKS) {
+		printk(KERN_WARNING
+		       "MCE: Using only %u machine check banks out of %u\n",
+		       MAX_NR_BANKS, b);
+		b = MAX_NR_BANKS;
 	}
+
+	/* Don't support asymmetric configurations today */
+	WARN_ON(banks != 0 && b != banks);
+	banks = b;
+	if (!bank) {
+		bank = kmalloc(banks * sizeof(u64), GFP_KERNEL);
+		if (!bank)
+			return -ENOMEM;
+		memset(bank, 0xff, banks * sizeof(u64));
+	}
+
 	/* Use accurate RIP reporting if available. */
 	if ((cap & (1<<9)) && ((cap >> 16) & 0xff) >= 9)
 		rip_msr = MSR_IA32_MCG_EIP;
 
-	/* Log the machine checks left over from the previous reset.
-	   This also clears all registers */
-	do_machine_check(NULL, mce_bootlog ? -1 : -2);
+	return 0;
+}
+
+static void mce_init(void *dummy)
+{
+	u64 cap;
+	int i;
+	mce_banks_t all_banks;
+
+	/*
+	 * Log the machine checks left over from the previous reset.
+	 */
+	bitmap_fill(all_banks, MAX_NR_BANKS);
+	machine_check_poll(MCP_UC, &all_banks);
 
 	set_in_cr4(X86_CR4_MCE);
 
+	rdmsrl(MSR_IA32_MCG_CAP, cap);
 	if (cap & MCG_CTL_P)
 		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
 
 	for (i = 0; i < banks; i++) {
-		if (i < NR_SYSFS_BANKS)
-			wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]);
-		else
-			wrmsrl(MSR_IA32_MC0_CTL+4*i, ~0UL);
-
+		wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]);
 		wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
 	}
 }
 
 /* Add per CPU specific workarounds here */
-static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c)
+static void mce_cpu_quirks(struct cpuinfo_x86 *c)
 {
 	/* This should be disabled by the BIOS, but isn't always */
 	if (c->x86_vendor == X86_VENDOR_AMD) {
-		if(c->x86 == 15)
+		if (c->x86 == 15 && banks > 4)
 			/* disable GART TBL walk error reporting, which trips off
 			   incorrectly with the IOMMU & 3ware & Cerberus. */
-			clear_bit(10, &bank[4]);
+			clear_bit(10, (unsigned long *)&bank[4]);
 		if(c->x86 <= 17 && mce_bootlog < 0)
 			/* Lots of broken BIOS around that don't clear them
 			   by default and leave crap in there. Don't log. */
@@ -504,20 +629,38 @@ static void mce_cpu_features(struct cpuinfo_x86 *c)
 	}
 }
 
+static void mce_init_timer(void)
+{
+	struct timer_list *t = &__get_cpu_var(mce_timer);
+
+	/* data race harmless because everyone sets to the same value */
+	if (!next_interval)
+		next_interval = check_interval * HZ;
+	if (!next_interval)
+		return;
+	setup_timer(t, mcheck_timer, smp_processor_id());
+	t->expires = round_jiffies_relative(jiffies + next_interval);
+	add_timer(t);
+}
+
 /*
  * Called for each booted CPU to set up machine checks.
  * Must be called with preempt off.
  */
 void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
 {
-	mce_cpu_quirks(c);
+	if (!mce_available(c))
+		return;
 
-	if (mce_dont_init ||
-	    !mce_available(c))
+	if (mce_cap_init() < 0) {
+		mce_dont_init = 1;
 		return;
+	}
+	mce_cpu_quirks(c);
 
 	mce_init(NULL);
 	mce_cpu_features(c);
+	mce_init_timer();
 }
 
 /*
@@ -573,7 +716,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
 {
 	unsigned long *cpu_tsc;
 	static DEFINE_MUTEX(mce_read_mutex);
-	unsigned next;
+	unsigned prev, next;
 	char __user *buf = ubuf;
 	int i, err;
 
@@ -592,25 +735,32 @@
 	}
 
 	err = 0;
-	for (i = 0; i < next; i++) {
-		unsigned long start = jiffies;
-
-		while (!mcelog.entry[i].finished) {
-			if (time_after_eq(jiffies, start + 2)) {
-				memset(mcelog.entry + i,0, sizeof(struct mce));
-				goto timeout;
+	prev = 0;
+	do {
+		for (i = prev; i < next; i++) {
+			unsigned long start = jiffies;
+
+			while (!mcelog.entry[i].finished) {
+				if (time_after_eq(jiffies, start + 2)) {
+					memset(mcelog.entry + i, 0,
+					       sizeof(struct mce));
+					goto timeout;
+				}
+				cpu_relax();
 			}
-			cpu_relax();
+			smp_rmb();
+			err |= copy_to_user(buf, mcelog.entry + i,
+					    sizeof(struct mce));
+			buf += sizeof(struct mce);
+timeout:
+			;
 		}
-		smp_rmb();
-		err |= copy_to_user(buf, mcelog.entry + i, sizeof(struct mce));
-		buf += sizeof(struct mce);
-	timeout:
-		;
-	}
 
-	memset(mcelog.entry, 0, next * sizeof(struct mce));
-	mcelog.next = 0;
+		memset(mcelog.entry + prev, 0,
+		       (next - prev) * sizeof(struct mce));
+		prev = next;
+		next = cmpxchg(&mcelog.next, prev, 0);
+	} while (next != prev);
 
 	synchronize_sched();
 
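
The do/while above closes a window in the old reader: records appended between copying the buffer and zeroing mcelog.next used to be lost. The cmpxchg() resets the index only if nothing was logged since prev was sampled; otherwise it returns the newer count and the loop drains the freshly appended tail first. In outline (illustrative restatement, with a hypothetical copy_out() helper standing in for the copy_to_user loop):

    prev = 0;
    do {
        copy_out(prev, next);                    /* hypothetical helper */
        memset(mcelog.entry + prev, 0,
               (next - prev) * sizeof(struct mce));
        prev = next;
        /* resets to 0 only if no new entries arrived meanwhile */
        next = cmpxchg(&mcelog.next, prev, 0);
    } while (next != prev);
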
@@ -680,20 +830,6 @@ static struct miscdevice mce_log_device = {
 	&mce_chrdev_ops,
 };
 
-static unsigned long old_cr4 __initdata;
-
-void __init stop_mce(void)
-{
-	old_cr4 = read_cr4();
-	clear_in_cr4(X86_CR4_MCE);
-}
-
-void __init restart_mce(void)
-{
-	if (old_cr4 & X86_CR4_MCE)
-		set_in_cr4(X86_CR4_MCE);
-}
-
 /*
  * Old style boot options parsing. Only for compatibility.
  */
@@ -703,8 +839,7 @@ static int __init mcheck_disable(char *str)
 	return 1;
 }
 
-/* mce=off disables machine check. Note you can re-enable it later
-   using sysfs.
+/* mce=off disables machine check.
    mce=TOLERANCELEVEL (number, see above)
    mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
    mce=nobootlog Don't log MCEs from before booting. */
@@ -728,6 +863,29 @@
  * Sysfs support
  */
 
+/*
+ * Disable machine checks on suspend and shutdown. We can't really handle
+ * them later.
+ */
+static int mce_disable(void)
+{
+	int i;
+
+	for (i = 0; i < banks; i++)
+		wrmsrl(MSR_IA32_MC0_CTL + i*4, 0);
+	return 0;
+}
+
+static int mce_suspend(struct sys_device *dev, pm_message_t state)
+{
+	return mce_disable();
+}
+
+static int mce_shutdown(struct sys_device *dev)
+{
+	return mce_disable();
+}
+
 /* On resume clear all MCE state. Don't want to see leftovers from the BIOS.
    Only one CPU is active at this time, the others get readded later using
    CPU hotplug. */
@@ -738,20 +896,24 @@ static int mce_resume(struct sys_device *dev)
 	return 0;
 }
 
+static void mce_cpu_restart(void *data)
+{
+	del_timer_sync(&__get_cpu_var(mce_timer));
+	if (mce_available(&current_cpu_data))
+		mce_init(NULL);
+	mce_init_timer();
+}
+
 /* Reinit MCEs after user configuration changes */
 static void mce_restart(void)
 {
-	if (next_interval)
-		cancel_delayed_work(&mcheck_work);
-	/* Timer race is harmless here */
-	on_each_cpu(mce_init, NULL, 1);
 	next_interval = check_interval * HZ;
-	if (next_interval)
-		schedule_delayed_work(&mcheck_work,
-				      round_jiffies_relative(next_interval));
+	on_each_cpu(mce_cpu_restart, NULL, 1);
 }
 
 static struct sysdev_class mce_sysclass = {
+	.suspend = mce_suspend,
+	.shutdown = mce_shutdown,
 	.resume = mce_resume,
 	.name = "machinecheck",
 };
@@ -778,16 +940,26 @@ void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu) __cpuinit
 	} \
 static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name);
 
-/*
- * TBD should generate these dynamically based on number of available banks.
- * Have only 6 contol banks in /sysfs until then.
- */
-ACCESSOR(bank0ctl,bank[0],mce_restart())
-ACCESSOR(bank1ctl,bank[1],mce_restart())
-ACCESSOR(bank2ctl,bank[2],mce_restart())
-ACCESSOR(bank3ctl,bank[3],mce_restart())
-ACCESSOR(bank4ctl,bank[4],mce_restart())
-ACCESSOR(bank5ctl,bank[5],mce_restart())
+static struct sysdev_attribute *bank_attrs;
+
+static ssize_t show_bank(struct sys_device *s, struct sysdev_attribute *attr,
+			 char *buf)
+{
+	u64 b = bank[attr - bank_attrs];
+	return sprintf(buf, "%llx\n", b);
+}
+
+static ssize_t set_bank(struct sys_device *s, struct sysdev_attribute *attr,
+			const char *buf, size_t siz)
+{
+	char *end;
+	u64 new = simple_strtoull(buf, &end, 0);
+	if (end == buf)
+		return -EINVAL;
+	bank[attr - bank_attrs] = new;
+	mce_restart();
+	return end-buf;
+}
 
 static ssize_t show_trigger(struct sys_device *s, struct sysdev_attribute *attr,
 			    char *buf)
@@ -814,8 +986,6 @@ static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger);
 static SYSDEV_INT_ATTR(tolerant, 0644, tolerant);
 ACCESSOR(check_interval,check_interval,mce_restart())
 static struct sysdev_attribute *mce_attributes[] = {
-	&attr_bank0ctl, &attr_bank1ctl, &attr_bank2ctl,
-	&attr_bank3ctl, &attr_bank4ctl, &attr_bank5ctl,
 	&attr_tolerant.attr, &attr_check_interval, &attr_trigger,
 	NULL
 };
@@ -845,11 +1015,22 @@ static __cpuinit int mce_create_device(unsigned int cpu)
 		if (err)
 			goto error;
 	}
+	for (i = 0; i < banks; i++) {
+		err = sysdev_create_file(&per_cpu(device_mce, cpu),
+					 &bank_attrs[i]);
+		if (err)
+			goto error2;
+	}
 	cpu_set(cpu, mce_device_initialized);
 
 	return 0;
+error2:
+	while (--i >= 0) {
+		sysdev_remove_file(&per_cpu(device_mce, cpu),
+				   &bank_attrs[i]);
+	}
 error:
-	while (i--) {
+	while (--i >= 0) {
 		sysdev_remove_file(&per_cpu(device_mce,cpu),
 				   mce_attributes[i]);
 	}
@@ -868,15 +1049,46 @@ static __cpuinit void mce_remove_device(unsigned int cpu)
 	for (i = 0; mce_attributes[i]; i++)
 		sysdev_remove_file(&per_cpu(device_mce,cpu),
 				   mce_attributes[i]);
+	for (i = 0; i < banks; i++)
+		sysdev_remove_file(&per_cpu(device_mce, cpu),
+				   &bank_attrs[i]);
 	sysdev_unregister(&per_cpu(device_mce,cpu));
 	cpu_clear(cpu, mce_device_initialized);
 }
 
+/* Make sure there are no machine checks on offlined CPUs. */
+static void mce_disable_cpu(void *h)
+{
+	int i;
+	unsigned long action = *(unsigned long *)h;
+
+	if (!mce_available(&current_cpu_data))
+		return;
+	if (!(action & CPU_TASKS_FROZEN))
+		cmci_clear();
+	for (i = 0; i < banks; i++)
+		wrmsrl(MSR_IA32_MC0_CTL + i*4, 0);
+}
+
+static void mce_reenable_cpu(void *h)
+{
+	int i;
+	unsigned long action = *(unsigned long *)h;
+
+	if (!mce_available(&current_cpu_data))
+		return;
+	if (!(action & CPU_TASKS_FROZEN))
+		cmci_reenable();
+	for (i = 0; i < banks; i++)
+		wrmsrl(MSR_IA32_MC0_CTL + i*4, bank[i]);
+}
+
 /* Get notified when a cpu comes on/off. Be hotplug friendly. */
 static int __cpuinit mce_cpu_callback(struct notifier_block *nfb,
 				      unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
+	struct timer_list *t = &per_cpu(mce_timer, cpu);
 
 	switch (action) {
 	case CPU_ONLINE:
@@ -891,6 +1103,21 @@ static int __cpuinit mce_cpu_callback(struct notifier_block *nfb,
 		threshold_cpu_callback(action, cpu);
 		mce_remove_device(cpu);
 		break;
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+		del_timer_sync(t);
+		smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
+		break;
+	case CPU_DOWN_FAILED:
+	case CPU_DOWN_FAILED_FROZEN:
+		t->expires = round_jiffies_relative(jiffies + next_interval);
+		add_timer_on(t, cpu);
+		smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
+		break;
+	case CPU_POST_DEAD:
+		/* intentionally ignoring frozen here */
+		cmci_rediscover(cpu);
+		break;
 	}
 	return NOTIFY_OK;
 }
@@ -899,6 +1126,34 @@ static struct notifier_block mce_cpu_notifier __cpuinitdata = {
 	.notifier_call = mce_cpu_callback,
 };
 
+static __init int mce_init_banks(void)
+{
+	int i;
+
+	bank_attrs = kzalloc(sizeof(struct sysdev_attribute) * banks,
+			     GFP_KERNEL);
+	if (!bank_attrs)
+		return -ENOMEM;
+
+	for (i = 0; i < banks; i++) {
+		struct sysdev_attribute *a = &bank_attrs[i];
+		a->attr.name = kasprintf(GFP_KERNEL, "bank%d", i);
+		if (!a->attr.name)
+			goto nomem;
+		a->attr.mode = 0644;
+		a->show = show_bank;
+		a->store = set_bank;
+	}
+	return 0;
+
+nomem:
+	while (--i >= 0)
+		kfree(bank_attrs[i].attr.name);
+	kfree(bank_attrs);
+	bank_attrs = NULL;
+	return -ENOMEM;
+}
+
 static __init int mce_init_device(void)
 {
 	int err;
@@ -906,6 +1161,11 @@ static __init int mce_init_device(void)
 
 	if (!mce_available(&boot_cpu_data))
 		return -EIO;
+
+	err = mce_init_banks();
+	if (err)
+		return err;
+
 	err = sysdev_class_register(&mce_sysclass);
 	if (err)
 		return err;
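
One subtlety in the dynamic sysfs code above: show_bank() and set_bank() recover which bank they were invoked for by pointer arithmetic, attr - bank_attrs, which works only because mce_init_banks() allocates all bank attributes as a single contiguous array. A minimal standalone sketch of the same pattern (hypothetical names, not kernel API):

    struct attr_like { const char *name; };
    static struct attr_like table[8];          /* one slot per bank */

    static int bank_index(const struct attr_like *a)
    {
        /* valid only because 'a' always points into 'table' */
        return a - table;
    }
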
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index 9817506dd469..c5a32f92d07e 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -79,6 +79,8 @@ static unsigned char shared_bank[NR_BANKS] = {
 
 static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */
 
+static void amd_threshold_interrupt(void);
+
 /*
  * CPU Initialization
  */
@@ -174,6 +176,8 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
 			tr.reset = 0;
 			tr.old_limit = 0;
 			threshold_restart_bank(&tr);
+
+			mce_threshold_vector = amd_threshold_interrupt;
 		}
 	}
 }
@@ -187,19 +191,13 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
  * the interrupt goes off when error_count reaches threshold_limit.
  * the handler will simply log mcelog w/ software defined bank number.
  */
-asmlinkage void mce_threshold_interrupt(void)
+static void amd_threshold_interrupt(void)
 {
 	unsigned int bank, block;
 	struct mce m;
 	u32 low = 0, high = 0, address = 0;
 
-	ack_APIC_irq();
-	exit_idle();
-	irq_enter();
-
-	memset(&m, 0, sizeof(m));
-	rdtscll(m.tsc);
-	m.cpu = smp_processor_id();
+	mce_setup(&m);
 
 	/* assume first bank caused it */
 	for (bank = 0; bank < NR_BANKS; ++bank) {
@@ -233,7 +231,8 @@ static void amd_threshold_interrupt(void)
 
 			/* Log the machine check that caused the threshold
 			   event. */
-			do_machine_check(NULL, 0);
+			machine_check_poll(MCP_TIMESTAMP,
+					   &__get_cpu_var(mce_poll_banks));
 
 			if (high & MASK_OVERFLOW_HI) {
 				rdmsrl(address, m.misc);
@@ -243,13 +242,10 @@ static void amd_threshold_interrupt(void)
 					       + bank * NR_BLOCKS
 					       + block;
 				mce_log(&m);
-				goto out;
+				return;
 			}
 		}
 	}
-out:
-	inc_irq_stat(irq_threshold_count);
-	irq_exit();
 }
 
 /*
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
index aa5e287c98e0..aaa7d9730938 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
@@ -1,6 +1,8 @@
 /*
  * Intel specific MCE features.
  * Copyright 2004 Zwane Mwaikambo <zwane@linuxpower.ca>
+ * Copyright (C) 2008, 2009 Intel Corporation
+ * Author: Andi Kleen
  */
 
 #include <linux/init.h>
@@ -13,6 +15,7 @@
 #include <asm/hw_irq.h>
 #include <asm/idle.h>
 #include <asm/therm_throt.h>
+#include <asm/apic.h>
 
 asmlinkage void smp_thermal_interrupt(void)
 {
@@ -25,7 +28,7 @@ asmlinkage void smp_thermal_interrupt(void)
 
 	rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
 	if (therm_throt_process(msr_val & 1))
-		mce_log_therm_throt_event(smp_processor_id(), msr_val);
+		mce_log_therm_throt_event(msr_val);
 
 	inc_irq_stat(irq_thermal_count);
 	irq_exit();
@@ -85,7 +88,209 @@ static void intel_init_thermal(struct cpuinfo_x86 *c)
 		return;
 }
 
+/*
+ * Support for Intel Correct Machine Check Interrupts. This allows
+ * the CPU to raise an interrupt when a corrected machine check happened.
+ * Normally we pick those up using a regular polling timer.
+ * Also supports reliable discovery of shared banks.
+ */
+
+static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned);
+
+/*
+ * cmci_discover_lock protects against parallel discovery attempts
+ * which could race against each other.
+ */
+static DEFINE_SPINLOCK(cmci_discover_lock);
+
+#define CMCI_THRESHOLD 1
+
+static int cmci_supported(int *banks)
+{
+	u64 cap;
+
+	/*
+	 * Vendor check is not strictly needed, but the initial
+	 * initialization is vendor keyed and this
+	 * makes sure none of the backdoors are entered otherwise.
+	 */
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+		return 0;
+	if (!cpu_has_apic || lapic_get_maxlvt() < 6)
+		return 0;
+	rdmsrl(MSR_IA32_MCG_CAP, cap);
+	*banks = min_t(unsigned, MAX_NR_BANKS, cap & 0xff);
+	return !!(cap & MCG_CMCI_P);
+}
+
+/*
+ * The interrupt handler. This is called on every event.
+ * Just call the poller directly to log any events.
+ * This could in theory increase the threshold under high load,
+ * but doesn't for now.
+ */
+static void intel_threshold_interrupt(void)
+{
+	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
+	mce_notify_user();
+}
+
+static void print_update(char *type, int *hdr, int num)
+{
+	if (*hdr == 0)
+		printk(KERN_INFO "CPU %d MCA banks", smp_processor_id());
+	*hdr = 1;
+	printk(KERN_CONT " %s:%d", type, num);
+}
+
+/*
+ * Enable CMCI (Corrected Machine Check Interrupt) for available MCE banks
+ * on this CPU. Use the algorithm recommended in the SDM to discover shared
+ * banks.
+ */
+static void cmci_discover(int banks, int boot)
+{
+	unsigned long *owned = (void *)&__get_cpu_var(mce_banks_owned);
+	int hdr = 0;
+	int i;
+
+	spin_lock(&cmci_discover_lock);
+	for (i = 0; i < banks; i++) {
+		u64 val;
+
+		if (test_bit(i, owned))
+			continue;
+
+		rdmsrl(MSR_IA32_MC0_CTL2 + i, val);
+
+		/* Already owned by someone else? */
+		if (val & CMCI_EN) {
+			if (test_and_clear_bit(i, owned) || boot)
+				print_update("SHD", &hdr, i);
+			__clear_bit(i, __get_cpu_var(mce_poll_banks));
+			continue;
+		}
+
+		val |= CMCI_EN | CMCI_THRESHOLD;
+		wrmsrl(MSR_IA32_MC0_CTL2 + i, val);
+		rdmsrl(MSR_IA32_MC0_CTL2 + i, val);
+
+		/* Did the enable bit stick? -- the bank supports CMCI */
+		if (val & CMCI_EN) {
+			if (!test_and_set_bit(i, owned) || boot)
+				print_update("CMCI", &hdr, i);
+			__clear_bit(i, __get_cpu_var(mce_poll_banks));
+		} else {
+			WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
+		}
+	}
+	spin_unlock(&cmci_discover_lock);
+	if (hdr)
+		printk(KERN_CONT "\n");
+}
+
+/*
+ * Just in case we missed an event during initialization check
+ * all the CMCI owned banks.
+ */
+void cmci_recheck(void)
+{
+	unsigned long flags;
+	int banks;
+
+	if (!mce_available(&current_cpu_data) || !cmci_supported(&banks))
+		return;
+	local_irq_save(flags);
+	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
+	local_irq_restore(flags);
+}
+
+/*
+ * Disable CMCI on this CPU for all banks it owns when it goes down.
+ * This allows other CPUs to claim the banks on rediscovery.
+ */
+void cmci_clear(void)
+{
+	int i;
+	int banks;
+	u64 val;
+
+	if (!cmci_supported(&banks))
+		return;
+	spin_lock(&cmci_discover_lock);
+	for (i = 0; i < banks; i++) {
+		if (!test_bit(i, __get_cpu_var(mce_banks_owned)))
+			continue;
+		/* Disable CMCI */
+		rdmsrl(MSR_IA32_MC0_CTL2 + i, val);
+		val &= ~(CMCI_EN|CMCI_THRESHOLD_MASK);
+		wrmsrl(MSR_IA32_MC0_CTL2 + i, val);
+		__clear_bit(i, __get_cpu_var(mce_banks_owned));
+	}
+	spin_unlock(&cmci_discover_lock);
+}
+
+/*
+ * After a CPU went down cycle through all the others and rediscover
+ * Must run in process context.
+ */
+void cmci_rediscover(int dying)
+{
+	int banks;
+	int cpu;
+	cpumask_var_t old;
+
+	if (!cmci_supported(&banks))
+		return;
+	if (!alloc_cpumask_var(&old, GFP_KERNEL))
+		return;
+	cpumask_copy(old, &current->cpus_allowed);
+
+	for_each_online_cpu (cpu) {
+		if (cpu == dying)
+			continue;
+		if (set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)))
+			continue;
+		/* Recheck banks in case CPUs don't all have the same */
+		if (cmci_supported(&banks))
+			cmci_discover(banks, 0);
+	}
+
+	set_cpus_allowed_ptr(current, old);
+	free_cpumask_var(old);
+}
+
+/*
+ * Reenable CMCI on this CPU in case a CPU down failed.
+ */
+void cmci_reenable(void)
+{
+	int banks;
+	if (cmci_supported(&banks))
+		cmci_discover(banks, 0);
+}
+
+static __cpuinit void intel_init_cmci(void)
+{
+	int banks;
+
+	if (!cmci_supported(&banks))
+		return;
+
+	mce_threshold_vector = intel_threshold_interrupt;
+	cmci_discover(banks, 1);
+	/*
+	 * For CPU #0 this runs with still disabled APIC, but that's
+	 * ok because only the vector is set up. We still do another
+	 * check for the banks later for CPU #0 just to make sure
+	 * to not miss any events.
+	 */
+	apic_write(APIC_LVTCMCI, THRESHOLD_APIC_VECTOR|APIC_DM_FIXED);
+	cmci_recheck();
+}
+
 void mce_intel_feature_init(struct cpuinfo_x86 *c)
 {
 	intel_init_thermal(c);
+	intel_init_cmci();
 }
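
The discovery loop above doubles as an ownership protocol: CMCI_EN reads back set either because a sibling CPU on a shared bank already claimed it ("SHD"), or because this CPU's own write stuck, proving the bank supports CMCI and is now owned here; banks claimed either way are dropped from mce_poll_banks so the polling timer and the interrupt never race over the same bank. A condensed illustrative restatement of the per-bank claim:

    rdmsrl(MSR_IA32_MC0_CTL2 + i, val);
    if (val & CMCI_EN) {
        /* shared bank, already claimed by a sibling CPU */
    } else {
        val |= CMCI_EN | CMCI_THRESHOLD;
        wrmsrl(MSR_IA32_MC0_CTL2 + i, val);
        rdmsrl(MSR_IA32_MC0_CTL2 + i, val);
        if (val & CMCI_EN)
            ;  /* bit stuck: this CPU owns the bank now */
        else
            ;  /* bank has no CMCI: leave it to the poller */
    }
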
diff --git a/arch/x86/kernel/cpu/mcheck/threshold.c b/arch/x86/kernel/cpu/mcheck/threshold.c
new file mode 100644
index 000000000000..23ee9e730f78
--- /dev/null
+++ b/arch/x86/kernel/cpu/mcheck/threshold.c
@@ -0,0 +1,29 @@
+/*
+ * Common corrected MCE threshold handler code:
+ */
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+
+#include <asm/irq_vectors.h>
+#include <asm/apic.h>
+#include <asm/idle.h>
+#include <asm/mce.h>
+
+static void default_threshold_interrupt(void)
+{
+	printk(KERN_ERR "Unexpected threshold interrupt at vector %x\n",
+		 THRESHOLD_APIC_VECTOR);
+}
+
+void (*mce_threshold_vector)(void) = default_threshold_interrupt;
+
+asmlinkage void mce_threshold_interrupt(void)
+{
+	exit_idle();
+	irq_enter();
+	inc_irq_stat(irq_threshold_count);
+	mce_threshold_vector();
+	irq_exit();
+	/* Ack only at the end to avoid potential reentry */
+	ack_APIC_irq();
+}
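
The function-pointer indirection keeps the low-level APIC entry point common while letting each vendor install its own handler at feature-init time, exactly as mce_amd_feature_init() and intel_init_cmci() do above. A hedged sketch of how a hypothetical third handler would hook in:

    /* Hypothetical vendor hook through mce_threshold_vector. */
    static void my_threshold_handler(void)
    {
        /* poll the banks this CPU watches, then notify userspace */
        machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_poll_banks));
        mce_notify_user();
    }

    static void my_feature_init(void)
    {
        mce_threshold_vector = my_threshold_handler;
    }
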