diff options
author | Ingo Molnar <mingo@kernel.org> | 2015-02-19 07:18:02 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2015-02-19 07:31:33 -0500 |
commit | fa45a45ca34891614789e68dfbf7ce344c9013ac (patch) | |
tree | 175d96fcb279055882c08be06e1c9f5806d87476 | |
parent | e07e0d4cb0c4bfe822ec8491cc06269096a38bea (diff) | |
parent | d79f931f1c8b6963e13a3738ef2906ba89bb8d12 (diff) |
Merge tag 'ras_for_3.21' of git://git.kernel.org/pub/scm/linux/kernel/git/ras/ras into x86/ras
Pull RAS updates from Borislav Petkov:
"- Enable AMD thresholding IRQ by default if supported. (Aravind Gopalakrishnan)
- Unify mce_panic() message pattern. (Derek Che)
- A bit more involved simplification of the CMCI logic after yet another
report about race condition with the adaptive logic. (Borislav Petkov)
- ACPI APEI EINJ fleshing out of the user documentation. (Borislav Petkov)
- Minor cleanup. (Jan Beulich)"
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r-- | Documentation/acpi/apei/einj.txt | 196 | ||||
-rw-r--r-- | arch/x86/include/asm/mce.h | 8 | ||||
-rw-r--r-- | arch/x86/kernel/cpu/mcheck/mce-internal.h | 9 | ||||
-rw-r--r-- | arch/x86/kernel/cpu/mcheck/mce.c | 90 | ||||
-rw-r--r-- | arch/x86/kernel/cpu/mcheck/mce_amd.c | 11 | ||||
-rw-r--r-- | arch/x86/kernel/cpu/mcheck/mce_intel.c | 63 |
6 files changed, 228 insertions, 149 deletions
diff --git a/Documentation/acpi/apei/einj.txt b/Documentation/acpi/apei/einj.txt index f51861bcb07b..e550c8b98139 100644 --- a/Documentation/acpi/apei/einj.txt +++ b/Documentation/acpi/apei/einj.txt | |||
@@ -1,129 +1,177 @@ | |||
1 | APEI Error INJection | 1 | APEI Error INJection |
2 | ~~~~~~~~~~~~~~~~~~~~ | 2 | ~~~~~~~~~~~~~~~~~~~~ |
3 | 3 | ||
4 | EINJ provides a hardware error injection mechanism | 4 | EINJ provides a hardware error injection mechanism. It is very useful |
5 | It is very useful for debugging and testing of other APEI and RAS features. | 5 | for debugging and testing APEI and RAS features in general. |
6 | 6 | ||
7 | To use EINJ, make sure the following are enabled in your kernel | 7 | You need to check whether your BIOS supports EINJ first. For that, look |
8 | for early boot messages similar to this one: | ||
9 | |||
10 | ACPI: EINJ 0x000000007370A000 000150 (v01 INTEL 00000001 INTL 00000001) | ||
11 | |||
12 | which shows that the BIOS is exposing an EINJ table - it is the | ||
13 | mechanism through which the injection is done. | ||
14 | |||
15 | Alternatively, look in /sys/firmware/acpi/tables for an "EINJ" file, | ||
16 | which is a different representation of the same thing. | ||
17 | |||
18 | It doesn't necessarily mean that EINJ is not supported if those above | ||
19 | don't exist: before you give up, go into BIOS setup to see if the BIOS | ||
20 | has an option to enable error injection. Look for something called WHEA | ||
21 | or similar. Often, you need to enable an ACPI5 support option prior, in | ||
22 | order to see the APEI,EINJ,... functionality supported and exposed by | ||
23 | the BIOS menu. | ||
24 | |||
25 | To use EINJ, make sure the following options are enabled in your kernel | ||
8 | configuration: | 26 | configuration: |
9 | 27 | ||
10 | CONFIG_DEBUG_FS | 28 | CONFIG_DEBUG_FS |
11 | CONFIG_ACPI_APEI | 29 | CONFIG_ACPI_APEI |
12 | CONFIG_ACPI_APEI_EINJ | 30 | CONFIG_ACPI_APEI_EINJ |
13 | 31 | ||
14 | The user interface of EINJ is debug file system, under the | 32 | The EINJ user interface is in <debugfs mount point>/apei/einj. |
15 | directory apei/einj. The following files are provided. | 33 | |
34 | The following files belong to it: | ||
16 | 35 | ||
17 | - available_error_type | 36 | - available_error_type |
18 | Reading this file returns the error injection capability of the | 37 | |
19 | platform, that is, which error types are supported. The error type | 38 | This file shows which error types are supported: |
20 | definition is as follow, the left field is the error type value, the | 39 | |
21 | right field is error description. | 40 | Error Type Value Error Description |
22 | 41 | ================ ================= | |
23 | 0x00000001 Processor Correctable | 42 | 0x00000001 Processor Correctable |
24 | 0x00000002 Processor Uncorrectable non-fatal | 43 | 0x00000002 Processor Uncorrectable non-fatal |
25 | 0x00000004 Processor Uncorrectable fatal | 44 | 0x00000004 Processor Uncorrectable fatal |
26 | 0x00000008 Memory Correctable | 45 | 0x00000008 Memory Correctable |
27 | 0x00000010 Memory Uncorrectable non-fatal | 46 | 0x00000010 Memory Uncorrectable non-fatal |
28 | 0x00000020 Memory Uncorrectable fatal | 47 | 0x00000020 Memory Uncorrectable fatal |
29 | 0x00000040 PCI Express Correctable | 48 | 0x00000040 PCI Express Correctable |
30 | 0x00000080 PCI Express Uncorrectable fatal | 49 | 0x00000080 PCI Express Uncorrectable fatal |
31 | 0x00000100 PCI Express Uncorrectable non-fatal | 50 | 0x00000100 PCI Express Uncorrectable non-fatal |
32 | 0x00000200 Platform Correctable | 51 | 0x00000200 Platform Correctable |
33 | 0x00000400 Platform Uncorrectable non-fatal | 52 | 0x00000400 Platform Uncorrectable non-fatal |
34 | 0x00000800 Platform Uncorrectable fatal | 53 | 0x00000800 Platform Uncorrectable fatal |
35 | 54 | ||
36 | The format of file contents are as above, except there are only the | 55 | The format of the file contents are as above, except present are only |
37 | available error type lines. | 56 | the available error types. |
38 | 57 | ||
39 | - error_type | 58 | - error_type |
40 | This file is used to set the error type value. The error type value | 59 | |
41 | is defined in "available_error_type" description. | 60 | Set the value of the error type being injected. Possible error types |
61 | are defined in the file available_error_type above. | ||
42 | 62 | ||
43 | - error_inject | 63 | - error_inject |
44 | Write any integer to this file to trigger the error | 64 | |
45 | injection. Before this, please specify all necessary error | 65 | Write any integer to this file to trigger the error injection. Make |
46 | parameters. | 66 | sure you have specified all necessary error parameters, i.e. this |
67 | write should be the last step when injecting errors. | ||
47 | 68 | ||
48 | - flags | 69 | - flags |
49 | Present for kernel version 3.13 and above. Used to specify which | 70 | |
50 | of param{1..4} are valid and should be used by BIOS during injection. | 71 | Present for kernel versions 3.13 and above. Used to specify which |
51 | Value is a bitmask as specified in ACPI5.0 spec for the | 72 | of param{1..4} are valid and should be used by the firmware during |
73 | injection. Value is a bitmask as specified in ACPI5.0 spec for the | ||
52 | SET_ERROR_TYPE_WITH_ADDRESS data structure: | 74 | SET_ERROR_TYPE_WITH_ADDRESS data structure: |
53 | Bit 0 - Processor APIC field valid (see param3 below) | 75 | |
54 | Bit 1 - Memory address and mask valid (param1 and param2) | 76 | Bit 0 - Processor APIC field valid (see param3 below). |
55 | Bit 2 - PCIe (seg,bus,dev,fn) valid (param4 below) | 77 | Bit 1 - Memory address and mask valid (param1 and param2). |
56 | If set to zero, legacy behaviour is used where the type of injection | 78 | Bit 2 - PCIe (seg,bus,dev,fn) valid (see param4 below). |
57 | specifies just one bit set, and param1 is multiplexed. | 79 | |
80 | If set to zero, legacy behavior is mimicked where the type of | ||
81 | injection specifies just one bit set, and param1 is multiplexed. | ||
58 | 82 | ||
59 | - param1 | 83 | - param1 |
60 | This file is used to set the first error parameter value. Effect of | 84 | |
61 | parameter depends on error_type specified. For example, if error | 85 | This file is used to set the first error parameter value. Its effect |
62 | type is memory related type, the param1 should be a valid physical | 86 | depends on the error type specified in error_type. For example, if |
63 | memory address. [Unless "flag" is set - see above] | 87 | error type is memory related type, the param1 should be a valid |
88 | physical memory address. [Unless "flag" is set - see above] | ||
64 | 89 | ||
65 | - param2 | 90 | - param2 |
66 | This file is used to set the second error parameter value. Effect of | 91 | |
67 | parameter depends on error_type specified. For example, if error | 92 | Same use as param1 above. For example, if error type is of memory |
68 | type is memory related type, the param2 should be a physical memory | 93 | related type, then param2 should be a physical memory address mask. |
69 | address mask. Linux requires page or narrower granularity, say, | 94 | Linux requires page or narrower granularity, say, 0xfffffffffffff000. |
70 | 0xfffffffffffff000. | ||
71 | 95 | ||
72 | - param3 | 96 | - param3 |
73 | Used when the 0x1 bit is set in "flag" to specify the APIC id | 97 | |
98 | Used when the 0x1 bit is set in "flags" to specify the APIC id | ||
74 | 99 | ||
75 | - param4 | 100 | - param4 |
76 | Used when the 0x4 bit is set in "flag" to specify target PCIe device | 101 | Used when the 0x4 bit is set in "flags" to specify target PCIe device |
77 | 102 | ||
78 | - notrigger | 103 | - notrigger |
79 | The EINJ mechanism is a two step process. First inject the error, then | 104 | |
80 | perform some actions to trigger it. Setting "notrigger" to 1 skips the | 105 | The error injection mechanism is a two-step process. First inject the |
81 | trigger phase, which *may* allow the user to cause the error in some other | 106 | error, then perform some actions to trigger it. Setting "notrigger" |
82 | context by a simple access to the cpu, memory location, or device that is | 107 | to 1 skips the trigger phase, which *may* allow the user to cause the |
83 | the target of the error injection. Whether this actually works depends | 108 | error in some other context by a simple access to the CPU, memory |
84 | on what operations the BIOS actually includes in the trigger phase. | 109 | location, or device that is the target of the error injection. Whether |
85 | 110 | this actually works depends on what operations the BIOS actually | |
86 | BIOS versions based in the ACPI 4.0 specification have limited options | 111 | includes in the trigger phase. |
87 | to control where the errors are injected. Your BIOS may support an | 112 | |
88 | extension (enabled with the param_extension=1 module parameter, or | 113 | BIOS versions based on the ACPI 4.0 specification have limited options |
89 | boot command line einj.param_extension=1). This allows the address | 114 | in controlling where the errors are injected. Your BIOS may support an |
90 | and mask for memory injections to be specified by the param1 and | 115 | extension (enabled with the param_extension=1 module parameter, or boot |
91 | param2 files in apei/einj. | 116 | command line einj.param_extension=1). This allows the address and mask |
92 | 117 | for memory injections to be specified by the param1 and param2 files in | |
93 | BIOS versions using the ACPI 5.0 specification have more control over | 118 | apei/einj. |
94 | the target of the injection. For processor related errors (type 0x1, | 119 | |
95 | 0x2 and 0x4) the APICID of the target should be provided using the | 120 | BIOS versions based on the ACPI 5.0 specification have more control over |
96 | param1 file in apei/einj. For memory errors (type 0x8, 0x10 and 0x20) | 121 | the target of the injection. For processor-related errors (type 0x1, 0x2 |
97 | the address is set using param1 with a mask in param2 (0x0 is equivalent | 122 | and 0x4), you can set flags to 0x3 (param3 for bit 0, and param1 and |
98 | to all ones). For PCI express errors (type 0x40, 0x80 and 0x100) the | 123 | param2 for bit 1) so that you have more information added to the error |
99 | segment, bus, device and function are specified using param1: | 124 | signature being injected. The actual data passed is this: |
125 | |||
126 | memory_address = param1; | ||
127 | memory_address_range = param2; | ||
128 | apicid = param3; | ||
129 | pcie_sbdf = param4; | ||
130 | |||
131 | For memory errors (type 0x8, 0x10 and 0x20) the address is set using | ||
132 | param1 with a mask in param2 (0x0 is equivalent to all ones). For PCI | ||
133 | express errors (type 0x40, 0x80 and 0x100) the segment, bus, device and | ||
134 | function are specified using param1: | ||
100 | 135 | ||
101 | 31 24 23 16 15 11 10 8 7 0 | 136 | 31 24 23 16 15 11 10 8 7 0 |
102 | +-------------------------------------------------+ | 137 | +-------------------------------------------------+ |
103 | | segment | bus | device | function | reserved | | 138 | | segment | bus | device | function | reserved | |
104 | +-------------------------------------------------+ | 139 | +-------------------------------------------------+ |
105 | 140 | ||
106 | An ACPI 5.0 BIOS may also allow vendor specific errors to be injected. | 141 | Anyway, you get the idea, if there's doubt just take a look at the code |
142 | in drivers/acpi/apei/einj.c. | ||
143 | |||
144 | An ACPI 5.0 BIOS may also allow vendor-specific errors to be injected. | ||
107 | In this case a file named vendor will contain identifying information | 145 | In this case a file named vendor will contain identifying information |
108 | from the BIOS that hopefully will allow an application wishing to use | 146 | from the BIOS that hopefully will allow an application wishing to use |
109 | the vendor specific extension to tell that they are running on a BIOS | 147 | the vendor-specific extension to tell that they are running on a BIOS |
110 | that supports it. All vendor extensions have the 0x80000000 bit set in | 148 | that supports it. All vendor extensions have the 0x80000000 bit set in |
111 | error_type. A file vendor_flags controls the interpretation of param1 | 149 | error_type. A file vendor_flags controls the interpretation of param1 |
112 | and param2 (1 = PROCESSOR, 2 = MEMORY, 4 = PCI). See your BIOS vendor | 150 | and param2 (1 = PROCESSOR, 2 = MEMORY, 4 = PCI). See your BIOS vendor |
113 | documentation for details (and expect changes to this API if vendors | 151 | documentation for details (and expect changes to this API if vendors |
114 | creativity in using this feature expands beyond our expectations). | 152 | creativity in using this feature expands beyond our expectations). |
115 | 153 | ||
116 | Example: | 154 | |
155 | An error injection example: | ||
156 | |||
117 | # cd /sys/kernel/debug/apei/einj | 157 | # cd /sys/kernel/debug/apei/einj |
118 | # cat available_error_type # See which errors can be injected | 158 | # cat available_error_type # See which errors can be injected |
119 | 0x00000002 Processor Uncorrectable non-fatal | 159 | 0x00000002 Processor Uncorrectable non-fatal |
120 | 0x00000008 Memory Correctable | 160 | 0x00000008 Memory Correctable |
121 | 0x00000010 Memory Uncorrectable non-fatal | 161 | 0x00000010 Memory Uncorrectable non-fatal |
122 | # echo 0x12345000 > param1 # Set memory address for injection | 162 | # echo 0x12345000 > param1 # Set memory address for injection |
123 | # echo 0xfffffffffffff000 > param2 # Mask - anywhere in this page | 163 | # echo $((-1 << 12)) > param2 # Mask 0xfffffffffffff000 - anywhere in this page |
124 | # echo 0x8 > error_type # Choose correctable memory error | 164 | # echo 0x8 > error_type # Choose correctable memory error |
125 | # echo 1 > error_inject # Inject now | 165 | # echo 1 > error_inject # Inject now |
126 | 166 | ||
167 | You should see something like this in dmesg: | ||
168 | |||
169 | [22715.830801] EDAC sbridge MC3: HANDLING MCE MEMORY ERROR | ||
170 | [22715.834759] EDAC sbridge MC3: CPU 0: Machine Check Event: 0 Bank 7: 8c00004000010090 | ||
171 | [22715.834759] EDAC sbridge MC3: TSC 0 | ||
172 | [22715.834759] EDAC sbridge MC3: ADDR 12345000 EDAC sbridge MC3: MISC 144780c86 | ||
173 | [22715.834759] EDAC sbridge MC3: PROCESSOR 0:306e7 TIME 1422553404 SOCKET 0 APIC 0 | ||
174 | [22716.616173] EDAC MC3: 1 CE memory read error on CPU_SrcID#0_Channel#0_DIMM#0 (channel:0 slot:0 page:0x12345 offset:0x0 grain:32 syndrome:0x0 - area:DRAM err_code:0001:0090 socket:0 channel_mask:1 rank:0) | ||
127 | 175 | ||
128 | For more information about EINJ, please refer to ACPI specification | 176 | For more information about EINJ, please refer to ACPI specification |
129 | version 4.0, section 17.5 and ACPI 5.0, section 18.6. | 177 | version 4.0, section 17.5 and ACPI 5.0, section 18.6. |
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index 9b3de99dc004..fd38a23e729f 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h | |||
@@ -183,11 +183,11 @@ typedef DECLARE_BITMAP(mce_banks_t, MAX_NR_BANKS); | |||
183 | DECLARE_PER_CPU(mce_banks_t, mce_poll_banks); | 183 | DECLARE_PER_CPU(mce_banks_t, mce_poll_banks); |
184 | 184 | ||
185 | enum mcp_flags { | 185 | enum mcp_flags { |
186 | MCP_TIMESTAMP = (1 << 0), /* log time stamp */ | 186 | MCP_TIMESTAMP = BIT(0), /* log time stamp */ |
187 | MCP_UC = (1 << 1), /* log uncorrected errors */ | 187 | MCP_UC = BIT(1), /* log uncorrected errors */ |
188 | MCP_DONTLOG = (1 << 2), /* only clear, don't log */ | 188 | MCP_DONTLOG = BIT(2), /* only clear, don't log */ |
189 | }; | 189 | }; |
190 | void machine_check_poll(enum mcp_flags flags, mce_banks_t *b); | 190 | bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b); |
191 | 191 | ||
192 | int mce_notify_irq(void); | 192 | int mce_notify_irq(void); |
193 | 193 | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h index 10b46906767f..e12f0bfb45c1 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-internal.h +++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h | |||
@@ -14,6 +14,7 @@ enum severity_level { | |||
14 | }; | 14 | }; |
15 | 15 | ||
16 | #define ATTR_LEN 16 | 16 | #define ATTR_LEN 16 |
17 | #define INITIAL_CHECK_INTERVAL 5 * 60 /* 5 minutes */ | ||
17 | 18 | ||
18 | /* One object for each MCE bank, shared by all CPUs */ | 19 | /* One object for each MCE bank, shared by all CPUs */ |
19 | struct mce_bank { | 20 | struct mce_bank { |
@@ -30,13 +31,13 @@ extern struct mce_bank *mce_banks; | |||
30 | extern mce_banks_t mce_banks_ce_disabled; | 31 | extern mce_banks_t mce_banks_ce_disabled; |
31 | 32 | ||
32 | #ifdef CONFIG_X86_MCE_INTEL | 33 | #ifdef CONFIG_X86_MCE_INTEL |
33 | unsigned long mce_intel_adjust_timer(unsigned long interval); | 34 | unsigned long cmci_intel_adjust_timer(unsigned long interval); |
34 | void mce_intel_cmci_poll(void); | 35 | bool mce_intel_cmci_poll(void); |
35 | void mce_intel_hcpu_update(unsigned long cpu); | 36 | void mce_intel_hcpu_update(unsigned long cpu); |
36 | void cmci_disable_bank(int bank); | 37 | void cmci_disable_bank(int bank); |
37 | #else | 38 | #else |
38 | # define mce_intel_adjust_timer mce_adjust_timer_default | 39 | # define cmci_intel_adjust_timer mce_adjust_timer_default |
39 | static inline void mce_intel_cmci_poll(void) { } | 40 | static inline bool mce_intel_cmci_poll(void) { return false; } |
40 | static inline void mce_intel_hcpu_update(unsigned long cpu) { } | 41 | static inline void mce_intel_hcpu_update(unsigned long cpu) { } |
41 | static inline void cmci_disable_bank(int bank) { } | 42 | static inline void cmci_disable_bank(int bank) { } |
42 | #endif | 43 | #endif |
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index cdfed7953963..d760931a4546 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c | |||
@@ -59,7 +59,7 @@ static DEFINE_MUTEX(mce_chrdev_read_mutex); | |||
59 | #define CREATE_TRACE_POINTS | 59 | #define CREATE_TRACE_POINTS |
60 | #include <trace/events/mce.h> | 60 | #include <trace/events/mce.h> |
61 | 61 | ||
62 | #define SPINUNIT 100 /* 100ns */ | 62 | #define SPINUNIT 100 /* 100ns */ |
63 | 63 | ||
64 | DEFINE_PER_CPU(unsigned, mce_exception_count); | 64 | DEFINE_PER_CPU(unsigned, mce_exception_count); |
65 | 65 | ||
@@ -88,9 +88,6 @@ static DECLARE_WAIT_QUEUE_HEAD(mce_chrdev_wait); | |||
88 | static DEFINE_PER_CPU(struct mce, mces_seen); | 88 | static DEFINE_PER_CPU(struct mce, mces_seen); |
89 | static int cpu_missing; | 89 | static int cpu_missing; |
90 | 90 | ||
91 | /* CMCI storm detection filter */ | ||
92 | static DEFINE_PER_CPU(unsigned long, mce_polled_error); | ||
93 | |||
94 | /* | 91 | /* |
95 | * MCA banks polled by the period polling timer for corrected events. | 92 | * MCA banks polled by the period polling timer for corrected events. |
96 | * With Intel CMCI, this only has MCA banks which do not support CMCI (if any). | 93 | * With Intel CMCI, this only has MCA banks which do not support CMCI (if any). |
@@ -624,8 +621,9 @@ DEFINE_PER_CPU(unsigned, mce_poll_count); | |||
624 | * is already totally * confused. In this case it's likely it will | 621 | * is already totally * confused. In this case it's likely it will |
625 | * not fully execute the machine check handler either. | 622 | * not fully execute the machine check handler either. |
626 | */ | 623 | */ |
627 | void machine_check_poll(enum mcp_flags flags, mce_banks_t *b) | 624 | bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b) |
628 | { | 625 | { |
626 | bool error_logged = false; | ||
629 | struct mce m; | 627 | struct mce m; |
630 | int severity; | 628 | int severity; |
631 | int i; | 629 | int i; |
@@ -648,7 +646,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b) | |||
648 | if (!(m.status & MCI_STATUS_VAL)) | 646 | if (!(m.status & MCI_STATUS_VAL)) |
649 | continue; | 647 | continue; |
650 | 648 | ||
651 | this_cpu_write(mce_polled_error, 1); | 649 | |
652 | /* | 650 | /* |
653 | * Uncorrected or signalled events are handled by the exception | 651 | * Uncorrected or signalled events are handled by the exception |
654 | * handler when it is enabled, so don't process those here. | 652 | * handler when it is enabled, so don't process those here. |
@@ -681,8 +679,10 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b) | |||
681 | * Don't get the IP here because it's unlikely to | 679 | * Don't get the IP here because it's unlikely to |
682 | * have anything to do with the actual error location. | 680 | * have anything to do with the actual error location. |
683 | */ | 681 | */ |
684 | if (!(flags & MCP_DONTLOG) && !mca_cfg.dont_log_ce) | 682 | if (!(flags & MCP_DONTLOG) && !mca_cfg.dont_log_ce) { |
683 | error_logged = true; | ||
685 | mce_log(&m); | 684 | mce_log(&m); |
685 | } | ||
686 | 686 | ||
687 | /* | 687 | /* |
688 | * Clear state for this bank. | 688 | * Clear state for this bank. |
@@ -696,6 +696,8 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b) | |||
696 | */ | 696 | */ |
697 | 697 | ||
698 | sync_core(); | 698 | sync_core(); |
699 | |||
700 | return error_logged; | ||
699 | } | 701 | } |
700 | EXPORT_SYMBOL_GPL(machine_check_poll); | 702 | EXPORT_SYMBOL_GPL(machine_check_poll); |
701 | 703 | ||
@@ -815,7 +817,7 @@ static void mce_reign(void) | |||
815 | * other CPUs. | 817 | * other CPUs. |
816 | */ | 818 | */ |
817 | if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) | 819 | if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) |
818 | mce_panic("Fatal Machine check", m, msg); | 820 | mce_panic("Fatal machine check", m, msg); |
819 | 821 | ||
820 | /* | 822 | /* |
821 | * For UC somewhere we let the CPU who detects it handle it. | 823 | * For UC somewhere we let the CPU who detects it handle it. |
@@ -828,7 +830,7 @@ static void mce_reign(void) | |||
828 | * source or one CPU is hung. Panic. | 830 | * source or one CPU is hung. Panic. |
829 | */ | 831 | */ |
830 | if (global_worst <= MCE_KEEP_SEVERITY && mca_cfg.tolerant < 3) | 832 | if (global_worst <= MCE_KEEP_SEVERITY && mca_cfg.tolerant < 3) |
831 | mce_panic("Machine check from unknown source", NULL, NULL); | 833 | mce_panic("Fatal machine check from unknown source", NULL, NULL); |
832 | 834 | ||
833 | /* | 835 | /* |
834 | * Now clear all the mces_seen so that they don't reappear on | 836 | * Now clear all the mces_seen so that they don't reappear on |
@@ -1260,7 +1262,7 @@ void mce_log_therm_throt_event(__u64 status) | |||
1260 | * poller finds an MCE, poll 2x faster. When the poller finds no more | 1262 | * poller finds an MCE, poll 2x faster. When the poller finds no more |
1261 | * errors, poll 2x slower (up to check_interval seconds). | 1263 | * errors, poll 2x slower (up to check_interval seconds). |
1262 | */ | 1264 | */ |
1263 | static unsigned long check_interval = 5 * 60; /* 5 minutes */ | 1265 | static unsigned long check_interval = INITIAL_CHECK_INTERVAL; |
1264 | 1266 | ||
1265 | static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */ | 1267 | static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */ |
1266 | static DEFINE_PER_CPU(struct timer_list, mce_timer); | 1268 | static DEFINE_PER_CPU(struct timer_list, mce_timer); |
@@ -1270,49 +1272,57 @@ static unsigned long mce_adjust_timer_default(unsigned long interval) | |||
1270 | return interval; | 1272 | return interval; |
1271 | } | 1273 | } |
1272 | 1274 | ||
1273 | static unsigned long (*mce_adjust_timer)(unsigned long interval) = | 1275 | static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default; |
1274 | mce_adjust_timer_default; | ||
1275 | 1276 | ||
1276 | static int cmc_error_seen(void) | 1277 | static void __restart_timer(struct timer_list *t, unsigned long interval) |
1277 | { | 1278 | { |
1278 | unsigned long *v = this_cpu_ptr(&mce_polled_error); | 1279 | unsigned long when = jiffies + interval; |
1280 | unsigned long flags; | ||
1281 | |||
1282 | local_irq_save(flags); | ||
1283 | |||
1284 | if (timer_pending(t)) { | ||
1285 | if (time_before(when, t->expires)) | ||
1286 | mod_timer_pinned(t, when); | ||
1287 | } else { | ||
1288 | t->expires = round_jiffies(when); | ||
1289 | add_timer_on(t, smp_processor_id()); | ||
1290 | } | ||
1279 | 1291 | ||
1280 | return test_and_clear_bit(0, v); | 1292 | local_irq_restore(flags); |
1281 | } | 1293 | } |
1282 | 1294 | ||
1283 | static void mce_timer_fn(unsigned long data) | 1295 | static void mce_timer_fn(unsigned long data) |
1284 | { | 1296 | { |
1285 | struct timer_list *t = this_cpu_ptr(&mce_timer); | 1297 | struct timer_list *t = this_cpu_ptr(&mce_timer); |
1298 | int cpu = smp_processor_id(); | ||
1286 | unsigned long iv; | 1299 | unsigned long iv; |
1287 | int notify; | ||
1288 | 1300 | ||
1289 | WARN_ON(smp_processor_id() != data); | 1301 | WARN_ON(cpu != data); |
1302 | |||
1303 | iv = __this_cpu_read(mce_next_interval); | ||
1290 | 1304 | ||
1291 | if (mce_available(this_cpu_ptr(&cpu_info))) { | 1305 | if (mce_available(this_cpu_ptr(&cpu_info))) { |
1292 | machine_check_poll(MCP_TIMESTAMP, | 1306 | machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_poll_banks)); |
1293 | this_cpu_ptr(&mce_poll_banks)); | 1307 | |
1294 | mce_intel_cmci_poll(); | 1308 | if (mce_intel_cmci_poll()) { |
1309 | iv = mce_adjust_timer(iv); | ||
1310 | goto done; | ||
1311 | } | ||
1295 | } | 1312 | } |
1296 | 1313 | ||
1297 | /* | 1314 | /* |
1298 | * Alert userspace if needed. If we logged an MCE, reduce the | 1315 | * Alert userspace if needed. If we logged an MCE, reduce the polling |
1299 | * polling interval, otherwise increase the polling interval. | 1316 | * interval, otherwise increase the polling interval. |
1300 | */ | 1317 | */ |
1301 | iv = __this_cpu_read(mce_next_interval); | 1318 | if (mce_notify_irq()) |
1302 | notify = mce_notify_irq(); | ||
1303 | notify |= cmc_error_seen(); | ||
1304 | if (notify) { | ||
1305 | iv = max(iv / 2, (unsigned long) HZ/100); | 1319 | iv = max(iv / 2, (unsigned long) HZ/100); |
1306 | } else { | 1320 | else |
1307 | iv = min(iv * 2, round_jiffies_relative(check_interval * HZ)); | 1321 | iv = min(iv * 2, round_jiffies_relative(check_interval * HZ)); |
1308 | iv = mce_adjust_timer(iv); | 1322 | |
1309 | } | 1323 | done: |
1310 | __this_cpu_write(mce_next_interval, iv); | 1324 | __this_cpu_write(mce_next_interval, iv); |
1311 | /* Might have become 0 after CMCI storm subsided */ | 1325 | __restart_timer(t, iv); |
1312 | if (iv) { | ||
1313 | t->expires = jiffies + iv; | ||
1314 | add_timer_on(t, smp_processor_id()); | ||
1315 | } | ||
1316 | } | 1326 | } |
1317 | 1327 | ||
1318 | /* | 1328 | /* |
@@ -1321,16 +1331,10 @@ static void mce_timer_fn(unsigned long data) | |||
1321 | void mce_timer_kick(unsigned long interval) | 1331 | void mce_timer_kick(unsigned long interval) |
1322 | { | 1332 | { |
1323 | struct timer_list *t = this_cpu_ptr(&mce_timer); | 1333 | struct timer_list *t = this_cpu_ptr(&mce_timer); |
1324 | unsigned long when = jiffies + interval; | ||
1325 | unsigned long iv = __this_cpu_read(mce_next_interval); | 1334 | unsigned long iv = __this_cpu_read(mce_next_interval); |
1326 | 1335 | ||
1327 | if (timer_pending(t)) { | 1336 | __restart_timer(t, interval); |
1328 | if (time_before(when, t->expires)) | 1337 | |
1329 | mod_timer_pinned(t, when); | ||
1330 | } else { | ||
1331 | t->expires = round_jiffies(when); | ||
1332 | add_timer_on(t, smp_processor_id()); | ||
1333 | } | ||
1334 | if (interval < iv) | 1338 | if (interval < iv) |
1335 | __this_cpu_write(mce_next_interval, interval); | 1339 | __this_cpu_write(mce_next_interval, interval); |
1336 | } | 1340 | } |
@@ -1631,7 +1635,7 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) | |||
1631 | switch (c->x86_vendor) { | 1635 | switch (c->x86_vendor) { |
1632 | case X86_VENDOR_INTEL: | 1636 | case X86_VENDOR_INTEL: |
1633 | mce_intel_feature_init(c); | 1637 | mce_intel_feature_init(c); |
1634 | mce_adjust_timer = mce_intel_adjust_timer; | 1638 | mce_adjust_timer = cmci_intel_adjust_timer; |
1635 | break; | 1639 | break; |
1636 | case X86_VENDOR_AMD: | 1640 | case X86_VENDOR_AMD: |
1637 | mce_amd_feature_init(c); | 1641 | mce_amd_feature_init(c); |
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index f1c3769bbd64..55ad9b37cae8 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c | |||
@@ -79,7 +79,7 @@ static inline bool is_shared_bank(int bank) | |||
79 | return (bank == 4); | 79 | return (bank == 4); |
80 | } | 80 | } |
81 | 81 | ||
82 | static const char * const bank4_names(struct threshold_block *b) | 82 | static const char *bank4_names(const struct threshold_block *b) |
83 | { | 83 | { |
84 | switch (b->address) { | 84 | switch (b->address) { |
85 | /* MSR4_MISC0 */ | 85 | /* MSR4_MISC0 */ |
@@ -250,6 +250,7 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) | |||
250 | if (!b.interrupt_capable) | 250 | if (!b.interrupt_capable) |
251 | goto init; | 251 | goto init; |
252 | 252 | ||
253 | b.interrupt_enable = 1; | ||
253 | new = (high & MASK_LVTOFF_HI) >> 20; | 254 | new = (high & MASK_LVTOFF_HI) >> 20; |
254 | offset = setup_APIC_mce(offset, new); | 255 | offset = setup_APIC_mce(offset, new); |
255 | 256 | ||
@@ -322,6 +323,8 @@ static void amd_threshold_interrupt(void) | |||
322 | log: | 323 | log: |
323 | mce_setup(&m); | 324 | mce_setup(&m); |
324 | rdmsrl(MSR_IA32_MCx_STATUS(bank), m.status); | 325 | rdmsrl(MSR_IA32_MCx_STATUS(bank), m.status); |
326 | if (!(m.status & MCI_STATUS_VAL)) | ||
327 | return; | ||
325 | m.misc = ((u64)high << 32) | low; | 328 | m.misc = ((u64)high << 32) | low; |
326 | m.bank = bank; | 329 | m.bank = bank; |
327 | mce_log(&m); | 330 | mce_log(&m); |
@@ -497,10 +500,12 @@ static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank, | |||
497 | b->interrupt_capable = lvt_interrupt_supported(bank, high); | 500 | b->interrupt_capable = lvt_interrupt_supported(bank, high); |
498 | b->threshold_limit = THRESHOLD_MAX; | 501 | b->threshold_limit = THRESHOLD_MAX; |
499 | 502 | ||
500 | if (b->interrupt_capable) | 503 | if (b->interrupt_capable) { |
501 | threshold_ktype.default_attrs[2] = &interrupt_enable.attr; | 504 | threshold_ktype.default_attrs[2] = &interrupt_enable.attr; |
502 | else | 505 | b->interrupt_enable = 1; |
506 | } else { | ||
503 | threshold_ktype.default_attrs[2] = NULL; | 507 | threshold_ktype.default_attrs[2] = NULL; |
508 | } | ||
504 | 509 | ||
505 | INIT_LIST_HEAD(&b->miscj); | 510 | INIT_LIST_HEAD(&b->miscj); |
506 | 511 | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c index b3c97bafc123..b4a41cf030ed 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_intel.c +++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c | |||
@@ -39,6 +39,15 @@ | |||
39 | static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned); | 39 | static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned); |
40 | 40 | ||
41 | /* | 41 | /* |
42 | * CMCI storm detection backoff counter | ||
43 | * | ||
44 | * During storm, we reset this counter to INITIAL_CHECK_INTERVAL in case we've | ||
45 | * encountered an error. If not, we decrement it by one. We signal the end of | ||
46 | * the CMCI storm when it reaches 0. | ||
47 | */ | ||
48 | static DEFINE_PER_CPU(int, cmci_backoff_cnt); | ||
49 | |||
50 | /* | ||
42 | * cmci_discover_lock protects against parallel discovery attempts | 51 | * cmci_discover_lock protects against parallel discovery attempts |
43 | * which could race against each other. | 52 | * which could race against each other. |
44 | */ | 53 | */ |
@@ -46,7 +55,7 @@ static DEFINE_RAW_SPINLOCK(cmci_discover_lock); | |||
46 | 55 | ||
47 | #define CMCI_THRESHOLD 1 | 56 | #define CMCI_THRESHOLD 1 |
48 | #define CMCI_POLL_INTERVAL (30 * HZ) | 57 | #define CMCI_POLL_INTERVAL (30 * HZ) |
49 | #define CMCI_STORM_INTERVAL (1 * HZ) | 58 | #define CMCI_STORM_INTERVAL (HZ) |
50 | #define CMCI_STORM_THRESHOLD 15 | 59 | #define CMCI_STORM_THRESHOLD 15 |
51 | 60 | ||
52 | static DEFINE_PER_CPU(unsigned long, cmci_time_stamp); | 61 | static DEFINE_PER_CPU(unsigned long, cmci_time_stamp); |
@@ -82,11 +91,21 @@ static int cmci_supported(int *banks) | |||
82 | return !!(cap & MCG_CMCI_P); | 91 | return !!(cap & MCG_CMCI_P); |
83 | } | 92 | } |
84 | 93 | ||
85 | void mce_intel_cmci_poll(void) | 94 | bool mce_intel_cmci_poll(void) |
86 | { | 95 | { |
87 | if (__this_cpu_read(cmci_storm_state) == CMCI_STORM_NONE) | 96 | if (__this_cpu_read(cmci_storm_state) == CMCI_STORM_NONE) |
88 | return; | 97 | return false; |
89 | machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned)); | 98 | |
99 | /* | ||
100 | * Reset the counter if we've logged an error in the last poll | ||
101 | * during the storm. | ||
102 | */ | ||
103 | if (machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned))) | ||
104 | this_cpu_write(cmci_backoff_cnt, INITIAL_CHECK_INTERVAL); | ||
105 | else | ||
106 | this_cpu_dec(cmci_backoff_cnt); | ||
107 | |||
108 | return true; | ||
90 | } | 109 | } |
91 | 110 | ||
92 | void mce_intel_hcpu_update(unsigned long cpu) | 111 | void mce_intel_hcpu_update(unsigned long cpu) |
@@ -97,31 +116,32 @@ void mce_intel_hcpu_update(unsigned long cpu) | |||
97 | per_cpu(cmci_storm_state, cpu) = CMCI_STORM_NONE; | 116 | per_cpu(cmci_storm_state, cpu) = CMCI_STORM_NONE; |
98 | } | 117 | } |
99 | 118 | ||
100 | unsigned long mce_intel_adjust_timer(unsigned long interval) | 119 | unsigned long cmci_intel_adjust_timer(unsigned long interval) |
101 | { | 120 | { |
102 | int r; | 121 | if ((this_cpu_read(cmci_backoff_cnt) > 0) && |
103 | 122 | (__this_cpu_read(cmci_storm_state) == CMCI_STORM_ACTIVE)) { | |
104 | if (interval < CMCI_POLL_INTERVAL) | 123 | mce_notify_irq(); |
105 | return interval; | 124 | return CMCI_STORM_INTERVAL; |
125 | } | ||
106 | 126 | ||
107 | switch (__this_cpu_read(cmci_storm_state)) { | 127 | switch (__this_cpu_read(cmci_storm_state)) { |
108 | case CMCI_STORM_ACTIVE: | 128 | case CMCI_STORM_ACTIVE: |
129 | |||
109 | /* | 130 | /* |
110 | * We switch back to interrupt mode once the poll timer has | 131 | * We switch back to interrupt mode once the poll timer has |
111 | * silenced itself. That means no events recorded and the | 132 | * silenced itself. That means no events recorded and the timer |
112 | * timer interval is back to our poll interval. | 133 | * interval is back to our poll interval. |
113 | */ | 134 | */ |
114 | __this_cpu_write(cmci_storm_state, CMCI_STORM_SUBSIDED); | 135 | __this_cpu_write(cmci_storm_state, CMCI_STORM_SUBSIDED); |
115 | r = atomic_sub_return(1, &cmci_storm_on_cpus); | 136 | if (!atomic_sub_return(1, &cmci_storm_on_cpus)) |
116 | if (r == 0) | ||
117 | pr_notice("CMCI storm subsided: switching to interrupt mode\n"); | 137 | pr_notice("CMCI storm subsided: switching to interrupt mode\n"); |
138 | |||
118 | /* FALLTHROUGH */ | 139 | /* FALLTHROUGH */ |
119 | 140 | ||
120 | case CMCI_STORM_SUBSIDED: | 141 | case CMCI_STORM_SUBSIDED: |
121 | /* | 142 | /* |
122 | * We wait for all cpus to go back to SUBSIDED | 143 | * We wait for all CPUs to go back to SUBSIDED state. When that |
123 | * state. When that happens we switch back to | 144 | * happens we switch back to interrupt mode. |
124 | * interrupt mode. | ||
125 | */ | 145 | */ |
126 | if (!atomic_read(&cmci_storm_on_cpus)) { | 146 | if (!atomic_read(&cmci_storm_on_cpus)) { |
127 | __this_cpu_write(cmci_storm_state, CMCI_STORM_NONE); | 147 | __this_cpu_write(cmci_storm_state, CMCI_STORM_NONE); |
@@ -130,10 +150,8 @@ unsigned long mce_intel_adjust_timer(unsigned long interval) | |||
130 | } | 150 | } |
131 | return CMCI_POLL_INTERVAL; | 151 | return CMCI_POLL_INTERVAL; |
132 | default: | 152 | default: |
133 | /* | 153 | |
134 | * We have shiny weather. Let the poll do whatever it | 154 | /* We have shiny weather. Let the poll do whatever it thinks. */ |
135 | * thinks. | ||
136 | */ | ||
137 | return interval; | 155 | return interval; |
138 | } | 156 | } |
139 | } | 157 | } |
@@ -178,7 +196,8 @@ static bool cmci_storm_detect(void) | |||
178 | cmci_storm_disable_banks(); | 196 | cmci_storm_disable_banks(); |
179 | __this_cpu_write(cmci_storm_state, CMCI_STORM_ACTIVE); | 197 | __this_cpu_write(cmci_storm_state, CMCI_STORM_ACTIVE); |
180 | r = atomic_add_return(1, &cmci_storm_on_cpus); | 198 | r = atomic_add_return(1, &cmci_storm_on_cpus); |
181 | mce_timer_kick(CMCI_POLL_INTERVAL); | 199 | mce_timer_kick(CMCI_STORM_INTERVAL); |
200 | this_cpu_write(cmci_backoff_cnt, INITIAL_CHECK_INTERVAL); | ||
182 | 201 | ||
183 | if (r == 1) | 202 | if (r == 1) |
184 | pr_notice("CMCI storm detected: switching to poll mode\n"); | 203 | pr_notice("CMCI storm detected: switching to poll mode\n"); |
@@ -195,6 +214,7 @@ static void intel_threshold_interrupt(void) | |||
195 | { | 214 | { |
196 | if (cmci_storm_detect()) | 215 | if (cmci_storm_detect()) |
197 | return; | 216 | return; |
217 | |||
198 | machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned)); | 218 | machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned)); |
199 | mce_notify_irq(); | 219 | mce_notify_irq(); |
200 | } | 220 | } |
@@ -286,6 +306,7 @@ void cmci_recheck(void) | |||
286 | 306 | ||
287 | if (!mce_available(raw_cpu_ptr(&cpu_info)) || !cmci_supported(&banks)) | 307 | if (!mce_available(raw_cpu_ptr(&cpu_info)) || !cmci_supported(&banks)) |
288 | return; | 308 | return; |
309 | |||
289 | local_irq_save(flags); | 310 | local_irq_save(flags); |
290 | machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned)); | 311 | machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned)); |
291 | local_irq_restore(flags); | 312 | local_irq_restore(flags); |