-rw-r--r--  Documentation/ABI/testing/sysfs-devices-system-cpu    |   1
-rw-r--r--  Documentation/admin-guide/hw-vuln/index.rst           |   1
-rw-r--r--  Documentation/admin-guide/hw-vuln/tsx_async_abort.rst | 276
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt       |  67
-rw-r--r--  Documentation/x86/index.rst                           |   1
-rw-r--r--  Documentation/x86/tsx_async_abort.rst                 | 117
-rw-r--r--  arch/x86/Kconfig                                      |  45
-rw-r--r--  arch/x86/include/asm/cpufeatures.h                    |   1
-rw-r--r--  arch/x86/include/asm/msr-index.h                      |   9
-rw-r--r--  arch/x86/include/asm/nospec-branch.h                  |   4
-rw-r--r--  arch/x86/include/asm/processor.h                      |   7
-rw-r--r--  arch/x86/kernel/cpu/Makefile                          |   2
-rw-r--r--  arch/x86/kernel/cpu/bugs.c                            | 131
-rw-r--r--  arch/x86/kernel/cpu/common.c                          |  32
-rw-r--r--  arch/x86/kernel/cpu/cpu.h                             |  18
-rw-r--r--  arch/x86/kernel/cpu/intel.c                           |   5
-rw-r--r--  arch/x86/kernel/cpu/tsx.c                             | 140
-rw-r--r--  arch/x86/kvm/x86.c                                    |  19
-rw-r--r--  drivers/base/cpu.c                                    |   9
-rw-r--r--  include/linux/cpu.h                                   |   3
20 files changed, 881 insertions(+), 7 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 06d0931119cc..0e77569bd5e0 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -486,6 +486,7 @@ What:           /sys/devices/system/cpu/vulnerabilities
                 /sys/devices/system/cpu/vulnerabilities/spec_store_bypass
                 /sys/devices/system/cpu/vulnerabilities/l1tf
                 /sys/devices/system/cpu/vulnerabilities/mds
+                /sys/devices/system/cpu/vulnerabilities/tsx_async_abort
 Date:           January 2018
 Contact:        Linux kernel mailing list <linux-kernel@vger.kernel.org>
 Description:    Information about CPU vulnerabilities
diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst
index 49311f3da6f2..0802b1c67452 100644
--- a/Documentation/admin-guide/hw-vuln/index.rst
+++ b/Documentation/admin-guide/hw-vuln/index.rst
@@ -12,3 +12,4 @@ are configurable at compile, boot or run time.
    spectre
    l1tf
    mds
+   tsx_async_abort
diff --git a/Documentation/admin-guide/hw-vuln/tsx_async_abort.rst b/Documentation/admin-guide/hw-vuln/tsx_async_abort.rst
new file mode 100644
index 000000000000..fddbd7579c53
--- /dev/null
+++ b/Documentation/admin-guide/hw-vuln/tsx_async_abort.rst
@@ -0,0 +1,276 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+TAA - TSX Asynchronous Abort
+============================
+
+TAA is a hardware vulnerability that allows unprivileged speculative access to
+data which is available in various CPU internal buffers by using asynchronous
+aborts within an Intel TSX transactional region.
+
+Affected processors
+-------------------
+
+This vulnerability only affects Intel processors that support Intel
+Transactional Synchronization Extensions (TSX) when the TAA_NO bit (bit 8)
+is 0 in the IA32_ARCH_CAPABILITIES MSR. On processors where the MDS_NO bit
+(bit 5) is 0 in the IA32_ARCH_CAPABILITIES MSR, the existing MDS mitigations
+also mitigate against TAA.
+
+Whether a processor is affected or not can be read out from the TAA
+vulnerability file in sysfs. See :ref:`tsx_async_abort_sys_info`.
+
+Related CVEs
+------------
+
+The following CVE entry is related to this TAA issue:
+
+   ==============  =====  ===================================================
+   CVE-2019-11135  TAA    TSX Asynchronous Abort (TAA) condition on some
+                          microprocessors utilizing speculative execution may
+                          allow an authenticated user to potentially enable
+                          information disclosure via a side channel with
+                          local access.
+   ==============  =====  ===================================================
+
+Problem
+-------
+
+When performing store, load or L1 refill operations, processors write
+data into temporary microarchitectural structures (buffers). The data in
+those buffers can be forwarded to load operations as an optimization.
+
+Intel TSX is an extension to the x86 instruction set architecture that adds
+hardware transactional memory support to improve performance of multi-threaded
+software. TSX lets the processor expose and exploit concurrency hidden in an
+application by dynamically avoiding unnecessary synchronization.
+
+TSX supports atomic memory transactions that are either committed (success) or
+aborted. During an abort, operations that happened within the transactional region
+are rolled back. An asynchronous abort takes place, among other options, when a
+different thread accesses a cache line that is also used within the transactional
+region when that access might lead to a data race.
+
+Immediately after an uncompleted asynchronous abort, certain speculatively
+executed loads may read data from those internal buffers and pass it to dependent
+operations. This can then be used to infer the value via a cache side channel
+attack.
+
+Because the buffers are potentially shared between Hyper-Threads, cross
+Hyper-Thread attacks are possible.
+
+The victim of a malicious actor does not need to make use of TSX. Only the
+attacker needs to begin a TSX transaction and raise an asynchronous abort
+which in turn potentially leaks data stored in the buffers.
+
+More detailed technical information is available in the TAA specific x86
+architecture section: :ref:`Documentation/x86/tsx_async_abort.rst <tsx_async_abort>`.
+
+
+Attack scenarios
+----------------
+
+Attacks against the TAA vulnerability can be implemented from unprivileged
+applications running on hosts or guests.
+
+As for MDS, the attacker has no control over the memory addresses that can
+be leaked. Only the victim is responsible for bringing data to the CPU. As
+a result, the malicious actor has to sample as much data as possible and
+then postprocess it to try to infer any useful information from it.
+
+A potential attacker only has read access to the data. Also, there is no direct
+privilege escalation by using this technique.
+
+
+.. _tsx_async_abort_sys_info:
+
+TAA system information
+----------------------
+
+The Linux kernel provides a sysfs interface to enumerate the current TAA status
+of mitigated systems. The relevant sysfs file is:
+
+   /sys/devices/system/cpu/vulnerabilities/tsx_async_abort
+
+The possible values in this file are:
+
+.. list-table::
+
+   * - 'Vulnerable'
+     - The CPU is affected by this vulnerability and the microcode and kernel mitigation are not applied.
+   * - 'Vulnerable: Clear CPU buffers attempted, no microcode'
+     - The system tries to clear the buffers but the microcode might not support the operation.
+   * - 'Mitigation: Clear CPU buffers'
+     - The microcode has been updated to clear the buffers. TSX is still enabled.
+   * - 'Mitigation: TSX disabled'
+     - TSX is disabled.
+   * - 'Not affected'
+     - The CPU is not affected by this issue.
+
+.. _ucode_needed:
+
+Best effort mitigation mode
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If the processor is vulnerable, but the availability of the microcode-based
+mitigation mechanism is not advertised via CPUID, the kernel selects a best
+effort mitigation mode. This mode invokes the mitigation instructions
+without a guarantee that they clear the CPU buffers.
+
+This is done to address virtualization scenarios where the host has the
+microcode update applied, but the hypervisor is not yet updated to expose the
+CPUID to the guest. If the host has updated microcode the protection takes
+effect; otherwise a few CPU cycles are wasted pointlessly.
+
+The state in the tsx_async_abort sysfs file reflects this situation
+accordingly.
+
+
+Mitigation mechanism
+--------------------
+
+The kernel detects the affected CPUs and the presence of the microcode which is
+required. If a CPU is affected and the microcode is available, then the kernel
+enables the mitigation by default.
+
+
+The mitigation can be controlled at boot time via a kernel command line option.
+See :ref:`taa_mitigation_control_command_line`.
+
+.. _virt_mechanism:
+
+Virtualization mitigation
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Affected systems where the host has the TAA microcode and TAA is mitigated by
+having previously disabled TSX are not vulnerable regardless of the status
+of the VMs.
+
+In all other cases, if the host either does not have the TAA microcode or
+the kernel is not mitigated, the system might be vulnerable.
+
+
+.. _taa_mitigation_control_command_line:
+
+Mitigation control on the kernel command line
+---------------------------------------------
+
+The kernel command line allows controlling the TAA mitigations at boot time with
+the option "tsx_async_abort=". The valid arguments for this option are:
+
+  ============  =============================================================
+  off           This option disables the TAA mitigation on affected platforms.
+                If the system has TSX enabled (see next parameter) and the CPU
+                is affected, the system is vulnerable.
+
+  full          TAA mitigation is enabled. If TSX is enabled, on an affected
+                system it will clear CPU buffers on ring transitions. On
+                systems which are MDS-affected and deploy MDS mitigation,
+                TAA is also mitigated. Specifying this option on those
+                systems will have no effect.
+
+  full,nosmt    The same as tsx_async_abort=full, with SMT disabled on
+                vulnerable CPUs that have TSX enabled. This is the complete
+                mitigation. When TSX is disabled, SMT is not disabled because
+                the CPU is not vulnerable to cross-thread TAA attacks.
+  ============  =============================================================
+
+Not specifying this option is equivalent to "tsx_async_abort=full".
+
+The kernel command line also allows controlling the TSX feature using the
+parameter "tsx=" on CPUs which support TSX control. MSR_IA32_TSX_CTRL is used
+to control the TSX feature and the enumeration of the TSX feature bits (RTM
+and HLE) in CPUID.
+
+The valid options are:
+
+  ============  =============================================================
+  off           Disables TSX on the system.
+
+                Note that this option takes effect only on newer CPUs which are
+                not vulnerable to MDS, i.e., have MSR_IA32_ARCH_CAPABILITIES.MDS_NO=1
+                and which get the new IA32_TSX_CTRL MSR through a microcode
+                update. This new MSR allows for the reliable deactivation of
+                the TSX functionality.
+
+  on            Enables TSX.
+
+                Although there are mitigations for all known security
+                vulnerabilities, TSX has been known to be an accelerator for
+                several previous speculation-related CVEs, and so there may be
+                unknown security risks associated with leaving it enabled.
+
+  auto          Disables TSX if X86_BUG_TAA is present, otherwise enables TSX
+                on the system.
+  ============  =============================================================
+
+Not specifying this option is equivalent to "tsx=off".
+
+The following combinations of the "tsx_async_abort" and "tsx" are possible. For
+affected platforms tsx=auto is equivalent to tsx=off and the result will be:
+
+  =========  ==========================  =========================================
+  tsx=on     tsx_async_abort=full        The system will use VERW to clear CPU
+                                         buffers. Cross-thread attacks are still
+                                         possible on SMT machines.
+  tsx=on     tsx_async_abort=full,nosmt  As above, cross-thread attacks on SMT
+                                         mitigated.
+  tsx=on     tsx_async_abort=off         The system is vulnerable.
+  tsx=off    tsx_async_abort=full        TSX might be disabled if microcode
+                                         provides a TSX control MSR. If so,
+                                         system is not vulnerable.
+  tsx=off    tsx_async_abort=full,nosmt  Ditto
+  tsx=off    tsx_async_abort=off         Ditto
+  =========  ==========================  =========================================
+
+
+For unaffected platforms "tsx=on" and "tsx_async_abort=full" do not clear CPU
+buffers. For platforms without TSX control (MSR_IA32_ARCH_CAPABILITIES.MDS_NO=0)
+the "tsx" command line argument has no effect.
+
+For the affected platforms, the table below indicates the mitigation status for
+the combinations of the CPUID bit MD_CLEAR and the IA32_ARCH_CAPABILITIES MSR
+bits MDS_NO and TSX_CTRL_MSR.
+
+  =======  =========  =============  ========================================
+  MDS_NO   MD_CLEAR   TSX_CTRL_MSR   Status
+  =======  =========  =============  ========================================
+  0        0          0              Vulnerable (needs microcode)
+  0        1          0              MDS and TAA mitigated via VERW
+  1        1          0              MDS fixed, TAA vulnerable if TSX enabled
+                                     because MD_CLEAR has no meaning and
+                                     VERW is not guaranteed to clear buffers
+  1        X          1              MDS fixed, TAA can be mitigated by
+                                     VERW or TSX_CTRL_MSR
+  =======  =========  =============  ========================================
+
+Mitigation selection guide
+--------------------------
+
+1. Trusted userspace and guests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If all user space applications are from a trusted source and do not execute
+untrusted code which is supplied externally, then the mitigation can be
+disabled. The same applies to virtualized environments with trusted guests.
+
+
+2. Untrusted userspace and guests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If there are untrusted applications or guests on the system, enabling TSX
+might allow a malicious actor to leak data from the host or from other
+processes running on the same physical core.
+
+If the microcode is available and TSX is disabled on the host, attacks
+are prevented in a virtualized environment as well, even if the VMs do not
+explicitly enable the mitigation.
+
+
+.. _taa_default_mitigations:
+
+Default mitigations
+-------------------
+
+The kernel's default action for vulnerable processors is:
+
+- Deploy TSX disable mitigation (tsx_async_abort=full tsx=off).
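
Editor's note: the sysfs file documented above is the quickest way to check the TAA state on a running kernel. Below is a minimal userspace sketch; it is not part of this patch and only assumes a kernel that already contains the attribute added in the drivers/base/cpu.c hunk further down.

    /* Sketch: print the TAA mitigation state exposed via sysfs. */
    #include <stdio.h>

    int main(void)
    {
            const char *path = "/sys/devices/system/cpu/vulnerabilities/tsx_async_abort";
            char line[256];
            FILE *f = fopen(path, "r");

            if (!f) {
                    perror(path);   /* absent on older kernels or non-x86 */
                    return 1;
            }
            if (fgets(line, sizeof(line), f))
                    printf("%s", line);     /* e.g. "Mitigation: TSX disabled" */
            fclose(f);
            return 0;
    }

Reading the file as an unprivileged user is enough; the attribute is world-readable (mode 0444 in the drivers/base/cpu.c hunk).
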
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index a84a83f8881e..fa8f03ddff24 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2636,6 +2636,7 @@
                                               ssbd=force-off [ARM64]
                                               l1tf=off [X86]
                                               mds=off [X86]
+                                              tsx_async_abort=off [X86]
 
                         auto (default)
                                 Mitigate all CPU vulnerabilities, but leave SMT
@@ -2651,6 +2652,7 @@
                                 be fully mitigated, even if it means losing SMT.
                                 Equivalent to: l1tf=flush,nosmt [X86]
                                                mds=full,nosmt [X86]
+                                               tsx_async_abort=full,nosmt [X86]
 
         mminit_loglevel=
                         [KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this
@@ -4848,6 +4850,71 @@
                         interruptions from clocksource watchdog are not
                         acceptable).
 
+        tsx=            [X86] Control Transactional Synchronization
+                        Extensions (TSX) feature in Intel processors that
+                        support TSX control.
+
+                        This parameter controls the TSX feature. The options are:
+
+                        on      - Enable TSX on the system. Although there are
+                                mitigations for all known security vulnerabilities,
+                                TSX has been known to be an accelerator for
+                                several previous speculation-related CVEs, and
+                                so there may be unknown security risks associated
+                                with leaving it enabled.
+
+                        off     - Disable TSX on the system. (Note that this
+                                option takes effect only on newer CPUs which are
+                                not vulnerable to MDS, i.e., have
+                                MSR_IA32_ARCH_CAPABILITIES.MDS_NO=1 and which get
+                                the new IA32_TSX_CTRL MSR through a microcode
+                                update. This new MSR allows for the reliable
+                                deactivation of the TSX functionality.)
+
+                        auto    - Disable TSX if X86_BUG_TAA is present,
+                                  otherwise enable TSX on the system.
+
+                        Not specifying this option is equivalent to tsx=off.
+
+                        See Documentation/admin-guide/hw-vuln/tsx_async_abort.rst
+                        for more details.
+
+        tsx_async_abort= [X86,INTEL] Control mitigation for the TSX Async
+                        Abort (TAA) vulnerability.
+
+                        Similar to Micro-architectural Data Sampling (MDS)
+                        certain CPUs that support Transactional
+                        Synchronization Extensions (TSX) are vulnerable to an
+                        exploit against CPU internal buffers which can forward
+                        information to a disclosure gadget under certain
+                        conditions.
+
+                        In vulnerable processors, the speculatively forwarded
+                        data can be used in a cache side channel attack, to
+                        access data to which the attacker does not have direct
+                        access.
+
+                        This parameter controls the TAA mitigation. The
+                        options are:
+
+                        full       - Enable TAA mitigation on vulnerable CPUs
+                                     if TSX is enabled.
+
+                        full,nosmt - Enable TAA mitigation and disable SMT on
+                                     vulnerable CPUs. If TSX is disabled, SMT
+                                     is not disabled because the CPU is not
+                                     vulnerable to cross-thread TAA attacks.
+                        off        - Unconditionally disable TAA mitigation
+
+                        Not specifying this option is equivalent to
+                        tsx_async_abort=full. On CPUs which are MDS affected
+                        and deploy MDS mitigation, TAA mitigation is not
+                        required and doesn't provide any additional
+                        mitigation.
+
+                        For details see:
+                        Documentation/admin-guide/hw-vuln/tsx_async_abort.rst
+
         turbografx.map[2|3]=    [HW,JOY]
                         TurboGraFX parallel port interface
                         Format:
diff --git a/Documentation/x86/index.rst b/Documentation/x86/index.rst
index af64c4bb4447..a8de2fbc1caa 100644
--- a/Documentation/x86/index.rst
+++ b/Documentation/x86/index.rst
@@ -27,6 +27,7 @@ x86-specific Documentation
    mds
    microcode
    resctrl_ui
+   tsx_async_abort
    usb-legacy-support
    i386/index
    x86_64/index
diff --git a/Documentation/x86/tsx_async_abort.rst b/Documentation/x86/tsx_async_abort.rst
new file mode 100644
index 000000000000..583ddc185ba2
--- /dev/null
+++ b/Documentation/x86/tsx_async_abort.rst
@@ -0,0 +1,117 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+TSX Async Abort (TAA) mitigation
+================================
+
+.. _tsx_async_abort:
+
+Overview
+--------
+
+TSX Async Abort (TAA) is a side channel attack on internal buffers in some
+Intel processors similar to Microarchitectural Data Sampling (MDS). In this
+case certain loads may speculatively pass invalid data to dependent operations
+when an asynchronous abort condition is pending in a Transactional
+Synchronization Extensions (TSX) transaction. This includes loads with no
+fault or assist condition. Such loads may speculatively expose stale data from
+the same uarch data structures as in MDS, with the same scope of exposure,
+i.e. same-thread and cross-thread. This issue affects all current processors
+that support TSX.
+
+Mitigation strategy
+-------------------
+
+a) TSX disable - one of the mitigations is to disable TSX. A new MSR
+   IA32_TSX_CTRL will be available in future and current processors after
+   a microcode update which can be used to disable TSX. In addition, it
+   controls the enumeration of the TSX feature bits (RTM and HLE) in CPUID.
+
+b) Clear CPU buffers - similar to MDS, clearing the CPU buffers mitigates this
+   vulnerability. More details on this approach can be found in
+   :ref:`Documentation/admin-guide/hw-vuln/mds.rst <mds>`.
+
+Kernel internal mitigation modes
+--------------------------------
+
+  ============= ============================================================
+  off           Mitigation is disabled. Either the CPU is not affected or
+                tsx_async_abort=off is supplied on the kernel command line.
+
+  tsx disabled  Mitigation is enabled. TSX feature is disabled by default at
+                bootup on processors that support TSX control.
+
+  verw          Mitigation is enabled. CPU is affected and MD_CLEAR is
+                advertised in CPUID.
+
+  ucode needed  Mitigation is enabled. CPU is affected and MD_CLEAR is not
+                advertised in CPUID. That is mainly for virtualization
+                scenarios where the host has the updated microcode but the
+                hypervisor does not expose MD_CLEAR in CPUID. It's a best
+                effort approach without guarantee.
+  ============= ============================================================
+
+If the CPU is affected and the "tsx_async_abort" kernel command line parameter
+is not provided, then the kernel selects an appropriate mitigation depending on
+the status of the RTM and MD_CLEAR CPUID bits.
+
+The tables below indicate the impact of the tsx=on|off|auto cmdline options on
+the state of TAA mitigation, VERW behavior and the TSX feature for various
+combinations of MSR_IA32_ARCH_CAPABILITIES bits.
+
+1. "tsx=off"
+
+=========  =========  ============  ============  ==============  ===================  ======================
+MSR_IA32_ARCH_CAPABILITIES bits                   Result with cmdline tsx=off
+----------------------------------  -------------------------------------------------------------------------
+TAA_NO     MDS_NO     TSX_CTRL_MSR  TSX state     VERW can clear  TAA mitigation       TAA mitigation
+                                    after bootup  CPU buffers     tsx_async_abort=off  tsx_async_abort=full
+=========  =========  ============  ============  ==============  ===================  ======================
+0          0          0             HW default    Yes             Same as MDS          Same as MDS
+0          0          1             Invalid case  Invalid case    Invalid case         Invalid case
+0          1          0             HW default    No              Need ucode update    Need ucode update
+0          1          1             Disabled      Yes             TSX disabled         TSX disabled
+1          X          1             Disabled      X               None needed          None needed
+=========  =========  ============  ============  ==============  ===================  ======================
+
+2. "tsx=on"
+
+=========  =========  ============  ============  ==============  ===================  ======================
+MSR_IA32_ARCH_CAPABILITIES bits                   Result with cmdline tsx=on
+----------------------------------  -------------------------------------------------------------------------
+TAA_NO     MDS_NO     TSX_CTRL_MSR  TSX state     VERW can clear  TAA mitigation       TAA mitigation
+                                    after bootup  CPU buffers     tsx_async_abort=off  tsx_async_abort=full
+=========  =========  ============  ============  ==============  ===================  ======================
+0          0          0             HW default    Yes             Same as MDS          Same as MDS
+0          0          1             Invalid case  Invalid case    Invalid case         Invalid case
+0          1          0             HW default    No              Need ucode update    Need ucode update
+0          1          1             Enabled       Yes             None                 Same as MDS
+1          X          1             Enabled       X               None needed          None needed
+=========  =========  ============  ============  ==============  ===================  ======================
+
+3. "tsx=auto"
+
+=========  =========  ============  ============  ==============  ===================  ======================
+MSR_IA32_ARCH_CAPABILITIES bits                   Result with cmdline tsx=auto
+----------------------------------  -------------------------------------------------------------------------
+TAA_NO     MDS_NO     TSX_CTRL_MSR  TSX state     VERW can clear  TAA mitigation       TAA mitigation
+                                    after bootup  CPU buffers     tsx_async_abort=off  tsx_async_abort=full
+=========  =========  ============  ============  ==============  ===================  ======================
+0          0          0             HW default    Yes             Same as MDS          Same as MDS
+0          0          1             Invalid case  Invalid case    Invalid case         Invalid case
+0          1          0             HW default    No              Need ucode update    Need ucode update
+0          1          1             Disabled      Yes             TSX disabled         TSX disabled
+1          X          1             Enabled       X               None needed          None needed
+=========  =========  ============  ============  ==============  ===================  ======================
+
+In the tables, TSX_CTRL_MSR is a new bit in MSR_IA32_ARCH_CAPABILITIES that
+indicates whether MSR_IA32_TSX_CTRL is supported.
+
+There are two control bits in IA32_TSX_CTRL MSR:
+
+  Bit 0: When set it disables the Restricted Transactional Memory (RTM)
+         sub-feature of TSX (will force all transactions to abort on the
+         XBEGIN instruction).
+
+  Bit 1: When set it disables the enumeration of the RTM and HLE feature
+         (i.e. it will make CPUID(EAX=7).EBX{bit4} and
+         CPUID(EAX=7).EBX{bit11} read as 0).
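
Editor's note: the two IA32_TSX_CTRL bits described above can be inspected directly from userspace. A hedged sketch, not part of the patch: it assumes the msr driver is loaded (/dev/cpu/0/msr), root privileges, and a CPU whose microcode actually provides the MSR; the address 0x122 and the bit meanings come from the msr-index.h hunk later in this series.

    /* Sketch: decode IA32_TSX_CTRL (0x122) on CPU 0 via the msr driver. */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            uint64_t val;
            int fd = open("/dev/cpu/0/msr", O_RDONLY);

            /* pread() at the MSR address; fails if the MSR is not implemented */
            if (fd < 0 || pread(fd, &val, sizeof(val), 0x122) != sizeof(val)) {
                    perror("IA32_TSX_CTRL not readable");
                    return 1;
            }
            printf("RTM_DISABLE=%d CPUID_CLEAR=%d\n",
                   (int)(val & 1), (int)((val >> 1) & 1));
            close(fd);
            return 0;
    }
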
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index d6e1faa28c58..8ef85139553f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1940,6 +1940,51 @@ config X86_INTEL_MEMORY_PROTECTION_KEYS
 
           If unsure, say y.
 
+choice
+        prompt "TSX enable mode"
+        depends on CPU_SUP_INTEL
+        default X86_INTEL_TSX_MODE_OFF
+        help
+          Intel's TSX (Transactional Synchronization Extensions) feature
+          allows optimizing locking protocols through lock elision, which
+          can lead to a noticeable performance boost.
+
+          On the other hand it has been shown that TSX can be exploited
+          to form side channel attacks (e.g. TAA) and chances are there
+          will be more of those attacks discovered in the future.
+
+          Therefore TSX is not enabled by default (aka tsx=off). An admin
+          might override this decision via the tsx=on command line parameter.
+          Even with TSX enabled, the kernel will attempt to enable the best
+          possible TAA mitigation setting depending on the microcode available
+          for the particular machine.
+
+          This option allows setting the default tsx mode between tsx=on, =off
+          and =auto. See Documentation/admin-guide/kernel-parameters.txt for more
+          details.
+
+          Say off if not sure, auto if TSX is in use but should only be enabled
+          on safe platforms, or on if TSX is in use and the security aspect of
+          TSX is not relevant.
+
+config X86_INTEL_TSX_MODE_OFF
+        bool "off"
+        help
+          TSX is disabled if possible - equals the tsx=off command line parameter.
+
+config X86_INTEL_TSX_MODE_ON
+        bool "on"
+        help
+          TSX is always enabled on TSX capable HW - equals the tsx=on command
+          line parameter.
+
+config X86_INTEL_TSX_MODE_AUTO
+        bool "auto"
+        help
+          TSX is enabled on TSX capable HW that is believed to be safe against
+          side channel attacks - equals the tsx=auto command line parameter.
+endchoice
+
 config EFI
         bool "EFI runtime service support"
         depends on ACPI
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 0652d3eed9bd..989e03544f18 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -399,5 +399,6 @@
 #define X86_BUG_MDS                     X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */
 #define X86_BUG_MSBDS_ONLY              X86_BUG(20) /* CPU is only affected by the MSDBS variant of BUG_MDS */
 #define X86_BUG_SWAPGS                  X86_BUG(21) /* CPU is affected by speculation through SWAPGS */
+#define X86_BUG_TAA                     X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 20ce682a2540..b3a8bb2af0b6 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -93,6 +93,11 @@
                                                  * Microarchitectural Data
                                                  * Sampling (MDS) vulnerabilities.
                                                  */
+#define ARCH_CAP_TSX_CTRL_MSR           BIT(7)  /* MSR for TSX control is available. */
+#define ARCH_CAP_TAA_NO                 BIT(8)  /*
+                                                 * Not susceptible to
+                                                 * TSX Async Abort (TAA) vulnerabilities.
+                                                 */
 
 #define MSR_IA32_FLUSH_CMD              0x0000010b
 #define L1D_FLUSH                       BIT(0)  /*
@@ -103,6 +108,10 @@
 #define MSR_IA32_BBL_CR_CTL             0x00000119
 #define MSR_IA32_BBL_CR_CTL3            0x0000011e
 
+#define MSR_IA32_TSX_CTRL               0x00000122
+#define TSX_CTRL_RTM_DISABLE            BIT(0)  /* Disable RTM feature */
+#define TSX_CTRL_CPUID_CLEAR            BIT(1)  /* Disable TSX enumeration */
+
 #define MSR_IA32_SYSENTER_CS            0x00000174
 #define MSR_IA32_SYSENTER_ESP           0x00000175
 #define MSR_IA32_SYSENTER_EIP           0x00000176
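
Editor's note: a companion sketch to the bit definitions just added, again not part of the patch. It dumps the three IA32_ARCH_CAPABILITIES bits this series cares about, under the same assumptions as the previous example (msr driver loaded, root). The MSR address 0x10a is the architectural IA32_ARCH_CAPABILITIES; MDS_NO is bit 5 as referenced in the documentation, while TSX_CTRL is bit 7 and TAA_NO is bit 8 per the hunk above.

    /* Sketch: decode the ARCH_CAPABILITIES bits relevant to TAA. */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            uint64_t cap = 0;
            int fd = open("/dev/cpu/0/msr", O_RDONLY);

            if (fd < 0 || pread(fd, &cap, sizeof(cap), 0x10a) != sizeof(cap)) {
                    perror("IA32_ARCH_CAPABILITIES not readable");
                    return 1;
            }
            printf("MDS_NO=%d TSX_CTRL=%d TAA_NO=%d\n",
                   (int)((cap >> 5) & 1), (int)((cap >> 7) & 1),
                   (int)((cap >> 8) & 1));
            close(fd);
            return 0;
    }
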
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 80bc209c0708..5c24a7b35166 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -314,7 +314,7 @@ DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
 #include <asm/segment.h>
 
 /**
- * mds_clear_cpu_buffers - Mitigation for MDS vulnerability
+ * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
  *
  * This uses the otherwise unused and obsolete VERW instruction in
  * combination with microcode which triggers a CPU buffer flush when the
@@ -337,7 +337,7 @@ static inline void mds_clear_cpu_buffers(void)
 }
 
 /**
- * mds_user_clear_cpu_buffers - Mitigation for MDS vulnerability
+ * mds_user_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
  *
  * Clear CPU buffers if the corresponding static key is enabled
  */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 6e0a3b43d027..54f5d54280f6 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -988,4 +988,11 @@ enum mds_mitigations {
         MDS_MITIGATION_VMWERV,
 };
 
+enum taa_mitigations {
+        TAA_MITIGATION_OFF,
+        TAA_MITIGATION_UCODE_NEEDED,
+        TAA_MITIGATION_VERW,
+        TAA_MITIGATION_TSX_DISABLED,
+};
+
 #endif /* _ASM_X86_PROCESSOR_H */
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index d7a1e5a9331c..890f60083eca 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -30,7 +30,7 @@ obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o
 
 ifdef CONFIG_CPU_SUP_INTEL
-obj-y                   += intel.o intel_pconfig.o
+obj-y                   += intel.o intel_pconfig.o tsx.o
 obj-$(CONFIG_PM)        += intel_epb.o
 endif
 obj-$(CONFIG_CPU_SUP_AMD)       += amd.o
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 91c2561b905f..43c647e19439 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -39,6 +39,7 @@ static void __init spectre_v2_select_mitigation(void)
 static void __init ssb_select_mitigation(void);
 static void __init l1tf_select_mitigation(void);
 static void __init mds_select_mitigation(void);
+static void __init taa_select_mitigation(void);
 
 /* The base value of the SPEC_CTRL MSR that always has to be preserved. */
 u64 x86_spec_ctrl_base;
@@ -105,6 +106,7 @@ void __init check_bugs(void)
         ssb_select_mitigation();
         l1tf_select_mitigation();
         mds_select_mitigation();
+        taa_select_mitigation();
 
         arch_smt_update();
 
@@ -269,6 +271,100 @@ static int __init mds_cmdline(char *str)
 early_param("mds", mds_cmdline);
 
 #undef pr_fmt
+#define pr_fmt(fmt)     "TAA: " fmt
+
+/* Default mitigation for TAA-affected CPUs */
+static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW;
+static bool taa_nosmt __ro_after_init;
+
+static const char * const taa_strings[] = {
+        [TAA_MITIGATION_OFF]            = "Vulnerable",
+        [TAA_MITIGATION_UCODE_NEEDED]   = "Vulnerable: Clear CPU buffers attempted, no microcode",
+        [TAA_MITIGATION_VERW]           = "Mitigation: Clear CPU buffers",
+        [TAA_MITIGATION_TSX_DISABLED]   = "Mitigation: TSX disabled",
+};
+
+static void __init taa_select_mitigation(void)
+{
+        u64 ia32_cap;
+
+        if (!boot_cpu_has_bug(X86_BUG_TAA)) {
+                taa_mitigation = TAA_MITIGATION_OFF;
+                return;
+        }
+
+        /* TSX previously disabled by tsx=off */
+        if (!boot_cpu_has(X86_FEATURE_RTM)) {
+                taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
+                goto out;
+        }
+
+        if (cpu_mitigations_off()) {
+                taa_mitigation = TAA_MITIGATION_OFF;
+                return;
+        }
+
+        /* TAA mitigation is turned off on the cmdline (tsx_async_abort=off) */
+        if (taa_mitigation == TAA_MITIGATION_OFF)
+                goto out;
+
+        if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
+                taa_mitigation = TAA_MITIGATION_VERW;
+        else
+                taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
+
+        /*
+         * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
+         * A microcode update fixes this behavior to clear CPU buffers. It also
+         * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
+         * ARCH_CAP_TSX_CTRL_MSR bit.
+         *
+         * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
+         * update is required.
+         */
+        ia32_cap = x86_read_arch_cap_msr();
+        if ((ia32_cap & ARCH_CAP_MDS_NO) &&
+            !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
+                taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
+
+        /*
+         * TSX is enabled, select alternate mitigation for TAA which is
+         * the same as MDS. Enable MDS static branch to clear CPU buffers.
+         *
+         * For guests that can't determine whether the correct microcode is
+         * present on host, enable the mitigation for UCODE_NEEDED as well.
+         */
+        static_branch_enable(&mds_user_clear);
+
+        if (taa_nosmt || cpu_mitigations_auto_nosmt())
+                cpu_smt_disable(false);
+
+out:
+        pr_info("%s\n", taa_strings[taa_mitigation]);
+}
+
+static int __init tsx_async_abort_parse_cmdline(char *str)
+{
+        if (!boot_cpu_has_bug(X86_BUG_TAA))
+                return 0;
+
+        if (!str)
+                return -EINVAL;
+
+        if (!strcmp(str, "off")) {
+                taa_mitigation = TAA_MITIGATION_OFF;
+        } else if (!strcmp(str, "full")) {
+                taa_mitigation = TAA_MITIGATION_VERW;
+        } else if (!strcmp(str, "full,nosmt")) {
+                taa_mitigation = TAA_MITIGATION_VERW;
+                taa_nosmt = true;
+        }
+
+        return 0;
+}
+early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
+
+#undef pr_fmt
 #define pr_fmt(fmt)     "Spectre V1 : " fmt
 
 enum spectre_v1_mitigation {
@@ -786,6 +882,7 @@ static void update_mds_branch_idle(void)
 }
 
 #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
+#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
 
 void cpu_bugs_smt_update(void)
 {
@@ -819,6 +916,17 @@ void cpu_bugs_smt_update(void)
                 break;
         }
 
+        switch (taa_mitigation) {
+        case TAA_MITIGATION_VERW:
+        case TAA_MITIGATION_UCODE_NEEDED:
+                if (sched_smt_active())
+                        pr_warn_once(TAA_MSG_SMT);
+                break;
+        case TAA_MITIGATION_TSX_DISABLED:
+        case TAA_MITIGATION_OFF:
+                break;
+        }
+
         mutex_unlock(&spec_ctrl_mutex);
 }
 
@@ -1328,6 +1436,21 @@ static ssize_t mds_show_state(char *buf)
                        sched_smt_active() ? "vulnerable" : "disabled");
 }
 
+static ssize_t tsx_async_abort_show_state(char *buf)
+{
+        if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
+            (taa_mitigation == TAA_MITIGATION_OFF))
+                return sprintf(buf, "%s\n", taa_strings[taa_mitigation]);
+
+        if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
+                return sprintf(buf, "%s; SMT Host state unknown\n",
+                               taa_strings[taa_mitigation]);
+        }
+
+        return sprintf(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
+                       sched_smt_active() ? "vulnerable" : "disabled");
+}
+
 static char *stibp_state(void)
 {
         if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
@@ -1398,6 +1521,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
         case X86_BUG_MDS:
                 return mds_show_state(buf);
 
+        case X86_BUG_TAA:
+                return tsx_async_abort_show_state(buf);
+
         default:
                 break;
         }
@@ -1434,4 +1560,9 @@ ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *bu
 {
         return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
 }
+
+ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
+{
+        return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
+}
 #endif
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 9ae7d1bcd4f4..f8b8afc8f5b5 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1092,19 +1092,26 @@ static bool __init cpu_matches(unsigned long which)
         return m && !!(m->driver_data & which);
 }
 
-static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+u64 x86_read_arch_cap_msr(void)
 {
         u64 ia32_cap = 0;
 
+        if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
+                rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+
+        return ia32_cap;
+}
+
+static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+{
+        u64 ia32_cap = x86_read_arch_cap_msr();
+
         if (cpu_matches(NO_SPECULATION))
                 return;
 
         setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
         setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
 
-        if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
-                rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
-
         if (!cpu_matches(NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) &&
             !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
                 setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
@@ -1121,6 +1128,21 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
         if (!cpu_matches(NO_SWAPGS))
                 setup_force_cpu_bug(X86_BUG_SWAPGS);
 
+        /*
+         * When the CPU is not mitigated for TAA (TAA_NO=0) set TAA bug when:
+         *      - TSX is supported or
+         *      - TSX_CTRL is present
+         *
+         * TSX_CTRL check is needed for cases when TSX could be disabled before
+         * the kernel boot e.g. kexec.
+         * TSX_CTRL check alone is not sufficient for cases when the microcode
+         * update is not present or when running as a guest that doesn't get TSX_CTRL.
+         */
+        if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
+            (cpu_has(c, X86_FEATURE_RTM) ||
+             (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
+                setup_force_cpu_bug(X86_BUG_TAA);
+
         if (cpu_matches(NO_MELTDOWN))
                 return;
 
@@ -1554,6 +1576,8 @@ void __init identify_boot_cpu(void)
 #endif
         cpu_detect_tlb(&boot_cpu_data);
         setup_cr_pinning();
+
+        tsx_init();
 }
 
 void identify_secondary_cpu(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index c0e2407abdd6..38ab6e115eac 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -44,6 +44,22 @@ struct _tlb_table {
 extern const struct cpu_dev *const __x86_cpu_dev_start[],
                             *const __x86_cpu_dev_end[];
 
+#ifdef CONFIG_CPU_SUP_INTEL
+enum tsx_ctrl_states {
+        TSX_CTRL_ENABLE,
+        TSX_CTRL_DISABLE,
+        TSX_CTRL_NOT_SUPPORTED,
+};
+
+extern __ro_after_init enum tsx_ctrl_states tsx_ctrl_state;
+
+extern void __init tsx_init(void);
+extern void tsx_enable(void);
+extern void tsx_disable(void);
+#else
+static inline void tsx_init(void) { }
+#endif /* CONFIG_CPU_SUP_INTEL */
+
 extern void get_cpu_cap(struct cpuinfo_x86 *c);
 extern void get_cpu_address_sizes(struct cpuinfo_x86 *c);
 extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
@@ -62,4 +78,6 @@ unsigned int aperfmperf_get_khz(int cpu);
 
 extern void x86_spec_ctrl_setup_ap(void);
 
+extern u64 x86_read_arch_cap_msr(void);
+
 #endif /* ARCH_X86_CPU_H */
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index c2fdc00df163..11d5c5950e2d 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -762,6 +762,11 @@ static void init_intel(struct cpuinfo_x86 *c)
         detect_tme(c);
 
         init_intel_misc_features(c);
+
+        if (tsx_ctrl_state == TSX_CTRL_ENABLE)
+                tsx_enable();
+        if (tsx_ctrl_state == TSX_CTRL_DISABLE)
+                tsx_disable();
 }
 
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/tsx.c b/arch/x86/kernel/cpu/tsx.c
new file mode 100644
index 000000000000..3e20d322bc98
--- /dev/null
+++ b/arch/x86/kernel/cpu/tsx.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Transactional Synchronization Extensions (TSX) control.
+ *
+ * Copyright (C) 2019 Intel Corporation
+ *
+ * Author:
+ *      Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+ */
+
+#include <linux/cpufeature.h>
+
+#include <asm/cmdline.h>
+
+#include "cpu.h"
+
+enum tsx_ctrl_states tsx_ctrl_state __ro_after_init = TSX_CTRL_NOT_SUPPORTED;
+
+void tsx_disable(void)
+{
+        u64 tsx;
+
+        rdmsrl(MSR_IA32_TSX_CTRL, tsx);
+
+        /* Force all transactions to immediately abort */
+        tsx |= TSX_CTRL_RTM_DISABLE;
+
+        /*
+         * Ensure TSX support is not enumerated in CPUID.
+         * This is visible to userspace and will ensure they
+         * do not waste resources trying TSX transactions that
+         * will always abort.
+         */
+        tsx |= TSX_CTRL_CPUID_CLEAR;
+
+        wrmsrl(MSR_IA32_TSX_CTRL, tsx);
+}
+
+void tsx_enable(void)
+{
+        u64 tsx;
+
+        rdmsrl(MSR_IA32_TSX_CTRL, tsx);
+
+        /* Enable the RTM feature in the cpu */
+        tsx &= ~TSX_CTRL_RTM_DISABLE;
+
+        /*
+         * Ensure TSX support is enumerated in CPUID.
+         * This is visible to userspace and will ensure they
+         * can enumerate and use the TSX feature.
+         */
+        tsx &= ~TSX_CTRL_CPUID_CLEAR;
+
+        wrmsrl(MSR_IA32_TSX_CTRL, tsx);
+}
+
+static bool __init tsx_ctrl_is_supported(void)
+{
+        u64 ia32_cap = x86_read_arch_cap_msr();
+
+        /*
+         * TSX is controlled via MSR_IA32_TSX_CTRL. However, support for this
+         * MSR is enumerated by the ARCH_CAP_TSX_CTRL_MSR bit in MSR_IA32_ARCH_CAPABILITIES.
+         *
+         * TSX control (aka MSR_IA32_TSX_CTRL) is only available after a
+         * microcode update on CPUs that have their MSR_IA32_ARCH_CAPABILITIES
+         * bit MDS_NO=1. CPUs with MDS_NO=0 are not planned to get
+         * MSR_IA32_TSX_CTRL support even after a microcode update. Thus,
+         * tsx= cmdline requests will do nothing on CPUs without
+         * MSR_IA32_TSX_CTRL support.
+         */
+        return !!(ia32_cap & ARCH_CAP_TSX_CTRL_MSR);
+}
+
+static enum tsx_ctrl_states x86_get_tsx_auto_mode(void)
+{
+        if (boot_cpu_has_bug(X86_BUG_TAA))
+                return TSX_CTRL_DISABLE;
+
+        return TSX_CTRL_ENABLE;
+}
+
+void __init tsx_init(void)
+{
+        char arg[5] = {};
+        int ret;
+
+        if (!tsx_ctrl_is_supported())
+                return;
+
+        ret = cmdline_find_option(boot_command_line, "tsx", arg, sizeof(arg));
+        if (ret >= 0) {
+                if (!strcmp(arg, "on")) {
+                        tsx_ctrl_state = TSX_CTRL_ENABLE;
+                } else if (!strcmp(arg, "off")) {
+                        tsx_ctrl_state = TSX_CTRL_DISABLE;
+                } else if (!strcmp(arg, "auto")) {
+                        tsx_ctrl_state = x86_get_tsx_auto_mode();
+                } else {
+                        tsx_ctrl_state = TSX_CTRL_DISABLE;
+                        pr_err("tsx: invalid option, defaulting to off\n");
+                }
+        } else {
+                /* tsx= not provided */
+                if (IS_ENABLED(CONFIG_X86_INTEL_TSX_MODE_AUTO))
+                        tsx_ctrl_state = x86_get_tsx_auto_mode();
+                else if (IS_ENABLED(CONFIG_X86_INTEL_TSX_MODE_OFF))
+                        tsx_ctrl_state = TSX_CTRL_DISABLE;
+                else
+                        tsx_ctrl_state = TSX_CTRL_ENABLE;
+        }
+
+        if (tsx_ctrl_state == TSX_CTRL_DISABLE) {
+                tsx_disable();
+
+                /*
+                 * tsx_disable() will change the state of the
+                 * RTM CPUID bit. Clear it here since it is now
+                 * expected to be not set.
+                 */
+                setup_clear_cpu_cap(X86_FEATURE_RTM);
+        } else if (tsx_ctrl_state == TSX_CTRL_ENABLE) {
+
+                /*
+                 * HW defaults TSX to be enabled at bootup.
+                 * We may still need the TSX enable support
+                 * during init for special cases like
+                 * kexec after TSX is disabled.
+                 */
+                tsx_enable();
+
+                /*
+                 * tsx_enable() will change the state of the
+                 * RTM CPUID bit. Force it here since it is now
+                 * expected to be set.
+                 */
+                setup_force_cpu_cap(X86_FEATURE_RTM);
+        }
+}
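
Editor's note: the user-visible effect of tsx.c is that, with tsx=off and TSX_CTRL_CPUID_CLEAR set, CPUID.(EAX=7,ECX=0):EBX bits 4 (HLE) and 11 (RTM) read as 0, as spelled out in Documentation/x86/tsx_async_abort.rst above. A small sketch to observe this from userspace, not part of the patch; it assumes a GCC/clang toolchain providing <cpuid.h>.

    /* Sketch: check whether HLE/RTM are still enumerated after boot. */
    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx)) {
                    puts("CPUID leaf 7 not supported");
                    return 1;
            }
            printf("HLE=%u RTM=%u\n", (ebx >> 4) & 1, (ebx >> 11) & 1);
            return 0;
    }
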
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ff395f812719..32d70ca2a7fd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1298,6 +1298,25 @@ static u64 kvm_get_arch_capabilities(void)
         if (!boot_cpu_has_bug(X86_BUG_MDS))
                 data |= ARCH_CAP_MDS_NO;
 
+        /*
+         * On TAA affected systems, export MDS_NO=0 when:
+         *      - TSX is enabled on the host, i.e. X86_FEATURE_RTM=1.
+         *      - Updated microcode is present. This is detected by
+         *        the presence of ARCH_CAP_TSX_CTRL_MSR and ensures
+         *        that VERW clears CPU buffers.
+         *
+         * When MDS_NO=0 is exported, guests deploy clear CPU buffer
+         * mitigation and don't complain:
+         *
+         *      "Vulnerable: Clear CPU buffers attempted, no microcode"
+         *
+         * If TSX is disabled on the system, guests are also mitigated against
+         * TAA and clear CPU buffer mitigation is not required for guests.
+         */
+        if (boot_cpu_has_bug(X86_BUG_TAA) && boot_cpu_has(X86_FEATURE_RTM) &&
+            (data & ARCH_CAP_TSX_CTRL_MSR))
+                data &= ~ARCH_CAP_MDS_NO;
+
         return data;
 }
 
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index cc37511de866..0fccd8c0312e 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -554,12 +554,20 @@ ssize_t __weak cpu_show_mds(struct device *dev,
         return sprintf(buf, "Not affected\n");
 }
 
+ssize_t __weak cpu_show_tsx_async_abort(struct device *dev,
+                                        struct device_attribute *attr,
+                                        char *buf)
+{
+        return sprintf(buf, "Not affected\n");
+}
+
 static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
 static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
 static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
 static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
 static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
 static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL);
+static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL);
 
 static struct attribute *cpu_root_vulnerabilities_attrs[] = {
         &dev_attr_meltdown.attr,
@@ -568,6 +576,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
         &dev_attr_spec_store_bypass.attr,
         &dev_attr_l1tf.attr,
         &dev_attr_mds.attr,
+        &dev_attr_tsx_async_abort.attr,
         NULL
 };
 
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index d0633ebdaa9c..f35369f79771 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -59,6 +59,9 @@ extern ssize_t cpu_show_l1tf(struct device *dev,
                              struct device_attribute *attr, char *buf);
 extern ssize_t cpu_show_mds(struct device *dev,
                             struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_tsx_async_abort(struct device *dev,
+                                        struct device_attribute *attr,
+                                        char *buf);
 
 extern __printf(4, 5)
 struct device *cpu_device_create(struct device *parent, void *drvdata,