diff options
author | Borislav Petkov <bp@suse.de> | 2015-11-19 06:25:26 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2015-11-24 03:52:52 -0500 |
commit | b7106fa0f29f9fd83d2d1905ab690d334ef855c1 (patch) | |
tree | c1b01b00f03ffe25a08842f10665145be499f66c /arch/x86/include | |
parent | b74a0cf1b3db30173eefa00c411775d2b1697700 (diff) |
x86/fpu: Get rid of xstate_fault()
Add macros for the alternative XSAVE*/XRSTOR* operations which
contain the fault handling and use them. Kill xstate_fault().
Also, copy_xregs_to_kernel() didn't have the extended state as a
memory reference in the asm.
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1447932326-4371-3-git-send-email-bp@alien8.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/include')
-rw-r--r-- | arch/x86/include/asm/fpu/internal.h | 105 |
1 file changed, 52 insertions, 53 deletions
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h index 709a3df8ab37..eadcdd5bb946 100644 --- a/arch/x86/include/asm/fpu/internal.h +++ b/arch/x86/include/asm/fpu/internal.h | |||
@@ -224,19 +224,6 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu) | |||
224 | #define XRSTOR ".byte " REX_PREFIX "0x0f,0xae,0x2f" | 224 | #define XRSTOR ".byte " REX_PREFIX "0x0f,0xae,0x2f" |
225 | #define XRSTORS ".byte " REX_PREFIX "0x0f,0xc7,0x1f" | 225 | #define XRSTORS ".byte " REX_PREFIX "0x0f,0xc7,0x1f" |
226 | 226 | ||
227 | /* xstate instruction fault handler: */ | ||
228 | #define xstate_fault(__err) \ | ||
229 | \ | ||
230 | ".section .fixup,\"ax\"\n" \ | ||
231 | \ | ||
232 | "3: movl $-2,%[_err]\n" \ | ||
233 | " jmp 2b\n" \ | ||
234 | \ | ||
235 | ".previous\n" \ | ||
236 | \ | ||
237 | _ASM_EXTABLE(1b, 3b) \ | ||
238 | : [_err] "=r" (__err) | ||
239 | |||
240 | #define XSTATE_OP(op, st, lmask, hmask, err) \ | 227 | #define XSTATE_OP(op, st, lmask, hmask, err) \ |
241 | asm volatile("1:" op "\n\t" \ | 228 | asm volatile("1:" op "\n\t" \ |
242 | "xor %[err], %[err]\n" \ | 229 | "xor %[err], %[err]\n" \ |
@@ -250,6 +237,54 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu) | |||
250 | : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \ | 237 | : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \ |
251 | : "memory") | 238 | : "memory") |
252 | 239 | ||
240 | /* | ||
241 | * If XSAVES is enabled, it replaces XSAVEOPT because it supports a compact | ||
242 | * format and supervisor states in addition to modified optimization in | ||
243 | * XSAVEOPT. | ||
244 | * | ||
245 | * Otherwise, if XSAVEOPT is enabled, XSAVEOPT replaces XSAVE because XSAVEOPT | ||
246 | * supports modified optimization which is not supported by XSAVE. | ||
247 | * | ||
248 | * We use XSAVE as a fallback. | ||
249 | * | ||
250 | * The 661 label is defined in the ALTERNATIVE* macros as the address of the | ||
251 | * original instruction which gets replaced. We need to use it here as the | ||
252 | * address of the instruction where we might get an exception at. | ||
253 | */ | ||
254 | #define XSTATE_XSAVE(st, lmask, hmask, err) \ | ||
255 | asm volatile(ALTERNATIVE_2(XSAVE, \ | ||
256 | XSAVEOPT, X86_FEATURE_XSAVEOPT, \ | ||
257 | XSAVES, X86_FEATURE_XSAVES) \ | ||
258 | "\n" \ | ||
259 | "xor %[err], %[err]\n" \ | ||
260 | "3:\n" \ | ||
261 | ".pushsection .fixup,\"ax\"\n" \ | ||
262 | "4: movl $-2, %[err]\n" \ | ||
263 | "jmp 3b\n" \ | ||
264 | ".popsection\n" \ | ||
265 | _ASM_EXTABLE(661b, 4b) \ | ||
266 | : [err] "=r" (err) \ | ||
267 | : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \ | ||
268 | : "memory") | ||
269 | |||
270 | /* | ||
271 | * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact | ||
272 | * XSAVE area format. | ||
273 | */ | ||
274 | #define XSTATE_XRESTORE(st, lmask, hmask, err) \ | ||
275 | asm volatile(ALTERNATIVE(XRSTOR, \ | ||
276 | XRSTORS, X86_FEATURE_XSAVES) \ | ||
277 | "\n" \ | ||
278 | "xor %[err], %[err]\n" \ | ||
279 | "3:\n" \ | ||
280 | ".pushsection .fixup,\"ax\"\n" \ | ||
281 | "4: movl $-2, %[err]\n" \ | ||
282 | "jmp 3b\n" \ | ||
283 | ".popsection\n" \ | ||
284 | _ASM_EXTABLE(661b, 4b) \ | ||
285 | : [err] "=r" (err) \ | ||
286 | : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \ | ||
287 | : "memory") | ||
253 | 288 | ||
254 | /* | 289 | /* |
255 | * This function is called only during boot time when x86 caps are not set | 290 | * This function is called only during boot time when x86 caps are not set |
@@ -303,33 +338,11 @@ static inline void copy_xregs_to_kernel(struct xregs_state *xstate) | |||
303 | u64 mask = -1; | 338 | u64 mask = -1; |
304 | u32 lmask = mask; | 339 | u32 lmask = mask; |
305 | u32 hmask = mask >> 32; | 340 | u32 hmask = mask >> 32; |
306 | int err = 0; | 341 | int err; |
307 | 342 | ||
308 | WARN_ON(!alternatives_patched); | 343 | WARN_ON(!alternatives_patched); |
309 | 344 | ||
310 | /* | 345 | XSTATE_XSAVE(xstate, lmask, hmask, err); |
311 | * If xsaves is enabled, xsaves replaces xsaveopt because | ||
312 | * it supports compact format and supervisor states in addition to | ||
313 | * modified optimization in xsaveopt. | ||
314 | * | ||
315 | * Otherwise, if xsaveopt is enabled, xsaveopt replaces xsave | ||
316 | * because xsaveopt supports modified optimization which is not | ||
317 | * supported by xsave. | ||
318 | * | ||
319 | * If none of xsaves and xsaveopt is enabled, use xsave. | ||
320 | */ | ||
321 | alternative_input_2( | ||
322 | "1:"XSAVE, | ||
323 | XSAVEOPT, | ||
324 | X86_FEATURE_XSAVEOPT, | ||
325 | XSAVES, | ||
326 | X86_FEATURE_XSAVES, | ||
327 | [xstate] "D" (xstate), "a" (lmask), "d" (hmask) : | ||
328 | "memory"); | ||
329 | asm volatile("2:\n\t" | ||
330 | xstate_fault(err) | ||
331 | : "0" (err) | ||
332 | : "memory"); | ||
333 | 346 | ||
334 | /* We should never fault when copying to a kernel buffer: */ | 347 | /* We should never fault when copying to a kernel buffer: */ |
335 | WARN_ON_FPU(err); | 348 | WARN_ON_FPU(err); |
@@ -342,23 +355,9 @@ static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask) | |||
342 | { | 355 | { |
343 | u32 lmask = mask; | 356 | u32 lmask = mask; |
344 | u32 hmask = mask >> 32; | 357 | u32 hmask = mask >> 32; |
345 | int err = 0; | 358 | int err; |
346 | 359 | ||
347 | /* | 360 | XSTATE_XRESTORE(xstate, lmask, hmask, err); |
348 | * Use xrstors to restore context if it is enabled. xrstors supports | ||
349 | * compacted format of xsave area which is not supported by xrstor. | ||
350 | */ | ||
351 | alternative_input( | ||
352 | "1: " XRSTOR, | ||
353 | XRSTORS, | ||
354 | X86_FEATURE_XSAVES, | ||
355 | "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask) | ||
356 | : "memory"); | ||
357 | |||
358 | asm volatile("2:\n" | ||
359 | xstate_fault(err) | ||
360 | : "0" (err) | ||
361 | : "memory"); | ||
362 | 361 | ||
363 | /* We should never fault when copying from a kernel buffer: */ | 362 | /* We should never fault when copying from a kernel buffer: */ |
364 | WARN_ON_FPU(err); | 363 | WARN_ON_FPU(err); |