author     Borislav Petkov <bp@suse.de>       2015-11-19 06:25:25 -0500
committer  Ingo Molnar <mingo@kernel.org>     2015-11-24 03:52:52 -0500
commit     b74a0cf1b3db30173eefa00c411775d2b1697700
tree       014b61ff7293e0ebb883d23d37e2d5947e354cff
parent     1ec218373b8ebda821aec00bb156a9c94fad9cd4
x86/fpu: Add an XSTATE_OP() macro
Add an XSTATE_OP() macro which contains the XSAVE* fault handling
and replace all non-alternatives users of xstate_fault() with it.

This also fixes the buglet in copy_xregs_to_user() and
copy_user_to_xregs() where the inline asm didn't have @xstate as a
memory reference, thus potentially causing unwanted reordering of
accesses to the extended state.
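
To see the buglet, note that a "D" (pointer-in-%rdi) constraint only
tells the compiler that the pointer *value* is consumed; nothing says
the asm touches the buffer behind it. A minimal sketch of the fixed
constraint pattern (ours, not code from this patch; xsave_sketch() is
a hypothetical name, and the insn needs an XSAVE-capable CPU):

	static inline void xsave_sketch(struct xregs_state *st)
	{
		/*
		 * "+m" (*st) makes the pointed-to buffer itself an
		 * input/output of the asm, so the compiler cannot
		 * reorder or elide accesses to *st around the XSAVE.
		 */
		asm volatile("xsave %[xa]"
			     : [xa] "+m" (*st)
			     : "a" (-1), "d" (-1));
	}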
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1447932326-4371-2-git-send-email-bp@alien8.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  arch/x86/include/asm/fpu/internal.h | 68
1 file changed, 31 insertions(+), 37 deletions(-)
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 3c3550c3a4a3..709a3df8ab37 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -237,6 +237,20 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
 		     _ASM_EXTABLE(1b, 3b)				\
 		     : [_err] "=r" (__err)
 
+#define XSTATE_OP(op, st, lmask, hmask, err)				\
+	asm volatile("1:" op "\n\t"					\
+		     "xor %[err], %[err]\n"				\
+		     "2:\n\t"						\
+		     ".pushsection .fixup,\"ax\"\n\t"			\
+		     "3: movl $-2,%[err]\n\t"				\
+		     "jmp 2b\n\t"					\
+		     ".popsection\n\t"					\
+		     _ASM_EXTABLE(1b, 3b)				\
+		     : [err] "=r" (err)					\
+		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
+		     : "memory")
+
+
 /*
  * This function is called only during boot time when x86 caps are not set
  * up and alternative can not be used yet.
@@ -246,22 +260,14 @@ static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
 	u64 mask = -1;
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
-	int err = 0;
+	int err;
 
 	WARN_ON(system_state != SYSTEM_BOOTING);
 
-	if (boot_cpu_has(X86_FEATURE_XSAVES))
-		asm volatile("1:"XSAVES"\n\t"
-			     "2:\n\t"
-			     xstate_fault(err)
-			     : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
-			     : "memory");
+	if (static_cpu_has_safe(X86_FEATURE_XSAVES))
+		XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
 	else
-		asm volatile("1:"XSAVE"\n\t"
-			     "2:\n\t"
-			     xstate_fault(err)
-			     : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
-			     : "memory");
+		XSTATE_OP(XSAVE, xstate, lmask, hmask, err);
 
 	/* We should never fault when copying to a kernel buffer: */
 	WARN_ON_FPU(err);
@@ -276,22 +282,14 @@ static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
 	u64 mask = -1;
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
-	int err = 0;
+	int err;
 
 	WARN_ON(system_state != SYSTEM_BOOTING);
 
-	if (boot_cpu_has(X86_FEATURE_XSAVES))
-		asm volatile("1:"XRSTORS"\n\t"
-			     "2:\n\t"
-			     xstate_fault(err)
-			     : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
-			     : "memory");
+	if (static_cpu_has_safe(X86_FEATURE_XSAVES))
+		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
 	else
-		asm volatile("1:"XRSTOR"\n\t"
-			     "2:\n\t"
-			     xstate_fault(err)
-			     : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
-			     : "memory");
+		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
 
 	/* We should never fault when copying from a kernel buffer: */
 	WARN_ON_FPU(err);
@@ -388,12 +386,10 @@ static inline int copy_xregs_to_user(struct xregs_state __user *buf)
 	if (unlikely(err))
 		return -EFAULT;
 
-	__asm__ __volatile__(ASM_STAC "\n"
-			     "1:"XSAVE"\n"
-			     "2: " ASM_CLAC "\n"
-			     xstate_fault(err)
-			     : "D" (buf), "a" (-1), "d" (-1), "0" (err)
-			     : "memory");
+	stac();
+	XSTATE_OP(XSAVE, buf, -1, -1, err);
+	clac();
+
 	return err;
 }
 
@@ -405,14 +401,12 @@ static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
 	struct xregs_state *xstate = ((__force struct xregs_state *)buf);
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
-	int err = 0;
+	int err;
+
+	stac();
+	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
+	clac();
 
-	__asm__ __volatile__(ASM_STAC "\n"
-			     "1:"XRSTOR"\n"
-			     "2: " ASM_CLAC "\n"
-			     xstate_fault(err)
-			     : "D" (xstate), "a" (lmask), "d" (hmask), "0" (err)
-			     : "memory");	/* memory required? */
 	return err;
 }
 
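
For reference, hand-expanding XSTATE_OP(XSAVE, buf, -1, -1, err) from
copy_xregs_to_user() above (our annotation, not part of the patch)
makes the fault-handling flow the macro centralizes explicit:

	asm volatile("1:" XSAVE "\n\t"		/* the xsave instruction    */
		     "xor %[err], %[err]\n"	/* no fault: err = 0        */
		     "2:\n\t"			/* resume point after fixup */
		     ".pushsection .fixup,\"ax\"\n\t"
		     "3: movl $-2,%[err]\n\t"	/* faulted: err = -2        */
		     "jmp 2b\n\t"
		     ".popsection\n\t"
		     _ASM_EXTABLE(1b, 3b)	/* fault at 1: lands at 3:  */
		     : [err] "=r" (err)
		     : "D" (buf), "m" (*buf), "a" (-1), "d" (-1)
		     : "memory");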