 arch/x86/include/asm/i387.h | 75 +++++++++++++++++++++++++++++++++++---------
 arch/x86/kernel/traps.c     |  2 +-
 arch/x86/kernel/xsave.c     |  2 +-
 arch/x86/kvm/vmx.c          |  2 +-
 4 files changed, 58 insertions(+), 23 deletions(-)
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index 1e12c2d087e4..548b2c07ac9a 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -280,6 +280,47 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
 }
 
 /*
+ * Software FPU state helpers. Careful: these need
+ * preemption protection *and* they need to be
+ * properly paired with the CR0.TS changes!
+ */
+static inline int __thread_has_fpu(struct thread_info *ti)
+{
+	return ti->status & TS_USEDFPU;
+}
+
+/* Must be paired with an 'stts' after! */
+static inline void __thread_clear_has_fpu(struct thread_info *ti)
+{
+	ti->status &= ~TS_USEDFPU;
+}
+
+/* Must be paired with a 'clts' before! */
+static inline void __thread_set_has_fpu(struct thread_info *ti)
+{
+	ti->status |= TS_USEDFPU;
+}
+
+/*
+ * Encapsulate the CR0.TS handling together with the
+ * software flag.
+ *
+ * These generally need preemption protection to work,
+ * so try to avoid using them on their own.
+ */
+static inline void __thread_fpu_end(struct thread_info *ti)
+{
+	__thread_clear_has_fpu(ti);
+	stts();
+}
+
+static inline void __thread_fpu_begin(struct thread_info *ti)
+{
+	clts();
+	__thread_set_has_fpu(ti);
+}
+
+/*
  * Signal frame handlers...
  */
 extern int save_i387_xstate(void __user *buf);
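The new begin/end helpers only make sense with preemption disabled and strictly paired. A minimal usage sketch (hypothetical caller, not part of the patch, using only the helpers added above):

	static void example_fpu_region(struct thread_info *ti)
	{
		preempt_disable();		/* flag and CR0.TS must change together */
		__thread_fpu_begin(ti);		/* clts, then set TS_USEDFPU */
		/* ... touch FPU/SSE state belonging to ti->task ... */
		__thread_fpu_end(ti);		/* clear TS_USEDFPU, then stts */
		preempt_enable();
	}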
@@ -287,23 +328,21 @@ extern int restore_i387_xstate(void __user *buf);
 
 static inline void __unlazy_fpu(struct task_struct *tsk)
 {
-	if (task_thread_info(tsk)->status & TS_USEDFPU) {
+	if (__thread_has_fpu(task_thread_info(tsk))) {
 		__save_init_fpu(tsk);
-		task_thread_info(tsk)->status &= ~TS_USEDFPU;
-		stts();
+		__thread_fpu_end(task_thread_info(tsk));
 	} else
 		tsk->fpu_counter = 0;
 }
 
 static inline void __clear_fpu(struct task_struct *tsk)
 {
-	if (task_thread_info(tsk)->status & TS_USEDFPU) {
+	if (__thread_has_fpu(task_thread_info(tsk))) {
 		/* Ignore delayed exceptions from user space */
 		asm volatile("1: fwait\n"
 			     "2:\n"
 			     _ASM_EXTABLE(1b, 2b));
-		task_thread_info(tsk)->status &= ~TS_USEDFPU;
-		stts();
+		__thread_fpu_end(task_thread_info(tsk));
 	}
 }
 
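Behaviour is unchanged in these two functions: __thread_fpu_end() is exactly the two steps the old code did inline, now kept adjacent so the software flag and CR0.TS cannot drift apart:

	/* __thread_fpu_end(ti) expands to: */
	ti->status &= ~TS_USEDFPU;	/* __thread_clear_has_fpu() */
	stts();				/* re-arm the device-not-available trap */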
@@ -311,14 +350,14 @@ static inline void __clear_fpu(struct task_struct *tsk)
  * Were we in an interrupt that interrupted kernel mode?
  *
  * We can do a kernel_fpu_begin/end() pair *ONLY* if that
- * pair does nothing at all: TS_USEDFPU must be clear (so
+ * pair does nothing at all: the thread must not have the FPU (so
  * that we don't try to save the FPU state), and TS must
  * be set (so that the clts/stts pair does nothing that is
  * visible in the interrupted kernel thread).
  */
 static inline bool interrupted_kernel_fpu_idle(void)
 {
-	return !(current_thread_info()->status & TS_USEDFPU) &&
+	return !__thread_has_fpu(current_thread_info()) &&
 		(read_cr0() & X86_CR0_TS);
 }
 
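This predicate is what makes opportunistic FPU use from interrupts safe. A sketch of the intended caller pattern (hypothetical handler; irq_fpu_usable() and kernel_fpu_begin()/kernel_fpu_end() are the existing APIs in this header):

	static irqreturn_t example_irq(int irq, void *dev_id)
	{
		if (irq_fpu_usable()) {	/* begin/end would be a no-op for the interruptee */
			kernel_fpu_begin();
			/* SSE/AVX-accelerated work, e.g. checksumming */
			kernel_fpu_end();
		}
		return IRQ_HANDLED;
	}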
@@ -356,9 +395,9 @@ static inline void kernel_fpu_begin(void)
 
 	WARN_ON_ONCE(!irq_fpu_usable());
 	preempt_disable();
-	if (me->status & TS_USEDFPU) {
+	if (__thread_has_fpu(me)) {
 		__save_init_fpu(me->task);
-		me->status &= ~TS_USEDFPU;
+		__thread_clear_has_fpu(me);
 		/* We do 'stts()' in kernel_fpu_end() */
 	} else
 		clts();
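The stts() deferred by the comment happens in kernel_fpu_end(), which in this header is roughly:

	static inline void kernel_fpu_end(void)
	{
		stts();
		preempt_enable();
	}

so both branches of kernel_fpu_begin() leave CR0.TS clear for the kernel FPU user, and the end() re-sets it.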
@@ -422,24 +461,21 @@ static inline void irq_ts_restore(int TS_state)
  */
 static inline int user_has_fpu(void)
 {
-	return current_thread_info()->status & TS_USEDFPU;
+	return __thread_has_fpu(current_thread_info());
 }
 
 static inline void user_fpu_end(void)
 {
 	preempt_disable();
-	current_thread_info()->status &= ~TS_USEDFPU;
-	stts();
+	__thread_fpu_end(current_thread_info());
 	preempt_enable();
 }
 
 static inline void user_fpu_begin(void)
 {
 	preempt_disable();
-	if (!user_has_fpu()) {
-		clts();
-		current_thread_info()->status |= TS_USEDFPU;
-	}
+	if (!user_has_fpu())
+		__thread_fpu_begin(current_thread_info());
 	preempt_enable();
 }
 
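A sketch of the kind of caller these preempt-safe wrappers serve (hypothetical, not from the tree; restore_fpu_checking() is the existing helper visible in the first hunk's context):

	/* Hypothetical: make current the FPU owner and load its saved image. */
	user_fpu_begin();			/* clts + set TS_USEDFPU */
	if (restore_fpu_checking(current))	/* bad image: give the FPU back */
		user_fpu_end();			/* clear TS_USEDFPU + stts */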
@@ -448,11 +484,10 @@ static inline void user_fpu_begin(void)
  */
 static inline void save_init_fpu(struct task_struct *tsk)
 {
-	WARN_ON_ONCE(!(task_thread_info(tsk)->status & TS_USEDFPU));
+	WARN_ON_ONCE(!__thread_has_fpu(task_thread_info(tsk)));
 	preempt_disable();
 	__save_init_fpu(tsk);
-	task_thread_info(tsk)->status &= ~TS_USEDFPU;
-	stts();
+	__thread_fpu_end(task_thread_info(tsk));
 	preempt_enable();
 }
 
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 982433b5da30..fc676e44c77f 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -588,7 +588,7 @@ void __math_state_restore(void)
 		return;
 	}
 
-	thread->status |= TS_USEDFPU;	/* So we fnsave on switch_to() */
+	__thread_set_has_fpu(thread);	/* clts in caller! */
 	tsk->fpu_counter++;
 }
 
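The 'clts in caller!' comment points at math_state_restore(), which performs the matching clts() immediately before calling in; the pairing looks roughly like:

	void math_state_restore(void)
	{
		/* ... allocate and init the FPU state if the task never used math ... */
		clts();		/* the clts that __thread_set_has_fpu() pairs with */
		__math_state_restore();
	}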
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 86f1f09a738a..a0bcd0dbc951 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -47,7 +47,7 @@ void __sanitize_i387_state(struct task_struct *tsk)
 	if (!fx)
 		return;
 
-	BUG_ON(task_thread_info(tsk)->status & TS_USEDFPU);
+	BUG_ON(__thread_has_fpu(task_thread_info(tsk)));
 
 	xstate_bv = tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv;
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d29216c462b3..36091dd04b4b 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1457,7 +1457,7 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
 #ifdef CONFIG_X86_64
 	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
 #endif
-	if (current_thread_info()->status & TS_USEDFPU)
+	if (__thread_has_fpu(current_thread_info()))
 		clts();
 	load_gdt(&__get_cpu_var(host_gdt));
 }
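The KVM hunk preserves the same pairing from the other direction: after a VM exit, CR0.TS comes from the VMCS host-state area, so when the current thread still owns the FPU the software flag promises TS == 0 and it must be cleared again. A comment-annotated restatement (the rationale is inferred from the pairing rules above, not taken from vmx.c):

	if (__thread_has_fpu(current_thread_info()))
		clts();		/* thread owns the FPU => CR0.TS must be 0 */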