Diffstat (limited to 'arch'):

 arch/x86/include/asm/i387.h        | 44 ++++++++++++++++++----------------------
 arch/x86/include/asm/processor.h   |  1 +
 arch/x86/include/asm/thread_info.h |  2 --
 arch/x86/kernel/traps.c            | 11 +++++------
 arch/x86/kernel/xsave.c            |  2 +-
 arch/x86/kvm/vmx.c                 |  2 +-
 6 files changed, 30 insertions(+), 32 deletions(-)
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index 01b115d86770..f5376676f89c 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -264,21 +264,21 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
  * be preemption protection *and* they need to be
  * properly paired with the CR0.TS changes!
  */
-static inline int __thread_has_fpu(struct thread_info *ti)
+static inline int __thread_has_fpu(struct task_struct *tsk)
 {
-	return ti->status & TS_USEDFPU;
+	return tsk->thread.has_fpu;
 }
 
 /* Must be paired with an 'stts' after! */
-static inline void __thread_clear_has_fpu(struct thread_info *ti)
+static inline void __thread_clear_has_fpu(struct task_struct *tsk)
 {
-	ti->status &= ~TS_USEDFPU;
+	tsk->thread.has_fpu = 0;
 }
 
 /* Must be paired with a 'clts' before! */
-static inline void __thread_set_has_fpu(struct thread_info *ti)
+static inline void __thread_set_has_fpu(struct task_struct *tsk)
 {
-	ti->status |= TS_USEDFPU;
+	tsk->thread.has_fpu = 1;
 }
 
 /*
@@ -288,16 +288,16 @@ static inline void __thread_set_has_fpu(struct thread_info *ti)
  * These generally need preemption protection to work,
  * do try to avoid using these on their own.
  */
-static inline void __thread_fpu_end(struct thread_info *ti)
+static inline void __thread_fpu_end(struct task_struct *tsk)
 {
-	__thread_clear_has_fpu(ti);
+	__thread_clear_has_fpu(tsk);
 	stts();
 }
 
-static inline void __thread_fpu_begin(struct thread_info *ti)
+static inline void __thread_fpu_begin(struct task_struct *tsk)
 {
 	clts();
-	__thread_set_has_fpu(ti);
+	__thread_set_has_fpu(tsk);
 }
 
 /*
@@ -308,21 +308,21 @@ extern int restore_i387_xstate(void __user *buf);
 
 static inline void __unlazy_fpu(struct task_struct *tsk)
 {
-	if (__thread_has_fpu(task_thread_info(tsk))) {
+	if (__thread_has_fpu(tsk)) {
 		__save_init_fpu(tsk);
-		__thread_fpu_end(task_thread_info(tsk));
+		__thread_fpu_end(tsk);
 	} else
 		tsk->fpu_counter = 0;
 }
 
 static inline void __clear_fpu(struct task_struct *tsk)
 {
-	if (__thread_has_fpu(task_thread_info(tsk))) {
+	if (__thread_has_fpu(tsk)) {
 		/* Ignore delayed exceptions from user space */
 		asm volatile("1: fwait\n"
 			     "2:\n"
 			     _ASM_EXTABLE(1b, 2b));
-		__thread_fpu_end(task_thread_info(tsk));
+		__thread_fpu_end(tsk);
 	}
 }
 
@@ -337,7 +337,7 @@ static inline void __clear_fpu(struct task_struct *tsk)
  */
 static inline bool interrupted_kernel_fpu_idle(void)
 {
-	return !__thread_has_fpu(current_thread_info()) &&
+	return !__thread_has_fpu(current) &&
 		(read_cr0() & X86_CR0_TS);
 }
 
@@ -371,12 +371,12 @@ static inline bool irq_fpu_usable(void)
 
 static inline void kernel_fpu_begin(void)
 {
-	struct thread_info *me = current_thread_info();
+	struct task_struct *me = current;
 
 	WARN_ON_ONCE(!irq_fpu_usable());
 	preempt_disable();
 	if (__thread_has_fpu(me)) {
-		__save_init_fpu(me->task);
+		__save_init_fpu(me);
 		__thread_clear_has_fpu(me);
 		/* We do 'stts()' in kernel_fpu_end() */
 	} else
@@ -441,13 +441,13 @@ static inline void irq_ts_restore(int TS_state)
  */
 static inline int user_has_fpu(void)
 {
-	return __thread_has_fpu(current_thread_info());
+	return __thread_has_fpu(current);
 }
 
 static inline void user_fpu_end(void)
 {
 	preempt_disable();
-	__thread_fpu_end(current_thread_info());
+	__thread_fpu_end(current);
 	preempt_enable();
 }
 
@@ -455,7 +455,7 @@ static inline void user_fpu_begin(void)
 {
 	preempt_disable();
 	if (!user_has_fpu())
-		__thread_fpu_begin(current_thread_info());
+		__thread_fpu_begin(current);
 	preempt_enable();
 }
 
@@ -464,10 +464,10 @@ static inline void user_fpu_begin(void)
  */
 static inline void save_init_fpu(struct task_struct *tsk)
 {
-	WARN_ON_ONCE(!__thread_has_fpu(task_thread_info(tsk)));
+	WARN_ON_ONCE(!__thread_has_fpu(tsk));
 	preempt_disable();
 	__save_init_fpu(tsk);
-	__thread_fpu_end(task_thread_info(tsk));
+	__thread_fpu_end(tsk);
 	preempt_enable();
 }
 
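A minimal usage sketch (not part of the patch; sketch_unlazy_fpu is a hypothetical name): with this change the lazy-FPU helpers take the task_struct directly, so the preemption-protected save pattern used by __unlazy_fpu() and save_init_fpu() above now reads:

	static inline void sketch_unlazy_fpu(struct task_struct *tsk)
	{
		preempt_disable();
		if (__thread_has_fpu(tsk)) {	/* tests tsk->thread.has_fpu */
			__save_init_fpu(tsk);	/* save state into tsk->thread.fpu */
			__thread_fpu_end(tsk);	/* clear has_fpu, then stts() */
		}
		preempt_enable();
	}
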
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index aa9088c26931..f7c89e231c6c 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -454,6 +454,7 @@ struct thread_struct {
 	unsigned long		trap_no;
 	unsigned long		error_code;
 	/* floating point and extended processor state */
+	unsigned long		has_fpu;
 	struct fpu		fpu;
 #ifdef CONFIG_X86_32
 	/* Virtual 86 mode info */
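For comparison, a sketch of the before/after access pattern (old_has_fpu/new_has_fpu are hypothetical names; the fields are exactly those in the hunks above). The flag moves from a bit in the shared thread_info status word to a plain per-task word, so readers no longer need task_thread_info() or bit masking:

	static inline int old_has_fpu(struct task_struct *tsk)
	{
		return task_thread_info(tsk)->status & TS_USEDFPU;	/* old: status bit */
	}

	static inline int new_has_fpu(struct task_struct *tsk)
	{
		return tsk->thread.has_fpu;	/* new: dedicated word */
	}
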
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index bc817cd8b443..cfd8144d5527 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -247,8 +247,6 @@ static inline struct thread_info *current_thread_info(void)
  * ever touches our thread-synchronous status, so we don't
  * have to worry about atomic accesses.
  */
-#define TS_USEDFPU		0x0001	/* FPU was used by this task
-					   this quantum (SMP) */
 #define TS_COMPAT		0x0002	/* 32bit syscall active (64BIT)*/
 #define TS_POLLING		0x0004	/* idle task polling need_resched,
 					   skip sending interrupt */
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 4d42300dcd2c..ad25e51f40c4 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -582,12 +582,11 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
  */
 void math_state_restore(void)
 {
-	struct thread_info *thread = current_thread_info();
-	struct task_struct *tsk = thread->task;
+	struct task_struct *tsk = current;
 
 	/* We need a safe address that is cheap to find and that is already
-	   in L1. We just brought in "thread->task", so use that */
-#define safe_address (thread->task)
+	   in L1. We're just bringing in "tsk->thread.has_fpu", so use that */
+#define safe_address (tsk->thread.has_fpu)
 
 	if (!tsk_used_math(tsk)) {
 		local_irq_enable();
@@ -604,7 +603,7 @@ void math_state_restore(void)
 		local_irq_disable();
 	}
 
-	__thread_fpu_begin(thread);
+	__thread_fpu_begin(tsk);
 
 	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
 	   is pending. Clear the x87 state here by setting it to fixed
@@ -620,7 +619,7 @@ void math_state_restore(void)
 	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
 	 */
 	if (unlikely(restore_fpu_checking(tsk))) {
-		__thread_fpu_end(thread);
+		__thread_fpu_end(tsk);
 		force_sig(SIGSEGV, tsk);
 		return;
 	}
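For context on the safe_address trick: kernels of this vintage consume safe_address just below the first hunk, in the FXSAVE-leak workaround (unchanged here, so not shown in the diff). A sketch of that consumer, reconstructed from the surrounding code and therefore an assumption rather than part of this patch — a dummy load from an L1-hot address forces FDP/FIP/FOP to defined values on affected AMD CPUs:

	/* Sketch: clear leaked x87 pointer state on X86_FEATURE_FXSAVE_LEAK CPUs. */
	alternative_input(
		ASM_NOP8 ASM_NOP2,
		"emms\n\t"		/* clear x87 stack tags */
		"fildl %P[addr]",	/* dummy load sets FDP/FIP/FOP */
		X86_FEATURE_FXSAVE_LEAK,
		[addr] "m" (safe_address));
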
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index a0bcd0dbc951..711091114119 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -47,7 +47,7 @@ void __sanitize_i387_state(struct task_struct *tsk)
 	if (!fx)
 		return;
 
-	BUG_ON(__thread_has_fpu(task_thread_info(tsk)));
+	BUG_ON(__thread_has_fpu(tsk));
 
 	xstate_bv = tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv;
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 36091dd04b4b..3b4c8d8ad906 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1457,7 +1457,7 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
 #ifdef CONFIG_X86_64
 	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
 #endif
-	if (__thread_has_fpu(current_thread_info()))
+	if (__thread_has_fpu(current))
 		clts();
 	load_gdt(&__get_cpu_var(host_gdt));
 }