 arch/x86/include/asm/fpu-internal.h | 130
 arch/x86/kernel/i387.c              |  54
 arch/x86/kernel/process.c           |  19
 arch/x86/kernel/signal.c            |   2
 arch/x86/kernel/traps.c             |   4
 arch/x86/kernel/xsave.c             |  39
 6 files changed, 117 insertions(+), 131 deletions(-)
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index 72ba21a8b5fc..da5e96756570 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -67,6 +67,34 @@ extern void finit_soft_fpu(struct i387_soft_struct *soft);
 static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
 #endif
 
+/*
+ * Must be run with preemption disabled: this clears the fpu_owner_task,
+ * on this CPU.
+ *
+ * This will disable any lazy FPU state restore of the current FPU state,
+ * but if the current thread owns the FPU, it will still be saved by.
+ */
+static inline void __cpu_disable_lazy_restore(unsigned int cpu)
+{
+        per_cpu(fpu_owner_task, cpu) = NULL;
+}
+
+/*
+ * Used to indicate that the FPU state in memory is newer than the FPU
+ * state in registers, and the FPU state should be reloaded next time the
+ * task is run. Only safe on the current task, or non-running tasks.
+ */
+static inline void task_disable_lazy_fpu_restore(struct task_struct *tsk)
+{
+        tsk->thread.fpu.last_cpu = ~0;
+}
+
+static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
+{
+        return new == this_cpu_read_stable(fpu_owner_task) &&
+               cpu == new->thread.fpu.last_cpu;
+}
+
 static inline int is_ia32_compat_frame(void)
 {
         return config_enabled(CONFIG_IA32_EMULATION) &&
@@ -107,7 +135,6 @@ static __always_inline __pure bool use_fxsr(void)
 
 static inline void fx_finit(struct i387_fxsave_struct *fx)
 {
-        memset(fx, 0, xstate_size);
         fx->cwd = 0x37f;
         fx->mxcsr = MXCSR_DEFAULT;
 }
@@ -351,8 +378,14 @@ static inline void __thread_fpu_begin(struct task_struct *tsk)
         __thread_set_has_fpu(tsk);
 }
 
-static inline void __drop_fpu(struct task_struct *tsk)
+static inline void drop_fpu(struct task_struct *tsk)
 {
+        /*
+         * Forget coprocessor state..
+         */
+        preempt_disable();
+        tsk->thread.fpu_counter = 0;
+
         if (__thread_has_fpu(tsk)) {
                 /* Ignore delayed exceptions from user space */
                 asm volatile("1: fwait\n"
@@ -360,30 +393,29 @@ static inline void __drop_fpu(struct task_struct *tsk)
                              _ASM_EXTABLE(1b, 2b));
                 __thread_fpu_end(tsk);
         }
-}
 
-static inline void drop_fpu(struct task_struct *tsk)
-{
-        /*
-         * Forget coprocessor state..
-         */
-        preempt_disable();
-        tsk->thread.fpu_counter = 0;
-        __drop_fpu(tsk);
         clear_stopped_child_used_math(tsk);
         preempt_enable();
 }
 
-static inline void drop_init_fpu(struct task_struct *tsk)
+static inline void restore_init_xstate(void)
+{
+        if (use_xsave())
+                xrstor_state(init_xstate_buf, -1);
+        else
+                fxrstor_checking(&init_xstate_buf->i387);
+}
+
+/*
+ * Reset the FPU state in the eager case and drop it in the lazy case (later use
+ * will reinit it).
+ */
+static inline void fpu_reset_state(struct task_struct *tsk)
 {
         if (!use_eager_fpu())
                 drop_fpu(tsk);
-        else {
-                if (use_xsave())
-                        xrstor_state(init_xstate_buf, -1);
-                else
-                        fxrstor_checking(&init_xstate_buf->i387);
-        }
+        else
+                restore_init_xstate();
 }
 
 /*
@@ -400,24 +432,6 @@ static inline void drop_init_fpu(struct task_struct *tsk)
  */
 typedef struct { int preload; } fpu_switch_t;
 
-/*
- * Must be run with preemption disabled: this clears the fpu_owner_task,
- * on this CPU.
- *
- * This will disable any lazy FPU state restore of the current FPU state,
- * but if the current thread owns the FPU, it will still be saved by.
- */
-static inline void __cpu_disable_lazy_restore(unsigned int cpu)
-{
-        per_cpu(fpu_owner_task, cpu) = NULL;
-}
-
-static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
-{
-        return new == this_cpu_read_stable(fpu_owner_task) &&
-               cpu == new->thread.fpu.last_cpu;
-}
-
 static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
 {
         fpu_switch_t fpu;
@@ -426,13 +440,17 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
          * If the task has used the math, pre-load the FPU on xsave processors
          * or if the past 5 consecutive context-switches used math.
          */
-        fpu.preload = tsk_used_math(new) && (use_eager_fpu() ||
-                                             new->thread.fpu_counter > 5);
+        fpu.preload = tsk_used_math(new) &&
+                      (use_eager_fpu() || new->thread.fpu_counter > 5);
+
         if (__thread_has_fpu(old)) {
                 if (!__save_init_fpu(old))
-                        cpu = ~0;
-                old->thread.fpu.last_cpu = cpu;
-                old->thread.fpu.has_fpu = 0;    /* But leave fpu_owner_task! */
+                        task_disable_lazy_fpu_restore(old);
+                else
+                        old->thread.fpu.last_cpu = cpu;
+
+                /* But leave fpu_owner_task! */
+                old->thread.fpu.has_fpu = 0;
 
                 /* Don't change CR0.TS if we just switch! */
                 if (fpu.preload) {
@@ -443,10 +461,10 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
                         stts();
         } else {
                 old->thread.fpu_counter = 0;
-                old->thread.fpu.last_cpu = ~0;
+                task_disable_lazy_fpu_restore(old);
                 if (fpu.preload) {
                         new->thread.fpu_counter++;
-                        if (!use_eager_fpu() && fpu_lazy_restore(new, cpu))
+                        if (fpu_lazy_restore(new, cpu))
                                 fpu.preload = 0;
                         else
                                 prefetch(new->thread.fpu.state);
@@ -466,7 +484,7 @@ static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
 {
         if (fpu.preload) {
                 if (unlikely(restore_fpu_checking(new)))
-                        drop_init_fpu(new);
+                        fpu_reset_state(new);
         }
 }
 
@@ -495,10 +513,12 @@ static inline int restore_xstate_sig(void __user *buf, int ia32_frame)
 }
 
 /*
- * Need to be preemption-safe.
+ * Needs to be preemption-safe.
  *
  * NOTE! user_fpu_begin() must be used only immediately before restoring
- * it. This function does not do any save/restore on their own.
+ * the save state. It does not do any saving/restoring on its own. In
+ * lazy FPU mode, it is just an optimization to avoid a #NM exception,
+ * the task can lose the FPU right after preempt_enable().
  */
 static inline void user_fpu_begin(void)
 {
@@ -520,24 +540,6 @@ static inline void __save_fpu(struct task_struct *tsk)
 }
 
 /*
- * These disable preemption on their own and are safe
- */
-static inline void save_init_fpu(struct task_struct *tsk)
-{
-        WARN_ON_ONCE(!__thread_has_fpu(tsk));
-
-        if (use_eager_fpu()) {
-                __save_fpu(tsk);
-                return;
-        }
-
-        preempt_disable();
-        __save_init_fpu(tsk);
-        __thread_fpu_end(tsk);
-        preempt_enable();
-}
-
-/*
  * i387 state interaction
  */
 static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
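
For reference, a standalone sketch of the lazy-restore bookkeeping that the new helpers above maintain: a task's FPU registers may be reused without a reload only if the CPU's owner pointer still names the task and the task last ran on that CPU. The struct, the per-CPU array and the -1 sentinel here are simplified stand-ins for illustration, not the kernel's actual data structures.

        /* build: cc -std=c99 lazy_restore_model.c */
        #include <stdio.h>

        struct task { int last_cpu; };            /* mirrors thread.fpu.last_cpu */
        static struct task *fpu_owner[4];         /* mirrors per_cpu(fpu_owner_task) */

        static int fpu_lazy_restore_ok(struct task *t, int cpu)
        {
                return fpu_owner[cpu] == t && t->last_cpu == cpu;
        }

        static void task_disable_lazy_fpu_restore(struct task *t)
        {
                t->last_cpu = -1;                 /* ~0 in the kernel: matches no CPU */
        }

        int main(void)
        {
                struct task a = { .last_cpu = 1 };

                fpu_owner[1] = &a;
                printf("%d\n", fpu_lazy_restore_ok(&a, 1));  /* 1: registers reusable */

                task_disable_lazy_fpu_restore(&a);
                printf("%d\n", fpu_lazy_restore_ok(&a, 1));  /* 0: must reload from memory */
                return 0;
        }
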
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 29c740deafec..367f39d35e9c 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -42,8 +42,8 @@ void kernel_fpu_enable(void)
  * be set (so that the clts/stts pair does nothing that is
  * visible in the interrupted kernel thread).
  *
- * Except for the eagerfpu case when we return 1 unless we've already
- * been eager and saved the state in kernel_fpu_begin().
+ * Except for the eagerfpu case when we return true; in the likely case
+ * the thread has FPU but we are not going to set/clear TS.
  */
 static inline bool interrupted_kernel_fpu_idle(void)
 {
@@ -51,7 +51,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
                 return false;
 
         if (use_eager_fpu())
-                return __thread_has_fpu(current);
+                return true;
 
         return !__thread_has_fpu(current) &&
                 (read_cr0() & X86_CR0_TS);
@@ -94,9 +94,10 @@ void __kernel_fpu_begin(void)
 
         if (__thread_has_fpu(me)) {
                 __save_init_fpu(me);
-        } else if (!use_eager_fpu()) {
+        } else {
                 this_cpu_write(fpu_owner_task, NULL);
-                clts();
+                if (!use_eager_fpu())
+                        clts();
         }
 }
 EXPORT_SYMBOL(__kernel_fpu_begin);
@@ -107,7 +108,7 @@ void __kernel_fpu_end(void)
 
         if (__thread_has_fpu(me)) {
                 if (WARN_ON(restore_fpu_checking(me)))
-                        drop_init_fpu(me);
+                        fpu_reset_state(me);
         } else if (!use_eager_fpu()) {
                 stts();
         }
@@ -120,10 +121,13 @@ void unlazy_fpu(struct task_struct *tsk)
 {
         preempt_disable();
         if (__thread_has_fpu(tsk)) {
-                __save_init_fpu(tsk);
-                __thread_fpu_end(tsk);
-        } else
-                tsk->thread.fpu_counter = 0;
+                if (use_eager_fpu()) {
+                        __save_fpu(tsk);
+                } else {
+                        __save_init_fpu(tsk);
+                        __thread_fpu_end(tsk);
+                }
+        }
         preempt_enable();
 }
 EXPORT_SYMBOL(unlazy_fpu);
@@ -221,11 +225,12 @@ void fpu_finit(struct fpu *fpu)
                 return;
         }
 
+        memset(fpu->state, 0, xstate_size);
+
         if (cpu_has_fxsr) {
                 fx_finit(&fpu->state->fxsave);
         } else {
                 struct i387_fsave_struct *fp = &fpu->state->fsave;
-                memset(fp, 0, xstate_size);
                 fp->cwd = 0xffff037fu;
                 fp->swd = 0xffff0000u;
                 fp->twd = 0xffffffffu;
@@ -247,7 +252,7 @@ int init_fpu(struct task_struct *tsk)
         if (tsk_used_math(tsk)) {
                 if (cpu_has_fpu && tsk == current)
                         unlazy_fpu(tsk);
-                tsk->thread.fpu.last_cpu = ~0;
+                task_disable_lazy_fpu_restore(tsk);
                 return 0;
         }
 
@@ -336,6 +341,7 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
                 unsigned int pos, unsigned int count,
                 void *kbuf, void __user *ubuf)
 {
+        struct xsave_struct *xsave = &target->thread.fpu.state->xsave;
         int ret;
 
         if (!cpu_has_xsave)
@@ -350,14 +356,12 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
          * memory layout in the thread struct, so that we can copy the entire
          * xstateregs to the user using one user_regset_copyout().
          */
-        memcpy(&target->thread.fpu.state->fxsave.sw_reserved,
+        memcpy(&xsave->i387.sw_reserved,
                xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));
-
         /*
          * Copy the xstate memory layout.
          */
-        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-                                  &target->thread.fpu.state->xsave, 0, -1);
+        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
         return ret;
 }
 
@@ -365,8 +369,8 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
 {
+        struct xsave_struct *xsave = &target->thread.fpu.state->xsave;
         int ret;
-        struct xsave_hdr_struct *xsave_hdr;
 
         if (!cpu_has_xsave)
                 return -ENODEV;
@@ -375,22 +379,16 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
         if (ret)
                 return ret;
 
-        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-                                 &target->thread.fpu.state->xsave, 0, -1);
-
+        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
         /*
          * mxcsr reserved bits must be masked to zero for security reasons.
          */
-        target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;
-
-        xsave_hdr = &target->thread.fpu.state->xsave.xsave_hdr;
-
-        xsave_hdr->xstate_bv &= pcntxt_mask;
+        xsave->i387.mxcsr &= mxcsr_feature_mask;
+        xsave->xsave_hdr.xstate_bv &= pcntxt_mask;
         /*
          * These bits must be zero.
          */
-        memset(xsave_hdr->reserved, 0, 48);
-
+        memset(&xsave->xsave_hdr.reserved, 0, 48);
         return ret;
 }
 
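
As a side note on the fpu_finit() reshuffle above, a minimal standalone model of the pattern it moves to: zero the whole state union once, then let the format-specific init only fill in header fields. The struct layouts and sizes below are simplified and illustrative, not the kernel's.

        /* build: cc -std=c99 fpu_finit_model.c */
        #include <stdint.h>
        #include <string.h>

        struct fxsave { uint16_t cwd; uint32_t mxcsr; uint8_t rest[500]; };
        struct fsave  { uint32_t cwd, swd, twd;       uint8_t rest[100]; };
        union  xstate { struct fxsave fxsave; struct fsave fsave; uint8_t bytes[832]; };

        #define MXCSR_DEFAULT 0x1f80

        static void fx_finit(struct fxsave *fx)      /* no memset here any more */
        {
                fx->cwd   = 0x37f;
                fx->mxcsr = MXCSR_DEFAULT;
        }

        static void fpu_finit(union xstate *st, int has_fxsr)
        {
                memset(st, 0, sizeof(*st));          /* one zeroing for either format */
                if (has_fxsr) {
                        fx_finit(&st->fxsave);
                } else {
                        st->fsave.cwd = 0xffff037fu;
                        st->fsave.swd = 0xffff0000u;
                        st->fsave.twd = 0xffffffffu;
                }
        }

        int main(void)
        {
                union xstate st;
                fpu_finit(&st, 1);
                return st.fxsave.cwd == 0x37f ? 0 : 1;
        }
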
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 0c8992dbead5..8213da62b1b7 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -89,8 +89,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 
         dst->thread.fpu_counter = 0;
         dst->thread.fpu.has_fpu = 0;
-        dst->thread.fpu.last_cpu = ~0;
         dst->thread.fpu.state = NULL;
+        task_disable_lazy_fpu_restore(dst);
         if (tsk_used_math(src)) {
                 int err = fpu_alloc(&dst->thread.fpu);
                 if (err)
@@ -151,13 +151,18 @@ void flush_thread(void)
 
         flush_ptrace_hw_breakpoint(tsk);
         memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
-        drop_init_fpu(tsk);
-        /*
-         * Free the FPU state for non xsave platforms. They get reallocated
-         * lazily at the first use.
-         */
-        if (!use_eager_fpu())
+
+        if (!use_eager_fpu()) {
+                /* FPU state will be reallocated lazily at the first use. */
+                drop_fpu(tsk);
                 free_thread_xstate(tsk);
+        } else if (!used_math()) {
+                /* kthread execs. TODO: cleanup this horror. */
+                if (WARN_ON(init_fpu(tsk)))
+                        force_sig(SIGKILL, tsk);
+                user_fpu_begin();
+                restore_init_xstate();
+        }
 }
 
 static void hard_disable_TSC(void)
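
A rough model of the flush_thread() branch above may help: on exec, lazy mode forgets and frees the FPU buffer (it is reallocated on first use), while eager mode keeps the buffer and reloads the pristine init state so the new program never sees stale registers. The eager_mode flag and the thread struct here are illustrative stand-ins, not kernel API.

        /* build: cc -std=c99 flush_thread_model.c */
        #include <stdio.h>
        #include <stdlib.h>

        struct thread { void *fpu_state; };

        static int eager_mode;                   /* stands in for use_eager_fpu() */

        static void flush_fpu_on_exec(struct thread *t)
        {
                if (!eager_mode) {
                        /* lazy: drop and free; first FPU use reallocates */
                        free(t->fpu_state);
                        t->fpu_state = NULL;
                } else {
                        /* eager: keep the buffer, load the clean init state */
                        printf("restore_init_xstate()\n");
                }
        }

        int main(void)
        {
                struct thread t = { .fpu_state = malloc(64) };

                eager_mode = 0;
                flush_fpu_on_exec(&t);           /* lazy path: frees the buffer */

                eager_mode = 1;
                t.fpu_state = malloc(64);
                flush_fpu_on_exec(&t);           /* eager path: prints the reload */
                free(t.fpu_state);
                return 0;
        }
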
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 53cc4085c3d7..3e581865c8e2 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -680,7 +680,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
                  * Ensure the signal handler starts with the new fpu state.
                  */
                 if (used_math())
-                        drop_init_fpu(current);
+                        fpu_reset_state(current);
         }
         signal_setup_done(failed, ksig, test_thread_flag(TIF_SINGLESTEP));
 }
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 6751c5c58eec..f4fa991406cd 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -731,7 +731,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
         /*
          * Save the info for the exception handler and clear the error.
          */
-        save_init_fpu(task);
+        unlazy_fpu(task);
         task->thread.trap_nr = trapnr;
         task->thread.error_code = error_code;
         info.si_signo = SIGFPE;
@@ -860,7 +860,7 @@ void math_state_restore(void)
         kernel_fpu_disable();
         __thread_fpu_begin(tsk);
         if (unlikely(restore_fpu_checking(tsk))) {
-                drop_init_fpu(tsk);
+                fpu_reset_state(tsk);
                 force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
         } else {
                 tsk->thread.fpu_counter++;
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index cdc6cf903078..87a815b85f3e 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -342,7 +342,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
                          config_enabled(CONFIG_IA32_EMULATION));
 
         if (!buf) {
-                drop_init_fpu(tsk);
+                fpu_reset_state(tsk);
                 return 0;
         }
 
@@ -416,7 +416,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
                  */
                 user_fpu_begin();
                 if (restore_user_xstate(buf_fx, xstate_bv, fx_only)) {
-                        drop_init_fpu(tsk);
+                        fpu_reset_state(tsk);
                         return -1;
                 }
         }
@@ -678,19 +678,13 @@ void xsave_init(void)
         this_func();
 }
 
-static inline void __init eager_fpu_init_bp(void)
-{
-        current->thread.fpu.state =
-                alloc_bootmem_align(xstate_size, __alignof__(struct xsave_struct));
-        if (!init_xstate_buf)
-                setup_init_fpu_buf();
-}
-
-void eager_fpu_init(void)
+/*
+ * setup_init_fpu_buf() is __init and it is OK to call it here because
+ * init_xstate_buf will be unset only once during boot.
+ */
+void __init_refok eager_fpu_init(void)
 {
-        static __refdata void (*boot_func)(void) = eager_fpu_init_bp;
-
-        clear_used_math();
+        WARN_ON(used_math());
         current_thread_info()->status = 0;
 
         if (eagerfpu == ENABLE)
@@ -701,21 +695,8 @@ void eager_fpu_init(void)
                 return;
         }
 
-        if (boot_func) {
-                boot_func();
-                boot_func = NULL;
-        }
-
-        /*
-         * This is same as math_state_restore(). But use_xsave() is
-         * not yet patched to use math_state_restore().
-         */
-        init_fpu(current);
-        __thread_fpu_begin(current);
-        if (cpu_has_xsave)
-                xrstor_state(init_xstate_buf, -1);
-        else
-                fxrstor_checking(&init_xstate_buf->i387);
+        if (!init_xstate_buf)
+                setup_init_fpu_buf();
 }
 
 /*
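
Finally, a sketch of the one-time init pattern the simplified eager_fpu_init() above relies on: the boot-only setup routine is reached solely while the buffer is still unset, so no separate boot trampoline is needed. The names and the calloc-based allocation below are illustrative, not the kernel's.

        /* build: cc -std=c99 eager_init_model.c */
        #include <stdio.h>
        #include <stdlib.h>

        static void *init_xstate_buf;            /* set exactly once, at boot */

        static void setup_init_buf(void)         /* think: __init setup_init_fpu_buf() */
        {
                init_xstate_buf = calloc(1, 512);
        }

        static void eager_init(void)             /* think: __init_refok eager_fpu_init() */
        {
                if (!init_xstate_buf)            /* true only on the first (boot) call */
                        setup_init_buf();
                printf("init buf at %p\n", init_xstate_buf);
        }

        int main(void)
        {
                eager_init();                    /* allocates the buffer */
                eager_init();                    /* reuses it */
                free(init_xstate_buf);
                return 0;
        }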