Diffstat (limited to 'arch/x86/kernel/i387.c')
-rw-r--r--  arch/x86/kernel/i387.c  144
1 file changed, 80 insertions(+), 64 deletions(-)
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 54c31c285488..a46cb3522c0c 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -40,6 +40,7 @@
 
 static unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
 unsigned int xstate_size;
+EXPORT_SYMBOL_GPL(xstate_size);
 unsigned int sig_xstate_ia32_size = sizeof(struct _fpstate_ia32);
 static struct i387_fxsave_struct fx_scratch __cpuinitdata;
 
@@ -59,18 +60,18 @@ void __cpuinit mxcsr_feature_mask_init(void)
         stts();
 }
 
-void __cpuinit init_thread_xstate(void)
+static void __cpuinit init_thread_xstate(void)
 {
+        /*
+         * Note that xstate_size might be overwriten later during
+         * xsave_init().
+         */
+
         if (!HAVE_HWFP) {
                 xstate_size = sizeof(struct i387_soft_struct);
                 return;
         }
 
-        if (cpu_has_xsave) {
-                xsave_cntxt_init();
-                return;
-        }
-
         if (cpu_has_fxsr)
                 xstate_size = sizeof(struct i387_fxsave_struct);
 #ifdef CONFIG_X86_32
@@ -84,6 +85,7 @@ void __cpuinit init_thread_xstate(void)
  * Called at bootup to set up the initial FPU state that is later cloned
  * into all processes.
  */
+
 void __cpuinit fpu_init(void)
 {
         unsigned long oldcr0 = read_cr0();
@@ -93,74 +95,77 @@ void __cpuinit fpu_init(void)
 
         write_cr0(oldcr0 & ~(X86_CR0_TS|X86_CR0_EM)); /* clear TS and EM */
 
-        /*
-         * Boot processor to setup the FP and extended state context info.
-         */
         if (!smp_processor_id())
                 init_thread_xstate();
-        xsave_init();
 
         mxcsr_feature_mask_init();
         /* clean state in init */
-        if (cpu_has_xsave)
-                current_thread_info()->status = TS_XSAVE;
-        else
-                current_thread_info()->status = 0;
+        current_thread_info()->status = 0;
         clear_used_math();
 }
-#endif /* CONFIG_X86_64 */
 
-/*
- * The _current_ task is using the FPU for the first time
- * so initialize it and set the mxcsr to its default
- * value at reset if we support XMM instructions and then
- * remeber the current task has used the FPU.
- */
-int init_fpu(struct task_struct *tsk)
+#else /* CONFIG_X86_64 */
+
+void __cpuinit fpu_init(void)
 {
-        if (tsk_used_math(tsk)) {
-                if (HAVE_HWFP && tsk == current)
-                        unlazy_fpu(tsk);
-                return 0;
-        }
+        if (!smp_processor_id())
+                init_thread_xstate();
+}
 
-        /*
-         * Memory allocation at the first usage of the FPU and other state.
-         */
-        if (!tsk->thread.xstate) {
-                tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
-                                                      GFP_KERNEL);
-                if (!tsk->thread.xstate)
-                        return -ENOMEM;
-        }
+#endif /* CONFIG_X86_32 */
 
+void fpu_finit(struct fpu *fpu)
+{
 #ifdef CONFIG_X86_32
         if (!HAVE_HWFP) {
-                memset(tsk->thread.xstate, 0, xstate_size);
-                finit_task(tsk);
-                set_stopped_child_used_math(tsk);
-                return 0;
+                finit_soft_fpu(&fpu->state->soft);
+                return;
         }
 #endif
 
         if (cpu_has_fxsr) {
-                struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
+                struct i387_fxsave_struct *fx = &fpu->state->fxsave;
 
                 memset(fx, 0, xstate_size);
                 fx->cwd = 0x37f;
                 if (cpu_has_xmm)
                         fx->mxcsr = MXCSR_DEFAULT;
         } else {
-                struct i387_fsave_struct *fp = &tsk->thread.xstate->fsave;
+                struct i387_fsave_struct *fp = &fpu->state->fsave;
                 memset(fp, 0, xstate_size);
                 fp->cwd = 0xffff037fu;
                 fp->swd = 0xffff0000u;
                 fp->twd = 0xffffffffu;
                 fp->fos = 0xffff0000u;
         }
+}
+EXPORT_SYMBOL_GPL(fpu_finit);
+
+/*
+ * The _current_ task is using the FPU for the first time
+ * so initialize it and set the mxcsr to its default
+ * value at reset if we support XMM instructions and then
+ * remeber the current task has used the FPU.
+ */
+int init_fpu(struct task_struct *tsk)
+{
+        int ret;
+
+        if (tsk_used_math(tsk)) {
+                if (HAVE_HWFP && tsk == current)
+                        unlazy_fpu(tsk);
+                return 0;
+        }
+
         /*
-         * Only the device not available exception or ptrace can call init_fpu.
+         * Memory allocation at the first usage of the FPU and other state.
          */
+        ret = fpu_alloc(&tsk->thread.fpu);
+        if (ret)
+                return ret;
+
+        fpu_finit(&tsk->thread.fpu);
+
         set_stopped_child_used_math(tsk);
         return 0;
 }
@@ -193,8 +198,10 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
         if (ret)
                 return ret;
 
+        sanitize_i387_state(target);
+
         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-                                   &target->thread.xstate->fxsave, 0, -1);
+                                   &target->thread.fpu.state->fxsave, 0, -1);
 }
 
 int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
@@ -210,20 +217,22 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
         if (ret)
                 return ret;
 
+        sanitize_i387_state(target);
+
         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-                                 &target->thread.xstate->fxsave, 0, -1);
+                                 &target->thread.fpu.state->fxsave, 0, -1);
 
         /*
          * mxcsr reserved bits must be masked to zero for security reasons.
          */
-        target->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask;
+        target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;
 
         /*
          * update the header bits in the xsave header, indicating the
          * presence of FP and SSE state.
          */
         if (cpu_has_xsave)
-                target->thread.xstate->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;
+                target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;
 
         return ret;
 }
@@ -246,14 +255,14 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
          * memory layout in the thread struct, so that we can copy the entire
          * xstateregs to the user using one user_regset_copyout().
          */
-        memcpy(&target->thread.xstate->fxsave.sw_reserved,
+        memcpy(&target->thread.fpu.state->fxsave.sw_reserved,
                xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));
 
         /*
          * Copy the xstate memory layout.
          */
         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-                                  &target->thread.xstate->xsave, 0, -1);
+                                  &target->thread.fpu.state->xsave, 0, -1);
         return ret;
 }
 
@@ -272,14 +281,14 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
                 return ret;
 
         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-                                 &target->thread.xstate->xsave, 0, -1);
+                                 &target->thread.fpu.state->xsave, 0, -1);
 
         /*
          * mxcsr reserved bits must be masked to zero for security reasons.
          */
-        target->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask;
+        target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;
 
-        xsave_hdr = &target->thread.xstate->xsave.xsave_hdr;
+        xsave_hdr = &target->thread.fpu.state->xsave.xsave_hdr;
 
         xsave_hdr->xstate_bv &= pcntxt_mask;
         /*
@@ -365,7 +374,7 @@ static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
 static void
 convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
 {
-        struct i387_fxsave_struct *fxsave = &tsk->thread.xstate->fxsave;
+        struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
         struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
         struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
         int i;
@@ -405,7 +414,7 @@ static void convert_to_fxsr(struct task_struct *tsk,
                             const struct user_i387_ia32_struct *env)
 
 {
-        struct i387_fxsave_struct *fxsave = &tsk->thread.xstate->fxsave;
+        struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
         struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
         struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
         int i;
@@ -445,10 +454,12 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
 
         if (!cpu_has_fxsr) {
                 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-                                           &target->thread.xstate->fsave, 0,
+                                           &target->thread.fpu.state->fsave, 0,
                                            -1);
         }
 
+        sanitize_i387_state(target);
+
         if (kbuf && pos == 0 && count == sizeof(env)) {
                 convert_from_fxsr(kbuf, target);
                 return 0;
@@ -470,12 +481,14 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
         if (ret)
                 return ret;
 
+        sanitize_i387_state(target);
+
         if (!HAVE_HWFP)
                 return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);
 
         if (!cpu_has_fxsr) {
                 return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-                                          &target->thread.xstate->fsave, 0, -1);
+                                          &target->thread.fpu.state->fsave, 0, -1);
         }
 
         if (pos > 0 || count < sizeof(env))
@@ -490,7 +503,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
          * presence of FP.
          */
         if (cpu_has_xsave)
-                target->thread.xstate->xsave.xsave_hdr.xstate_bv |= XSTATE_FP;
+                target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FP;
         return ret;
 }
 
@@ -501,7 +514,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
 static inline int save_i387_fsave(struct _fpstate_ia32 __user *buf)
 {
         struct task_struct *tsk = current;
-        struct i387_fsave_struct *fp = &tsk->thread.xstate->fsave;
+        struct i387_fsave_struct *fp = &tsk->thread.fpu.state->fsave;
 
         fp->status = fp->swd;
         if (__copy_to_user(buf, fp, sizeof(struct i387_fsave_struct)))
@@ -512,7 +525,7 @@ static inline int save_i387_fsave(struct _fpstate_ia32 __user *buf)
 static int save_i387_fxsave(struct _fpstate_ia32 __user *buf)
 {
         struct task_struct *tsk = current;
-        struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
+        struct i387_fxsave_struct *fx = &tsk->thread.fpu.state->fxsave;
         struct user_i387_ia32_struct env;
         int err = 0;
 
@@ -536,6 +549,9 @@ static int save_i387_xsave(void __user *buf)
         struct _fpstate_ia32 __user *fx = buf;
         int err = 0;
 
+
+        sanitize_i387_state(tsk);
+
         /*
          * For legacy compatible, we always set FP/SSE bits in the bit
          * vector while saving the state to the user context.
@@ -547,7 +563,7 @@ static int save_i387_xsave(void __user *buf)
          * header as well as change any contents in the memory layout.
          * xrestore as part of sigreturn will capture all the changes.
          */
-        tsk->thread.xstate->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;
+        tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;
 
         if (save_i387_fxsave(fx) < 0)
                 return -1;
@@ -599,7 +615,7 @@ static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf)
 {
         struct task_struct *tsk = current;
 
-        return __copy_from_user(&tsk->thread.xstate->fsave, buf,
+        return __copy_from_user(&tsk->thread.fpu.state->fsave, buf,
                                 sizeof(struct i387_fsave_struct));
 }
 
@@ -610,10 +626,10 @@ static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf,
         struct user_i387_ia32_struct env;
         int err;
 
-        err = __copy_from_user(&tsk->thread.xstate->fxsave, &buf->_fxsr_env[0],
+        err = __copy_from_user(&tsk->thread.fpu.state->fxsave, &buf->_fxsr_env[0],
                                size);
         /* mxcsr reserved bits must be masked to zero for security reasons */
-        tsk->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask;
+        tsk->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;
         if (err || __copy_from_user(&env, buf, sizeof(env)))
                 return 1;
         convert_to_fxsr(tsk, &env);
@@ -629,7 +645,7 @@ static int restore_i387_xsave(void __user *buf)
         struct i387_fxsave_struct __user *fx =
                 (struct i387_fxsave_struct __user *) &fx_user->_fxsr_env[0];
         struct xsave_hdr_struct *xsave_hdr =
-                &current->thread.xstate->xsave.xsave_hdr;
+                &current->thread.fpu.state->xsave.xsave_hdr;
         u64 mask;
         int err;
 
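
Reading the hunks together: per-task FPU state moves from a bare `tsk->thread.xstate` pointer behind a `struct fpu` wrapper (`tsk->thread.fpu.state`), `init_fpu()` now calls `fpu_alloc()` plus the new exported `fpu_finit()` instead of open-coding `kmem_cache_alloc()` and register-image setup, and the regset/sigframe paths gain `sanitize_i387_state()` calls. The snippet below is not kernel code; it is a minimal user-space sketch of the same allocate-on-first-use-then-initialize pattern, with hypothetical stand-in names (`fake_fpu_alloc()`, `fake_fpu_finit()`) in place of the kernel helpers.

/* Hypothetical illustration only; these types and functions are not kernel APIs. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fake_fxsave {
        unsigned short cwd;     /* x87 control word */
        unsigned int   mxcsr;   /* SSE control/status */
};

struct fake_fpu {
        struct fake_fxsave *state;      /* mirrors fpu->state in the patch */
};

/* cf. fpu_alloc(): allocate backing state only on first use */
static int fake_fpu_alloc(struct fake_fpu *fpu)
{
        if (fpu->state)
                return 0;               /* already allocated */
        fpu->state = calloc(1, sizeof(*fpu->state));
        return fpu->state ? 0 : -1;     /* the kernel returns -ENOMEM here */
}

/* cf. fpu_finit(): reset the state image to architectural defaults */
static void fake_fpu_finit(struct fake_fpu *fpu)
{
        memset(fpu->state, 0, sizeof(*fpu->state));
        fpu->state->cwd   = 0x37f;      /* default x87 control word, as in the diff */
        fpu->state->mxcsr = 0x1f80;     /* value of MXCSR_DEFAULT */
}

int main(void)
{
        struct fake_fpu fpu = { 0 };

        /* cf. init_fpu(): allocate, then initialize, then mark as used */
        if (fake_fpu_alloc(&fpu))
                return 1;
        fake_fpu_finit(&fpu);

        printf("cwd=%#x mxcsr=%#x\n", fpu.state->cwd, fpu.state->mxcsr);
        free(fpu.state);
        return 0;
}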