path: root/arch/x86/kernel/i387.c
author     Avi Kivity <avi@redhat.com>        2010-05-06 04:45:46 -0400
committer  H. Peter Anvin <hpa@zytor.com>     2010-05-10 13:48:55 -0400
commit     86603283326c9e95e5ad4e9fdddeec93cac5d9ad (patch)
tree       1a26a37434e920f9519b547814a1a9af35022de8 /arch/x86/kernel/i387.c
parent     c9ad488289144ae5ef53b012e15895ef1f5e4bb6 (diff)
x86: Introduce 'struct fpu' and related API
Currently all fpu state access is through tsk->thread.xstate. Since we wish
to generalize fpu access to non-task contexts, wrap the state in a new
'struct fpu' and convert existing access to use an fpu API.

Signal frame handlers are not converted to the API since they will remain
task context only things.

Signed-off-by: Avi Kivity <avi@redhat.com>
Acked-by: Suresh Siddha <suresh.b.siddha@intel.com>
LKML-Reference: <1273135546-29690-3-git-send-email-avi@redhat.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
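
The definition of 'struct fpu' itself lives in the x86 headers touched elsewhere in this series, not in this file. Judging from the accesses the diff below converts (tsk->thread.fpu.state->fxsave and friends), the wrapper is essentially a pointer to the existing xstate union embedded in thread_struct. A minimal sketch, with the surrounding declarations assumed rather than taken from this diff:

/* Sketch only -- the real declarations sit on the asm/processor.h side of this series. */
struct fpu {
	union thread_xstate *state;	/* heap-allocated fxsave/fsave/xsave/soft image */
};

struct thread_struct {
	/* ... */
	struct fpu fpu;			/* replaces the old 'union thread_xstate *xstate' pointer */
	/* ... */
};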
Diffstat (limited to 'arch/x86/kernel/i387.c')
-rw-r--r--  arch/x86/kernel/i387.c  102
1 file changed, 51 insertions(+), 51 deletions(-)
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 14ca1dc7a703..86cef6b32253 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -107,57 +107,57 @@ void __cpuinit fpu_init(void)
 }
 #endif	/* CONFIG_X86_64 */
 
-/*
- * The _current_ task is using the FPU for the first time
- * so initialize it and set the mxcsr to its default
- * value at reset if we support XMM instructions and then
- * remeber the current task has used the FPU.
- */
-int init_fpu(struct task_struct *tsk)
+static void fpu_finit(struct fpu *fpu)
 {
-	if (tsk_used_math(tsk)) {
-		if (HAVE_HWFP && tsk == current)
-			unlazy_fpu(tsk);
-		return 0;
-	}
-
-	/*
-	 * Memory allocation at the first usage of the FPU and other state.
-	 */
-	if (!tsk->thread.xstate) {
-		tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
-						      GFP_KERNEL);
-		if (!tsk->thread.xstate)
-			return -ENOMEM;
-	}
-
 #ifdef CONFIG_X86_32
 	if (!HAVE_HWFP) {
-		memset(tsk->thread.xstate, 0, xstate_size);
-		finit_task(tsk);
-		set_stopped_child_used_math(tsk);
-		return 0;
+		finit_soft_fpu(&fpu->state->soft);
+		return;
 	}
 #endif
 
 	if (cpu_has_fxsr) {
-		struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
+		struct i387_fxsave_struct *fx = &fpu->state->fxsave;
 
 		memset(fx, 0, xstate_size);
 		fx->cwd = 0x37f;
 		if (cpu_has_xmm)
 			fx->mxcsr = MXCSR_DEFAULT;
 	} else {
-		struct i387_fsave_struct *fp = &tsk->thread.xstate->fsave;
+		struct i387_fsave_struct *fp = &fpu->state->fsave;
 		memset(fp, 0, xstate_size);
 		fp->cwd = 0xffff037fu;
 		fp->swd = 0xffff0000u;
 		fp->twd = 0xffffffffu;
 		fp->fos = 0xffff0000u;
 	}
+}
+
+/*
+ * The _current_ task is using the FPU for the first time
+ * so initialize it and set the mxcsr to its default
+ * value at reset if we support XMM instructions and then
+ * remeber the current task has used the FPU.
+ */
+int init_fpu(struct task_struct *tsk)
+{
+	int ret;
+
+	if (tsk_used_math(tsk)) {
+		if (HAVE_HWFP && tsk == current)
+			unlazy_fpu(tsk);
+		return 0;
+	}
+
 	/*
-	 * Only the device not available exception or ptrace can call init_fpu.
+	 * Memory allocation at the first usage of the FPU and other state.
 	 */
+	ret = fpu_alloc(&tsk->thread.fpu);
+	if (ret)
+		return ret;
+
+	fpu_finit(&tsk->thread.fpu);
+
 	set_stopped_child_used_math(tsk);
 	return 0;
 }
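
fpu_alloc() is not defined in this file; it comes from the i387 header changes elsewhere in this series. Going by the open-coded allocation it replaces in the hunk above, it presumably amounts to the following reconstruction (a sketch, not the exact helper):

/* Hypothetical reconstruction of fpu_alloc(), mirroring the code removed from init_fpu() above. */
static inline int fpu_alloc(struct fpu *fpu)
{
	if (fpu->state)				/* already allocated */
		return 0;
	fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
	if (!fpu->state)
		return -ENOMEM;
	return 0;
}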
@@ -191,7 +191,7 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
 		return ret;
 
 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-				   &target->thread.xstate->fxsave, 0, -1);
+				   &target->thread.fpu.state->fxsave, 0, -1);
 }
 
 int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
@@ -208,19 +208,19 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
 		return ret;
 
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-				 &target->thread.xstate->fxsave, 0, -1);
+				 &target->thread.fpu.state->fxsave, 0, -1);
 
 	/*
 	 * mxcsr reserved bits must be masked to zero for security reasons.
 	 */
-	target->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask;
+	target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;
 
 	/*
 	 * update the header bits in the xsave header, indicating the
 	 * presence of FP and SSE state.
 	 */
 	if (cpu_has_xsave)
-		target->thread.xstate->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;
+		target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;
 
 	return ret;
 }
@@ -243,14 +243,14 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
 	 * memory layout in the thread struct, so that we can copy the entire
 	 * xstateregs to the user using one user_regset_copyout().
 	 */
-	memcpy(&target->thread.xstate->fxsave.sw_reserved,
+	memcpy(&target->thread.fpu.state->fxsave.sw_reserved,
 	       xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));
 
 	/*
 	 * Copy the xstate memory layout.
 	 */
 	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-				  &target->thread.xstate->xsave, 0, -1);
+				  &target->thread.fpu.state->xsave, 0, -1);
 	return ret;
 }
 
@@ -269,14 +269,14 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
 		return ret;
 
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-				 &target->thread.xstate->xsave, 0, -1);
+				 &target->thread.fpu.state->xsave, 0, -1);
 
 	/*
 	 * mxcsr reserved bits must be masked to zero for security reasons.
 	 */
-	target->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask;
+	target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;
 
-	xsave_hdr = &target->thread.xstate->xsave.xsave_hdr;
+	xsave_hdr = &target->thread.fpu.state->xsave.xsave_hdr;
 
 	xsave_hdr->xstate_bv &= pcntxt_mask;
 	/*
@@ -362,7 +362,7 @@ static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
 static void
 convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
 {
-	struct i387_fxsave_struct *fxsave = &tsk->thread.xstate->fxsave;
+	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
 	struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
 	struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
 	int i;
@@ -402,7 +402,7 @@ static void convert_to_fxsr(struct task_struct *tsk,
 			    const struct user_i387_ia32_struct *env)
 
 {
-	struct i387_fxsave_struct *fxsave = &tsk->thread.xstate->fxsave;
+	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
 	struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
 	struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
 	int i;
@@ -442,7 +442,7 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
 
 	if (!cpu_has_fxsr) {
 		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-					   &target->thread.xstate->fsave, 0,
+					   &target->thread.fpu.state->fsave, 0,
 					   -1);
 	}
 
@@ -472,7 +472,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
 
 	if (!cpu_has_fxsr) {
 		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-					  &target->thread.xstate->fsave, 0, -1);
+					  &target->thread.fpu.state->fsave, 0, -1);
 	}
 
 	if (pos > 0 || count < sizeof(env))
@@ -487,7 +487,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
 	 * presence of FP.
 	 */
 	if (cpu_has_xsave)
-		target->thread.xstate->xsave.xsave_hdr.xstate_bv |= XSTATE_FP;
+		target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FP;
 	return ret;
 }
 
@@ -498,7 +498,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
 static inline int save_i387_fsave(struct _fpstate_ia32 __user *buf)
 {
 	struct task_struct *tsk = current;
-	struct i387_fsave_struct *fp = &tsk->thread.xstate->fsave;
+	struct i387_fsave_struct *fp = &tsk->thread.fpu.state->fsave;
 
 	fp->status = fp->swd;
 	if (__copy_to_user(buf, fp, sizeof(struct i387_fsave_struct)))
@@ -509,7 +509,7 @@ static inline int save_i387_fsave(struct _fpstate_ia32 __user *buf)
 static int save_i387_fxsave(struct _fpstate_ia32 __user *buf)
 {
 	struct task_struct *tsk = current;
-	struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
+	struct i387_fxsave_struct *fx = &tsk->thread.fpu.state->fxsave;
 	struct user_i387_ia32_struct env;
 	int err = 0;
 
@@ -544,7 +544,7 @@ static int save_i387_xsave(void __user *buf)
 	 * header as well as change any contents in the memory layout.
 	 * xrestore as part of sigreturn will capture all the changes.
 	 */
-	tsk->thread.xstate->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;
+	tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;
 
 	if (save_i387_fxsave(fx) < 0)
 		return -1;
@@ -596,7 +596,7 @@ static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf)
 {
 	struct task_struct *tsk = current;
 
-	return __copy_from_user(&tsk->thread.xstate->fsave, buf,
+	return __copy_from_user(&tsk->thread.fpu.state->fsave, buf,
 				sizeof(struct i387_fsave_struct));
 }
 
@@ -607,10 +607,10 @@ static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf,
 	struct user_i387_ia32_struct env;
 	int err;
 
-	err = __copy_from_user(&tsk->thread.xstate->fxsave, &buf->_fxsr_env[0],
+	err = __copy_from_user(&tsk->thread.fpu.state->fxsave, &buf->_fxsr_env[0],
 			       size);
 	/* mxcsr reserved bits must be masked to zero for security reasons */
-	tsk->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask;
+	tsk->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;
 	if (err || __copy_from_user(&env, buf, sizeof(env)))
 		return 1;
 	convert_to_fxsr(tsk, &env);
@@ -626,7 +626,7 @@ static int restore_i387_xsave(void __user *buf)
 	struct i387_fxsave_struct __user *fx =
 		(struct i387_fxsave_struct __user *) &fx_user->_fxsr_env[0];
 	struct xsave_hdr_struct *xsave_hdr =
-		&current->thread.xstate->xsave.xsave_hdr;
+		&current->thread.fpu.state->xsave.xsave_hdr;
 	u64 mask;
 	int err;
 
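
The commit message's point about non-task contexts is not exercised in this file; every caller above still reaches the state through tsk->thread.fpu. As a purely illustrative sketch of where the API is headed, a non-task owner could embed a struct fpu directly. The embedding struct is hypothetical, and fpu_finit() is static in this file as shown, so an external user would need it exported; treat this as intent, not working code against this exact tree:

/* Illustrative only: fpu state owned by something other than a task. */
struct my_fpu_owner {				/* hypothetical embedder, not from this patch */
	struct fpu guest_fpu;
};

static int my_fpu_owner_init(struct my_fpu_owner *owner)
{
	int ret;

	ret = fpu_alloc(&owner->guest_fpu);	/* same helper init_fpu() uses above */
	if (ret)
		return ret;
	fpu_finit(&owner->guest_fpu);		/* assumes fpu_finit() becomes visible outside i387.c */
	return 0;
}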