author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/i386/kernel/i387.c
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'arch/i386/kernel/i387.c')
-rw-r--r--  arch/i386/kernel/i387.c  555
1 files changed, 555 insertions, 0 deletions
diff --git a/arch/i386/kernel/i387.c b/arch/i386/kernel/i387.c
new file mode 100644
index 000000000000..c55e037f08f7
--- /dev/null
+++ b/arch/i386/kernel/i387.c
@@ -0,0 +1,555 @@
/*
 * linux/arch/i386/kernel/i387.c
 *
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

#include <linux/config.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/math_emu.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
#include <asm/ptrace.h>
#include <asm/uaccess.h>

#ifdef CONFIG_MATH_EMULATION
#define HAVE_HWFP (boot_cpu_data.hard_math)
#else
#define HAVE_HWFP 1
#endif

static unsigned long mxcsr_feature_mask = 0xffffffff;

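/*
 * Probe which MXCSR bits this CPU implements: do an FXSAVE and read
 * back the mxcsr_mask field.  A reported mask of zero means the
 * default mask 0x0000ffbf applies.  The resulting mxcsr_feature_mask
 * is later used to clear reserved MXCSR bits in user-supplied state.
 */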
void mxcsr_feature_mask_init(void)
{
        unsigned long mask = 0;
        clts();
        if (cpu_has_fxsr) {
                memset(&current->thread.i387.fxsave, 0, sizeof(struct i387_fxsave_struct));
                asm volatile("fxsave %0" : : "m" (current->thread.i387.fxsave));
                mask = current->thread.i387.fxsave.mxcsr_mask;
                if (mask == 0) mask = 0x0000ffbf;
        }
        mxcsr_feature_mask &= mask;
        stts();
}

/*
 * The _current_ task is using the FPU for the first time
 * so initialize it and set the mxcsr to its default
 * value at reset if we support XMM instructions and then
 * remember the current task has used the FPU.
 */
void init_fpu(struct task_struct *tsk)
{
        if (cpu_has_fxsr) {
                memset(&tsk->thread.i387.fxsave, 0, sizeof(struct i387_fxsave_struct));
                tsk->thread.i387.fxsave.cwd = 0x37f;
                if (cpu_has_xmm)
                        tsk->thread.i387.fxsave.mxcsr = 0x1f80;
        } else {
                memset(&tsk->thread.i387.fsave, 0, sizeof(struct i387_fsave_struct));
                tsk->thread.i387.fsave.cwd = 0xffff037fu;
                tsk->thread.i387.fsave.swd = 0xffff0000u;
                tsk->thread.i387.fsave.twd = 0xffffffffu;
                tsk->thread.i387.fsave.fos = 0xffff0000u;
        }
        /* only the device not available exception or ptrace can call init_fpu */
        set_stopped_child_used_math(tsk);
}

/*
 * FPU lazy state save handling.
 */

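/*
 * kernel_fpu_begin() lets kernel code use the FPU/SSE registers: it
 * disables preemption and either saves the current task's live FPU
 * state (TS_USEDFPU set, CR0.TS already clear) or clears CR0.TS so
 * the following FPU instructions do not fault.  It is expected to be
 * paired with kernel_fpu_end() from <asm/i387.h>.
 */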
void kernel_fpu_begin(void)
{
        struct thread_info *thread = current_thread_info();

        preempt_disable();
        if (thread->status & TS_USEDFPU) {
                __save_init_fpu(thread->task);
                return;
        }
        clts();
}

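/*
 * Reload the task's saved FPU context into the hardware registers,
 * via FXRSTOR when available and the legacy FRSTOR otherwise.
 * Callers (e.g. the device-not-available trap handler) are expected
 * to have cleared CR0.TS first.
 */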
void restore_fpu( struct task_struct *tsk )
{
        if ( cpu_has_fxsr ) {
                asm volatile( "fxrstor %0"
                              : : "m" (tsk->thread.i387.fxsave) );
        } else {
                asm volatile( "frstor %0"
                              : : "m" (tsk->thread.i387.fsave) );
        }
}

/*
 * FPU tag word conversions.
 */

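/*
 * The legacy FSAVE image uses a 16-bit tag word with two bits per
 * register (valid/zero/special/empty), while FXSAVE keeps only one
 * bit per register (empty/non-empty).  twd_i387_to_fxsr() performs
 * the lossy compression; twd_fxsr_to_i387() rebuilds the two-bit
 * tags by classifying the contents of each saved register.
 */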
static inline unsigned short twd_i387_to_fxsr( unsigned short twd )
{
        unsigned int tmp; /* to avoid 16 bit prefixes in the code */

        /* Transform each pair of bits into 01 (valid) or 00 (empty) */
        tmp = ~twd;
        tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
        /* and move the valid bits to the lower byte. */
        tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
        tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
        tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
        return tmp;
}

static inline unsigned long twd_fxsr_to_i387( struct i387_fxsave_struct *fxsave )
{
        struct _fpxreg *st = NULL;
        unsigned long tos = (fxsave->swd >> 11) & 7;
        unsigned long twd = (unsigned long) fxsave->twd;
        unsigned long tag;
        unsigned long ret = 0xffff0000u;
        int i;

#define FPREG_ADDR(f, n)        ((void *)&(f)->st_space + (n) * 16)

        for ( i = 0 ; i < 8 ; i++ ) {
                if ( twd & 0x1 ) {
                        st = FPREG_ADDR( fxsave, (i - tos) & 7 );

                        switch ( st->exponent & 0x7fff ) {
                        case 0x7fff:
                                tag = 2;        /* Special */
                                break;
                        case 0x0000:
                                if ( !st->significand[0] &&
                                     !st->significand[1] &&
                                     !st->significand[2] &&
                                     !st->significand[3] ) {
                                        tag = 1;        /* Zero */
                                } else {
                                        tag = 2;        /* Special */
                                }
                                break;
                        default:
                                if ( st->significand[3] & 0x8000 ) {
                                        tag = 0;        /* Valid */
                                } else {
                                        tag = 2;        /* Special */
                                }
                                break;
                        }
                } else {
                        tag = 3;        /* Empty */
                }
                ret |= (tag << (2 * i));
                twd = twd >> 1;
        }
        return ret;
}

/*
 * FPU state interaction.
 */

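/*
 * Accessors for the control, status and tag words and MXCSR.  The
 * trap handlers (e.g. math_error() in traps.c) use these to decode
 * the cause of an FPU or SIMD exception; the #if 0 setters are kept
 * for reference but are unused here.
 */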
unsigned short get_fpu_cwd( struct task_struct *tsk )
{
        if ( cpu_has_fxsr ) {
                return tsk->thread.i387.fxsave.cwd;
        } else {
                return (unsigned short)tsk->thread.i387.fsave.cwd;
        }
}

unsigned short get_fpu_swd( struct task_struct *tsk )
{
        if ( cpu_has_fxsr ) {
                return tsk->thread.i387.fxsave.swd;
        } else {
                return (unsigned short)tsk->thread.i387.fsave.swd;
        }
}

#if 0
unsigned short get_fpu_twd( struct task_struct *tsk )
{
        if ( cpu_has_fxsr ) {
                return tsk->thread.i387.fxsave.twd;
        } else {
                return (unsigned short)tsk->thread.i387.fsave.twd;
        }
}
#endif /* 0 */

unsigned short get_fpu_mxcsr( struct task_struct *tsk )
{
        if ( cpu_has_xmm ) {
                return tsk->thread.i387.fxsave.mxcsr;
        } else {
                return 0x1f80;
        }
}

#if 0

void set_fpu_cwd( struct task_struct *tsk, unsigned short cwd )
{
        if ( cpu_has_fxsr ) {
                tsk->thread.i387.fxsave.cwd = cwd;
        } else {
                tsk->thread.i387.fsave.cwd = ((long)cwd | 0xffff0000u);
        }
}

void set_fpu_swd( struct task_struct *tsk, unsigned short swd )
{
        if ( cpu_has_fxsr ) {
                tsk->thread.i387.fxsave.swd = swd;
        } else {
                tsk->thread.i387.fsave.swd = ((long)swd | 0xffff0000u);
        }
}

void set_fpu_twd( struct task_struct *tsk, unsigned short twd )
{
        if ( cpu_has_fxsr ) {
                tsk->thread.i387.fxsave.twd = twd_i387_to_fxsr(twd);
        } else {
                tsk->thread.i387.fsave.twd = ((long)twd | 0xffff0000u);
        }
}

#endif /* 0 */

/*
 * FXSR floating point environment conversions.
 */

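/*
 * The user-visible struct _fpstate keeps the legacy layout: a 7-word
 * environment followed by eight 10-byte registers, whereas FXSAVE
 * stores each register in a 16-byte slot.  The two helpers below
 * repack the registers and convert the tag word in both directions.
 */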
static int convert_fxsr_to_user( struct _fpstate __user *buf,
                                 struct i387_fxsave_struct *fxsave )
{
        unsigned long env[7];
        struct _fpreg __user *to;
        struct _fpxreg *from;
        int i;

        env[0] = (unsigned long)fxsave->cwd | 0xffff0000ul;
        env[1] = (unsigned long)fxsave->swd | 0xffff0000ul;
        env[2] = twd_fxsr_to_i387(fxsave);
        env[3] = fxsave->fip;
        env[4] = fxsave->fcs | ((unsigned long)fxsave->fop << 16);
        env[5] = fxsave->foo;
        env[6] = fxsave->fos;

        if ( __copy_to_user( buf, env, 7 * sizeof(unsigned long) ) )
                return 1;

        to = &buf->_st[0];
        from = (struct _fpxreg *) &fxsave->st_space[0];
        for ( i = 0 ; i < 8 ; i++, to++, from++ ) {
                unsigned long __user *t = (unsigned long __user *)to;
                unsigned long *f = (unsigned long *)from;

                if (__put_user(*f, t) ||
                    __put_user(*(f + 1), t + 1) ||
                    __put_user(from->exponent, &to->exponent))
                        return 1;
        }
        return 0;
}

static int convert_fxsr_from_user( struct i387_fxsave_struct *fxsave,
                                   struct _fpstate __user *buf )
{
        unsigned long env[7];
        struct _fpxreg *to;
        struct _fpreg __user *from;
        int i;

        if ( __copy_from_user( env, buf, 7 * sizeof(long) ) )
                return 1;

        fxsave->cwd = (unsigned short)(env[0] & 0xffff);
        fxsave->swd = (unsigned short)(env[1] & 0xffff);
        fxsave->twd = twd_i387_to_fxsr((unsigned short)(env[2] & 0xffff));
        fxsave->fip = env[3];
        fxsave->fop = (unsigned short)((env[4] & 0xffff0000ul) >> 16);
        fxsave->fcs = (env[4] & 0xffff);
        fxsave->foo = env[5];
        fxsave->fos = env[6];

        to = (struct _fpxreg *) &fxsave->st_space[0];
        from = &buf->_st[0];
        for ( i = 0 ; i < 8 ; i++, to++, from++ ) {
                unsigned long *t = (unsigned long *)to;
                unsigned long __user *f = (unsigned long __user *)from;

                if (__get_user(*t, f) ||
                    __get_user(*(t + 1), f + 1) ||
                    __get_user(to->exponent, &from->exponent))
                        return 1;
        }
        return 0;
}

/*
 * Signal frame handlers.
 */

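/*
 * Return convention for the signal frame helpers below: save_i387()
 * returns 0 when the task never used the FPU (nothing is placed on
 * the signal frame), 1 when state was written, and -1 on a user-space
 * fault.  The FXSR path stores the legacy _fpstate image for
 * compatibility plus the raw fxsave image tagged with X86_FXSR_MAGIC.
 */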
static inline int save_i387_fsave( struct _fpstate __user *buf )
{
        struct task_struct *tsk = current;

        unlazy_fpu( tsk );
        tsk->thread.i387.fsave.status = tsk->thread.i387.fsave.swd;
        if ( __copy_to_user( buf, &tsk->thread.i387.fsave,
                             sizeof(struct i387_fsave_struct) ) )
                return -1;
        return 1;
}

static int save_i387_fxsave( struct _fpstate __user *buf )
{
        struct task_struct *tsk = current;
        int err = 0;

        unlazy_fpu( tsk );

        if ( convert_fxsr_to_user( buf, &tsk->thread.i387.fxsave ) )
                return -1;

        err |= __put_user( tsk->thread.i387.fxsave.swd, &buf->status );
        err |= __put_user( X86_FXSR_MAGIC, &buf->magic );
        if ( err )
                return -1;

        if ( __copy_to_user( &buf->_fxsr_env[0], &tsk->thread.i387.fxsave,
                             sizeof(struct i387_fxsave_struct) ) )
                return -1;
        return 1;
}

int save_i387( struct _fpstate __user *buf )
{
        if ( !used_math() )
                return 0;

        /* This will cause a "finit" to be triggered by the next
         * attempted FPU operation by the 'current' process.
         */
        clear_used_math();

        if ( HAVE_HWFP ) {
                if ( cpu_has_fxsr ) {
                        return save_i387_fxsave( buf );
                } else {
                        return save_i387_fsave( buf );
                }
        } else {
                return save_i387_soft( &current->thread.i387.soft, buf );
        }
}

static inline int restore_i387_fsave( struct _fpstate __user *buf )
{
        struct task_struct *tsk = current;
        clear_fpu( tsk );
        return __copy_from_user( &tsk->thread.i387.fsave, buf,
                                 sizeof(struct i387_fsave_struct) );
}

static int restore_i387_fxsave( struct _fpstate __user *buf )
{
        int err;
        struct task_struct *tsk = current;
        clear_fpu( tsk );
        err = __copy_from_user( &tsk->thread.i387.fxsave, &buf->_fxsr_env[0],
                                sizeof(struct i387_fxsave_struct) );
        /* mxcsr reserved bits must be masked to zero for security reasons */
        tsk->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
        return err ? 1 : convert_fxsr_from_user( &tsk->thread.i387.fxsave, buf );
}

int restore_i387( struct _fpstate __user *buf )
{
        int err;

        if ( HAVE_HWFP ) {
                if ( cpu_has_fxsr ) {
                        err = restore_i387_fxsave( buf );
                } else {
                        err = restore_i387_fsave( buf );
                }
        } else {
                err = restore_i387_soft( &current->thread.i387.soft, buf );
        }
        set_used_math();
        return err;
}

/*
 * ptrace request handlers.
 */

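/*
 * get_fpregs()/set_fpregs() work on the legacy user_i387_struct
 * layout, converting to and from the fxsave image when the CPU has
 * FXSR.  get_fpxregs()/set_fpxregs() expose the raw fxsave image
 * (user_fxsr_struct) and return -EIO on CPUs without FXSR; reserved
 * MXCSR bits are masked off on the write path.
 */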
static inline int get_fpregs_fsave( struct user_i387_struct __user *buf,
                                    struct task_struct *tsk )
{
        return __copy_to_user( buf, &tsk->thread.i387.fsave,
                               sizeof(struct user_i387_struct) );
}

static inline int get_fpregs_fxsave( struct user_i387_struct __user *buf,
                                     struct task_struct *tsk )
{
        return convert_fxsr_to_user( (struct _fpstate __user *)buf,
                                     &tsk->thread.i387.fxsave );
}

int get_fpregs( struct user_i387_struct __user *buf, struct task_struct *tsk )
{
        if ( HAVE_HWFP ) {
                if ( cpu_has_fxsr ) {
                        return get_fpregs_fxsave( buf, tsk );
                } else {
                        return get_fpregs_fsave( buf, tsk );
                }
        } else {
                return save_i387_soft( &tsk->thread.i387.soft,
                                       (struct _fpstate __user *)buf );
        }
}

static inline int set_fpregs_fsave( struct task_struct *tsk,
                                    struct user_i387_struct __user *buf )
{
        return __copy_from_user( &tsk->thread.i387.fsave, buf,
                                 sizeof(struct user_i387_struct) );
}

static inline int set_fpregs_fxsave( struct task_struct *tsk,
                                     struct user_i387_struct __user *buf )
{
        return convert_fxsr_from_user( &tsk->thread.i387.fxsave,
                                       (struct _fpstate __user *)buf );
}

int set_fpregs( struct task_struct *tsk, struct user_i387_struct __user *buf )
{
        if ( HAVE_HWFP ) {
                if ( cpu_has_fxsr ) {
                        return set_fpregs_fxsave( tsk, buf );
                } else {
                        return set_fpregs_fsave( tsk, buf );
                }
        } else {
                return restore_i387_soft( &tsk->thread.i387.soft,
                                          (struct _fpstate __user *)buf );
        }
}

int get_fpxregs( struct user_fxsr_struct __user *buf, struct task_struct *tsk )
{
        if ( cpu_has_fxsr ) {
                if (__copy_to_user( buf, &tsk->thread.i387.fxsave,
                                    sizeof(struct user_fxsr_struct) ))
                        return -EFAULT;
                return 0;
        } else {
                return -EIO;
        }
}

int set_fpxregs( struct task_struct *tsk, struct user_fxsr_struct __user *buf )
{
        int ret = 0;

        if ( cpu_has_fxsr ) {
                if (__copy_from_user( &tsk->thread.i387.fxsave, buf,
                                      sizeof(struct user_fxsr_struct) ))
                        ret = -EFAULT;
                /* mxcsr reserved bits must be masked to zero for security reasons */
                tsk->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
        } else {
                ret = -EIO;
        }
        return ret;
}

/*
 * FPU state for core dumps.
 */

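/*
 * Core-dump helpers.  The dumped image uses the legacy
 * user_i387_struct layout, so copy_fpu_fxsave() repacks each 16-byte
 * fxsave register slot into the 10-byte legacy format (the first
 * five 16-bit words of each slot).
 */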
static inline void copy_fpu_fsave( struct task_struct *tsk,
                                   struct user_i387_struct *fpu )
{
        memcpy( fpu, &tsk->thread.i387.fsave,
                sizeof(struct user_i387_struct) );
}

static inline void copy_fpu_fxsave( struct task_struct *tsk,
                                    struct user_i387_struct *fpu )
{
        unsigned short *to;
        unsigned short *from;
        int i;

        memcpy( fpu, &tsk->thread.i387.fxsave, 7 * sizeof(long) );

        to = (unsigned short *)&fpu->st_space[0];
        from = (unsigned short *)&tsk->thread.i387.fxsave.st_space[0];
        for ( i = 0 ; i < 8 ; i++, to += 5, from += 8 ) {
                memcpy( to, from, 5 * sizeof(unsigned short) );
        }
}

int dump_fpu( struct pt_regs *regs, struct user_i387_struct *fpu )
{
        int fpvalid;
        struct task_struct *tsk = current;

        fpvalid = !!used_math();
        if ( fpvalid ) {
                unlazy_fpu( tsk );
                if ( cpu_has_fxsr ) {
                        copy_fpu_fxsave( tsk, fpu );
                } else {
                        copy_fpu_fsave( tsk, fpu );
                }
        }

        return fpvalid;
}

int dump_task_fpu(struct task_struct *tsk, struct user_i387_struct *fpu)
{
        int fpvalid = !!tsk_used_math(tsk);

        if (fpvalid) {
                if (tsk == current)
                        unlazy_fpu(tsk);
                if (cpu_has_fxsr)
                        copy_fpu_fxsave(tsk, fpu);
                else
                        copy_fpu_fsave(tsk, fpu);
        }
        return fpvalid;
}

int dump_task_extended_fpu(struct task_struct *tsk, struct user_fxsr_struct *fpu)
{
        int fpvalid = tsk_used_math(tsk) && cpu_has_fxsr;

        if (fpvalid) {
                if (tsk == current)
                        unlazy_fpu(tsk);
                memcpy(fpu, &tsk->thread.i387.fxsave, sizeof(*fpu));
        }
        return fpvalid;
}