diff options
author | Roland McGrath <roland@redhat.com> | 2008-01-30 07:31:51 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-01-30 07:31:51 -0500 |
commit | 1eeaed7679eab3666d2d8c964d060c2169b3813b (patch) | |
tree | 2c2904e4ee8d75f30d00dd2367b12d37a5e3e55b /include/asm-x86/i387.h | |
parent | 4421011120b2304e5c248ae4165a2704588aedf1 (diff) |
x86: x86 i387 cleanup
This removes all the old code that is no longer used after
the i387 unification and cleanup. The i387_64.h is renamed
to i387.h with no changes, but since it replaces the nonempty
one-line stub i387.h it looks like a big diff and not a rename.
Signed-off-by: Roland McGrath <roland@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'include/asm-x86/i387.h')
-rw-r--r-- | include/asm-x86/i387.h | 375 |
1 files changed, 374 insertions, 1 deletions
diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h index b2bc0050ce99..de435b9114df 100644 --- a/include/asm-x86/i387.h +++ b/include/asm-x86/i387.h | |||
@@ -1 +1,374 @@ | |||
1 | #include "i387_64.h" | 1 | /* |
2 | * Copyright (C) 1994 Linus Torvalds | ||
3 | * | ||
4 | * Pentium III FXSR, SSE support | ||
5 | * General FPU state handling cleanups | ||
6 | * Gareth Hughes <gareth@valinux.com>, May 2000 | ||
7 | * x86-64 work by Andi Kleen 2002 | ||
8 | */ | ||
9 | |||
10 | #ifndef _ASM_X86_I387_H | ||
11 | #define _ASM_X86_I387_H | ||
12 | |||
13 | #include <linux/sched.h> | ||
14 | #include <linux/kernel_stat.h> | ||
15 | #include <linux/regset.h> | ||
16 | #include <asm/processor.h> | ||
17 | #include <asm/sigcontext.h> | ||
18 | #include <asm/user.h> | ||
19 | #include <asm/uaccess.h> | ||
20 | |||
/* FPU bring-up and per-task state initialization. */
extern void fpu_init(void);
extern unsigned int mxcsr_feature_mask;	/* mask of MXCSR bits this CPU supports */
extern void mxcsr_feature_mask_init(void);
extern void init_fpu(struct task_struct *child);
extern asmlinkage void math_state_restore(void);

/* regset accessors for the FP/extended-FP register sets (ptrace/coredump). */
extern user_regset_active_fn fpregs_active, xfpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get;
extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set;

#ifdef CONFIG_IA32_EMULATION
/* 32-bit compat signal-frame save/restore of the i387 state. */
struct _fpstate_ia32;
extern int save_i387_ia32(struct _fpstate_ia32 __user *buf);
extern int restore_i387_ia32(struct _fpstate_ia32 __user *buf);
#endif
36 | |||
37 | #ifdef CONFIG_X86_64 | ||
38 | |||
/*
 * Ignore delayed exceptions from user space: the __ex_table entry
 * maps a fault raised by the fwait at 1: straight to the label 2:
 * after it, so the wait is silently abandoned instead of trapping.
 */
static inline void tolerant_fwait(void)
{
	asm volatile("1: fwait\n"
		     "2:\n"
		     " .section __ex_table,\"a\"\n"
		     "	.align 8\n"
		     " .quad 1b,2b\n"
		     " .previous\n");
}
49 | |||
/*
 * Restore the FPU registers from the given fxsave image with
 * rex64/fxrstor.  A fault (e.g. a bad image) is caught via the
 * __ex_table fixup at 3:, which sets err to -1; in that case the
 * task's FPU state is reinitialized.  Returns 0 on success.
 */
static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
{
	int err;

	asm volatile("1: rex64/fxrstor (%[fx])\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3: movl $-1,%[err]\n"
		     " jmp 2b\n"
		     ".previous\n"
		     ".section __ex_table,\"a\"\n"
		     " .align 8\n"
		     " .quad 1b,3b\n"
		     ".previous"
		     : [err] "=r" (err)
#if 0 /* See comment in __save_init_fpu() below. */
		     : [fx] "r" (fx), "m" (*fx), "0" (0));
#else
		     /* "cdaSDb" restricts fx to a legacy register so the
			addressing mode needs no REX-extended register
			(see the comment in __save_init_fpu() below). */
		     : [fx] "cdaSDb" (fx), "m" (*fx), "0" (0));
#endif
	if (unlikely(err))
		init_fpu(current);
	return err;
}
74 | |||
#define X87_FSW_ES (1 << 7)	/* Exception Summary */

/* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception
   is pending. Clear the x87 state here by setting it to fixed
   values. The kernel data segment can be sometimes 0 and sometimes
   new user value. Both should be ok.
   Use the PDA as safe address because it should be already in L1. */
static inline void clear_fpu_state(struct i387_fxsave_struct *fx)
{
	/* clear any pending exceptions first (only when ES is set) */
	if (unlikely(fx->swd & X87_FSW_ES))
		asm volatile("fnclex");
	/* patched to NOPs on CPUs without the FXSAVE leak erratum */
	alternative_input(ASM_NOP8 ASM_NOP2,
			  " emms\n"		/* clear stack tags */
			  " fildl %%gs:0",	/* load to clear state */
			  X86_FEATURE_FXSAVE_LEAK);
}
91 | |||
/*
 * Save the current FPU registers into the user-space fxsave buffer.
 * A faulting store is caught via the __ex_table fixup at 3:, which
 * sets err to -1; the buffer is then cleared (-EFAULT if even that
 * fails).  Returns 0 on success.
 */
static inline int save_i387_checking(struct i387_fxsave_struct __user *fx)
{
	int err;

	asm volatile("1: rex64/fxsave (%[fx])\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3: movl $-1,%[err]\n"
		     " jmp 2b\n"
		     ".previous\n"
		     ".section __ex_table,\"a\"\n"
		     " .align 8\n"
		     " .quad 1b,3b\n"
		     ".previous"
		     : [err] "=r" (err), "=m" (*fx)
#if 0 /* See comment in __save_init_fpu() below. */
		     : [fx] "r" (fx), "0" (0));
#else
		     : [fx] "cdaSDb" (fx), "0" (0));
#endif
	if (unlikely(err) && __clear_user(fx, sizeof(struct i387_fxsave_struct)))
		err = -EFAULT;
	/* No need to clear here because the caller clears USED_MATH */
	return err;
}
117 | |||
/*
 * Save the current FPU state into tsk->thread.i387.fxsave, scrub the
 * leaky x87 state (see clear_fpu_state) and clear TS_USEDFPU so the
 * task no longer owns the live FPU registers.
 */
static inline void __save_init_fpu(struct task_struct *tsk)
{
	/* Using "rex64; fxsave %0" is broken because, if the memory operand
	   uses any extended registers for addressing, a second REX prefix
	   will be generated (to the assembler, rex64 followed by semicolon
	   is a separate instruction), and hence the 64-bitness is lost. */
#if 0
	/* Using "fxsaveq %0" would be the ideal choice, but is only supported
	   starting with gas 2.16. */
	__asm__ __volatile__("fxsaveq %0"
			     : "=m" (tsk->thread.i387.fxsave));
#elif 0
	/* Using, as a workaround, the properly prefixed form below isn't
	   accepted by any binutils version so far released, complaining that
	   the same type of prefix is used twice if an extended register is
	   needed for addressing (fix submitted to mainline 2005-11-21). */
	__asm__ __volatile__("rex64/fxsave %0"
			     : "=m" (tsk->thread.i387.fxsave));
#else
	/* This, however, we can work around by forcing the compiler to select
	   an addressing mode that doesn't require extended registers. */
	__asm__ __volatile__("rex64/fxsave %P2(%1)"
			     : "=m" (tsk->thread.i387.fxsave)
			     : "cdaSDb" (tsk),
			       "i" (offsetof(__typeof__(*tsk),
					     thread.i387.fxsave)));
#endif
	clear_fpu_state(&tsk->thread.i387.fxsave);
	task_thread_info(tsk)->status &= ~TS_USEDFPU;
}
148 | |||
/*
 * Signal frame handlers.
 */

/*
 * Save the FPU state into the user-space signal frame @buf.
 * Returns 1 when state was written, 0 when the task never used the
 * FPU, negative on fault.  Clears USED_MATH so the next FPU use
 * retriggers finit.
 */
static inline int save_i387(struct _fpstate __user *buf)
{
	struct task_struct *tsk = current;
	int err = 0;

	BUILD_BUG_ON(sizeof(struct user_i387_struct) !=
			sizeof(tsk->thread.i387.fxsave));

	/* fxsave needs a 16-byte aligned image */
	if ((unsigned long)buf % 16)
		printk("save_i387: bad fpstate %p\n", buf);

	if (!used_math())
		return 0;
	clear_used_math(); /* trigger finit */
	if (task_thread_info(tsk)->status & TS_USEDFPU) {
		/* live registers: save straight to userspace, give up FPU */
		err = save_i387_checking((struct i387_fxsave_struct __user *)buf);
		if (err) return err;
		task_thread_info(tsk)->status &= ~TS_USEDFPU;
		stts();
	} else {
		/* state already saved lazily: just copy the image out */
		if (__copy_to_user(buf, &tsk->thread.i387.fxsave,
				   sizeof(struct i387_fxsave_struct)))
			return -1;
	}
	return 1;
}
179 | |||
/*
 * This restores directly out of user space. Exceptions are handled.
 * Marks the task as math-using and grabs the FPU (clts) before
 * restoring, so the fxrstor can execute without trapping on TS.
 */
static inline int restore_i387(struct _fpstate __user *buf)
{
	set_used_math();
	if (!(task_thread_info(current)->status & TS_USEDFPU)) {
		clts();
		task_thread_info(current)->status |= TS_USEDFPU;
	}
	return restore_fpu_checking((__force struct i387_fxsave_struct *)buf);
}
192 | |||
193 | #else /* CONFIG_X86_32 */ | ||
194 | |||
/* Ignore delayed exceptions from user space: clear pending x87
   exceptions with fnclex first, so the fwait cannot raise them. */
static inline void tolerant_fwait(void)
{
	asm volatile("fnclex ; fwait");
}
199 | |||
/*
 * Reload the task's saved FPU image into the hardware registers:
 * fxrstor on FXSR-capable CPUs, legacy frstor otherwise.
 */
static inline void restore_fpu(struct task_struct *tsk)
{
	/*
	 * The "nop" is needed to make the instructions the same
	 * length.
	 */
	alternative_input(
		"nop ; frstor %1",
		"fxrstor %1",
		X86_FEATURE_FXSR,
		"m" ((tsk)->thread.i387.fxsave));
}
212 | |||
/* We need a safe address that is cheap to find and that is already
   in L1 during context switch. The best choices are unfortunately
   different for UP and SMP */
#ifdef CONFIG_SMP
#define safe_address (__per_cpu_offset[0])
#else
#define safe_address (kstat_cpu(0).cpustat.user)
#endif

/*
 * These must be called with preempt disabled
 */

/*
 * Save the current FPU state into tsk->thread.i387 (fxsave or legacy
 * fnsave form), scrub the leaky x87 state on affected AMD CPUs, and
 * clear TS_USEDFPU.
 */
static inline void __save_init_fpu(struct task_struct *tsk)
{
	/* Use more nops than strictly needed in case the compiler
	   varies code */
	alternative_input(
		"fnsave %[fx] ;fwait;" GENERIC_NOP8 GENERIC_NOP4,
		"fxsave %[fx]\n"
		/* bit 7 of the status word is the Exception Summary:
		   fnclex only when an exception is actually pending */
		"bt $7,%[fsw] ; jnc 1f ; fnclex\n1:",
		X86_FEATURE_FXSR,
		[fx] "m" (tsk->thread.i387.fxsave),
		[fsw] "m" (tsk->thread.i387.fxsave.swd) : "memory");
	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
	   is pending.  Clear the x87 state here by setting it to fixed
	   values. safe_address is a random variable that should be in L1 */
	alternative_input(
		GENERIC_NOP8 GENERIC_NOP2,
		"emms\n\t"		/* clear stack tags */
		"fildl %[addr]",	/* set F?P to defined value */
		X86_FEATURE_FXSAVE_LEAK,
		[addr] "m" (safe_address));
	task_thread_info(tsk)->status &= ~TS_USEDFPU;
}
247 | |||
/*
 * Signal frame handlers... (out of line on 32-bit, unlike the inline
 * 64-bit versions above)
 */
extern int save_i387(struct _fpstate __user *buf);
extern int restore_i387(struct _fpstate __user *buf);
253 | |||
254 | #endif /* CONFIG_X86_64 */ | ||
255 | |||
256 | static inline void __unlazy_fpu(struct task_struct *tsk) | ||
257 | { | ||
258 | if (task_thread_info(tsk)->status & TS_USEDFPU) { | ||
259 | __save_init_fpu(tsk); | ||
260 | stts(); | ||
261 | } else | ||
262 | tsk->fpu_counter = 0; | ||
263 | } | ||
264 | |||
/*
 * Throw away the task's live FPU state without saving it: drain any
 * pending exception (tolerant_fwait), clear TS_USEDFPU, and set TS
 * so the next FPU use traps.  Caller handles preemption.
 */
static inline void __clear_fpu(struct task_struct *tsk)
{
	if (task_thread_info(tsk)->status & TS_USEDFPU) {
		tolerant_fwait();
		task_thread_info(tsk)->status &= ~TS_USEDFPU;
		stts();
	}
}
273 | |||
/*
 * Claim the FPU for in-kernel use.  Disables preemption; if the
 * current task owns the live FPU registers its state is saved first,
 * otherwise TS is simply cleared so FPU insns won't trap.  Must be
 * paired with kernel_fpu_end().
 */
static inline void kernel_fpu_begin(void)
{
	struct thread_info *me = current_thread_info();
	preempt_disable();
	if (me->status & TS_USEDFPU)
		__save_init_fpu(me->task);
	else
		clts();
}
283 | |||
/*
 * Release the FPU after kernel_fpu_begin(): set TS again so the next
 * user FPU use retraps, then re-enable preemption.
 */
static inline void kernel_fpu_end(void)
{
	stts();
	preempt_enable();
}
289 | |||
290 | #ifdef CONFIG_X86_64 | ||
291 | |||
/* Save the task's FPU state and relinquish the FPU (set TS). */
static inline void save_init_fpu(struct task_struct *tsk)
{
	__save_init_fpu(tsk);
	stts();
}

/* On 64-bit the non-__ wrappers add no extra preemption handling. */
#define unlazy_fpu __unlazy_fpu
#define clear_fpu __clear_fpu
300 | |||
301 | #else /* CONFIG_X86_32 */ | ||
302 | |||
/*
 * These disable preemption on their own and are safe
 */

/* Save the task's FPU state and relinquish the FPU (set TS). */
static inline void save_init_fpu(struct task_struct *tsk)
{
	preempt_disable();
	__save_init_fpu(tsk);
	stts();
	preempt_enable();
}
313 | |||
/* Preemption-safe wrapper around __unlazy_fpu(). */
static inline void unlazy_fpu(struct task_struct *tsk)
{
	preempt_disable();
	__unlazy_fpu(tsk);
	preempt_enable();
}
320 | |||
/* Preemption-safe wrapper around __clear_fpu(). */
static inline void clear_fpu(struct task_struct *tsk)
{
	preempt_disable();
	__clear_fpu(tsk);
	preempt_enable();
}
327 | |||
328 | #endif /* CONFIG_X86_64 */ | ||
329 | |||
/*
 * ptrace request handlers...
 */
/* Copy the classic user_i387_struct image to/from user space. */
extern int get_fpregs(struct user_i387_struct __user *buf,
		      struct task_struct *tsk);
extern int set_fpregs(struct task_struct *tsk,
		      struct user_i387_struct __user *buf);

/* Same for the extended FXSR register image. */
struct user_fxsr_struct;
extern int get_fpxregs(struct user_fxsr_struct __user *buf,
		       struct task_struct *tsk);
extern int set_fpxregs(struct task_struct *tsk,
		       struct user_fxsr_struct __user *buf);
343 | |||
344 | /* | ||
345 | * i387 state interaction | ||
346 | */ | ||
347 | static inline unsigned short get_fpu_cwd(struct task_struct *tsk) | ||
348 | { | ||
349 | if (cpu_has_fxsr) { | ||
350 | return tsk->thread.i387.fxsave.cwd; | ||
351 | } else { | ||
352 | return (unsigned short)tsk->thread.i387.fsave.cwd; | ||
353 | } | ||
354 | } | ||
355 | |||
356 | static inline unsigned short get_fpu_swd(struct task_struct *tsk) | ||
357 | { | ||
358 | if (cpu_has_fxsr) { | ||
359 | return tsk->thread.i387.fxsave.swd; | ||
360 | } else { | ||
361 | return (unsigned short)tsk->thread.i387.fsave.swd; | ||
362 | } | ||
363 | } | ||
364 | |||
365 | static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk) | ||
366 | { | ||
367 | if (cpu_has_xmm) { | ||
368 | return tsk->thread.i387.fxsave.mxcsr; | ||
369 | } else { | ||
370 | return MXCSR_DEFAULT; | ||
371 | } | ||
372 | } | ||
373 | |||
374 | #endif /* _ASM_X86_I387_H */ | ||