Diffstat (limited to 'arch/x86/include/asm/i387.h')
 -rw-r--r--  arch/x86/include/asm/i387.h | 590
 1 files changed, 10 insertions, 580 deletions
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index 247904945d3f..7ce0798b1b26 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -13,476 +13,19 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/sched.h>
-#include <linux/kernel_stat.h>
-#include <linux/regset.h>
 #include <linux/hardirq.h>
-#include <linux/slab.h>
-#include <asm/asm.h>
-#include <asm/cpufeature.h>
-#include <asm/processor.h>
-#include <asm/sigcontext.h>
-#include <asm/user.h>
-#include <asm/uaccess.h>
-#include <asm/xsave.h>
+#include <asm/system.h>
+
+struct pt_regs;
+struct user_i387_struct;
 
-extern unsigned int sig_xstate_size;
-extern void fpu_init(void);
-extern void mxcsr_feature_mask_init(void);
 extern int init_fpu(struct task_struct *child);
-extern void math_state_restore(void);
 extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
+extern void math_state_restore(void);
 
-DECLARE_PER_CPU(struct task_struct *, fpu_owner_task);
-
-extern user_regset_active_fn fpregs_active, xfpregs_active;
-extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
-                xstateregs_get;
-extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
-                xstateregs_set;
-
-/*
- * xstateregs_active == fpregs_active. Please refer to the comment
- * at the definition of fpregs_active.
- */
-#define xstateregs_active fpregs_active
-
-extern struct _fpx_sw_bytes fx_sw_reserved;
-#ifdef CONFIG_IA32_EMULATION
-extern unsigned int sig_xstate_ia32_size;
-extern struct _fpx_sw_bytes fx_sw_reserved_ia32;
-struct _fpstate_ia32;
-struct _xstate_ia32;
-extern int save_i387_xstate_ia32(void __user *buf);
-extern int restore_i387_xstate_ia32(void __user *buf);
-#endif
-
-#ifdef CONFIG_MATH_EMULATION
-extern void finit_soft_fpu(struct i387_soft_struct *soft);
-#else
-static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
-#endif
-
-#define X87_FSW_ES (1 << 7) /* Exception Summary */
-
-static __always_inline __pure bool use_xsaveopt(void)
-{
-        return static_cpu_has(X86_FEATURE_XSAVEOPT);
-}
-
-static __always_inline __pure bool use_xsave(void)
-{
-        return static_cpu_has(X86_FEATURE_XSAVE);
-}
-
-static __always_inline __pure bool use_fxsr(void)
-{
-        return static_cpu_has(X86_FEATURE_FXSR);
-}
-
-extern void __sanitize_i387_state(struct task_struct *);
-
-static inline void sanitize_i387_state(struct task_struct *tsk)
-{
-        if (!use_xsaveopt())
-                return;
-        __sanitize_i387_state(tsk);
-}
-
-#ifdef CONFIG_X86_64
-static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
-{
-        int err;
-
-        /* See comment in fxsave() below. */
-#ifdef CONFIG_AS_FXSAVEQ
-        asm volatile("1: fxrstorq %[fx]\n\t"
-                     "2:\n"
-                     ".section .fixup,\"ax\"\n"
-                     "3: movl $-1,%[err]\n"
-                     " jmp 2b\n"
-                     ".previous\n"
-                     _ASM_EXTABLE(1b, 3b)
-                     : [err] "=r" (err)
-                     : [fx] "m" (*fx), "0" (0));
-#else
-        asm volatile("1: rex64/fxrstor (%[fx])\n\t"
-                     "2:\n"
-                     ".section .fixup,\"ax\"\n"
-                     "3: movl $-1,%[err]\n"
-                     " jmp 2b\n"
-                     ".previous\n"
-                     _ASM_EXTABLE(1b, 3b)
-                     : [err] "=r" (err)
-                     : [fx] "R" (fx), "m" (*fx), "0" (0));
-#endif
-        return err;
-}
-
-static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
-{
-        int err;
-
-        /*
-         * Clear the bytes not touched by the fxsave and reserved
-         * for the SW usage.
-         */
-        err = __clear_user(&fx->sw_reserved,
-                           sizeof(struct _fpx_sw_bytes));
-        if (unlikely(err))
-                return -EFAULT;
-
-        /* See comment in fxsave() below. */
-#ifdef CONFIG_AS_FXSAVEQ
-        asm volatile("1: fxsaveq %[fx]\n\t"
-                     "2:\n"
-                     ".section .fixup,\"ax\"\n"
-                     "3: movl $-1,%[err]\n"
-                     " jmp 2b\n"
-                     ".previous\n"
-                     _ASM_EXTABLE(1b, 3b)
-                     : [err] "=r" (err), [fx] "=m" (*fx)
-                     : "0" (0));
-#else
-        asm volatile("1: rex64/fxsave (%[fx])\n\t"
-                     "2:\n"
-                     ".section .fixup,\"ax\"\n"
-                     "3: movl $-1,%[err]\n"
-                     " jmp 2b\n"
-                     ".previous\n"
-                     _ASM_EXTABLE(1b, 3b)
-                     : [err] "=r" (err), "=m" (*fx)
-                     : [fx] "R" (fx), "0" (0));
-#endif
-        if (unlikely(err) &&
-            __clear_user(fx, sizeof(struct i387_fxsave_struct)))
-                err = -EFAULT;
-        /* No need to clear here because the caller clears USED_MATH */
-        return err;
-}
-
-static inline void fpu_fxsave(struct fpu *fpu)
-{
-        /* Using "rex64; fxsave %0" is broken because, if the memory operand
-           uses any extended registers for addressing, a second REX prefix
-           will be generated (to the assembler, rex64 followed by semicolon
-           is a separate instruction), and hence the 64-bitness is lost. */
-
-#ifdef CONFIG_AS_FXSAVEQ
-        /* Using "fxsaveq %0" would be the ideal choice, but is only supported
-           starting with gas 2.16. */
-        __asm__ __volatile__("fxsaveq %0"
-                             : "=m" (fpu->state->fxsave));
-#else
-        /* Using, as a workaround, the properly prefixed form below isn't
-           accepted by any binutils version so far released, complaining that
-           the same type of prefix is used twice if an extended register is
-           needed for addressing (fix submitted to mainline 2005-11-21).
-                asm volatile("rex64/fxsave %0"
-                             : "=m" (fpu->state->fxsave));
-           This, however, we can work around by forcing the compiler to select
-           an addressing mode that doesn't require extended registers. */
-        asm volatile("rex64/fxsave (%[fx])"
-                     : "=m" (fpu->state->fxsave)
-                     : [fx] "R" (&fpu->state->fxsave));
-#endif
-}
-
-#else /* CONFIG_X86_32 */
-
-/* perform fxrstor iff the processor has extended states, otherwise frstor */
-static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
-{
-        /*
-         * The "nop" is needed to make the instructions the same
-         * length.
-         */
-        alternative_input(
-                "nop ; frstor %1",
-                "fxrstor %1",
-                X86_FEATURE_FXSR,
-                "m" (*fx));
-
-        return 0;
-}
-
-static inline void fpu_fxsave(struct fpu *fpu)
-{
-        asm volatile("fxsave %[fx]"
-                     : [fx] "=m" (fpu->state->fxsave));
-}
-
-#endif /* CONFIG_X86_64 */
-
-/*
- * These must be called with preempt disabled. Returns
- * 'true' if the FPU state is still intact.
- */
-static inline int fpu_save_init(struct fpu *fpu)
-{
-        if (use_xsave()) {
-                fpu_xsave(fpu);
-
-                /*
-                 * xsave header may indicate the init state of the FP.
-                 */
-                if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
-                        return 1;
-        } else if (use_fxsr()) {
-                fpu_fxsave(fpu);
-        } else {
-                asm volatile("fnsave %[fx]; fwait"
-                             : [fx] "=m" (fpu->state->fsave));
-                return 0;
-        }
-
-        /*
-         * If exceptions are pending, we need to clear them so
-         * that we don't randomly get exceptions later.
-         *
-         * FIXME! Is this perhaps only true for the old-style
-         * irq13 case? Maybe we could leave the x87 state
-         * intact otherwise?
-         */
-        if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
-                asm volatile("fnclex");
-                return 0;
-        }
-        return 1;
-}
-
-static inline int __save_init_fpu(struct task_struct *tsk)
-{
-        return fpu_save_init(&tsk->thread.fpu);
-}
-
-static inline int fpu_fxrstor_checking(struct fpu *fpu)
-{
-        return fxrstor_checking(&fpu->state->fxsave);
-}
-
-static inline int fpu_restore_checking(struct fpu *fpu)
-{
-        if (use_xsave())
-                return fpu_xrstor_checking(fpu);
-        else
-                return fpu_fxrstor_checking(fpu);
-}
-
-static inline int restore_fpu_checking(struct task_struct *tsk)
-{
-        /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
-           is pending. Clear the x87 state here by setting it to fixed
-           values. "m" is a random variable that should be in L1 */
-        alternative_input(
-                ASM_NOP8 ASM_NOP2,
-                "emms\n\t"              /* clear stack tags */
-                "fildl %P[addr]",       /* set F?P to defined value */
-                X86_FEATURE_FXSAVE_LEAK,
-                [addr] "m" (tsk->thread.fpu.has_fpu));
-
-        return fpu_restore_checking(&tsk->thread.fpu);
-}
-
-/*
- * Software FPU state helpers. Careful: these need to
- * be preemption protection *and* they need to be
- * properly paired with the CR0.TS changes!
- */
-static inline int __thread_has_fpu(struct task_struct *tsk)
-{
-        return tsk->thread.fpu.has_fpu;
-}
-
-/* Must be paired with an 'stts' after! */
-static inline void __thread_clear_has_fpu(struct task_struct *tsk)
-{
-        tsk->thread.fpu.has_fpu = 0;
-        percpu_write(fpu_owner_task, NULL);
-}
-
-/* Must be paired with a 'clts' before! */
-static inline void __thread_set_has_fpu(struct task_struct *tsk)
-{
-        tsk->thread.fpu.has_fpu = 1;
-        percpu_write(fpu_owner_task, tsk);
-}
-
-/*
- * Encapsulate the CR0.TS handling together with the
- * software flag.
- *
- * These generally need preemption protection to work,
- * do try to avoid using these on their own.
- */
-static inline void __thread_fpu_end(struct task_struct *tsk)
-{
-        __thread_clear_has_fpu(tsk);
-        stts();
-}
-
-static inline void __thread_fpu_begin(struct task_struct *tsk)
-{
-        clts();
-        __thread_set_has_fpu(tsk);
-}
-
-/*
- * FPU state switching for scheduling.
- *
- * This is a two-stage process:
- *
- *  - switch_fpu_prepare() saves the old state and
- *    sets the new state of the CR0.TS bit. This is
- *    done within the context of the old process.
- *
- *  - switch_fpu_finish() restores the new state as
- *    necessary.
- */
-typedef struct { int preload; } fpu_switch_t;
-
-/*
- * FIXME! We could do a totally lazy restore, but we need to
- * add a per-cpu "this was the task that last touched the FPU
- * on this CPU" variable, and the task needs to have a "I last
- * touched the FPU on this CPU" and check them.
- *
- * We don't do that yet, so "fpu_lazy_restore()" always returns
- * false, but some day..
- */
-static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
-{
-        return new == percpu_read_stable(fpu_owner_task) &&
-                cpu == new->thread.fpu.last_cpu;
-}
-
-static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
-{
-        fpu_switch_t fpu;
-
-        fpu.preload = tsk_used_math(new) && new->fpu_counter > 5;
-        if (__thread_has_fpu(old)) {
-                if (!__save_init_fpu(old))
-                        cpu = ~0;
-                old->thread.fpu.last_cpu = cpu;
-                old->thread.fpu.has_fpu = 0;    /* But leave fpu_owner_task! */
-
-                /* Don't change CR0.TS if we just switch! */
-                if (fpu.preload) {
-                        new->fpu_counter++;
-                        __thread_set_has_fpu(new);
-                        prefetch(new->thread.fpu.state);
-                } else
-                        stts();
-        } else {
-                old->fpu_counter = 0;
-                old->thread.fpu.last_cpu = ~0;
-                if (fpu.preload) {
-                        new->fpu_counter++;
-                        if (fpu_lazy_restore(new, cpu))
-                                fpu.preload = 0;
-                        else
-                                prefetch(new->thread.fpu.state);
-                        __thread_fpu_begin(new);
-                }
-        }
-        return fpu;
-}
-
-/*
- * By the time this gets called, we've already cleared CR0.TS and
- * given the process the FPU if we are going to preload the FPU
- * state - all we need to do is to conditionally restore the register
- * state itself.
- */
-static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
-{
-        if (fpu.preload) {
-                if (unlikely(restore_fpu_checking(new)))
-                        __thread_fpu_end(new);
-        }
-}
-
-/*
- * Signal frame handlers...
- */
-extern int save_i387_xstate(void __user *buf);
-extern int restore_i387_xstate(void __user *buf);
-
-static inline void __clear_fpu(struct task_struct *tsk)
-{
-        if (__thread_has_fpu(tsk)) {
-                /* Ignore delayed exceptions from user space */
-                asm volatile("1: fwait\n"
-                             "2:\n"
-                             _ASM_EXTABLE(1b, 2b));
-                __thread_fpu_end(tsk);
-        }
-}
-
-/*
- * Were we in an interrupt that interrupted kernel mode?
- *
- * We can do a kernel_fpu_begin/end() pair *ONLY* if that
- * pair does nothing at all: the thread must not have fpu (so
- * that we don't try to save the FPU state), and TS must
- * be set (so that the clts/stts pair does nothing that is
- * visible in the interrupted kernel thread).
- */
-static inline bool interrupted_kernel_fpu_idle(void)
-{
-        return !__thread_has_fpu(current) &&
-                (read_cr0() & X86_CR0_TS);
-}
-
-/*
- * Were we in user mode (or vm86 mode) when we were
- * interrupted?
- *
- * Doing kernel_fpu_begin/end() is ok if we are running
- * in an interrupt context from user mode - we'll just
- * save the FPU state as required.
- */
-static inline bool interrupted_user_mode(void)
-{
-        struct pt_regs *regs = get_irq_regs();
-        return regs && user_mode_vm(regs);
-}
-
-/*
- * Can we use the FPU in kernel mode with the
- * whole "kernel_fpu_begin/end()" sequence?
- *
- * It's always ok in process context (ie "not interrupt")
- * but it is sometimes ok even from an irq.
- */
-static inline bool irq_fpu_usable(void)
-{
-        return !in_interrupt() ||
-                interrupted_user_mode() ||
-                interrupted_kernel_fpu_idle();
-}
-
-static inline void kernel_fpu_begin(void)
-{
-        struct task_struct *me = current;
-
-        WARN_ON_ONCE(!irq_fpu_usable());
-        preempt_disable();
-        if (__thread_has_fpu(me)) {
-                __save_init_fpu(me);
-                __thread_clear_has_fpu(me);
-                /* We do 'stts()' in kernel_fpu_end() */
-        } else {
-                percpu_write(fpu_owner_task, NULL);
-                clts();
-        }
-}
-
-static inline void kernel_fpu_end(void)
-{
-        stts();
-        preempt_enable();
-}
+extern bool irq_fpu_usable(void);
+extern void kernel_fpu_begin(void);
+extern void kernel_fpu_end(void);
 
 /*
  * Some instructions like VIA's padlock instructions generate a spurious
@@ -524,126 +67,13 @@ static inline void irq_ts_restore(int TS_state)
  * we can just assume we have FPU access - typically
  * to save the FP state - we'll just take a #NM
  * fault and get the FPU access back.
- *
- * The actual user_fpu_begin/end() functions
- * need to be preemption-safe, though.
- *
- * NOTE! user_fpu_end() must be used only after you
- * have saved the FP state, and user_fpu_begin() must
- * be used only immediately before restoring it.
- * These functions do not do any save/restore on
- * their own.
  */
 static inline int user_has_fpu(void)
 {
-        return __thread_has_fpu(current);
-}
-
-static inline void user_fpu_end(void)
-{
-        preempt_disable();
-        __thread_fpu_end(current);
-        preempt_enable();
-}
-
-static inline void user_fpu_begin(void)
-{
-        preempt_disable();
-        if (!user_has_fpu())
-                __thread_fpu_begin(current);
-        preempt_enable();
-}
-
-/*
- * These disable preemption on their own and are safe
- */
-static inline void save_init_fpu(struct task_struct *tsk)
-{
-        WARN_ON_ONCE(!__thread_has_fpu(tsk));
-        preempt_disable();
-        __save_init_fpu(tsk);
-        __thread_fpu_end(tsk);
-        preempt_enable();
-}
-
-static inline void unlazy_fpu(struct task_struct *tsk)
-{
-        preempt_disable();
-        if (__thread_has_fpu(tsk)) {
-                __save_init_fpu(tsk);
-                __thread_fpu_end(tsk);
-        } else
-                tsk->fpu_counter = 0;
-        preempt_enable();
-}
-
-static inline void clear_fpu(struct task_struct *tsk)
-{
-        preempt_disable();
-        __clear_fpu(tsk);
-        preempt_enable();
-}
-
-/*
- * i387 state interaction
- */
-static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
-{
-        if (cpu_has_fxsr) {
-                return tsk->thread.fpu.state->fxsave.cwd;
-        } else {
-                return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
-        }
-}
-
-static inline unsigned short get_fpu_swd(struct task_struct *tsk)
-{
-        if (cpu_has_fxsr) {
-                return tsk->thread.fpu.state->fxsave.swd;
-        } else {
-                return (unsigned short)tsk->thread.fpu.state->fsave.swd;
-        }
-}
-
-static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
-{
-        if (cpu_has_xmm) {
-                return tsk->thread.fpu.state->fxsave.mxcsr;
-        } else {
-                return MXCSR_DEFAULT;
-        }
-}
-
-static bool fpu_allocated(struct fpu *fpu)
-{
-        return fpu->state != NULL;
-}
-
-static inline int fpu_alloc(struct fpu *fpu)
-{
-        if (fpu_allocated(fpu))
-                return 0;
-        fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
-        if (!fpu->state)
-                return -ENOMEM;
-        WARN_ON((unsigned long)fpu->state & 15);
-        return 0;
-}
-
-static inline void fpu_free(struct fpu *fpu)
-{
-        if (fpu->state) {
-                kmem_cache_free(task_xstate_cachep, fpu->state);
-                fpu->state = NULL;
-        }
-}
-
-static inline void fpu_copy(struct fpu *dst, struct fpu *src)
-{
-        memcpy(dst->state, src->state, xstate_size);
+        return current->thread.fpu.has_fpu;
 }
 
-extern void fpu_finit(struct fpu *fpu);
+extern void unlazy_fpu(struct task_struct *tsk);
 
 #endif /* __ASSEMBLY__ */
 
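The three declarations this patch keeps exported from <asm/i387.h> (irq_fpu_usable(), kernel_fpu_begin(), kernel_fpu_end()) are the interface the rest of the kernel is expected to use when it touches FPU/SIMD registers; everything else moves out of this header. The sketch below is illustrative only and not part of the commit: simd_copy() and its memcpy() fallback are hypothetical, but the irq_fpu_usable() check and the begin/end bracketing show how these exported helpers are normally used.

#include <linux/types.h>
#include <linux/string.h>
#include <asm/i387.h>

/* Hypothetical helper: use SSE/AVX registers only when the FPU is safely usable. */
static void simd_copy(void *dst, const void *src, size_t len)
{
        if (!irq_fpu_usable()) {
                /* Interrupt context where FPU use is unsafe: fall back to plain memcpy(). */
                memcpy(dst, src, len);
                return;
        }

        kernel_fpu_begin();     /* saves any live task FPU state and disables preemption */
        /* ... SIMD register use would go here ... */
        kernel_fpu_end();       /* sets CR0.TS again and re-enables preemption */
}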