author    Linus Torvalds <torvalds@linux-foundation.org>  2010-05-18 11:58:16 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2010-05-18 11:58:16 -0400
commit    41d59102e146a4423a490b8eca68a5860af4fe1c (patch)
tree      739ed4113ccdaeb33d1723a6beab09c1e18d7048 /arch
parent    3e1dd193edefd2a806a0ba6cf0879cf1a95217da (diff)
parent    c9775b4cc522e5f1b40b1366a993f0f05f600f39 (diff)
Merge branch 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, fpu: Use static_cpu_has() to implement use_xsave()
  x86: Add new static_cpu_has() function using alternatives
  x86, fpu: Use the proper asm constraint in use_xsave()
  x86, fpu: Unbreak FPU emulation
  x86: Introduce 'struct fpu' and related API
  x86: Eliminate TS_XSAVE
  x86-32: Don't set ignore_fpu_irq in simd exception
  x86: Merge kernel_math_error() into math_error()
  x86: Merge simd_math_error() into math_error()
  x86-32: Rework cache flush denied handler

Fix trivial conflict in arch/x86/kernel/process.c
Diffstat (limited to 'arch')

 arch/x86/Kconfig.cpu               |   4
 arch/x86/include/asm/cpufeature.h  |  57
 arch/x86/include/asm/i387.h        | 129
 arch/x86/include/asm/processor.h   |   6
 arch/x86/include/asm/thread_info.h |   1
 arch/x86/include/asm/traps.h       |   2
 arch/x86/include/asm/xsave.h       |   7
 arch/x86/kernel/cpu/common.c       |   5
 arch/x86/kernel/entry_32.S         |  19
 arch/x86/kernel/i387.c             | 107
 arch/x86/kernel/irqinit.c          |   2
 arch/x86/kernel/process.c          |  20
 arch/x86/kernel/process_32.c       |   2
 arch/x86/kernel/process_64.c       |   2
 arch/x86/kernel/traps.c            | 173
 arch/x86/kernel/xsave.c            |   8
 arch/x86/math-emu/fpu_aux.c        |   6
 arch/x86/math-emu/fpu_entry.c      |   4
 arch/x86/math-emu/fpu_system.h     |   2
 19 files changed, 308 insertions, 248 deletions
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 918fbb1855cc..2ac9069890cd 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -338,6 +338,10 @@ config X86_F00F_BUG
 	def_bool y
 	depends on M586MMX || M586TSC || M586 || M486 || M386
 
+config X86_INVD_BUG
+	def_bool y
+	depends on M486 || M386
+
 config X86_WP_WORKS_OK
 	def_bool y
 	depends on !M386
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 630e623f61e0..dca9c545f44e 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -176,6 +176,7 @@
 
 #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
 
+#include <asm/asm.h>
 #include <linux/bitops.h>
 
 extern const char * const x86_cap_flags[NCAPINTS*32];
@@ -284,6 +285,62 @@ extern const char * const x86_power_flags[32];
 
 #endif /* CONFIG_X86_64 */
 
+/*
+ * Static testing of CPU features.  Used the same as boot_cpu_has().
+ * These are only valid after alternatives have run, but will statically
+ * patch the target code for additional performance.
+ *
+ */
+static __always_inline __pure bool __static_cpu_has(u8 bit)
+{
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
+	asm goto("1: jmp %l[t_no]\n"
+		 "2:\n"
+		 ".section .altinstructions,\"a\"\n"
+		 _ASM_ALIGN "\n"
+		 _ASM_PTR "1b\n"
+		 _ASM_PTR "0\n"		/* no replacement */
+		 " .byte %P0\n"		/* feature bit */
+		 " .byte 2b - 1b\n"	/* source len */
+		 " .byte 0\n"		/* replacement len */
+		 " .byte 0xff + 0 - (2b-1b)\n"	/* padding */
+		 ".previous\n"
+		 : : "i" (bit) : : t_no);
+	return true;
+t_no:
+	return false;
+#else
+	u8 flag;
+	/* Open-coded due to __stringify() in ALTERNATIVE() */
+	asm volatile("1: movb $0,%0\n"
+		     "2:\n"
+		     ".section .altinstructions,\"a\"\n"
+		     _ASM_ALIGN "\n"
+		     _ASM_PTR "1b\n"
+		     _ASM_PTR "3f\n"
+		     " .byte %P1\n"		/* feature bit */
+		     " .byte 2b - 1b\n"		/* source len */
+		     " .byte 4f - 3f\n"		/* replacement len */
+		     " .byte 0xff + (4f-3f) - (2b-1b)\n" /* padding */
+		     ".previous\n"
+		     ".section .altinstr_replacement,\"ax\"\n"
+		     "3: movb $1,%0\n"
+		     "4:\n"
+		     ".previous\n"
+		     : "=qm" (flag) : "i" (bit));
+	return flag;
+#endif
+}
+
+#define static_cpu_has(bit)					\
+(								\
+	__builtin_constant_p(boot_cpu_has(bit)) ?		\
+		boot_cpu_has(bit) :				\
+	(__builtin_constant_p(bit) && !((bit) & ~0xff)) ?	\
+		__static_cpu_has(bit) :				\
+		boot_cpu_has(bit)				\
+)
+
 #endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */
 
 #endif /* _ASM_X86_CPUFEATURE_H */
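A note on the dispatch in static_cpu_has() above: if boot_cpu_has() already folds to a compile-time constant, that constant wins; otherwise a constant feature bit below 256 goes through __static_cpu_has(), whose inline asm plants an entry in .altinstructions so the boot-time patcher can rewrite the test into a static jump; anything else falls back to the ordinary runtime bit test. A minimal usage sketch, assuming kernel context after alternatives have run (the wrapper name is hypothetical; the real user added by this merge is use_xsave() in asm/i387.h):

	/* Hypothetical wrapper, for illustration only.  Valid only after
	 * alternatives have run, exactly like boot_cpu_has(). */
	static __always_inline __pure bool cpu_does_xsave(void)
	{
		/* X86_FEATURE_XSAVE is a compile-time constant < 256, so this
		 * compiles down to a patched jump, not a runtime bit test. */
		return static_cpu_has(X86_FEATURE_XSAVE);
	}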
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index da2930924501..c991b3a7b904 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -16,7 +16,9 @@
 #include <linux/kernel_stat.h>
 #include <linux/regset.h>
 #include <linux/hardirq.h>
+#include <linux/slab.h>
 #include <asm/asm.h>
+#include <asm/cpufeature.h>
 #include <asm/processor.h>
 #include <asm/sigcontext.h>
 #include <asm/user.h>
@@ -56,6 +58,11 @@ extern int restore_i387_xstate_ia32(void __user *buf);
 
 #define X87_FSW_ES		(1 << 7)	/* Exception Summary */
 
+static __always_inline __pure bool use_xsave(void)
+{
+	return static_cpu_has(X86_FEATURE_XSAVE);
+}
+
 #ifdef CONFIG_X86_64
 
 /* Ignore delayed exceptions from user space */
@@ -91,15 +98,15 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
    values. The kernel data segment can be sometimes 0 and sometimes
    new user value. Both should be ok.
    Use the PDA as safe address because it should be already in L1. */
-static inline void clear_fpu_state(struct task_struct *tsk)
+static inline void fpu_clear(struct fpu *fpu)
 {
-	struct xsave_struct *xstate = &tsk->thread.xstate->xsave;
-	struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
+	struct xsave_struct *xstate = &fpu->state->xsave;
+	struct i387_fxsave_struct *fx = &fpu->state->fxsave;
 
 	/*
 	 * xsave header may indicate the init state of the FP.
 	 */
-	if ((task_thread_info(tsk)->status & TS_XSAVE) &&
+	if (use_xsave() &&
 	    !(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
 		return;
 
@@ -111,6 +118,11 @@ static inline void clear_fpu_state(struct task_struct *tsk)
 			 X86_FEATURE_FXSAVE_LEAK);
 }
 
+static inline void clear_fpu_state(struct task_struct *tsk)
+{
+	fpu_clear(&tsk->thread.fpu);
+}
+
 static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
 {
 	int err;
@@ -135,7 +147,7 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
 	return err;
 }
 
-static inline void fxsave(struct task_struct *tsk)
+static inline void fpu_fxsave(struct fpu *fpu)
 {
 	/* Using "rex64; fxsave %0" is broken because, if the memory operand
 	   uses any extended registers for addressing, a second REX prefix
@@ -145,42 +157,45 @@ static inline void fxsave(struct task_struct *tsk)
 	/* Using "fxsaveq %0" would be the ideal choice, but is only supported
 	   starting with gas 2.16. */
 	__asm__ __volatile__("fxsaveq %0"
-			     : "=m" (tsk->thread.xstate->fxsave));
+			     : "=m" (fpu->state->fxsave));
 #elif 0
 	/* Using, as a workaround, the properly prefixed form below isn't
 	   accepted by any binutils version so far released, complaining that
 	   the same type of prefix is used twice if an extended register is
 	   needed for addressing (fix submitted to mainline 2005-11-21). */
 	__asm__ __volatile__("rex64/fxsave %0"
-			     : "=m" (tsk->thread.xstate->fxsave));
+			     : "=m" (fpu->state->fxsave));
 #else
 	/* This, however, we can work around by forcing the compiler to select
 	   an addressing mode that doesn't require extended registers. */
 	__asm__ __volatile__("rex64/fxsave (%1)"
-			     : "=m" (tsk->thread.xstate->fxsave)
-			     : "cdaSDb" (&tsk->thread.xstate->fxsave));
+			     : "=m" (fpu->state->fxsave)
+			     : "cdaSDb" (&fpu->state->fxsave));
 #endif
 }
 
-static inline void __save_init_fpu(struct task_struct *tsk)
+static inline void fpu_save_init(struct fpu *fpu)
 {
-	if (task_thread_info(tsk)->status & TS_XSAVE)
-		xsave(tsk);
+	if (use_xsave())
+		fpu_xsave(fpu);
 	else
-		fxsave(tsk);
+		fpu_fxsave(fpu);
+
+	fpu_clear(fpu);
+}
 
-	clear_fpu_state(tsk);
+static inline void __save_init_fpu(struct task_struct *tsk)
+{
+	fpu_save_init(&tsk->thread.fpu);
 	task_thread_info(tsk)->status &= ~TS_USEDFPU;
 }
 
 #else  /* CONFIG_X86_32 */
 
 #ifdef CONFIG_MATH_EMULATION
-extern void finit_task(struct task_struct *tsk);
+extern void finit_soft_fpu(struct i387_soft_struct *soft);
 #else
-static inline void finit_task(struct task_struct *tsk)
-{
-}
+static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
 #endif
 
 static inline void tolerant_fwait(void)
@@ -216,13 +231,13 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
 /*
  * These must be called with preempt disabled
  */
-static inline void __save_init_fpu(struct task_struct *tsk)
+static inline void fpu_save_init(struct fpu *fpu)
 {
-	if (task_thread_info(tsk)->status & TS_XSAVE) {
-		struct xsave_struct *xstate = &tsk->thread.xstate->xsave;
-		struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
+	if (use_xsave()) {
+		struct xsave_struct *xstate = &fpu->state->xsave;
+		struct i387_fxsave_struct *fx = &fpu->state->fxsave;
 
-		xsave(tsk);
+		fpu_xsave(fpu);
 
 		/*
 		 * xsave header may indicate the init state of the FP.
@@ -246,8 +261,8 @@ static inline void __save_init_fpu(struct task_struct *tsk)
 		"fxsave %[fx]\n"
 		"bt $7,%[fsw] ; jnc 1f ; fnclex\n1:",
 		X86_FEATURE_FXSR,
-		[fx] "m" (tsk->thread.xstate->fxsave),
-		[fsw] "m" (tsk->thread.xstate->fxsave.swd) : "memory");
+		[fx] "m" (fpu->state->fxsave),
+		[fsw] "m" (fpu->state->fxsave.swd) : "memory");
 clear_state:
 	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
 	   is pending.  Clear the x87 state here by setting it to fixed
@@ -259,17 +274,34 @@ clear_state:
 		X86_FEATURE_FXSAVE_LEAK,
 		[addr] "m" (safe_address));
 end:
+	;
+}
+
+static inline void __save_init_fpu(struct task_struct *tsk)
+{
+	fpu_save_init(&tsk->thread.fpu);
 	task_thread_info(tsk)->status &= ~TS_USEDFPU;
 }
 
+
 #endif	/* CONFIG_X86_64 */
 
-static inline int restore_fpu_checking(struct task_struct *tsk)
+static inline int fpu_fxrstor_checking(struct fpu *fpu)
 {
-	if (task_thread_info(tsk)->status & TS_XSAVE)
-		return xrstor_checking(&tsk->thread.xstate->xsave);
+	return fxrstor_checking(&fpu->state->fxsave);
+}
+
+static inline int fpu_restore_checking(struct fpu *fpu)
+{
+	if (use_xsave())
+		return fpu_xrstor_checking(fpu);
 	else
-		return fxrstor_checking(&tsk->thread.xstate->fxsave);
+		return fpu_fxrstor_checking(fpu);
+}
+
+static inline int restore_fpu_checking(struct task_struct *tsk)
+{
+	return fpu_restore_checking(&tsk->thread.fpu);
 }
 
 /*
@@ -397,30 +429,59 @@ static inline void clear_fpu(struct task_struct *tsk)
 static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
 {
 	if (cpu_has_fxsr) {
-		return tsk->thread.xstate->fxsave.cwd;
+		return tsk->thread.fpu.state->fxsave.cwd;
 	} else {
-		return (unsigned short)tsk->thread.xstate->fsave.cwd;
+		return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
 	}
 }
 
 static inline unsigned short get_fpu_swd(struct task_struct *tsk)
 {
 	if (cpu_has_fxsr) {
-		return tsk->thread.xstate->fxsave.swd;
+		return tsk->thread.fpu.state->fxsave.swd;
 	} else {
-		return (unsigned short)tsk->thread.xstate->fsave.swd;
+		return (unsigned short)tsk->thread.fpu.state->fsave.swd;
 	}
 }
 
 static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
 {
 	if (cpu_has_xmm) {
-		return tsk->thread.xstate->fxsave.mxcsr;
+		return tsk->thread.fpu.state->fxsave.mxcsr;
 	} else {
 		return MXCSR_DEFAULT;
 	}
 }
 
+static bool fpu_allocated(struct fpu *fpu)
+{
+	return fpu->state != NULL;
+}
+
+static inline int fpu_alloc(struct fpu *fpu)
+{
+	if (fpu_allocated(fpu))
+		return 0;
+	fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
+	if (!fpu->state)
+		return -ENOMEM;
+	WARN_ON((unsigned long)fpu->state & 15);
+	return 0;
+}
+
+static inline void fpu_free(struct fpu *fpu)
+{
+	if (fpu->state) {
+		kmem_cache_free(task_xstate_cachep, fpu->state);
+		fpu->state = NULL;
+	}
+}
+
+static inline void fpu_copy(struct fpu *dst, struct fpu *src)
+{
+	memcpy(dst->state, src->state, xstate_size);
+}
+
 #endif /* __ASSEMBLY__ */
 
 #define PSHUFB_XMM5_XMM0	.byte	0x66, 0x0f, 0x38, 0x00, 0xc5
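The fpu_allocated()/fpu_alloc()/fpu_free()/fpu_copy() helpers added above give 'struct fpu' a small allocate-copy-free lifecycle. A minimal sketch of how they compose, in kernel context (the function name is hypothetical; the real caller of this exact pattern is arch_dup_task_struct() in the arch/x86/kernel/process.c hunk further down):

	/* Hypothetical illustration of the intended fpu_* call sequence. */
	static int fpu_dup_sketch(struct fpu *dst, struct fpu *src)
	{
		int ret;

		if (!fpu_allocated(src))
			return 0;		/* nothing to duplicate */
		ret = fpu_alloc(dst);		/* lazy kmem_cache allocation */
		if (ret)
			return ret;		/* -ENOMEM */
		fpu_copy(dst, src);		/* memcpy of xstate_size bytes */
		return 0;			/* later balanced by fpu_free(dst) */
	}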
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index c71a12d960d4..5a51379dcbe4 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -376,6 +376,10 @@ union thread_xstate {
 	struct xsave_struct		xsave;
 };
 
+struct fpu {
+	union thread_xstate *state;
+};
+
 #ifdef CONFIG_X86_64
 DECLARE_PER_CPU(struct orig_ist, orig_ist);
 
@@ -453,7 +457,7 @@ struct thread_struct {
 	unsigned long		trap_no;
 	unsigned long		error_code;
 	/* floating point and extended processor state */
-	union thread_xstate	*xstate;
+	struct fpu		fpu;
 #ifdef CONFIG_X86_32
 	/* Virtual 86 mode info */
 	struct vm86_struct __user *vm86_info;
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index d017ed5502e2..d4092fac226b 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -242,7 +242,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TS_POLLING		0x0004	/* true if in idle loop
 					   and not sleeping */
 #define TS_RESTORE_SIGMASK	0x0008	/* restore signal mask in do_signal() */
-#define TS_XSAVE		0x0010	/* Use xsave/xrstor */
 
 #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
 
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 4da91ad69e0d..f66cda56781d 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -79,7 +79,7 @@ static inline int get_si_code(unsigned long condition)
 
 extern int panic_on_unrecovered_nmi;
 
-void math_error(void __user *);
+void math_error(struct pt_regs *, int, int);
 void math_emulate(struct math_emu_info *);
 #ifndef CONFIG_X86_32
 asmlinkage void smp_thermal_interrupt(void);
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index ddc04ccad03b..2c4390cae228 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -37,8 +37,9 @@ extern int check_for_xstate(struct i387_fxsave_struct __user *buf,
 			    void __user *fpstate,
 			    struct _fpx_sw_bytes *sw);
 
-static inline int xrstor_checking(struct xsave_struct *fx)
+static inline int fpu_xrstor_checking(struct fpu *fpu)
 {
+	struct xsave_struct *fx = &fpu->state->xsave;
 	int err;
 
 	asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
@@ -110,12 +111,12 @@ static inline void xrstor_state(struct xsave_struct *fx, u64 mask)
 		     :   "memory");
 }
 
-static inline void xsave(struct task_struct *tsk)
+static inline void fpu_xsave(struct fpu *fpu)
 {
 	/* This, however, we can work around by forcing the compiler to select
 	   an addressing mode that doesn't require extended registers. */
 	__asm__ __volatile__(".byte " REX_PREFIX "0x0f,0xae,0x27"
-			     : : "D" (&(tsk->thread.xstate->xsave)),
+			     : : "D" (&(fpu->state->xsave)),
 				 "a" (-1), "d"(-1) :  "memory");
 }
 #endif
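The hand-assembled byte sequence in fpu_xsave() encodes the xsave instruction (opcode 0x0f 0xae /4) with its buffer pinned in %rdi/%edi via the "D" constraint, since the binutils of the day may not know the mnemonic; the edx:eax pair of -1/-1 requests every state component. A sketch of the equivalent, assuming an assembler new enough to accept the mnemonic (not what the patch emits):

	/* Illustration only, under the stated assembler assumption;
	 * semantically equivalent to fpu_xsave() above. */
	static inline void fpu_xsave_sketch(struct fpu *fpu)
	{
		__asm__ __volatile__("xsave %0"
				     : "=m" (fpu->state->xsave)
				     : "a" (-1), "d" (-1)	/* save-all mask */
				     : "memory");
	}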
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 4868e4a951ee..c1c00d0b1692 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1243,10 +1243,7 @@ void __cpuinit cpu_init(void)
 	/*
 	 * Force FPU initialization:
 	 */
-	if (cpu_has_xsave)
-		current_thread_info()->status = TS_XSAVE;
-	else
-		current_thread_info()->status = 0;
+	current_thread_info()->status = 0;
 	clear_used_math();
 	mxcsr_feature_mask_init();
 
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 44a8e0dc6737..cd49141cf153 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -53,6 +53,7 @@
 #include <asm/processor-flags.h>
 #include <asm/ftrace.h>
 #include <asm/irq_vectors.h>
+#include <asm/cpufeature.h>
 
 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
 #include <linux/elf-em.h>
@@ -905,7 +906,25 @@ ENTRY(simd_coprocessor_error)
 	RING0_INT_FRAME
 	pushl $0
 	CFI_ADJUST_CFA_OFFSET 4
+#ifdef CONFIG_X86_INVD_BUG
+	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
+661:	pushl $do_general_protection
+662:
+.section .altinstructions,"a"
+	.balign 4
+	.long 661b
+	.long 663f
+	.byte X86_FEATURE_XMM
+	.byte 662b-661b
+	.byte 664f-663f
+.previous
+.section .altinstr_replacement,"ax"
+663:	pushl $do_simd_coprocessor_error
+664:
+.previous
+#else
 	pushl $do_simd_coprocessor_error
+#endif
 	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
 	CFI_ENDPROC
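The new CONFIG_X86_INVD_BUG stub above defaults vector 19 to do_general_protection and lets the boot-time alternatives patcher substitute do_simd_coprocessor_error once X86_FEATURE_XMM is confirmed; on XMM-less AMD 486 parts, a user-space invd erroneously raises exception 19, so it must be treated as a #GP. Rendered as C purely for illustration (the real handlers take pt_regs and an error code, and no runtime branch actually exists, the patcher bakes the choice in):

	/* Illustration only; simplified prototypes. */
	typedef void (*vec19_handler_t)(void);

	static vec19_handler_t pick_vector19_handler(bool has_xmm)
	{
		extern void do_simd_coprocessor_error(void);	/* real SIMD #XF */
		extern void do_general_protection(void);	/* invd-erratum fallback */

		return has_xmm ? do_simd_coprocessor_error : do_general_protection;
	}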
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 54c31c285488..86cef6b32253 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -102,65 +102,62 @@ void __cpuinit fpu_init(void)
 
 	mxcsr_feature_mask_init();
 	/* clean state in init */
-	if (cpu_has_xsave)
-		current_thread_info()->status = TS_XSAVE;
-	else
-		current_thread_info()->status = 0;
+	current_thread_info()->status = 0;
 	clear_used_math();
 }
 #endif	/* CONFIG_X86_64 */
 
-/*
- * The _current_ task is using the FPU for the first time
- * so initialize it and set the mxcsr to its default
- * value at reset if we support XMM instructions and then
- * remeber the current task has used the FPU.
- */
-int init_fpu(struct task_struct *tsk)
+static void fpu_finit(struct fpu *fpu)
 {
-	if (tsk_used_math(tsk)) {
-		if (HAVE_HWFP && tsk == current)
-			unlazy_fpu(tsk);
-		return 0;
-	}
-
-	/*
-	 * Memory allocation at the first usage of the FPU and other state.
-	 */
-	if (!tsk->thread.xstate) {
-		tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
-						      GFP_KERNEL);
-		if (!tsk->thread.xstate)
-			return -ENOMEM;
-	}
-
 #ifdef CONFIG_X86_32
 	if (!HAVE_HWFP) {
-		memset(tsk->thread.xstate, 0, xstate_size);
-		finit_task(tsk);
-		set_stopped_child_used_math(tsk);
-		return 0;
+		finit_soft_fpu(&fpu->state->soft);
+		return;
 	}
 #endif
 
 	if (cpu_has_fxsr) {
-		struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
+		struct i387_fxsave_struct *fx = &fpu->state->fxsave;
 
 		memset(fx, 0, xstate_size);
 		fx->cwd = 0x37f;
 		if (cpu_has_xmm)
 			fx->mxcsr = MXCSR_DEFAULT;
 	} else {
-		struct i387_fsave_struct *fp = &tsk->thread.xstate->fsave;
+		struct i387_fsave_struct *fp = &fpu->state->fsave;
 		memset(fp, 0, xstate_size);
 		fp->cwd = 0xffff037fu;
 		fp->swd = 0xffff0000u;
 		fp->twd = 0xffffffffu;
 		fp->fos = 0xffff0000u;
 	}
+}
+
+/*
+ * The _current_ task is using the FPU for the first time
+ * so initialize it and set the mxcsr to its default
+ * value at reset if we support XMM instructions and then
+ * remeber the current task has used the FPU.
+ */
+int init_fpu(struct task_struct *tsk)
+{
+	int ret;
+
+	if (tsk_used_math(tsk)) {
+		if (HAVE_HWFP && tsk == current)
+			unlazy_fpu(tsk);
+		return 0;
+	}
+
 	/*
-	 * Only the device not available exception or ptrace can call init_fpu.
+	 * Memory allocation at the first usage of the FPU and other state.
 	 */
+	ret = fpu_alloc(&tsk->thread.fpu);
+	if (ret)
+		return ret;
+
+	fpu_finit(&tsk->thread.fpu);
+
 	set_stopped_child_used_math(tsk);
 	return 0;
 }
@@ -194,7 +191,7 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
 		return ret;
 
 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-				   &target->thread.xstate->fxsave, 0, -1);
+				   &target->thread.fpu.state->fxsave, 0, -1);
 }
 
 int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
@@ -211,19 +208,19 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
 		return ret;
 
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-				 &target->thread.xstate->fxsave, 0, -1);
+				 &target->thread.fpu.state->fxsave, 0, -1);
 
 	/*
 	 * mxcsr reserved bits must be masked to zero for security reasons.
 	 */
-	target->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask;
+	target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;
 
 	/*
 	 * update the header bits in the xsave header, indicating the
 	 * presence of FP and SSE state.
 	 */
 	if (cpu_has_xsave)
-		target->thread.xstate->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;
+		target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;
 
 	return ret;
 }
@@ -246,14 +243,14 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
 	 * memory layout in the thread struct, so that we can copy the entire
 	 * xstateregs to the user using one user_regset_copyout().
 	 */
-	memcpy(&target->thread.xstate->fxsave.sw_reserved,
+	memcpy(&target->thread.fpu.state->fxsave.sw_reserved,
 	       xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));
 
 	/*
 	 * Copy the xstate memory layout.
 	 */
 	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-				  &target->thread.xstate->xsave, 0, -1);
+				  &target->thread.fpu.state->xsave, 0, -1);
 	return ret;
 }
 
@@ -272,14 +269,14 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
 		return ret;
 
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-				 &target->thread.xstate->xsave, 0, -1);
+				 &target->thread.fpu.state->xsave, 0, -1);
 
 	/*
 	 * mxcsr reserved bits must be masked to zero for security reasons.
 	 */
-	target->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask;
+	target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;
 
-	xsave_hdr = &target->thread.xstate->xsave.xsave_hdr;
+	xsave_hdr = &target->thread.fpu.state->xsave.xsave_hdr;
 
 	xsave_hdr->xstate_bv &= pcntxt_mask;
 	/*
@@ -365,7 +362,7 @@ static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
 static void
 convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
 {
-	struct i387_fxsave_struct *fxsave = &tsk->thread.xstate->fxsave;
+	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
 	struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
 	struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
 	int i;
@@ -405,7 +402,7 @@ static void convert_to_fxsr(struct task_struct *tsk,
 			    const struct user_i387_ia32_struct *env)
 
 {
-	struct i387_fxsave_struct *fxsave = &tsk->thread.xstate->fxsave;
+	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
 	struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
 	struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
 	int i;
@@ -445,7 +442,7 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
 
 	if (!cpu_has_fxsr) {
 		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-					   &target->thread.xstate->fsave, 0,
+					   &target->thread.fpu.state->fsave, 0,
 					   -1);
 	}
 
@@ -475,7 +472,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
 
 	if (!cpu_has_fxsr) {
 		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-					  &target->thread.xstate->fsave, 0, -1);
+					  &target->thread.fpu.state->fsave, 0, -1);
 	}
 
 	if (pos > 0 || count < sizeof(env))
@@ -490,7 +487,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
 	 * presence of FP.
 	 */
 	if (cpu_has_xsave)
-		target->thread.xstate->xsave.xsave_hdr.xstate_bv |= XSTATE_FP;
+		target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FP;
 	return ret;
 }
 
@@ -501,7 +498,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
 static inline int save_i387_fsave(struct _fpstate_ia32 __user *buf)
 {
 	struct task_struct *tsk = current;
-	struct i387_fsave_struct *fp = &tsk->thread.xstate->fsave;
+	struct i387_fsave_struct *fp = &tsk->thread.fpu.state->fsave;
 
 	fp->status = fp->swd;
 	if (__copy_to_user(buf, fp, sizeof(struct i387_fsave_struct)))
@@ -512,7 +509,7 @@ static inline int save_i387_fsave(struct _fpstate_ia32 __user *buf)
 static int save_i387_fxsave(struct _fpstate_ia32 __user *buf)
 {
 	struct task_struct *tsk = current;
-	struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
+	struct i387_fxsave_struct *fx = &tsk->thread.fpu.state->fxsave;
 	struct user_i387_ia32_struct env;
 	int err = 0;
 
@@ -547,7 +544,7 @@ static int save_i387_xsave(void __user *buf)
 	 * header as well as change any contents in the memory layout.
 	 * xrestore as part of sigreturn will capture all the changes.
 	 */
-	tsk->thread.xstate->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;
+	tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;
 
 	if (save_i387_fxsave(fx) < 0)
 		return -1;
@@ -599,7 +596,7 @@ static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf)
 {
 	struct task_struct *tsk = current;
 
-	return __copy_from_user(&tsk->thread.xstate->fsave, buf,
+	return __copy_from_user(&tsk->thread.fpu.state->fsave, buf,
 				sizeof(struct i387_fsave_struct));
 }
 
@@ -610,10 +607,10 @@ static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf,
 	struct user_i387_ia32_struct env;
 	int err;
 
-	err = __copy_from_user(&tsk->thread.xstate->fxsave, &buf->_fxsr_env[0],
+	err = __copy_from_user(&tsk->thread.fpu.state->fxsave, &buf->_fxsr_env[0],
 			       size);
 	/* mxcsr reserved bits must be masked to zero for security reasons */
-	tsk->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask;
+	tsk->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;
 	if (err || __copy_from_user(&env, buf, sizeof(env)))
 		return 1;
 	convert_to_fxsr(tsk, &env);
@@ -629,7 +626,7 @@ static int restore_i387_xsave(void __user *buf)
 	struct i387_fxsave_struct __user *fx =
 		(struct i387_fxsave_struct __user *) &fx_user->_fxsr_env[0];
 	struct xsave_hdr_struct *xsave_hdr =
-		&current->thread.xstate->xsave.xsave_hdr;
+		&current->thread.fpu.state->xsave.xsave_hdr;
 	u64 mask;
 	int err;
 
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 0ed2d300cd46..990ae7cfc578 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -60,7 +60,7 @@ static irqreturn_t math_error_irq(int cpl, void *dev_id)
 	outb(0, 0xF0);
 	if (ignore_fpu_irq || !boot_cpu_data.hard_math)
 		return IRQ_NONE;
-	math_error((void __user *)get_irq_regs()->ip);
+	math_error(get_irq_regs(), 0, 16);
 	return IRQ_HANDLED;
 }
 
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index cc6877535ef4..e7e35219b32f 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -31,24 +31,22 @@ struct kmem_cache *task_xstate_cachep;
 
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
+	int ret;
+
 	*dst = *src;
-	if (src->thread.xstate) {
-		dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
-						      GFP_KERNEL);
-		if (!dst->thread.xstate)
-			return -ENOMEM;
-		WARN_ON((unsigned long)dst->thread.xstate & 15);
-		memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
+	if (fpu_allocated(&src->thread.fpu)) {
+		memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu));
+		ret = fpu_alloc(&dst->thread.fpu);
+		if (ret)
+			return ret;
+		fpu_copy(&dst->thread.fpu, &src->thread.fpu);
 	}
 	return 0;
 }
 
 void free_thread_xstate(struct task_struct *tsk)
 {
-	if (tsk->thread.xstate) {
-		kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
-		tsk->thread.xstate = NULL;
-	}
+	fpu_free(&tsk->thread.fpu);
 }
 
 void free_thread_info(struct thread_info *ti)
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 75090c589b7a..8d128783af47 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -309,7 +309,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
 	/* we're going to use this soon, after a few expensive things */
 	if (preload_fpu)
-		prefetch(next->xstate);
+		prefetch(next->fpu.state);
 
 	/*
 	 * Reload esp0.
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 50cc84ac0a0d..3c2422a99f1f 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -388,7 +388,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
 	/* we're going to use this soon, after a few expensive things */
 	if (preload_fpu)
-		prefetch(next->xstate);
+		prefetch(next->fpu.state);
 
 	/*
 	 * Reload esp0, LDT and the page table pointer:
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 36f1bd9f8e76..02cfb9b8f5b1 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -108,15 +108,6 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
 	dec_preempt_count();
 }
 
-#ifdef CONFIG_X86_32
-static inline void
-die_if_kernel(const char *str, struct pt_regs *regs, long err)
-{
-	if (!user_mode_vm(regs))
-		die(str, regs, err);
-}
-#endif
-
 static void __kprobes
 do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
 	long error_code, siginfo_t *info)
@@ -585,55 +576,67 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
 	return;
 }
 
-#ifdef CONFIG_X86_64
-static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
-{
-	if (fixup_exception(regs))
-		return 1;
-
-	notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
-	/* Illegal floating point operation in the kernel */
-	current->thread.trap_no = trapnr;
-	die(str, regs, 0);
-	return 0;
-}
-#endif
-
 /*
  * Note that we play around with the 'TS' bit in an attempt to get
  * the correct behaviour even in the presence of the asynchronous
  * IRQ13 behaviour
  */
-void math_error(void __user *ip)
+void math_error(struct pt_regs *regs, int error_code, int trapnr)
 {
-	struct task_struct *task;
+	struct task_struct *task = current;
 	siginfo_t info;
-	unsigned short cwd, swd, err;
+	unsigned short err;
+	char *str = (trapnr == 16) ? "fpu exception" : "simd exception";
+
+	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
+		return;
+	conditional_sti(regs);
+
+	if (!user_mode_vm(regs))
+	{
+		if (!fixup_exception(regs)) {
+			task->thread.error_code = error_code;
+			task->thread.trap_no = trapnr;
+			die(str, regs, error_code);
+		}
+		return;
+	}
 
 	/*
 	 * Save the info for the exception handler and clear the error.
 	 */
-	task = current;
 	save_init_fpu(task);
-	task->thread.trap_no = 16;
-	task->thread.error_code = 0;
+	task->thread.trap_no = trapnr;
+	task->thread.error_code = error_code;
 	info.si_signo = SIGFPE;
 	info.si_errno = 0;
-	info.si_addr = ip;
-	/*
-	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
-	 * status.  0x3f is the exception bits in these regs, 0x200 is the
-	 * C1 reg you need in case of a stack fault, 0x040 is the stack
-	 * fault bit.  We should only be taking one exception at a time,
-	 * so if this combination doesn't produce any single exception,
-	 * then we have a bad program that isn't synchronizing its FPU usage
-	 * and it will suffer the consequences since we won't be able to
-	 * fully reproduce the context of the exception
-	 */
-	cwd = get_fpu_cwd(task);
-	swd = get_fpu_swd(task);
+	info.si_addr = (void __user *)regs->ip;
+	if (trapnr == 16) {
+		unsigned short cwd, swd;
+		/*
+		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
+		 * status.  0x3f is the exception bits in these regs, 0x200 is the
+		 * C1 reg you need in case of a stack fault, 0x040 is the stack
+		 * fault bit.  We should only be taking one exception at a time,
+		 * so if this combination doesn't produce any single exception,
+		 * then we have a bad program that isn't synchronizing its FPU usage
+		 * and it will suffer the consequences since we won't be able to
+		 * fully reproduce the context of the exception
+		 */
+		cwd = get_fpu_cwd(task);
+		swd = get_fpu_swd(task);
 
-	err = swd & ~cwd;
+		err = swd & ~cwd;
+	} else {
+		/*
+		 * The SIMD FPU exceptions are handled a little differently, as there
+		 * is only a single status/control register.  Thus, to determine which
+		 * unmasked exception was caught we must mask the exception mask bits
+		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
+		 */
+		unsigned short mxcsr = get_fpu_mxcsr(task);
+		err = ~(mxcsr >> 7) & mxcsr;
+	}
 
 	if (err & 0x001) {	/* Invalid op */
 		/*
@@ -662,97 +665,17 @@ void math_error(void __user *ip)
 
 dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
 {
-	conditional_sti(regs);
-
 #ifdef CONFIG_X86_32
 	ignore_fpu_irq = 1;
-#else
-	if (!user_mode(regs) &&
-	    kernel_math_error(regs, "kernel x87 math error", 16))
-		return;
 #endif
 
-	math_error((void __user *)regs->ip);
-}
-
-static void simd_math_error(void __user *ip)
-{
-	struct task_struct *task;
-	siginfo_t info;
-	unsigned short mxcsr;
-
-	/*
-	 * Save the info for the exception handler and clear the error.
-	 */
-	task = current;
-	save_init_fpu(task);
-	task->thread.trap_no = 19;
-	task->thread.error_code = 0;
-	info.si_signo = SIGFPE;
-	info.si_errno = 0;
-	info.si_code = __SI_FAULT;
-	info.si_addr = ip;
-	/*
-	 * The SIMD FPU exceptions are handled a little differently, as there
-	 * is only a single status/control register.  Thus, to determine which
-	 * unmasked exception was caught we must mask the exception mask bits
-	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
-	 */
-	mxcsr = get_fpu_mxcsr(task);
-	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
-	case 0x000:
-	default:
-		break;
-	case 0x001: /* Invalid Op */
-		info.si_code = FPE_FLTINV;
-		break;
-	case 0x002: /* Denormalize */
-	case 0x010: /* Underflow */
-		info.si_code = FPE_FLTUND;
-		break;
-	case 0x004: /* Zero Divide */
-		info.si_code = FPE_FLTDIV;
-		break;
-	case 0x008: /* Overflow */
-		info.si_code = FPE_FLTOVF;
-		break;
-	case 0x020: /* Precision */
-		info.si_code = FPE_FLTRES;
-		break;
-	}
-	force_sig_info(SIGFPE, &info, task);
+	math_error(regs, error_code, 16);
 }
 
 dotraplinkage void
 do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
 {
-	conditional_sti(regs);
-
-#ifdef CONFIG_X86_32
-	if (cpu_has_xmm) {
-		/* Handle SIMD FPU exceptions on PIII+ processors. */
-		ignore_fpu_irq = 1;
-		simd_math_error((void __user *)regs->ip);
-		return;
-	}
-	/*
-	 * Handle strange cache flush from user space exception
-	 * in all other cases.  This is undocumented behaviour.
-	 */
-	if (regs->flags & X86_VM_MASK) {
-		handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
-		return;
-	}
-	current->thread.trap_no = 19;
-	current->thread.error_code = error_code;
-	die_if_kernel("cache flush denied", regs, error_code);
-	force_sig(SIGSEGV, current);
-#else
-	if (!user_mode(regs) &&
-	    kernel_math_error(regs, "kernel simd math error", 19))
-		return;
-	simd_math_error((void __user *)regs->ip);
-#endif
+	math_error(regs, error_code, 19);
 }
 
 dotraplinkage void
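The SIMD branch of the merged math_error() packs the whole MXCSR decode into one expression. MXCSR keeps the exception flags in bits 0-5 and the matching mask bits in bits 7-12, so shifting the masks right by 7 lines them up with the flags, and a flag survives only when its mask bit is clear. A self-contained user-space sketch of that arithmetic (values picked by hand; the explicit & 0x3f is added here for clarity, where the kernel instead relies on the individual bit tests that follow):

	#include <stdio.h>

	/* Same derivation as the trapnr != 16 branch of math_error(). */
	static unsigned short simd_err_bits(unsigned short mxcsr)
	{
		return ~(mxcsr >> 7) & mxcsr & 0x3f;
	}

	int main(void)
	{
		/* ZE (bit 2) set, ZM (bit 9) clear: unmasked divide-by-zero */
		printf("%#x\n", simd_err_bits(0x0004));	/* 0x4 -> FPE_FLTDIV */
		/* ZE set but ZM set too: the exception is masked, err is 0 */
		printf("%#x\n", simd_err_bits(0x0204));	/* 0 */
		return 0;
	}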
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 782c3a362ec6..37e68fc5e24a 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -99,7 +99,7 @@ int save_i387_xstate(void __user *buf)
 	if (err)
 		return err;
 
-	if (task_thread_info(tsk)->status & TS_XSAVE)
+	if (use_xsave())
 		err = xsave_user(buf);
 	else
 		err = fxsave_user(buf);
@@ -109,14 +109,14 @@ int save_i387_xstate(void __user *buf)
 		task_thread_info(tsk)->status &= ~TS_USEDFPU;
 		stts();
 	} else {
-		if (__copy_to_user(buf, &tsk->thread.xstate->fxsave,
+		if (__copy_to_user(buf, &tsk->thread.fpu.state->fxsave,
 				   xstate_size))
 			return -1;
 	}
 
 	clear_used_math(); /* trigger finit */
 
-	if (task_thread_info(tsk)->status & TS_XSAVE) {
+	if (use_xsave()) {
 		struct _fpstate __user *fx = buf;
 		struct _xstate __user *x = buf;
 		u64 xstate_bv;
@@ -225,7 +225,7 @@ int restore_i387_xstate(void __user *buf)
 		clts();
 		task_thread_info(current)->status |= TS_USEDFPU;
 	}
-	if (task_thread_info(tsk)->status & TS_XSAVE)
+	if (use_xsave())
 		err = restore_user_xstate(buf);
 	else
 		err = fxrstor_checking((__force struct i387_fxsave_struct *)
diff --git a/arch/x86/math-emu/fpu_aux.c b/arch/x86/math-emu/fpu_aux.c
index aa0987088774..dc8adad10a2f 100644
--- a/arch/x86/math-emu/fpu_aux.c
+++ b/arch/x86/math-emu/fpu_aux.c
@@ -30,10 +30,10 @@ static void fclex(void)
 }
 
 /* Needs to be externally visible */
-void finit_task(struct task_struct *tsk)
+void finit_soft_fpu(struct i387_soft_struct *soft)
 {
-	struct i387_soft_struct *soft = &tsk->thread.xstate->soft;
 	struct address *oaddr, *iaddr;
+	memset(soft, 0, sizeof(*soft));
 	soft->cwd = 0x037f;
 	soft->swd = 0;
 	soft->ftop = 0;	/* We don't keep top in the status word internally. */
@@ -52,7 +52,7 @@ void finit_task(struct task_struct *tsk)
 
 void finit(void)
 {
-	finit_task(current);
+	finit_soft_fpu(&current->thread.fpu.state->soft);
 }
 
 /*
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
index 5d87f586f8d7..7718541541d4 100644
--- a/arch/x86/math-emu/fpu_entry.c
+++ b/arch/x86/math-emu/fpu_entry.c
@@ -681,7 +681,7 @@ int fpregs_soft_set(struct task_struct *target,
 			unsigned int pos, unsigned int count,
 			const void *kbuf, const void __user *ubuf)
 {
-	struct i387_soft_struct *s387 = &target->thread.xstate->soft;
+	struct i387_soft_struct *s387 = &target->thread.fpu.state->soft;
 	void *space = s387->st_space;
 	int ret;
 	int offset, other, i, tags, regnr, tag, newtop;
@@ -733,7 +733,7 @@ int fpregs_soft_get(struct task_struct *target,
 			unsigned int pos, unsigned int count,
 			void *kbuf, void __user *ubuf)
 {
-	struct i387_soft_struct *s387 = &target->thread.xstate->soft;
+	struct i387_soft_struct *s387 = &target->thread.fpu.state->soft;
 	const void *space = s387->st_space;
 	int ret;
 	int offset = (S387->ftop & 7) * 10, other = 80 - offset;
diff --git a/arch/x86/math-emu/fpu_system.h b/arch/x86/math-emu/fpu_system.h
index 50fa0ec2c8a5..2c614410a5f3 100644
--- a/arch/x86/math-emu/fpu_system.h
+++ b/arch/x86/math-emu/fpu_system.h
@@ -31,7 +31,7 @@
 #define SEG_EXPAND_DOWN(s)	(((s).b & ((1 << 11) | (1 << 10))) \
 				 == (1 << 10))
 
-#define I387			(current->thread.xstate)
+#define I387			(current->thread.fpu.state)
 #define FPU_info		(I387->soft.info)
 
 #define FPU_CS			(*(unsigned short *) &(FPU_info->regs->cs))