author     Linus Torvalds <torvalds@linux-foundation.org>   2010-05-18 11:58:16 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2010-05-18 11:58:16 -0400
commit     41d59102e146a4423a490b8eca68a5860af4fe1c (patch)
tree       739ed4113ccdaeb33d1723a6beab09c1e18d7048 /arch/x86/include
parent     3e1dd193edefd2a806a0ba6cf0879cf1a95217da (diff)
parent     c9775b4cc522e5f1b40b1366a993f0f05f600f39 (diff)
Merge branch 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
x86, fpu: Use static_cpu_has() to implement use_xsave()
x86: Add new static_cpu_has() function using alternatives
x86, fpu: Use the proper asm constraint in use_xsave()
x86, fpu: Unbreak FPU emulation
x86: Introduce 'struct fpu' and related API
x86: Eliminate TS_XSAVE
x86-32: Don't set ignore_fpu_irq in simd exception
x86: Merge kernel_math_error() into math_error()
x86: Merge simd_math_error() into math_error()
x86-32: Rework cache flush denied handler
Fix trivial conflict in arch/x86/kernel/process.c
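
For context on the first two patches in the list: static_cpu_has() replaces the per-call TS_XSAVE thread-flag test with a branch that the alternatives machinery patches away at boot. Below is a standalone sketch (not kernel code) of the dispatch the wrapper macro performs; boot_cpu_has_stub() and patched_stub() are hypothetical stand-ins for boot_cpu_has() and __static_cpu_has(), and the real macro additionally lets __builtin_constant_p(boot_cpu_has(bit)) fold required features at compile time.

/* Standalone sketch (not kernel code) of the dispatch performed by the
 * static_cpu_has() wrapper macro: a compile-time-constant feature bit is
 * routed to the alternatives-patched helper, anything else falls back to
 * the runtime bitmap test. */
#include <stdbool.h>
#include <stdio.h>

static bool boot_cpu_has_stub(unsigned int bit)		/* runtime test */
{
	return bit & 1;
}

static bool patched_stub(unsigned char bit)	/* boot-patched fast path */
{
	(void)bit;
	return true;
}

#define static_cpu_has_sketch(bit)				\
(								\
	(__builtin_constant_p(bit) && !((bit) & ~0xff)) ?	\
		patched_stub(bit) :				\
		boot_cpu_has_stub(bit)				\
)

int main(void)
{
	volatile unsigned int runtime_bit = 62;

	/* Literal argument: the ?: folds and only the fast path is emitted. */
	printf("constant bit -> %d\n", static_cpu_has_sketch(62));
	/* Non-constant argument: compiles to the runtime test. */
	printf("runtime bit  -> %d\n", static_cpu_has_sketch(runtime_bit));
	return 0;
}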
Diffstat (limited to 'arch/x86/include')
-rw-r--r--  arch/x86/include/asm/cpufeature.h    57
-rw-r--r--  arch/x86/include/asm/i387.h         129
-rw-r--r--  arch/x86/include/asm/processor.h      6
-rw-r--r--  arch/x86/include/asm/thread_info.h    1
-rw-r--r--  arch/x86/include/asm/traps.h          2
-rw-r--r--  arch/x86/include/asm/xsave.h          7
6 files changed, 162 insertions(+), 40 deletions(-)
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 630e623f61e0..dca9c545f44e 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -176,6 +176,7 @@
 
 #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
 
+#include <asm/asm.h>
 #include <linux/bitops.h>
 
 extern const char * const x86_cap_flags[NCAPINTS*32];
@@ -284,6 +285,62 @@ extern const char * const x86_power_flags[32];
 
 #endif /* CONFIG_X86_64 */
 
+/*
+ * Static testing of CPU features.  Used the same as boot_cpu_has().
+ * These are only valid after alternatives have run, but will statically
+ * patch the target code for additional performance.
+ *
+ */
+static __always_inline __pure bool __static_cpu_has(u8 bit)
+{
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
+	asm goto("1: jmp %l[t_no]\n"
+		 "2:\n"
+		 ".section .altinstructions,\"a\"\n"
+		 _ASM_ALIGN "\n"
+		 _ASM_PTR "1b\n"
+		 _ASM_PTR "0\n"			/* no replacement */
+		 " .byte %P0\n"			/* feature bit */
+		 " .byte 2b - 1b\n"		/* source len */
+		 " .byte 0\n"			/* replacement len */
+		 " .byte 0xff + 0 - (2b-1b)\n"	/* padding */
+		 ".previous\n"
+		 : : "i" (bit) : : t_no);
+	return true;
+t_no:
+	return false;
+#else
+	u8 flag;
+	/* Open-coded due to __stringify() in ALTERNATIVE() */
+	asm volatile("1: movb $0,%0\n"
+		     "2:\n"
+		     ".section .altinstructions,\"a\"\n"
+		     _ASM_ALIGN "\n"
+		     _ASM_PTR "1b\n"
+		     _ASM_PTR "3f\n"
+		     " .byte %P1\n"			/* feature bit */
+		     " .byte 2b - 1b\n"			/* source len */
+		     " .byte 4f - 3f\n"			/* replacement len */
+		     " .byte 0xff + (4f-3f) - (2b-1b)\n" /* padding */
+		     ".previous\n"
+		     ".section .altinstr_replacement,\"ax\"\n"
+		     "3: movb $1,%0\n"
+		     "4:\n"
+		     ".previous\n"
+		     : "=qm" (flag) : "i" (bit));
+	return flag;
+#endif
+}
+
+#define static_cpu_has(bit)					\
+(								\
+	__builtin_constant_p(boot_cpu_has(bit)) ?		\
+		boot_cpu_has(bit) :				\
+	(__builtin_constant_p(bit) && !((bit) & ~0xff)) ?	\
+		__static_cpu_has(bit) :				\
+		boot_cpu_has(bit)				\
+)
+
 #endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */
 
 #endif /* _ASM_X86_CPUFEATURE_H */
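
For reference: each asm block in __static_cpu_has() above appends one record to the .altinstructions section, which apply_alternatives() walks during boot, patching every site whose feature bit is set. A sketch of that record layout follows; the field names are assumptions modeled on struct alt_instr in this era's arch/x86/include/asm/alternative.h.

/* Sketch (assumed layout) of the record each block above emits. */
typedef unsigned char u8;	/* as in kernel <linux/types.h> */

struct alt_instr_sketch {
	u8 *instr;		/* _ASM_PTR 1b: address of the original code */
	u8 *replacement;	/* _ASM_PTR 3f, or 0 for "no replacement" */
	u8  cpuid;		/* .byte %P0: feature bit to test */
	u8  instrlen;		/* .byte 2b - 1b: length of original code */
	u8  replacementlen;	/* .byte 4f - 3f: length of replacement */
	u8  pad1;		/* .byte 0xff + (4f-3f) - (2b-1b): padding that
				 * overflows a byte, failing the build, if the
				 * replacement is longer than the original */
};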
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index da2930924501..c991b3a7b904 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -16,7 +16,9 @@
 #include <linux/kernel_stat.h>
 #include <linux/regset.h>
 #include <linux/hardirq.h>
+#include <linux/slab.h>
 #include <asm/asm.h>
+#include <asm/cpufeature.h>
 #include <asm/processor.h>
 #include <asm/sigcontext.h>
 #include <asm/user.h>
@@ -56,6 +58,11 @@ extern int restore_i387_xstate_ia32(void __user *buf);
 
 #define X87_FSW_ES	(1 << 7)	/* Exception Summary */
 
+static __always_inline __pure bool use_xsave(void)
+{
+	return static_cpu_has(X86_FEATURE_XSAVE);
+}
+
 #ifdef CONFIG_X86_64
 
 /* Ignore delayed exceptions from user space */
@@ -91,15 +98,15 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
    values. The kernel data segment can be sometimes 0 and sometimes
    new user value. Both should be ok.
    Use the PDA as safe address because it should be already in L1. */
-static inline void clear_fpu_state(struct task_struct *tsk)
+static inline void fpu_clear(struct fpu *fpu)
 {
-	struct xsave_struct *xstate = &tsk->thread.xstate->xsave;
-	struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
+	struct xsave_struct *xstate = &fpu->state->xsave;
+	struct i387_fxsave_struct *fx = &fpu->state->fxsave;
 
 	/*
 	 * xsave header may indicate the init state of the FP.
 	 */
-	if ((task_thread_info(tsk)->status & TS_XSAVE) &&
+	if (use_xsave() &&
 	    !(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
 		return;
 
@@ -111,6 +118,11 @@ static inline void clear_fpu_state(struct task_struct *tsk)
 		       X86_FEATURE_FXSAVE_LEAK);
 }
 
+static inline void clear_fpu_state(struct task_struct *tsk)
+{
+	fpu_clear(&tsk->thread.fpu);
+}
+
 static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
 {
 	int err;
@@ -135,7 +147,7 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
 	return err;
 }
 
-static inline void fxsave(struct task_struct *tsk)
+static inline void fpu_fxsave(struct fpu *fpu)
 {
 	/* Using "rex64; fxsave %0" is broken because, if the memory operand
 	   uses any extended registers for addressing, a second REX prefix
@@ -145,42 +157,45 @@ static inline void fxsave(struct task_struct *tsk)
 	/* Using "fxsaveq %0" would be the ideal choice, but is only supported
 	   starting with gas 2.16. */
 	__asm__ __volatile__("fxsaveq %0"
-			     : "=m" (tsk->thread.xstate->fxsave));
+			     : "=m" (fpu->state->fxsave));
 #elif 0
 	/* Using, as a workaround, the properly prefixed form below isn't
 	   accepted by any binutils version so far released, complaining that
 	   the same type of prefix is used twice if an extended register is
 	   needed for addressing (fix submitted to mainline 2005-11-21). */
 	__asm__ __volatile__("rex64/fxsave %0"
-			     : "=m" (tsk->thread.xstate->fxsave));
+			     : "=m" (fpu->state->fxsave));
 #else
 	/* This, however, we can work around by forcing the compiler to select
 	   an addressing mode that doesn't require extended registers. */
 	__asm__ __volatile__("rex64/fxsave (%1)"
-			     : "=m" (tsk->thread.xstate->fxsave)
-			     : "cdaSDb" (&tsk->thread.xstate->fxsave));
+			     : "=m" (fpu->state->fxsave)
+			     : "cdaSDb" (&fpu->state->fxsave));
 #endif
 }
 
-static inline void __save_init_fpu(struct task_struct *tsk)
+static inline void fpu_save_init(struct fpu *fpu)
 {
-	if (task_thread_info(tsk)->status & TS_XSAVE)
-		xsave(tsk);
+	if (use_xsave())
+		fpu_xsave(fpu);
 	else
-		fxsave(tsk);
+		fpu_fxsave(fpu);
+
+	fpu_clear(fpu);
+}
 
-	clear_fpu_state(tsk);
+static inline void __save_init_fpu(struct task_struct *tsk)
+{
+	fpu_save_init(&tsk->thread.fpu);
 	task_thread_info(tsk)->status &= ~TS_USEDFPU;
 }
 
 #else  /* CONFIG_X86_32 */
 
 #ifdef CONFIG_MATH_EMULATION
-extern void finit_task(struct task_struct *tsk);
+extern void finit_soft_fpu(struct i387_soft_struct *soft);
 #else
-static inline void finit_task(struct task_struct *tsk)
-{
-}
+static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
 #endif
 
 static inline void tolerant_fwait(void)
@@ -216,13 +231,13 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
 /*
  * These must be called with preempt disabled
  */
-static inline void __save_init_fpu(struct task_struct *tsk)
+static inline void fpu_save_init(struct fpu *fpu)
 {
-	if (task_thread_info(tsk)->status & TS_XSAVE) {
-		struct xsave_struct *xstate = &tsk->thread.xstate->xsave;
-		struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
+	if (use_xsave()) {
+		struct xsave_struct *xstate = &fpu->state->xsave;
+		struct i387_fxsave_struct *fx = &fpu->state->fxsave;
 
-		xsave(tsk);
+		fpu_xsave(fpu);
 
 		/*
 		 * xsave header may indicate the init state of the FP.
@@ -246,8 +261,8 @@ static inline void __save_init_fpu(struct task_struct *tsk)
 		"fxsave %[fx]\n"
 		"bt $7,%[fsw] ; jnc 1f ; fnclex\n1:",
 		X86_FEATURE_FXSR,
-		[fx] "m" (tsk->thread.xstate->fxsave),
-		[fsw] "m" (tsk->thread.xstate->fxsave.swd) : "memory");
+		[fx] "m" (fpu->state->fxsave),
+		[fsw] "m" (fpu->state->fxsave.swd) : "memory");
 clear_state:
 	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
 	   is pending.  Clear the x87 state here by setting it to fixed
@@ -259,17 +274,34 @@ clear_state:
 		X86_FEATURE_FXSAVE_LEAK,
 		[addr] "m" (safe_address));
 end:
+	;
+}
+
+static inline void __save_init_fpu(struct task_struct *tsk)
+{
+	fpu_save_init(&tsk->thread.fpu);
 	task_thread_info(tsk)->status &= ~TS_USEDFPU;
 }
 
+
 #endif	/* CONFIG_X86_64 */
 
-static inline int restore_fpu_checking(struct task_struct *tsk)
+static inline int fpu_fxrstor_checking(struct fpu *fpu)
 {
-	if (task_thread_info(tsk)->status & TS_XSAVE)
-		return xrstor_checking(&tsk->thread.xstate->xsave);
+	return fxrstor_checking(&fpu->state->fxsave);
+}
+
+static inline int fpu_restore_checking(struct fpu *fpu)
+{
+	if (use_xsave())
+		return fpu_xrstor_checking(fpu);
 	else
-		return fxrstor_checking(&tsk->thread.xstate->fxsave);
+		return fpu_fxrstor_checking(fpu);
+}
+
+static inline int restore_fpu_checking(struct task_struct *tsk)
+{
+	return fpu_restore_checking(&tsk->thread.fpu);
 }
 
 /*
@@ -397,30 +429,59 @@ static inline void clear_fpu(struct task_struct *tsk)
 static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
 {
 	if (cpu_has_fxsr) {
-		return tsk->thread.xstate->fxsave.cwd;
+		return tsk->thread.fpu.state->fxsave.cwd;
 	} else {
-		return (unsigned short)tsk->thread.xstate->fsave.cwd;
+		return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
 	}
 }
 
 static inline unsigned short get_fpu_swd(struct task_struct *tsk)
 {
 	if (cpu_has_fxsr) {
-		return tsk->thread.xstate->fxsave.swd;
+		return tsk->thread.fpu.state->fxsave.swd;
 	} else {
-		return (unsigned short)tsk->thread.xstate->fsave.swd;
+		return (unsigned short)tsk->thread.fpu.state->fsave.swd;
 	}
 }
 
 static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
 {
 	if (cpu_has_xmm) {
-		return tsk->thread.xstate->fxsave.mxcsr;
+		return tsk->thread.fpu.state->fxsave.mxcsr;
 	} else {
 		return MXCSR_DEFAULT;
 	}
 }
 
+static bool fpu_allocated(struct fpu *fpu)
+{
+	return fpu->state != NULL;
+}
+
+static inline int fpu_alloc(struct fpu *fpu)
+{
+	if (fpu_allocated(fpu))
+		return 0;
+	fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
+	if (!fpu->state)
+		return -ENOMEM;
+	WARN_ON((unsigned long)fpu->state & 15);
+	return 0;
+}
+
+static inline void fpu_free(struct fpu *fpu)
+{
+	if (fpu->state) {
+		kmem_cache_free(task_xstate_cachep, fpu->state);
+		fpu->state = NULL;
+	}
+}
+
+static inline void fpu_copy(struct fpu *dst, struct fpu *src)
+{
+	memcpy(dst->state, src->state, xstate_size);
+}
+
 #endif /* __ASSEMBLY__ */
 
 #define PSHUFB_XMM5_XMM0 .byte 0x66, 0x0f, 0x38, 0x00, 0xc5
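
The fpu_alloc()/fpu_free()/fpu_copy() helpers added above give the FPU state buffer a single explicit owner instead of open-coded task_struct plumbing. Below is a standalone user-space analogue (not kernel code) of that lifecycle; aligned_alloc and a fixed 512-byte buffer stand in for the kernel's task_xstate_cachep slab cache and the runtime-sized xstate_size.

/* Standalone analogue of the struct fpu lifecycle sketched above. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct thread_xstate { unsigned char bytes[512]; };	/* stand-in union */

struct fpu {
	struct thread_xstate *state;
};

static int fpu_alloc_sketch(struct fpu *fpu)
{
	if (fpu->state)
		return 0;	/* already allocated */
	/* 16-byte alignment mirrors the WARN_ON(... & 15) check above. */
	fpu->state = aligned_alloc(16, sizeof(*fpu->state));
	return fpu->state ? 0 : -1;	/* kernel: -ENOMEM */
}

static void fpu_free_sketch(struct fpu *fpu)
{
	free(fpu->state);
	fpu->state = NULL;
}

static void fpu_copy_sketch(struct fpu *dst, struct fpu *src)
{
	memcpy(dst->state, src->state, sizeof(*dst->state));
}

int main(void)
{
	struct fpu parent = { 0 }, child = { 0 };

	if (fpu_alloc_sketch(&parent) || fpu_alloc_sketch(&child))
		return 1;
	parent.state->bytes[0] = 0x3f;		/* pretend saved state */
	fpu_copy_sketch(&child, &parent);	/* fork-style duplication */
	printf("child byte 0 = %#x\n", child.state->bytes[0]);
	fpu_free_sketch(&child);
	fpu_free_sketch(&parent);
	return 0;
}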
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index c71a12d960d4..5a51379dcbe4 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -376,6 +376,10 @@ union thread_xstate {
 	struct xsave_struct xsave;
 };
 
+struct fpu {
+	union thread_xstate *state;
+};
+
 #ifdef CONFIG_X86_64
 DECLARE_PER_CPU(struct orig_ist, orig_ist);
 
@@ -453,7 +457,7 @@ struct thread_struct {
 	unsigned long		trap_no;
 	unsigned long		error_code;
 	/* floating point and extended processor state */
-	union thread_xstate	*xstate;
+	struct fpu		fpu;
 #ifdef CONFIG_X86_32
 	/* Virtual 86 mode info */
 	struct vm86_struct __user *vm86_info;
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index d017ed5502e2..d4092fac226b 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -242,7 +242,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TS_POLLING		0x0004	/* true if in idle loop
 					   and not sleeping */
 #define TS_RESTORE_SIGMASK	0x0008	/* restore signal mask in do_signal() */
-#define TS_XSAVE		0x0010	/* Use xsave/xrstor */
 
 #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
 
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 4da91ad69e0d..f66cda56781d 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -79,7 +79,7 @@ static inline int get_si_code(unsigned long condition)
 
 extern int panic_on_unrecovered_nmi;
 
-void math_error(void __user *);
+void math_error(struct pt_regs *, int, int);
 void math_emulate(struct math_emu_info *);
 #ifndef CONFIG_X86_32
 asmlinkage void smp_thermal_interrupt(void);
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index ddc04ccad03b..2c4390cae228 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -37,8 +37,9 @@ extern int check_for_xstate(struct i387_fxsave_struct __user *buf,
 			    void __user *fpstate,
 			    struct _fpx_sw_bytes *sw);
 
-static inline int xrstor_checking(struct xsave_struct *fx)
+static inline int fpu_xrstor_checking(struct fpu *fpu)
 {
+	struct xsave_struct *fx = &fpu->state->xsave;
 	int err;
 
 	asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
@@ -110,12 +111,12 @@ static inline void xrstor_state(struct xsave_struct *fx, u64 mask)
 		     :   "memory");
 }
 
-static inline void xsave(struct task_struct *tsk)
+static inline void fpu_xsave(struct fpu *fpu)
 {
 	/* This, however, we can work around by forcing the compiler to select
 	   an addressing mode that doesn't require extended registers. */
 	__asm__ __volatile__(".byte " REX_PREFIX "0x0f,0xae,0x27"
-			     : : "D" (&(tsk->thread.xstate->xsave)),
+			     : : "D" (&(fpu->state->xsave)),
 			       "a" (-1), "d"(-1) : "memory");
 }
 #endif
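
fpu_xsave() emits the XSAVE opcode by hand (the .byte sequence 0x0f,0xae,0x27 with a REX prefix is xsave with the buffer address in %rdi) because the binutils of the day lacked the mnemonic. Below is a standalone x86-64 user-space sketch that runs the same instruction with a modern toolchain, assuming an XSAVE-capable CPU and OS; it requests only the x87+SSE components (mask 0x3) so a small buffer suffices, where the kernel passes -1 in edx:eax to save everything enabled.

/* Standalone sketch: run the XSAVE instruction from user space.
 * Assumes x86-64, OSXSAVE enabled, and an assembler that knows the
 * "xsave" mnemonic (unlike the gas of 2010 above). */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	/* The XSAVE area must be 64-byte aligned; 1 KiB covers the
	   512-byte legacy region plus the 64-byte header for x87+SSE. */
	uint8_t *buf = aligned_alloc(64, 1024);
	uint64_t xstate_bv;

	if (!buf)
		return 1;
	memset(buf, 0, 1024);

	/* Requested-feature bitmap goes in edx:eax; 0x3 = x87 | SSE.
	   The kernel's fpu_xsave() passes -1 to save everything. */
	asm volatile("xsave %0"
		     : "=m" (*buf)
		     : "a" (0x3), "d" (0x0)
		     : "memory");

	/* XSTATE_BV, the first u64 of the header at byte 512, reports
	   which requested components were live -- the same field the
	   xsave_hdr.xstate_bv checks in i387.h above consult. */
	memcpy(&xstate_bv, buf + 512, sizeof(xstate_bv));
	printf("xstate_bv = %#llx\n", (unsigned long long)xstate_bv);

	free(buf);
	return 0;
}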