 arch/x86/include/asm/i387.h      | 183
 arch/x86/include/asm/processor.h |   4
 arch/x86/kernel/cpu/common.c     |   7
 arch/x86/kernel/i387.c           |  58
 arch/x86/kernel/process_64.c     |   2
 arch/x86/kernel/traps.c          |  35
 6 files changed, 86 insertions(+), 203 deletions(-)
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index 70f105b352ee..4aa2bb3b242a 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -55,6 +55,12 @@ extern int save_i387_xstate_ia32(void __user *buf);
 extern int restore_i387_xstate_ia32(void __user *buf);
 #endif
 
+#ifdef CONFIG_MATH_EMULATION
+extern void finit_soft_fpu(struct i387_soft_struct *soft);
+#else
+static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
+#endif
+
 #define X87_FSW_ES (1 << 7)	/* Exception Summary */
 
 static __always_inline __pure bool use_xsaveopt(void)
@@ -67,6 +73,11 @@ static __always_inline __pure bool use_xsave(void)
 	return static_cpu_has(X86_FEATURE_XSAVE);
 }
 
+static __always_inline __pure bool use_fxsr(void)
+{
+	return static_cpu_has(X86_FEATURE_FXSR);
+}
+
 extern void __sanitize_i387_state(struct task_struct *);
 
 static inline void sanitize_i387_state(struct task_struct *tsk)
@@ -77,19 +88,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
 }
 
 #ifdef CONFIG_X86_64
-
-/* Ignore delayed exceptions from user space */
-static inline void tolerant_fwait(void)
-{
-	asm volatile("1: fwait\n"
-		     "2:\n"
-		     _ASM_EXTABLE(1b, 2b));
-}
-
 static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
 {
 	int err;
 
+	/* See comment in fxsave() below. */
 	asm volatile("1: rex64/fxrstor (%[fx])\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
@@ -98,44 +101,10 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err)
-#if 0 /* See comment in fxsave() below. */
-		     : [fx] "r" (fx), "m" (*fx), "0" (0));
-#else
-		     : [fx] "cdaSDb" (fx), "m" (*fx), "0" (0));
-#endif
+		     : [fx] "R" (fx), "m" (*fx), "0" (0));
 	return err;
 }
 
-/* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception
-   is pending. Clear the x87 state here by setting it to fixed
-   values. The kernel data segment can be sometimes 0 and sometimes
-   new user value. Both should be ok.
-   Use the PDA as safe address because it should be already in L1. */
-static inline void fpu_clear(struct fpu *fpu)
-{
-	struct xsave_struct *xstate = &fpu->state->xsave;
-	struct i387_fxsave_struct *fx = &fpu->state->fxsave;
-
-	/*
-	 * xsave header may indicate the init state of the FP.
-	 */
-	if (use_xsave() &&
-	    !(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
-		return;
-
-	if (unlikely(fx->swd & X87_FSW_ES))
-		asm volatile("fnclex");
-	alternative_input(ASM_NOP8 ASM_NOP2,
-			  " emms\n"		/* clear stack tags */
-			  " fildl %%gs:0",	/* load to clear state */
-			  X86_FEATURE_FXSAVE_LEAK);
-}
-
-static inline void clear_fpu_state(struct task_struct *tsk)
-{
-	fpu_clear(&tsk->thread.fpu);
-}
-
 static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
 {
 	int err;
@@ -149,6 +118,7 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
 	if (unlikely(err))
 		return -EFAULT;
 
+	/* See comment in fxsave() below. */
 	asm volatile("1: rex64/fxsave (%[fx])\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
@@ -157,11 +127,7 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err), "=m" (*fx)
-#if 0 /* See comment in fxsave() below. */
-		     : [fx] "r" (fx), "0" (0));
-#else
-		     : [fx] "cdaSDb" (fx), "0" (0));
-#endif
+		     : [fx] "R" (fx), "0" (0));
 	if (unlikely(err) &&
 	    __clear_user(fx, sizeof(struct i387_fxsave_struct)))
 		err = -EFAULT;
@@ -175,56 +141,29 @@ static inline void fpu_fxsave(struct fpu *fpu)
 	   uses any extended registers for addressing, a second REX prefix
 	   will be generated (to the assembler, rex64 followed by semicolon
 	   is a separate instruction), and hence the 64-bitness is lost. */
+
 #ifdef CONFIG_AS_FXSAVEQ
 	/* Using "fxsaveq %0" would be the ideal choice, but is only supported
 	   starting with gas 2.16. */
 	__asm__ __volatile__("fxsaveq %0"
			     : "=m" (fpu->state->fxsave));
-#elif 0
+#else
 	/* Using, as a workaround, the properly prefixed form below isn't
 	   accepted by any binutils version so far released, complaining that
 	   the same type of prefix is used twice if an extended register is
-	   needed for addressing (fix submitted to mainline 2005-11-21). */
-	__asm__ __volatile__("rex64/fxsave %0"
-			     : "=m" (fpu->state->fxsave));
-#else
-	/* This, however, we can work around by forcing the compiler to select
+	   needed for addressing (fix submitted to mainline 2005-11-21).
+	   asm volatile("rex64/fxsave %0"
+			: "=m" (fpu->state->fxsave));
+	   This, however, we can work around by forcing the compiler to select
 	   an addressing mode that doesn't require extended registers. */
-	__asm__ __volatile__("rex64/fxsave (%1)"
-			     : "=m" (fpu->state->fxsave)
-			     : "cdaSDb" (&fpu->state->fxsave));
+	asm volatile("rex64/fxsave (%[fx])"
		     : "=m" (fpu->state->fxsave)
-		     : "cdaSDb" (&fpu->state->fxsave));
+		     : [fx] "R" (&fpu->state->fxsave));
 #endif
 }
 
-static inline void fpu_save_init(struct fpu *fpu)
-{
-	if (use_xsave())
-		fpu_xsave(fpu);
-	else
-		fpu_fxsave(fpu);
-
-	fpu_clear(fpu);
-}
-
-static inline void __save_init_fpu(struct task_struct *tsk)
-{
-	fpu_save_init(&tsk->thread.fpu);
-	task_thread_info(tsk)->status &= ~TS_USEDFPU;
-}
-
 #else  /* CONFIG_X86_32 */
 
-#ifdef CONFIG_MATH_EMULATION
-extern void finit_soft_fpu(struct i387_soft_struct *soft);
-#else
-static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
-#endif
-
-static inline void tolerant_fwait(void)
-{
-	asm volatile("fnclex ; fwait");
-}
-
 /* perform fxrstor iff the processor has extended states, otherwise frstor */
 static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
 {
@@ -241,6 +180,14 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
 	return 0;
 }
 
+static inline void fpu_fxsave(struct fpu *fpu)
+{
+	asm volatile("fxsave %[fx]"
+		     : [fx] "=m" (fpu->state->fxsave));
+}
+
+#endif	/* CONFIG_X86_64 */
+
 /* We need a safe address that is cheap to find and that is already
    in L1 during context switch. The best choices are unfortunately
    different for UP and SMP */
@@ -256,47 +203,33 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
 static inline void fpu_save_init(struct fpu *fpu)
 {
 	if (use_xsave()) {
-		struct xsave_struct *xstate = &fpu->state->xsave;
-		struct i387_fxsave_struct *fx = &fpu->state->fxsave;
-
 		fpu_xsave(fpu);
 
 		/*
 		 * xsave header may indicate the init state of the FP.
 		 */
-		if (!(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
-			goto end;
-
-		if (unlikely(fx->swd & X87_FSW_ES))
-			asm volatile("fnclex");
-
-		/*
-		 * we can do a simple return here or be paranoid :)
-		 */
-		goto clear_state;
+		if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
+			return;
+	} else if (use_fxsr()) {
+		fpu_fxsave(fpu);
+	} else {
+		asm volatile("fsave %[fx]; fwait"
+			     : [fx] "=m" (fpu->state->fsave));
+		return;
 	}
 
-	/* Use more nops than strictly needed in case the compiler
-	   varies code */
-	alternative_input(
-		"fnsave %[fx] ;fwait;" GENERIC_NOP8 GENERIC_NOP4,
-		"fxsave %[fx]\n"
-		"bt $7,%[fsw] ; jnc 1f ; fnclex\n1:",
-		X86_FEATURE_FXSR,
-		[fx] "m" (fpu->state->fxsave),
-		[fsw] "m" (fpu->state->fxsave.swd) : "memory");
-clear_state:
+	if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES))
+		asm volatile("fnclex");
+
 	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
 	   is pending. Clear the x87 state here by setting it to fixed
 	   values. safe_address is a random variable that should be in L1 */
 	alternative_input(
-		GENERIC_NOP8 GENERIC_NOP2,
+		ASM_NOP8 ASM_NOP2,
 		"emms\n\t"		/* clear stack tags */
-		"fildl %[addr]",	/* set F?P to defined value */
+		"fildl %P[addr]",	/* set F?P to defined value */
 		X86_FEATURE_FXSAVE_LEAK,
 		[addr] "m" (safe_address));
-end:
-	;
 }
 
 static inline void __save_init_fpu(struct task_struct *tsk)
@@ -305,9 +238,6 @@ static inline void __save_init_fpu(struct task_struct *tsk)
 	task_thread_info(tsk)->status &= ~TS_USEDFPU;
 }
 
-
-#endif	/* CONFIG_X86_64 */
-
 static inline int fpu_fxrstor_checking(struct fpu *fpu)
 {
 	return fxrstor_checking(&fpu->state->fxsave);
@@ -344,7 +274,10 @@ static inline void __unlazy_fpu(struct task_struct *tsk)
 static inline void __clear_fpu(struct task_struct *tsk)
 {
 	if (task_thread_info(tsk)->status & TS_USEDFPU) {
-		tolerant_fwait();
+		/* Ignore delayed exceptions from user space */
+		asm volatile("1: fwait\n"
+			     "2:\n"
+			     _ASM_EXTABLE(1b, 2b));
 		task_thread_info(tsk)->status &= ~TS_USEDFPU;
 		stts();
 	}
@@ -405,19 +338,6 @@ static inline void irq_ts_restore(int TS_state)
 	stts();
 }
 
-#ifdef CONFIG_X86_64
-
-static inline void save_init_fpu(struct task_struct *tsk)
-{
-	__save_init_fpu(tsk);
-	stts();
-}
-
-#define unlazy_fpu	__unlazy_fpu
-#define clear_fpu	__clear_fpu
-
-#else  /* CONFIG_X86_32 */
-
 /*
  * These disable preemption on their own and are safe
 */
@@ -443,8 +363,6 @@ static inline void clear_fpu(struct task_struct *tsk)
 	preempt_enable();
 }
 
-#endif	/* CONFIG_X86_64 */
-
 /*
 * i387 state interaction
 */
@@ -508,7 +426,4 @@ extern void fpu_finit(struct fpu *fpu);
 
 #endif	/* __ASSEMBLY__ */
 
-#define PSHUFB_XMM5_XMM0 .byte 0x66, 0x0f, 0x38, 0x00, 0xc5
-#define PSHUFB_XMM5_XMM6 .byte 0x66, 0x0f, 0x38, 0x00, 0xf5
-
 #endif /* _ASM_X86_I387_H */
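Note: the most interesting change in this header is that fpu_save_init() no longer self-patches via alternative_input() for the FSAVE/FXSAVE choice; it now branches at runtime on the use_xsave()/use_fxsr() helpers (static_cpu_has() still resolves to a patched jump, so the cost is comparable). A minimal user-space sketch of the same three-way dispatch, using GCC's <cpuid.h> -- illustrative only, not kernel code; the bit positions (CPUID.1:ECX bit 26 = XSAVE, CPUID.1:EDX bit 24 = FXSR) are from the Intel SDM:

    /* Three-way FPU save dispatch, mirroring the new fpu_save_init(). */
    #include <cpuid.h>
    #include <stdio.h>

    static int cpu_has(int in_ecx, unsigned int bit)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
            return 0;
        return ((in_ecx ? ecx : edx) >> bit) & 1;
    }

    int main(void)
    {
        if (cpu_has(1, 26))          /* XSAVE */
            puts("would use xsave");
        else if (cpu_has(0, 24))     /* FXSR */
            puts("would use fxsave");
        else                         /* pre-FXSR hardware */
            puts("would fall back to fsave");
        return 0;
    }

On anything remotely modern this takes the xsave or fxsave branch; the fsave fallback is the formerly 32-bit-only path that the patch folds into the now-shared fpu_save_init().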
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 69e80c2ec6c2..a668d66301fe 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -604,7 +604,7 @@ extern unsigned long mmu_cr4_features;
 
 static inline void set_in_cr4(unsigned long mask)
 {
-	unsigned cr4;
+	unsigned long cr4;
 
 	mmu_cr4_features |= mask;
 	cr4 = read_cr4();
@@ -614,7 +614,7 @@ static inline void set_in_cr4(unsigned long mask)
 
 static inline void clear_in_cr4(unsigned long mask)
 {
-	unsigned cr4;
+	unsigned long cr4;
 
 	mmu_cr4_features &= ~mask;
 	cr4 = read_cr4();
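Note: the unsigned -> unsigned long change is not cosmetic. On x86-64, read_cr4() returns a 64-bit value, and a 32-bit temporary silently drops bits 32-63 before they reach write_cr4(). A self-contained sketch of exactly that truncation (the high CR4 bit used here is hypothetical, chosen only to make the effect visible):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t cr4 = (1ULL << 32) | 0x640;  /* pretend a bit above 31 is set */
        unsigned old_style = cr4;             /* old code: high bits lost */
        unsigned long new_style = cr4;        /* new code: 64-bit on LP64 */

        printf("unsigned: %#x, unsigned long: %#lx\n", old_style, new_style);
        return 0;
    }

This prints 0x640 for the unsigned copy and 0x100000640 for the unsigned long one (on an LP64 target), i.e. the old code would have cleared any CR4 feature bit above bit 31 as a side effect.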
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 92af7b420ab2..4b68bda30938 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1269,13 +1269,6 @@ void __cpuinit cpu_init(void)
 	clear_all_debug_regs();
 	dbg_restore_debug_regs();
 
-	/*
-	 * Force FPU initialization:
-	 */
-	current_thread_info()->status = 0;
-	clear_used_math();
-	mxcsr_feature_mask_init();
-
 	fpu_init();
 	xsave_init();
 }
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index a46cb3522c0c..58bb239a2fd7 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -68,19 +68,22 @@ static void __cpuinit init_thread_xstate(void)
 	 */
 
 	if (!HAVE_HWFP) {
+		/*
+		 * Disable xsave as we do not support it if i387
+		 * emulation is enabled.
+		 */
+		setup_clear_cpu_cap(X86_FEATURE_XSAVE);
+		setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
 		xstate_size = sizeof(struct i387_soft_struct);
 		return;
 	}
 
 	if (cpu_has_fxsr)
 		xstate_size = sizeof(struct i387_fxsave_struct);
-#ifdef CONFIG_X86_32
 	else
 		xstate_size = sizeof(struct i387_fsave_struct);
-#endif
 }
 
-#ifdef CONFIG_X86_64
 /*
 * Called at bootup to set up the initial FPU state that is later cloned
 * into all processes.
@@ -88,12 +91,21 @@ static void __cpuinit init_thread_xstate(void)
 
 void __cpuinit fpu_init(void)
 {
-	unsigned long oldcr0 = read_cr0();
-
-	set_in_cr4(X86_CR4_OSFXSR);
-	set_in_cr4(X86_CR4_OSXMMEXCPT);
+	unsigned long cr0;
+	unsigned long cr4_mask = 0;
 
-	write_cr0(oldcr0 & ~(X86_CR0_TS|X86_CR0_EM)); /* clear TS and EM */
+	if (cpu_has_fxsr)
+		cr4_mask |= X86_CR4_OSFXSR;
+	if (cpu_has_xmm)
+		cr4_mask |= X86_CR4_OSXMMEXCPT;
+	if (cr4_mask)
+		set_in_cr4(cr4_mask);
+
+	cr0 = read_cr0();
+	cr0 &= ~(X86_CR0_TS|X86_CR0_EM); /* clear TS and EM */
+	if (!HAVE_HWFP)
+		cr0 |= X86_CR0_EM;
+	write_cr0(cr0);
 
 	if (!smp_processor_id())
 		init_thread_xstate();
@@ -104,24 +116,12 @@ void __cpuinit fpu_init(void)
 	clear_used_math();
 }
 
-#else	/* CONFIG_X86_64 */
-
-void __cpuinit fpu_init(void)
-{
-	if (!smp_processor_id())
-		init_thread_xstate();
-}
-
-#endif	/* CONFIG_X86_32 */
-
 void fpu_finit(struct fpu *fpu)
 {
-#ifdef CONFIG_X86_32
 	if (!HAVE_HWFP) {
 		finit_soft_fpu(&fpu->state->soft);
 		return;
 	}
-#endif
 
 	if (cpu_has_fxsr) {
 		struct i387_fxsave_struct *fx = &fpu->state->fxsave;
@@ -386,19 +386,17 @@ convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
 #ifdef CONFIG_X86_64
 	env->fip = fxsave->rip;
 	env->foo = fxsave->rdp;
+	/*
+	 * should be actually ds/cs at fpu exception time, but
+	 * that information is not available in 64bit mode.
+	 */
+	env->fcs = task_pt_regs(tsk)->cs;
 	if (tsk == current) {
-		/*
-		 * should be actually ds/cs at fpu exception time, but
-		 * that information is not available in 64bit mode.
-		 */
-		asm("mov %%ds, %[fos]" : [fos] "=r" (env->fos));
-		asm("mov %%cs, %[fcs]" : [fcs] "=r" (env->fcs));
+		savesegment(ds, env->fos);
 	} else {
-		struct pt_regs *regs = task_pt_regs(tsk);
-
-		env->fos = 0xffff0000 | tsk->thread.ds;
-		env->fcs = regs->cs;
+		env->fos = tsk->thread.ds;
 	}
+	env->fos |= 0xffff0000;
 #else
 	env->fip = fxsave->fip;
 	env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16);
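Note: fpu_init() now checks the CPU's capabilities before touching CR4 and batches both feature bits into one mask, so a CPU without FXSR/SSE never gets OSFXSR/OSXMMEXCPT set and CR4 is read-modified-written once instead of twice. A user-space mock of the accumulate-then-commit pattern (set_in_cr4() and the cpu_has_* flags are stand-ins here, not the real kernel interfaces):

    #include <stdio.h>

    #define X86_CR4_OSFXSR     (1UL << 9)   /* CR4 bit positions per the SDM */
    #define X86_CR4_OSXMMEXCPT (1UL << 10)

    static unsigned long cr4;               /* stand-in for the real register */

    static void set_in_cr4(unsigned long mask)
    {
        cr4 |= mask;                        /* real code: read_cr4()/write_cr4() */
    }

    int main(void)
    {
        int cpu_has_fxsr = 1, cpu_has_xmm = 1;  /* pretend CPUID said yes */
        unsigned long cr4_mask = 0;

        if (cpu_has_fxsr)
            cr4_mask |= X86_CR4_OSFXSR;
        if (cpu_has_xmm)
            cr4_mask |= X86_CR4_OSXMMEXCPT;
        if (cr4_mask)
            set_in_cr4(cr4_mask);           /* one CR4 update, not two */

        printf("cr4 = %#lx\n", cr4);
        return 0;
    }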
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 3d9ea531ddd1..b3d7a3a04f38 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -424,7 +424,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	load_TLS(next, cpu);
 
 	/* Must be after DS reload */
-	unlazy_fpu(prev_p);
+	__unlazy_fpu(prev_p);
 
 	/* Make sure cpu is ready for new context */
 	if (preload_fpu)
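Note: __switch_to() runs with preemption already disabled, so the bare __unlazy_fpu() suffices; the unlazy_fpu() wrapper (which, per the i387.h change above, is now the only definition and always brackets the call with preempt_disable()/preempt_enable()) would be redundant here. A toy illustration of that wrapper-vs-raw-helper pattern -- every function below is a mock:

    #include <stdio.h>

    static int preempt_count;

    static void preempt_disable(void) { preempt_count++; }
    static void preempt_enable(void)  { preempt_count--; }

    static void __unlazy_fpu(void)       /* must run non-preemptibly */
    {
        printf("saving FPU state, preempt_count=%d\n", preempt_count);
    }

    static void unlazy_fpu(void)         /* safe wrapper for ordinary callers */
    {
        preempt_disable();
        __unlazy_fpu();
        preempt_enable();
    }

    int main(void)
    {
        unlazy_fpu();                    /* normal context */

        preempt_disable();               /* context switch: already atomic, */
        __unlazy_fpu();                  /* so call the raw helper directly */
        preempt_enable();
        return 0;
    }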
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 60788dee0f8a..d43968503dd2 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -776,21 +776,10 @@ asmlinkage void math_state_restore(void)
 }
 EXPORT_SYMBOL_GPL(math_state_restore);
 
-#ifndef CONFIG_MATH_EMULATION
-void math_emulate(struct math_emu_info *info)
-{
-	printk(KERN_EMERG
-		"math-emulation not enabled and no coprocessor found.\n");
-	printk(KERN_EMERG "killing %s.\n", current->comm);
-	force_sig(SIGFPE, current);
-	schedule();
-}
-#endif /* CONFIG_MATH_EMULATION */
-
 dotraplinkage void __kprobes
 do_device_not_available(struct pt_regs *regs, long error_code)
 {
-#ifdef CONFIG_X86_32
+#ifdef CONFIG_MATH_EMULATION
 	if (read_cr0() & X86_CR0_EM) {
 		struct math_emu_info info = { };
 
@@ -798,12 +787,12 @@ do_device_not_available(struct pt_regs *regs, long error_code)
 
 		info.regs = regs;
 		math_emulate(&info);
-	} else {
-		math_state_restore(); /* interrupts still off */
-		conditional_sti(regs);
+		return;
 	}
-#else
-	math_state_restore();
+#endif
+	math_state_restore(); /* interrupts still off */
+#ifdef CONFIG_X86_32
+	conditional_sti(regs);
 #endif
 }
 
@@ -881,18 +870,6 @@ void __init trap_init(void)
 #endif
 
 #ifdef CONFIG_X86_32
-	if (cpu_has_fxsr) {
-		printk(KERN_INFO "Enabling fast FPU save and restore... ");
-		set_in_cr4(X86_CR4_OSFXSR);
-		printk("done.\n");
-	}
-	if (cpu_has_xmm) {
-		printk(KERN_INFO
-			"Enabling unmasked SIMD FPU exception support... ");
-		set_in_cr4(X86_CR4_OSXMMEXCPT);
-		printk("done.\n");
-	}
-
 	set_system_trap_gate(SYSCALL_VECTOR, &system_call);
 	set_bit(SYSCALL_VECTOR, used_vectors);
 #endif
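Note: do_device_not_available() is now keyed on CONFIG_MATH_EMULATION rather than CONFIG_X86_32: a kernel with the emulator built in checks CR0.EM and hands the trap to math_emulate(); everyone else falls through to math_state_restore(). A compilable user-space mock of the reworked control flow (CR0 and all helpers are faked; dropping either #define exercises the other paths):

    #include <stdio.h>

    #define CONFIG_MATH_EMULATION
    #define CONFIG_X86_32
    #define X86_CR0_EM (1UL << 2)        /* "emulate coprocessor" flag */

    static unsigned long read_cr0(void)  { return X86_CR0_EM; } /* pretend EM set */
    static void math_emulate(void)       { puts("emulating FPU instruction"); }
    static void math_state_restore(void) { puts("restoring lazy FPU state"); }
    static void conditional_sti(void)    { puts("re-enabling interrupts"); }

    static void do_device_not_available(void)
    {
    #ifdef CONFIG_MATH_EMULATION
        if (read_cr0() & X86_CR0_EM) {
            math_emulate();
            return;                      /* the new early return */
        }
    #endif
        math_state_restore();            /* interrupts still off */
    #ifdef CONFIG_X86_32
        conditional_sti();
    #endif
    }

    int main(void)
    {
        do_device_not_available();
        return 0;
    }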
