diff options
Diffstat (limited to 'arch/x86/kernel/xsave.c')
-rw-r--r-- | arch/x86/kernel/xsave.c | 517 |
1 file changed, 336 insertions, 181 deletions
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c index 3d3e20709119..ada87a329edc 100644 --- a/arch/x86/kernel/xsave.c +++ b/arch/x86/kernel/xsave.c | |||
@@ -10,9 +10,7 @@ | |||
10 | #include <linux/compat.h> | 10 | #include <linux/compat.h> |
11 | #include <asm/i387.h> | 11 | #include <asm/i387.h> |
12 | #include <asm/fpu-internal.h> | 12 | #include <asm/fpu-internal.h> |
13 | #ifdef CONFIG_IA32_EMULATION | 13 | #include <asm/sigframe.h> |
14 | #include <asm/sigcontext32.h> | ||
15 | #endif | ||
16 | #include <asm/xcr.h> | 14 | #include <asm/xcr.h> |
17 | 15 | ||
18 | /* | 16 | /* |
@@ -23,13 +21,9 @@ u64 pcntxt_mask; | |||
23 | /* | 21 | /* |
24 | * Represents init state for the supported extended state. | 22 | * Represents init state for the supported extended state. |
25 | */ | 23 | */ |
26 | static struct xsave_struct *init_xstate_buf; | 24 | struct xsave_struct *init_xstate_buf; |
27 | |||
28 | struct _fpx_sw_bytes fx_sw_reserved; | ||
29 | #ifdef CONFIG_IA32_EMULATION | ||
30 | struct _fpx_sw_bytes fx_sw_reserved_ia32; | ||
31 | #endif | ||
32 | 25 | ||
26 | static struct _fpx_sw_bytes fx_sw_reserved, fx_sw_reserved_ia32; | ||
33 | static unsigned int *xstate_offsets, *xstate_sizes, xstate_features; | 27 | static unsigned int *xstate_offsets, *xstate_sizes, xstate_features; |
34 | 28 | ||
35 | /* | 29 | /* |
@@ -44,9 +38,9 @@ static unsigned int *xstate_offsets, *xstate_sizes, xstate_features; | |||
44 | */ | 38 | */ |
45 | void __sanitize_i387_state(struct task_struct *tsk) | 39 | void __sanitize_i387_state(struct task_struct *tsk) |
46 | { | 40 | { |
47 | u64 xstate_bv; | ||
48 | int feature_bit = 0x2; | ||
49 | struct i387_fxsave_struct *fx = &tsk->thread.fpu.state->fxsave; | 41 | struct i387_fxsave_struct *fx = &tsk->thread.fpu.state->fxsave; |
42 | int feature_bit = 0x2; | ||
43 | u64 xstate_bv; | ||
50 | 44 | ||
51 | if (!fx) | 45 | if (!fx) |
52 | return; | 46 | return; |
@@ -104,213 +98,326 @@ void __sanitize_i387_state(struct task_struct *tsk) | |||
104 | * Check for the presence of extended state information in the | 98 | * Check for the presence of extended state information in the |
105 | * user fpstate pointer in the sigcontext. | 99 | * user fpstate pointer in the sigcontext. |
106 | */ | 100 | */ |
107 | int check_for_xstate(struct i387_fxsave_struct __user *buf, | 101 | static inline int check_for_xstate(struct i387_fxsave_struct __user *buf, |
108 | void __user *fpstate, | 102 | void __user *fpstate, |
109 | struct _fpx_sw_bytes *fx_sw_user) | 103 | struct _fpx_sw_bytes *fx_sw) |
110 | { | 104 | { |
111 | int min_xstate_size = sizeof(struct i387_fxsave_struct) + | 105 | int min_xstate_size = sizeof(struct i387_fxsave_struct) + |
112 | sizeof(struct xsave_hdr_struct); | 106 | sizeof(struct xsave_hdr_struct); |
113 | unsigned int magic2; | 107 | unsigned int magic2; |
114 | int err; | ||
115 | 108 | ||
116 | err = __copy_from_user(fx_sw_user, &buf->sw_reserved[0], | 109 | if (__copy_from_user(fx_sw, &buf->sw_reserved[0], sizeof(*fx_sw))) |
117 | sizeof(struct _fpx_sw_bytes)); | 110 | return -1; |
118 | if (err) | ||
119 | return -EFAULT; | ||
120 | 111 | ||
121 | /* | 112 | /* Check for the first magic field and other error scenarios. */ |
122 | * First Magic check failed. | 113 | if (fx_sw->magic1 != FP_XSTATE_MAGIC1 || |
123 | */ | 114 | fx_sw->xstate_size < min_xstate_size || |
124 | if (fx_sw_user->magic1 != FP_XSTATE_MAGIC1) | 115 | fx_sw->xstate_size > xstate_size || |
125 | return -EINVAL; | 116 | fx_sw->xstate_size > fx_sw->extended_size) |
117 | return -1; | ||
126 | 118 | ||
127 | /* | 119 | /* |
128 | * Check for error scenarios. | ||
129 | */ | ||
130 | if (fx_sw_user->xstate_size < min_xstate_size || | ||
131 | fx_sw_user->xstate_size > xstate_size || | ||
132 | fx_sw_user->xstate_size > fx_sw_user->extended_size) | ||
133 | return -EINVAL; | ||
134 | |||
135 | err = __get_user(magic2, (__u32 *) (((void *)fpstate) + | ||
136 | fx_sw_user->extended_size - | ||
137 | FP_XSTATE_MAGIC2_SIZE)); | ||
138 | if (err) | ||
139 | return err; | ||
140 | /* | ||
141 | * Check for the presence of second magic word at the end of memory | 120 | * Check for the presence of second magic word at the end of memory |
142 | * layout. This detects the case where the user just copied the legacy | 121 | * layout. This detects the case where the user just copied the legacy |
143 | * fpstate layout without copying the extended state information | 122 | * fpstate layout without copying the extended state information |
144 | * in the memory layout. | 123 | * in the memory layout. |
145 | */ | 124 | */ |
146 | if (magic2 != FP_XSTATE_MAGIC2) | 125 | if (__get_user(magic2, (__u32 __user *)(fpstate + fx_sw->xstate_size)) |
147 | return -EFAULT; | 126 | || magic2 != FP_XSTATE_MAGIC2) |
127 | return -1; | ||
148 | 128 | ||
149 | return 0; | 129 | return 0; |
150 | } | 130 | } |
151 | 131 | ||
152 | #ifdef CONFIG_X86_64 | ||
153 | /* | 132 | /* |
154 | * Signal frame handlers. | 133 | * Signal frame handlers. |
155 | */ | 134 | */ |
156 | 135 | static inline int save_fsave_header(struct task_struct *tsk, void __user *buf) | |
157 | int save_i387_xstate(void __user *buf) | ||
158 | { | 136 | { |
159 | struct task_struct *tsk = current; | 137 | if (use_fxsr()) { |
160 | int err = 0; | 138 | struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave; |
161 | 139 | struct user_i387_ia32_struct env; | |
162 | if (!access_ok(VERIFY_WRITE, buf, sig_xstate_size)) | 140 | struct _fpstate_ia32 __user *fp = buf; |
163 | return -EACCES; | ||
164 | 141 | ||
165 | BUG_ON(sig_xstate_size < xstate_size); | 142 | convert_from_fxsr(&env, tsk); |
166 | 143 | ||
167 | if ((unsigned long)buf % 64) | 144 | if (__copy_to_user(buf, &env, sizeof(env)) || |
168 | pr_err("%s: bad fpstate %p\n", __func__, buf); | 145 | __put_user(xsave->i387.swd, &fp->status) || |
169 | 146 | __put_user(X86_FXSR_MAGIC, &fp->magic)) | |
170 | if (!used_math()) | 147 | return -1; |
171 | return 0; | ||
172 | |||
173 | if (user_has_fpu()) { | ||
174 | if (use_xsave()) | ||
175 | err = xsave_user(buf); | ||
176 | else | ||
177 | err = fxsave_user(buf); | ||
178 | |||
179 | if (err) | ||
180 | return err; | ||
181 | user_fpu_end(); | ||
182 | } else { | 148 | } else { |
183 | sanitize_i387_state(tsk); | 149 | struct i387_fsave_struct __user *fp = buf; |
184 | if (__copy_to_user(buf, &tsk->thread.fpu.state->fxsave, | 150 | u32 swd; |
185 | xstate_size)) | 151 | if (__get_user(swd, &fp->swd) || __put_user(swd, &fp->status)) |
186 | return -1; | 152 | return -1; |
187 | } | 153 | } |
188 | 154 | ||
189 | clear_used_math(); /* trigger finit */ | 155 | return 0; |
156 | } | ||
190 | 157 | ||
191 | if (use_xsave()) { | 158 | static inline int save_xstate_epilog(void __user *buf, int ia32_frame) |
192 | struct _fpstate __user *fx = buf; | 159 | { |
193 | struct _xstate __user *x = buf; | 160 | struct xsave_struct __user *x = buf; |
194 | u64 xstate_bv; | 161 | struct _fpx_sw_bytes *sw_bytes; |
162 | u32 xstate_bv; | ||
163 | int err; | ||
195 | 164 | ||
196 | err = __copy_to_user(&fx->sw_reserved, &fx_sw_reserved, | 165 | /* Setup the bytes not touched by the [f]xsave and reserved for SW. */ |
197 | sizeof(struct _fpx_sw_bytes)); | 166 | sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved; |
167 | err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes)); | ||
198 | 168 | ||
199 | err |= __put_user(FP_XSTATE_MAGIC2, | 169 | if (!use_xsave()) |
200 | (__u32 __user *) (buf + sig_xstate_size | 170 | return err; |
201 | - FP_XSTATE_MAGIC2_SIZE)); | ||
202 | 171 | ||
203 | /* | 172 | err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size)); |
204 | * Read the xstate_bv which we copied (directly from the cpu or | ||
205 | * from the state in task struct) to the user buffers and | ||
206 | * set the FP/SSE bits. | ||
207 | */ | ||
208 | err |= __get_user(xstate_bv, &x->xstate_hdr.xstate_bv); | ||
209 | 173 | ||
210 | /* | 174 | /* |
211 | * For legacy compatible, we always set FP/SSE bits in the bit | 175 | * Read the xstate_bv which we copied (directly from the cpu or |
212 | * vector while saving the state to the user context. This will | 176 | * from the state in task struct) to the user buffers. |
213 | * enable us capturing any changes(during sigreturn) to | 177 | */ |
214 | * the FP/SSE bits by the legacy applications which don't touch | 178 | err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv); |
215 | * xstate_bv in the xsave header. | ||
216 | * | ||
217 | * xsave aware apps can change the xstate_bv in the xsave | ||
218 | * header as well as change any contents in the memory layout. | ||
219 | * xrestore as part of sigreturn will capture all the changes. | ||
220 | */ | ||
221 | xstate_bv |= XSTATE_FPSSE; | ||
222 | 179 | ||
223 | err |= __put_user(xstate_bv, &x->xstate_hdr.xstate_bv); | 180 | /* |
181 | * For legacy compatible, we always set FP/SSE bits in the bit | ||
182 | * vector while saving the state to the user context. This will | ||
183 | * enable us capturing any changes(during sigreturn) to | ||
184 | * the FP/SSE bits by the legacy applications which don't touch | ||
185 | * xstate_bv in the xsave header. | ||
186 | * | ||
187 | * xsave aware apps can change the xstate_bv in the xsave | ||
188 | * header as well as change any contents in the memory layout. | ||
189 | * xrestore as part of sigreturn will capture all the changes. | ||
190 | */ | ||
191 | xstate_bv |= XSTATE_FPSSE; | ||
224 | 192 | ||
225 | if (err) | 193 | err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv); |
226 | return err; | ||
227 | } | ||
228 | 194 | ||
229 | return 1; | 195 | return err; |
196 | } | ||
197 | |||
198 | static inline int save_user_xstate(struct xsave_struct __user *buf) | ||
199 | { | ||
200 | int err; | ||
201 | |||
202 | if (use_xsave()) | ||
203 | err = xsave_user(buf); | ||
204 | else if (use_fxsr()) | ||
205 | err = fxsave_user((struct i387_fxsave_struct __user *) buf); | ||
206 | else | ||
207 | err = fsave_user((struct i387_fsave_struct __user *) buf); | ||
208 | |||
209 | if (unlikely(err) && __clear_user(buf, xstate_size)) | ||
210 | err = -EFAULT; | ||
211 | return err; | ||
230 | } | 212 | } |
231 | 213 | ||
232 | /* | 214 | /* |
233 | * Restore the extended state if present. Otherwise, restore the FP/SSE | 215 | * Save the fpu, extended register state to the user signal frame. |
234 | * state. | 216 | * |
217 | * 'buf_fx' is the 64-byte aligned pointer at which the [f|fx|x]save | ||
218 | * state is copied. | ||
219 | * 'buf' points to the 'buf_fx' or to the fsave header followed by 'buf_fx'. | ||
220 | * | ||
221 | * buf == buf_fx for 64-bit frames and 32-bit fsave frame. | ||
222 | * buf != buf_fx for 32-bit frames with fxstate. | ||
223 | * | ||
224 | * If the fpu, extended register state is live, save the state directly | ||
225 | * to the user frame pointed by the aligned pointer 'buf_fx'. Otherwise, | ||
226 | * copy the thread's fpu state to the user frame starting at 'buf_fx'. | ||
227 | * | ||
228 | * If this is a 32-bit frame with fxstate, put a fsave header before | ||
229 | * the aligned state at 'buf_fx'. | ||
230 | * | ||
231 | * For [f]xsave state, update the SW reserved fields in the [f]xsave frame | ||
232 | * indicating the absence/presence of the extended state to the user. | ||
235 | */ | 233 | */ |
236 | static int restore_user_xstate(void __user *buf) | 234 | int save_xstate_sig(void __user *buf, void __user *buf_fx, int size) |
237 | { | 235 | { |
238 | struct _fpx_sw_bytes fx_sw_user; | 236 | struct xsave_struct *xsave = ¤t->thread.fpu.state->xsave; |
239 | u64 mask; | 237 | struct task_struct *tsk = current; |
240 | int err; | 238 | int ia32_fxstate = (buf != buf_fx); |
241 | 239 | ||
242 | if (((unsigned long)buf % 64) || | 240 | ia32_fxstate &= (config_enabled(CONFIG_X86_32) || |
243 | check_for_xstate(buf, buf, &fx_sw_user)) | 241 | config_enabled(CONFIG_IA32_EMULATION)); |
244 | goto fx_only; | ||
245 | 242 | ||
246 | mask = fx_sw_user.xstate_bv; | 243 | if (!access_ok(VERIFY_WRITE, buf, size)) |
244 | return -EACCES; | ||
247 | 245 | ||
248 | /* | 246 | if (!HAVE_HWFP) |
249 | * restore the state passed by the user. | 247 | return fpregs_soft_get(current, NULL, 0, |
250 | */ | 248 | sizeof(struct user_i387_ia32_struct), NULL, |
251 | err = xrestore_user(buf, mask); | 249 | (struct _fpstate_ia32 __user *) buf) ? -1 : 1; |
252 | if (err) | ||
253 | return err; | ||
254 | 250 | ||
255 | /* | 251 | if (user_has_fpu()) { |
256 | * init the state skipped by the user. | 252 | /* Save the live register state to the user directly. */ |
257 | */ | 253 | if (save_user_xstate(buf_fx)) |
258 | mask = pcntxt_mask & ~mask; | 254 | return -1; |
259 | if (unlikely(mask)) | 255 | /* Update the thread's fxstate to save the fsave header. */ |
260 | xrstor_state(init_xstate_buf, mask); | 256 | if (ia32_fxstate) |
257 | fpu_fxsave(&tsk->thread.fpu); | ||
258 | } else { | ||
259 | sanitize_i387_state(tsk); | ||
260 | if (__copy_to_user(buf_fx, xsave, xstate_size)) | ||
261 | return -1; | ||
262 | } | ||
263 | |||
264 | /* Save the fsave header for the 32-bit frames. */ | ||
265 | if ((ia32_fxstate || !use_fxsr()) && save_fsave_header(tsk, buf)) | ||
266 | return -1; | ||
267 | |||
268 | if (use_fxsr() && save_xstate_epilog(buf_fx, ia32_fxstate)) | ||
269 | return -1; | ||
270 | |||
271 | drop_init_fpu(tsk); /* trigger finit */ | ||
261 | 272 | ||
262 | return 0; | 273 | return 0; |
274 | } | ||
263 | 275 | ||
264 | fx_only: | 276 | static inline void |
265 | /* | 277 | sanitize_restored_xstate(struct task_struct *tsk, |
266 | * couldn't find the extended state information in the | 278 | struct user_i387_ia32_struct *ia32_env, |
267 | * memory layout. Restore just the FP/SSE and init all | 279 | u64 xstate_bv, int fx_only) |
268 | * the other extended state. | 280 | { |
269 | */ | 281 | struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave; |
270 | xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE); | 282 | struct xsave_hdr_struct *xsave_hdr = &xsave->xsave_hdr; |
271 | return fxrstor_checking((__force struct i387_fxsave_struct *)buf); | 283 | |
284 | if (use_xsave()) { | ||
285 | /* These bits must be zero. */ | ||
286 | xsave_hdr->reserved1[0] = xsave_hdr->reserved1[1] = 0; | ||
287 | |||
288 | /* | ||
289 | * Init the state that is not present in the memory | ||
290 | * layout and not enabled by the OS. | ||
291 | */ | ||
292 | if (fx_only) | ||
293 | xsave_hdr->xstate_bv = XSTATE_FPSSE; | ||
294 | else | ||
295 | xsave_hdr->xstate_bv &= (pcntxt_mask & xstate_bv); | ||
296 | } | ||
297 | |||
298 | if (use_fxsr()) { | ||
299 | /* | ||
300 | * mxcsr reserved bits must be masked to zero for security | ||
301 | * reasons. | ||
302 | */ | ||
303 | xsave->i387.mxcsr &= mxcsr_feature_mask; | ||
304 | |||
305 | convert_to_fxsr(tsk, ia32_env); | ||
306 | } | ||
272 | } | 307 | } |
273 | 308 | ||
274 | /* | 309 | /* |
275 | * This restores directly out of user space. Exceptions are handled. | 310 | * Restore the extended state if present. Otherwise, restore the FP/SSE state. |
276 | */ | 311 | */ |
277 | int restore_i387_xstate(void __user *buf) | 312 | static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only) |
278 | { | 313 | { |
314 | if (use_xsave()) { | ||
315 | if ((unsigned long)buf % 64 || fx_only) { | ||
316 | u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE; | ||
317 | xrstor_state(init_xstate_buf, init_bv); | ||
318 | return fxrstor_user(buf); | ||
319 | } else { | ||
320 | u64 init_bv = pcntxt_mask & ~xbv; | ||
321 | if (unlikely(init_bv)) | ||
322 | xrstor_state(init_xstate_buf, init_bv); | ||
323 | return xrestore_user(buf, xbv); | ||
324 | } | ||
325 | } else if (use_fxsr()) { | ||
326 | return fxrstor_user(buf); | ||
327 | } else | ||
328 | return frstor_user(buf); | ||
329 | } | ||
330 | |||
331 | int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size) | ||
332 | { | ||
333 | int ia32_fxstate = (buf != buf_fx); | ||
279 | struct task_struct *tsk = current; | 334 | struct task_struct *tsk = current; |
280 | int err = 0; | 335 | int state_size = xstate_size; |
336 | u64 xstate_bv = 0; | ||
337 | int fx_only = 0; | ||
338 | |||
339 | ia32_fxstate &= (config_enabled(CONFIG_X86_32) || | ||
340 | config_enabled(CONFIG_IA32_EMULATION)); | ||
281 | 341 | ||
282 | if (!buf) { | 342 | if (!buf) { |
283 | if (used_math()) | 343 | drop_init_fpu(tsk); |
284 | goto clear; | ||
285 | return 0; | 344 | return 0; |
286 | } else | 345 | } |
287 | if (!access_ok(VERIFY_READ, buf, sig_xstate_size)) | ||
288 | return -EACCES; | ||
289 | 346 | ||
290 | if (!used_math()) { | 347 | if (!access_ok(VERIFY_READ, buf, size)) |
291 | err = init_fpu(tsk); | 348 | return -EACCES; |
292 | if (err) | 349 | |
293 | return err; | 350 | if (!used_math() && init_fpu(tsk)) |
351 | return -1; | ||
352 | |||
353 | if (!HAVE_HWFP) { | ||
354 | return fpregs_soft_set(current, NULL, | ||
355 | 0, sizeof(struct user_i387_ia32_struct), | ||
356 | NULL, buf) != 0; | ||
294 | } | 357 | } |
295 | 358 | ||
296 | user_fpu_begin(); | 359 | if (use_xsave()) { |
297 | if (use_xsave()) | 360 | struct _fpx_sw_bytes fx_sw_user; |
298 | err = restore_user_xstate(buf); | 361 | if (unlikely(check_for_xstate(buf_fx, buf_fx, &fx_sw_user))) { |
299 | else | 362 | /* |
300 | err = fxrstor_checking((__force struct i387_fxsave_struct *) | 363 | * Couldn't find the extended state information in the |
301 | buf); | 364 | * memory layout. Restore just the FP/SSE and init all |
302 | if (unlikely(err)) { | 365 | * the other extended state. |
366 | */ | ||
367 | state_size = sizeof(struct i387_fxsave_struct); | ||
368 | fx_only = 1; | ||
369 | } else { | ||
370 | state_size = fx_sw_user.xstate_size; | ||
371 | xstate_bv = fx_sw_user.xstate_bv; | ||
372 | } | ||
373 | } | ||
374 | |||
375 | if (ia32_fxstate) { | ||
376 | /* | ||
377 | * For 32-bit frames with fxstate, copy the user state to the | ||
378 | * thread's fpu state, reconstruct fxstate from the fsave | ||
379 | * header. Sanitize the copied state etc. | ||
380 | */ | ||
381 | struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave; | ||
382 | struct user_i387_ia32_struct env; | ||
383 | int err = 0; | ||
384 | |||
385 | /* | ||
386 | * Drop the current fpu which clears used_math(). This ensures | ||
387 | * that any context-switch during the copy of the new state, | ||
388 | * avoids the intermediate state from getting restored/saved. | ||
389 | * Thus avoiding the new restored state from getting corrupted. | ||
390 | * We will be ready to restore/save the state only after | ||
391 | * set_used_math() is again set. | ||
392 | */ | ||
393 | drop_fpu(tsk); | ||
394 | |||
395 | if (__copy_from_user(xsave, buf_fx, state_size) || | ||
396 | __copy_from_user(&env, buf, sizeof(env))) { | ||
397 | err = -1; | ||
398 | } else { | ||
399 | sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only); | ||
400 | set_used_math(); | ||
401 | } | ||
402 | |||
403 | if (use_eager_fpu()) | ||
404 | math_state_restore(); | ||
405 | |||
406 | return err; | ||
407 | } else { | ||
303 | /* | 408 | /* |
304 | * Encountered an error while doing the restore from the | 409 | * For 64-bit frames and 32-bit fsave frames, restore the user |
305 | * user buffer, clear the fpu state. | 410 | * state to the registers directly (with exceptions handled). |
306 | */ | 411 | */ |
307 | clear: | 412 | user_fpu_begin(); |
308 | clear_fpu(tsk); | 413 | if (restore_user_xstate(buf_fx, xstate_bv, fx_only)) { |
309 | clear_used_math(); | 414 | drop_init_fpu(tsk); |
415 | return -1; | ||
416 | } | ||
310 | } | 417 | } |
311 | return err; | 418 | |
419 | return 0; | ||
312 | } | 420 | } |
313 | #endif | ||
314 | 421 | ||
315 | /* | 422 | /* |
316 | * Prepare the SW reserved portion of the fxsave memory layout, indicating | 423 | * Prepare the SW reserved portion of the fxsave memory layout, indicating |
@@ -321,31 +428,22 @@ clear: | |||
321 | */ | 428 | */ |
322 | static void prepare_fx_sw_frame(void) | 429 | static void prepare_fx_sw_frame(void) |
323 | { | 430 | { |
324 | int size_extended = (xstate_size - sizeof(struct i387_fxsave_struct)) + | 431 | int fsave_header_size = sizeof(struct i387_fsave_struct); |
325 | FP_XSTATE_MAGIC2_SIZE; | 432 | int size = xstate_size + FP_XSTATE_MAGIC2_SIZE; |
326 | 433 | ||
327 | sig_xstate_size = sizeof(struct _fpstate) + size_extended; | 434 | if (config_enabled(CONFIG_X86_32)) |
328 | 435 | size += fsave_header_size; | |
329 | #ifdef CONFIG_IA32_EMULATION | ||
330 | sig_xstate_ia32_size = sizeof(struct _fpstate_ia32) + size_extended; | ||
331 | #endif | ||
332 | |||
333 | memset(&fx_sw_reserved, 0, sizeof(fx_sw_reserved)); | ||
334 | 436 | ||
335 | fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1; | 437 | fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1; |
336 | fx_sw_reserved.extended_size = sig_xstate_size; | 438 | fx_sw_reserved.extended_size = size; |
337 | fx_sw_reserved.xstate_bv = pcntxt_mask; | 439 | fx_sw_reserved.xstate_bv = pcntxt_mask; |
338 | fx_sw_reserved.xstate_size = xstate_size; | 440 | fx_sw_reserved.xstate_size = xstate_size; |
339 | #ifdef CONFIG_IA32_EMULATION | ||
340 | memcpy(&fx_sw_reserved_ia32, &fx_sw_reserved, | ||
341 | sizeof(struct _fpx_sw_bytes)); | ||
342 | fx_sw_reserved_ia32.extended_size = sig_xstate_ia32_size; | ||
343 | #endif | ||
344 | } | ||
345 | 441 | ||
346 | #ifdef CONFIG_X86_64 | 442 | if (config_enabled(CONFIG_IA32_EMULATION)) { |
347 | unsigned int sig_xstate_size = sizeof(struct _fpstate); | 443 | fx_sw_reserved_ia32 = fx_sw_reserved; |
348 | #endif | 444 | fx_sw_reserved_ia32.extended_size += fsave_header_size; |
445 | } | ||
446 | } | ||
349 | 447 | ||
350 | /* | 448 | /* |
351 | * Enable the extended processor state save/restore feature | 449 | * Enable the extended processor state save/restore feature |
@@ -384,19 +482,21 @@ static void __init setup_xstate_features(void) | |||
384 | /* | 482 | /* |
385 | * setup the xstate image representing the init state | 483 | * setup the xstate image representing the init state |
386 | */ | 484 | */ |
387 | static void __init setup_xstate_init(void) | 485 | static void __init setup_init_fpu_buf(void) |
388 | { | 486 | { |
389 | setup_xstate_features(); | ||
390 | |||
391 | /* | 487 | /* |
392 | * Setup init_xstate_buf to represent the init state of | 488 | * Setup init_xstate_buf to represent the init state of |
393 | * all the features managed by the xsave | 489 | * all the features managed by the xsave |
394 | */ | 490 | */ |
395 | init_xstate_buf = alloc_bootmem_align(xstate_size, | 491 | init_xstate_buf = alloc_bootmem_align(xstate_size, |
396 | __alignof__(struct xsave_struct)); | 492 | __alignof__(struct xsave_struct)); |
397 | init_xstate_buf->i387.mxcsr = MXCSR_DEFAULT; | 493 | fx_finit(&init_xstate_buf->i387); |
494 | |||
495 | if (!cpu_has_xsave) | ||
496 | return; | ||
497 | |||
498 | setup_xstate_features(); | ||
398 | 499 | ||
399 | clts(); | ||
400 | /* | 500 | /* |
401 | * Init all the features state with header_bv being 0x0 | 501 | * Init all the features state with header_bv being 0x0 |
402 | */ | 502 | */ |
@@ -406,9 +506,21 @@ static void __init setup_xstate_init(void) | |||
406 | * of any feature which is not represented by all zero's. | 506 | * of any feature which is not represented by all zero's. |
407 | */ | 507 | */ |
408 | xsave_state(init_xstate_buf, -1); | 508 | xsave_state(init_xstate_buf, -1); |
409 | stts(); | ||
410 | } | 509 | } |
411 | 510 | ||
511 | static enum { AUTO, ENABLE, DISABLE } eagerfpu = AUTO; | ||
512 | static int __init eager_fpu_setup(char *s) | ||
513 | { | ||
514 | if (!strcmp(s, "on")) | ||
515 | eagerfpu = ENABLE; | ||
516 | else if (!strcmp(s, "off")) | ||
517 | eagerfpu = DISABLE; | ||
518 | else if (!strcmp(s, "auto")) | ||
519 | eagerfpu = AUTO; | ||
520 | return 1; | ||
521 | } | ||
522 | __setup("eagerfpu=", eager_fpu_setup); | ||
523 | |||
412 | /* | 524 | /* |
413 | * Enable and initialize the xsave feature. | 525 | * Enable and initialize the xsave feature. |
414 | */ | 526 | */ |
@@ -445,8 +557,11 @@ static void __init xstate_enable_boot_cpu(void) | |||
445 | 557 | ||
446 | update_regset_xstate_info(xstate_size, pcntxt_mask); | 558 | update_regset_xstate_info(xstate_size, pcntxt_mask); |
447 | prepare_fx_sw_frame(); | 559 | prepare_fx_sw_frame(); |
560 | setup_init_fpu_buf(); | ||
448 | 561 | ||
449 | setup_xstate_init(); | 562 | /* Auto enable eagerfpu for xsaveopt */ |
563 | if (cpu_has_xsaveopt && eagerfpu != DISABLE) | ||
564 | eagerfpu = ENABLE; | ||
450 | 565 | ||
451 | pr_info("enabled xstate_bv 0x%llx, cntxt size 0x%x\n", | 566 | pr_info("enabled xstate_bv 0x%llx, cntxt size 0x%x\n", |
452 | pcntxt_mask, xstate_size); | 567 | pcntxt_mask, xstate_size); |
@@ -471,3 +586,43 @@ void __cpuinit xsave_init(void) | |||
471 | next_func = xstate_enable; | 586 | next_func = xstate_enable; |
472 | this_func(); | 587 | this_func(); |
473 | } | 588 | } |
589 | |||
590 | static inline void __init eager_fpu_init_bp(void) | ||
591 | { | ||
592 | current->thread.fpu.state = | ||
593 | alloc_bootmem_align(xstate_size, __alignof__(struct xsave_struct)); | ||
594 | if (!init_xstate_buf) | ||
595 | setup_init_fpu_buf(); | ||
596 | } | ||
597 | |||
598 | void __cpuinit eager_fpu_init(void) | ||
599 | { | ||
600 | static __refdata void (*boot_func)(void) = eager_fpu_init_bp; | ||
601 | |||
602 | clear_used_math(); | ||
603 | current_thread_info()->status = 0; | ||
604 | |||
605 | if (eagerfpu == ENABLE) | ||
606 | setup_force_cpu_cap(X86_FEATURE_EAGER_FPU); | ||
607 | |||
608 | if (!cpu_has_eager_fpu) { | ||
609 | stts(); | ||
610 | return; | ||
611 | } | ||
612 | |||
613 | if (boot_func) { | ||
614 | boot_func(); | ||
615 | boot_func = NULL; | ||
616 | } | ||
617 | |||
618 | /* | ||
619 | * This is same as math_state_restore(). But use_xsave() is | ||
620 | * not yet patched to use math_state_restore(). | ||
621 | */ | ||
622 | init_fpu(current); | ||
623 | __thread_fpu_begin(current); | ||
624 | if (cpu_has_xsave) | ||
625 | xrstor_state(init_xstate_buf, -1); | ||
626 | else | ||
627 | fxrstor_checking(&init_xstate_buf->i387); | ||
628 | } | ||